From noreply at buildbot.pypy.org Thu Mar 1 01:22:09 2012 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 1 Mar 2012 01:22:09 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: work some more on slides Message-ID: <20120301002209.24D788204C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r4110:b71362d9af8d Date: 2012-02-29 16:21 -0800 http://bitbucket.org/pypy/extradoc/changeset/b71362d9af8d/ Log: work some more on slides diff --git a/talk/pycon2012/tutorial/slides.rst b/talk/pycon2012/tutorial/slides.rst --- a/talk/pycon2012/tutorial/slides.rst +++ b/talk/pycon2012/tutorial/slides.rst @@ -10,7 +10,11 @@ |pause| -If it's not faster, you're wasting ime. +If it's not faster, you're wasting time. + +|pause| + +But if you iterate fast, you can afford wasting time Third rule of optimization? =========================== @@ -32,6 +36,8 @@ * Avoiding function calls +* Don't write Python + Forget these ============ @@ -39,6 +45,8 @@ * Which we're going to learn about now +* You cannot speak about operations in isolation (more later) + Why PyPy? ========= @@ -89,7 +97,7 @@ * **simple** python -* if you can't understand it, JIT won't either +* if I can't understand it, JIT won't either How PyPy runs your program, involved parts ========================================== @@ -111,7 +119,7 @@ * .... 
goes on and on -* XXX example 1 +* example Tracing JIT =========== @@ -119,11 +127,11 @@ * once the loop gets hot, it's starting tracing (1039 runs, or 1619 function calls) -* generating operations following how the interpreter would execute them +* generating operations following how the interpreter executes them * optimizing them -* compiling to assembler (x86 only for now) +* compiling to assembler (x86, ppc or arm) PyPy's specific features ======================== @@ -134,30 +142,66 @@ * Decent tools for inspecting the generated code -XXXXXXXXXXXXXXXXXXXXXXXXXXXX +Performance characteristics - runtime +===================================== +* Runtime the same or a bit slower as CPython -* Sweetspot? +* Examples of runtime: - * CPython's sweetspot: stuff written in C + * ``list.sort`` - * PyPy's sweetspot: lots of stuff written in Python + * ``long + long`` -* http://speed.pypy.org + * ``set & set`` -* How do you hit the sweetspot? + * ``unicode.join`` - * Be in this room for the next 3 hours. + * ... -Memory -====== +Performance characteristics - JIT +================================= -* PyPy memory usage is difficult to estimate. -* Very program dependent. -* Learn to predict! +* Important notion - don't consider operations in separation -Sandbox -======= +* Always working as a loop or as a function -* We're not going to talk about it here. -* Run untrusted code. 
+* Heuristics to what we believe is common python + +* Often much faster than CPython once warm + +Heuristics +========== + +* What to specialize on (assuming stuff is constant) + +* Data structures + +* Relative cost of operations + +Heuristic example - dicts vs objects +==================================== + +* Dicts - an unknown set of keys, potentially large + +* Objects - a relatively stable, constant set of keys + (but not enforced) + +* Performance example + +Specialized lists +================= + +* XXX Example of how much speedup you get out of not mixing + +Itertools abuse +=============== + +XXX + +Obscure stuff +============= + +* Frame access is slow + +* List comprehension vs generator expression From noreply at buildbot.pypy.org Thu Mar 1 02:00:36 2012 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 1 Mar 2012 02:00:36 +0100 (CET) Subject: [pypy-commit] jitviewer default: a bit more robust against IOErrors Message-ID: <20120301010036.BFF7D8204C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r190:0af571b42677 Date: 2012-02-29 16:55 -0800 http://bitbucket.org/pypy/jitviewer/changeset/0af571b42677/ Log: a bit more robust against IOErrors diff --git a/_jitviewer/app.py b/_jitviewer/app.py --- a/_jitviewer/app.py +++ b/_jitviewer/app.py @@ -150,19 +150,16 @@ source = CodeReprNoFile(loop) else: startline, endline = loop.linerange - code = self.storage.load_code(loop.filename)[(loop.startlineno, - loop.name)] - if code.co_name == '' and code.co_firstlineno == 1: - try: + try: + code = self.storage.load_code(loop.filename)[(loop.startlineno, + loop.name)] + if code.co_name == '' and code.co_firstlineno == 1: with open(code.co_filename) as f: source = CodeRepr(f.read(), code, loop) - except (IOError, OSError): - source = CodeReprNoFile(loop) - else: - #try: - source = CodeRepr(inspect.getsource(code), code, loop) - #except: - # source = CodeReprNoFile(loop) + else: + source = CodeRepr(inspect.getsource(code), code, loop) + 
except (IOError, OSError): + source = CodeReprNoFile(loop) d = {'html': flask.render_template('loop.html', source=source, current_loop=name, From noreply at buildbot.pypy.org Thu Mar 1 02:00:37 2012 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 1 Mar 2012 02:00:37 +0100 (CET) Subject: [pypy-commit] jitviewer default: make bridges more prominent Message-ID: <20120301010037.D1A798204C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r191:bb4618bf643e Date: 2012-02-29 17:00 -0800 http://bitbucket.org/pypy/jitviewer/changeset/bb4618bf643e/ Log: make bridges more prominent diff --git a/_jitviewer/static/style.css b/_jitviewer/static/style.css --- a/_jitviewer/static/style.css +++ b/_jitviewer/static/style.css @@ -224,6 +224,10 @@ display: none; } +.bridgelink { + font-weight: bold; +} + /* End of Formatting -----------------------------------------*/ diff --git a/_jitviewer/templates/loop.html b/_jitviewer/templates/loop.html --- a/_jitviewer/templates/loop.html +++ b/_jitviewer/templates/loop.html @@ -13,7 +13,7 @@ {% for op in chunk.operations %} {% if op.name != "debug_merge_point" %} {% if op.bridge %} - {{op.html_repr()}} show bridge  (run {{op.count}} times)
+ {{op.html_repr()}} show bridge  (run {{op.count}} times)
{% if op.asm %}

{{op.asm}}

{% endif %} From noreply at buildbot.pypy.org Thu Mar 1 08:51:57 2012 From: noreply at buildbot.pypy.org (RonnyPfannschmidt) Date: Thu, 1 Mar 2012 08:51:57 +0100 (CET) Subject: [pypy-commit] pypy pytest: merge default Message-ID: <20120301075157.515EC8204C@wyvern.cs.uni-duesseldorf.de> Author: Ronny Pfannschmidt Branch: pytest Changeset: r53035:e6568e325b8e Date: 2012-03-01 08:51 +0100 http://bitbucket.org/pypy/pypy/changeset/e6568e325b8e/ Log: merge default diff --git a/lib-python/modified-2.7/ctypes/test/test_arrays.py b/lib-python/modified-2.7/ctypes/test/test_arrays.py --- a/lib-python/modified-2.7/ctypes/test/test_arrays.py +++ b/lib-python/modified-2.7/ctypes/test/test_arrays.py @@ -1,12 +1,23 @@ import unittest from ctypes import * +from test.test_support import impl_detail formats = "bBhHiIlLqQfd" +# c_longdouble commented out for PyPy, look at the commend in test_longdouble formats = c_byte, c_ubyte, c_short, c_ushort, c_int, c_uint, \ - c_long, c_ulonglong, c_float, c_double, c_longdouble + c_long, c_ulonglong, c_float, c_double #, c_longdouble class ArrayTestCase(unittest.TestCase): + + @impl_detail('long double not supported by PyPy', pypy=False) + def test_longdouble(self): + """ + This test is empty. It's just here to remind that we commented out + c_longdouble in "formats". If pypy will ever supports c_longdouble, we + should kill this test and uncomment c_longdouble inside formats. + """ + def test_simple(self): # create classes holding simple numeric types, and check # various properties. 
diff --git a/lib_pypy/_ctypes/array.py b/lib_pypy/_ctypes/array.py --- a/lib_pypy/_ctypes/array.py +++ b/lib_pypy/_ctypes/array.py @@ -1,9 +1,9 @@ - +import _ffi import _rawffi from _ctypes.basics import _CData, cdata_from_address, _CDataMeta, sizeof from _ctypes.basics import keepalive_key, store_reference, ensure_objects -from _ctypes.basics import CArgObject +from _ctypes.basics import CArgObject, as_ffi_pointer class ArrayMeta(_CDataMeta): def __new__(self, name, cls, typedict): @@ -211,6 +211,9 @@ def _to_ffi_param(self): return self._get_buffer_value() + def _as_ffi_pointer_(self, ffitype): + return as_ffi_pointer(self, ffitype) + ARRAY_CACHE = {} def create_array_type(base, length): @@ -228,5 +231,6 @@ _type_ = base ) cls = ArrayMeta(name, (Array,), tpdict) + cls._ffiargtype = _ffi.types.Pointer(base.get_ffi_argtype()) ARRAY_CACHE[key] = cls return cls diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -230,5 +230,16 @@ } +# called from primitive.py, pointer.py, array.py +def as_ffi_pointer(value, ffitype): + my_ffitype = type(value).get_ffi_argtype() + # for now, we always allow types.pointer, else a lot of tests + # break. 
We need to rethink how pointers are represented, though + if my_ffitype is not ffitype and ffitype is not _ffi.types.void_p: + raise ArgumentError("expected %s instance, got %s" % (type(value), + ffitype)) + return value._get_buffer_value() + + # used by "byref" from _ctypes.pointer import pointer diff --git a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py --- a/lib_pypy/_ctypes/pointer.py +++ b/lib_pypy/_ctypes/pointer.py @@ -3,7 +3,7 @@ import _ffi from _ctypes.basics import _CData, _CDataMeta, cdata_from_address, ArgumentError from _ctypes.basics import keepalive_key, store_reference, ensure_objects -from _ctypes.basics import sizeof, byref +from _ctypes.basics import sizeof, byref, as_ffi_pointer from _ctypes.array import Array, array_get_slice_params, array_slice_getitem,\ array_slice_setitem @@ -119,14 +119,6 @@ def _as_ffi_pointer_(self, ffitype): return as_ffi_pointer(self, ffitype) -def as_ffi_pointer(value, ffitype): - my_ffitype = type(value).get_ffi_argtype() - # for now, we always allow types.pointer, else a lot of tests - # break. 
We need to rethink how pointers are represented, though - if my_ffitype is not ffitype and ffitype is not _ffi.types.void_p: - raise ArgumentError("expected %s instance, got %s" % (type(value), - ffitype)) - return value._get_buffer_value() def _cast_addr(obj, _, tp): if not (isinstance(tp, _CDataMeta) and tp._is_pointer_like()): diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -13,7 +13,7 @@ and not p.basename.startswith('test')] essential_modules = dict.fromkeys( - ["exceptions", "_file", "sys", "__builtin__", "posix"] + ["exceptions", "_file", "sys", "__builtin__", "posix", "_warnings"] ) default_modules = essential_modules.copy() diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -313,5 +313,10 @@ implementation detail that shows up because of internal C-level slots that PyPy does not have. +* the ``__dict__`` attribute of new-style classes returns a normal dict, as + opposed to a dict proxy like in CPython. Mutating the dict will change the + type and vice versa. For builtin types, a dictionary will be returned that + cannot be changed (but still looks and behaves like a normal dictionary). + .. 
include:: _ref.txt diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -328,7 +328,7 @@ raise modname = self.str_w(w_modname) mod = self.interpclass_w(w_mod) - if isinstance(mod, Module): + if isinstance(mod, Module) and not mod.startup_called: self.timer.start("startup " + modname) mod.init(self) self.timer.stop("startup " + modname) @@ -1471,8 +1471,8 @@ def warn(self, msg, w_warningcls): self.appexec([self.wrap(msg), w_warningcls], """(msg, warningcls): - import warnings - warnings.warn(msg, warningcls, stacklevel=2) + import _warnings + _warnings.warn(msg, warningcls, stacklevel=2) """) def resolve_target(self, w_obj): diff --git a/pypy/interpreter/pyparser/parsestring.py b/pypy/interpreter/pyparser/parsestring.py --- a/pypy/interpreter/pyparser/parsestring.py +++ b/pypy/interpreter/pyparser/parsestring.py @@ -1,5 +1,6 @@ from pypy.interpreter.error import OperationError from pypy.interpreter import unicodehelper +from pypy.rlib.rstring import StringBuilder def parsestr(space, encoding, s, unicode_literals=False): # compiler.transformer.Transformer.decode_literal depends on what @@ -115,21 +116,23 @@ the string is UTF-8 encoded and should be re-encoded in the specified encoding. """ - lis = [] + builder = StringBuilder(len(s)) ps = 0 end = len(s) - while ps < end: - if s[ps] != '\\': - # note that the C code has a label here. - # the logic is the same. + while 1: + ps2 = ps + while ps < end and s[ps] != '\\': if recode_encoding and ord(s[ps]) & 0x80: w, ps = decode_utf8(space, s, ps, end, recode_encoding) - # Append bytes to output buffer. 
- lis.append(w) + builder.append(w) + ps2 = ps else: - lis.append(s[ps]) ps += 1 - continue + if ps > ps2: + builder.append_slice(s, ps2, ps) + if ps == end: + break + ps += 1 if ps == end: raise_app_valueerror(space, 'Trailing \\ in string') @@ -140,25 +143,25 @@ if ch == '\n': pass elif ch == '\\': - lis.append('\\') + builder.append('\\') elif ch == "'": - lis.append("'") + builder.append("'") elif ch == '"': - lis.append('"') + builder.append('"') elif ch == 'b': - lis.append("\010") + builder.append("\010") elif ch == 'f': - lis.append('\014') # FF + builder.append('\014') # FF elif ch == 't': - lis.append('\t') + builder.append('\t') elif ch == 'n': - lis.append('\n') + builder.append('\n') elif ch == 'r': - lis.append('\r') + builder.append('\r') elif ch == 'v': - lis.append('\013') # VT + builder.append('\013') # VT elif ch == 'a': - lis.append('\007') # BEL, not classic C + builder.append('\007') # BEL, not classic C elif ch in '01234567': # Look for up to two more octal digits span = ps @@ -168,13 +171,13 @@ # emulate a strange wrap-around behavior of CPython: # \400 is the same as \000 because 0400 == 256 num = int(octal, 8) & 0xFF - lis.append(chr(num)) + builder.append(chr(num)) ps = span elif ch == 'x': if ps+2 <= end and isxdigit(s[ps]) and isxdigit(s[ps + 1]): hexa = s[ps : ps + 2] num = int(hexa, 16) - lis.append(chr(num)) + builder.append(chr(num)) ps += 2 else: raise_app_valueerror(space, 'invalid \\x escape') @@ -184,13 +187,13 @@ # this was not an escape, so the backslash # has to be added, and we start over in # non-escape mode. - lis.append('\\') + builder.append('\\') ps -= 1 assert ps >= 0 continue # an arbitry number of unescaped UTF-8 bytes may follow. 
- buf = ''.join(lis) + buf = builder.build() return buf diff --git a/pypy/interpreter/streamutil.py b/pypy/interpreter/streamutil.py new file mode 100644 --- /dev/null +++ b/pypy/interpreter/streamutil.py @@ -0,0 +1,17 @@ +from pypy.rlib.streamio import StreamError +from pypy.interpreter.error import OperationError, wrap_oserror2 + +def wrap_streamerror(space, e, w_filename=None): + if isinstance(e, StreamError): + return OperationError(space.w_ValueError, + space.wrap(e.message)) + elif isinstance(e, OSError): + return wrap_oserror_as_ioerror(space, e, w_filename) + else: + # should not happen: wrap_streamerror() is only called when + # StreamErrors = (OSError, StreamError) are raised + return OperationError(space.w_IOError, space.w_None) + +def wrap_oserror_as_ioerror(space, e, w_filename=None): + return wrap_oserror2(space, e, w_filename, + w_exception_class=space.w_IOError) diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py --- a/pypy/interpreter/test/test_objspace.py +++ b/pypy/interpreter/test/test_objspace.py @@ -322,3 +322,14 @@ space.ALL_BUILTIN_MODULES.pop() del space._builtinmodule_list mods = space.get_builtinmodule_to_install() + + def test_dont_reload_builtin_mods_on_startup(self): + from pypy.tool.option import make_config, make_objspace + config = make_config(None) + space = make_objspace(config) + w_executable = space.wrap('executable') + assert space.str_w(space.getattr(space.sys, w_executable)) == 'py.py' + space.setattr(space.sys, w_executable, space.wrap('foobar')) + assert space.str_w(space.getattr(space.sys, w_executable)) == 'foobar' + space.startup() + assert space.str_w(space.getattr(space.sys, w_executable)) == 'foobar' diff --git a/pypy/interpreter/test/test_zpy.py b/pypy/interpreter/test/test_zpy.py --- a/pypy/interpreter/test/test_zpy.py +++ b/pypy/interpreter/test/test_zpy.py @@ -17,14 +17,14 @@ def test_executable(): """Ensures sys.executable points to the py.py script""" # TODO : watch out 
for spaces/special chars in pypypath - output = run(sys.executable, pypypath, + output = run(sys.executable, pypypath, '-S', "-c", "import sys;print sys.executable") assert output.splitlines()[-1] == pypypath def test_special_names(): """Test the __name__ and __file__ special global names""" cmd = "print __name__; print '__file__' in globals()" - output = run(sys.executable, pypypath, '-c', cmd) + output = run(sys.executable, pypypath, '-S', '-c', cmd) assert output.splitlines()[-2] == '__main__' assert output.splitlines()[-1] == 'False' @@ -33,24 +33,24 @@ tmpfile.write("print __name__; print __file__\n") tmpfile.close() - output = run(sys.executable, pypypath, tmpfilepath) + output = run(sys.executable, pypypath, '-S', tmpfilepath) assert output.splitlines()[-2] == '__main__' assert output.splitlines()[-1] == str(tmpfilepath) def test_argv_command(): """Some tests on argv""" # test 1 : no arguments - output = run(sys.executable, pypypath, + output = run(sys.executable, pypypath, '-S', "-c", "import sys;print sys.argv") assert output.splitlines()[-1] == str(['-c']) # test 2 : some arguments after - output = run(sys.executable, pypypath, + output = run(sys.executable, pypypath, '-S', "-c", "import sys;print sys.argv", "hello") assert output.splitlines()[-1] == str(['-c','hello']) # test 3 : additionnal pypy parameters - output = run(sys.executable, pypypath, + output = run(sys.executable, pypypath, '-S', "-O", "-c", "import sys;print sys.argv", "hello") assert output.splitlines()[-1] == str(['-c','hello']) @@ -65,15 +65,15 @@ tmpfile.close() # test 1 : no arguments - output = run(sys.executable, pypypath, tmpfilepath) + output = run(sys.executable, pypypath, '-S', tmpfilepath) assert output.splitlines()[-1] == str([tmpfilepath]) # test 2 : some arguments after - output = run(sys.executable, pypypath, tmpfilepath, "hello") + output = run(sys.executable, pypypath, '-S', tmpfilepath, "hello") assert output.splitlines()[-1] == str([tmpfilepath,'hello']) # test 3 : 
additionnal pypy parameters - output = run(sys.executable, pypypath, "-O", tmpfilepath, "hello") + output = run(sys.executable, pypypath, '-S', "-O", tmpfilepath, "hello") assert output.splitlines()[-1] == str([tmpfilepath,'hello']) @@ -95,7 +95,7 @@ tmpfile.write(TB_NORMALIZATION_CHK) tmpfile.close() - popen = subprocess.Popen([sys.executable, str(pypypath), tmpfilepath], + popen = subprocess.Popen([sys.executable, str(pypypath), '-S', tmpfilepath], stderr=subprocess.PIPE) _, stderr = popen.communicate() assert stderr.endswith('KeyError: \n') diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -1,7 +1,6 @@ import os from pypy.rlib import rgc from pypy.rlib.objectmodel import we_are_translated, specialize -from pypy.rlib.debug import fatalerror from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rclass, rstr from pypy.rpython.lltypesystem import llgroup @@ -770,11 +769,19 @@ self.generate_function('malloc_unicode', malloc_unicode, [lltype.Signed]) - # Rarely called: allocate a fixed-size amount of bytes, but - # not in the nursery, because it is too big. Implemented like - # malloc_nursery_slowpath() above. - self.generate_function('malloc_fixedsize', malloc_nursery_slowpath, - [lltype.Signed]) + # Never called as far as I can tell, but there for completeness: + # allocate a fixed-size object, but not in the nursery, because + # it is too big. 
+ def malloc_big_fixedsize(size, tid): + if self.DEBUG: + self._random_usage_of_xmm_registers() + type_id = llop.extract_ushort(llgroup.HALFWORD, tid) + check_typeid(type_id) + return llop1.do_malloc_fixedsize_clear(llmemory.GCREF, + type_id, size, + False, False, False) + self.generate_function('malloc_big_fixedsize', malloc_big_fixedsize, + [lltype.Signed] * 2) def _bh_malloc(self, sizedescr): from pypy.rpython.memory.gctypelayout import check_typeid diff --git a/pypy/jit/backend/llsupport/rewrite.py b/pypy/jit/backend/llsupport/rewrite.py --- a/pypy/jit/backend/llsupport/rewrite.py +++ b/pypy/jit/backend/llsupport/rewrite.py @@ -96,8 +96,10 @@ def handle_new_fixedsize(self, descr, op): assert isinstance(descr, SizeDescr) size = descr.size - self.gen_malloc_nursery(size, op.result) - self.gen_initialize_tid(op.result, descr.tid) + if self.gen_malloc_nursery(size, op.result): + self.gen_initialize_tid(op.result, descr.tid) + else: + self.gen_malloc_fixedsize(size, descr.tid, op.result) def handle_new_array(self, arraydescr, op): v_length = op.getarg(0) @@ -112,8 +114,8 @@ pass # total_size is still -1 elif arraydescr.itemsize == 0: total_size = arraydescr.basesize - if 0 <= total_size <= 0xffffff: # up to 16MB, arbitrarily - self.gen_malloc_nursery(total_size, op.result) + if (total_size >= 0 and + self.gen_malloc_nursery(total_size, op.result)): self.gen_initialize_tid(op.result, arraydescr.tid) self.gen_initialize_len(op.result, v_length, arraydescr.lendescr) elif self.gc_ll_descr.kind == 'boehm': @@ -147,13 +149,22 @@ # mark 'v_result' as freshly malloced self.recent_mallocs[v_result] = None - def gen_malloc_fixedsize(self, size, v_result): - """Generate a CALL_MALLOC_GC(malloc_fixedsize_fn, Const(size)). - Note that with the framework GC, this should be called very rarely. + def gen_malloc_fixedsize(self, size, typeid, v_result): + """Generate a CALL_MALLOC_GC(malloc_fixedsize_fn, ...). + Used on Boehm, and on the framework GC for large fixed-size + mallocs. 
(For all I know this latter case never occurs in + practice, but better safe than sorry.) """ - addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_fixedsize') - self._gen_call_malloc_gc([ConstInt(addr), ConstInt(size)], v_result, - self.gc_ll_descr.malloc_fixedsize_descr) + if self.gc_ll_descr.fielddescr_tid is not None: # framework GC + assert (size & (WORD-1)) == 0, "size not aligned?" + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_big_fixedsize') + args = [ConstInt(addr), ConstInt(size), ConstInt(typeid)] + descr = self.gc_ll_descr.malloc_big_fixedsize_descr + else: # Boehm + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_fixedsize') + args = [ConstInt(addr), ConstInt(size)] + descr = self.gc_ll_descr.malloc_fixedsize_descr + self._gen_call_malloc_gc(args, v_result, descr) def gen_boehm_malloc_array(self, arraydescr, v_num_elem, v_result): """Generate a CALL_MALLOC_GC(malloc_array_fn, ...) for Boehm.""" @@ -211,8 +222,7 @@ """ size = self.round_up_for_allocation(size) if not self.gc_ll_descr.can_use_nursery_malloc(size): - self.gen_malloc_fixedsize(size, v_result) - return + return False # op = None if self._op_malloc_nursery is not None: @@ -238,6 +248,7 @@ self._previous_size = size self._v_last_malloced_nursery = v_result self.recent_mallocs[v_result] = None + return True def gen_initialize_tid(self, v_newgcobj, tid): if self.gc_ll_descr.fielddescr_tid is not None: diff --git a/pypy/jit/backend/llsupport/test/test_rewrite.py b/pypy/jit/backend/llsupport/test/test_rewrite.py --- a/pypy/jit/backend/llsupport/test/test_rewrite.py +++ b/pypy/jit/backend/llsupport/test/test_rewrite.py @@ -119,12 +119,19 @@ jump() """, """ [] - p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ - %(adescr.basesize + 10 * adescr.itemsize)d, \ - descr=malloc_fixedsize_descr) - setfield_gc(p0, 10, descr=alendescr) + p0 = call_malloc_gc(ConstClass(malloc_array), \ + %(adescr.basesize)d, \ + 10, \ + %(adescr.itemsize)d, \ + %(adescr.lendescr.offset)d, \ + 
descr=malloc_array_descr) jump() """) +## should ideally be: +## p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ +## %(adescr.basesize + 10 * adescr.itemsize)d, \ +## descr=malloc_fixedsize_descr) +## setfield_gc(p0, 10, descr=alendescr) def test_new_array_variable(self): self.check_rewrite(""" @@ -178,13 +185,20 @@ jump() """, """ [i1] - p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ - %(unicodedescr.basesize + \ - 10 * unicodedescr.itemsize)d, \ - descr=malloc_fixedsize_descr) - setfield_gc(p0, 10, descr=unicodelendescr) + p0 = call_malloc_gc(ConstClass(malloc_array), \ + %(unicodedescr.basesize)d, \ + 10, \ + %(unicodedescr.itemsize)d, \ + %(unicodelendescr.offset)d, \ + descr=malloc_array_descr) jump() """) +## should ideally be: +## p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ +## %(unicodedescr.basesize + \ +## 10 * unicodedescr.itemsize)d, \ +## descr=malloc_fixedsize_descr) +## setfield_gc(p0, 10, descr=unicodelendescr) class TestFramework(RewriteTests): @@ -203,7 +217,7 @@ # class FakeCPU(object): def sizeof(self, STRUCT): - descr = SizeDescrWithVTable(102) + descr = SizeDescrWithVTable(104) descr.tid = 9315 return descr self.cpu = FakeCPU() @@ -368,11 +382,9 @@ jump() """, """ [] - p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ - %(bdescr.basesize + 104)d, \ - descr=malloc_fixedsize_descr) - setfield_gc(p0, 8765, descr=tiddescr) - setfield_gc(p0, 103, descr=blendescr) + p0 = call_malloc_gc(ConstClass(malloc_array), 1, \ + %(bdescr.tid)d, 103, \ + descr=malloc_array_descr) jump() """) @@ -435,9 +447,8 @@ jump() """, """ [p1] - p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 104, \ - descr=malloc_fixedsize_descr) - setfield_gc(p0, 9315, descr=tiddescr) + p0 = call_malloc_gc(ConstClass(malloc_big_fixedsize), 104, 9315, \ + descr=malloc_big_fixedsize_descr) setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) jump() """) diff --git a/pypy/jit/backend/x86/support.py b/pypy/jit/backend/x86/support.py --- 
a/pypy/jit/backend/x86/support.py +++ b/pypy/jit/backend/x86/support.py @@ -1,6 +1,7 @@ import sys from pypy.rpython.lltypesystem import lltype, rffi, llmemory from pypy.translator.tool.cbuild import ExternalCompilationInfo +from pypy.jit.backend.x86.arch import WORD def values_array(TP, size): @@ -37,8 +38,13 @@ if sys.platform == 'win32': ensure_sse2_floats = lambda : None + # XXX check for SSE2 on win32 too else: + if WORD == 4: + extra = ['-DPYPY_X86_CHECK_SSE2'] + else: + extra = [] ensure_sse2_floats = rffi.llexternal_use_eci(ExternalCompilationInfo( compile_extra = ['-msse2', '-mfpmath=sse', - '-DPYPY_CPU_HAS_STANDARD_PRECISION'], + '-DPYPY_CPU_HAS_STANDARD_PRECISION'] + extra, )) diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -184,6 +184,8 @@ self.addrs[1] = self.addrs[0] + 64 self.calls = [] def malloc_slowpath(size): + if self.gcrootmap is not None: # hook + self.gcrootmap.hook_malloc_slowpath() self.calls.append(size) # reset the nursery nadr = rffi.cast(lltype.Signed, self.nursery) @@ -257,3 +259,218 @@ assert gc_ll_descr.addrs[0] == nurs_adr + 24 # this should call slow path once assert gc_ll_descr.calls == [24] + + def test_save_regs_around_malloc(self): + S1 = lltype.GcStruct('S1') + S2 = lltype.GcStruct('S2', ('s0', lltype.Ptr(S1)), + ('s1', lltype.Ptr(S1)), + ('s2', lltype.Ptr(S1)), + ('s3', lltype.Ptr(S1)), + ('s4', lltype.Ptr(S1)), + ('s5', lltype.Ptr(S1)), + ('s6', lltype.Ptr(S1)), + ('s7', lltype.Ptr(S1)), + ('s8', lltype.Ptr(S1)), + ('s9', lltype.Ptr(S1)), + ('s10', lltype.Ptr(S1)), + ('s11', lltype.Ptr(S1)), + ('s12', lltype.Ptr(S1)), + ('s13', lltype.Ptr(S1)), + ('s14', lltype.Ptr(S1)), + ('s15', lltype.Ptr(S1))) + cpu = self.cpu + self.namespace = self.namespace.copy() + for i in range(16): + self.namespace['ds%i' % i] = cpu.fielddescrof(S2, 's%d' % i) + ops = ''' 
+ [p0] + p1 = getfield_gc(p0, descr=ds0) + p2 = getfield_gc(p0, descr=ds1) + p3 = getfield_gc(p0, descr=ds2) + p4 = getfield_gc(p0, descr=ds3) + p5 = getfield_gc(p0, descr=ds4) + p6 = getfield_gc(p0, descr=ds5) + p7 = getfield_gc(p0, descr=ds6) + p8 = getfield_gc(p0, descr=ds7) + p9 = getfield_gc(p0, descr=ds8) + p10 = getfield_gc(p0, descr=ds9) + p11 = getfield_gc(p0, descr=ds10) + p12 = getfield_gc(p0, descr=ds11) + p13 = getfield_gc(p0, descr=ds12) + p14 = getfield_gc(p0, descr=ds13) + p15 = getfield_gc(p0, descr=ds14) + p16 = getfield_gc(p0, descr=ds15) + # + # now all registers are in use + p17 = call_malloc_nursery(40) + p18 = call_malloc_nursery(40) # overflow + # + finish(p1, p2, p3, p4, p5, p6, p7, p8, \ + p9, p10, p11, p12, p13, p14, p15, p16) + ''' + s2 = lltype.malloc(S2) + for i in range(16): + setattr(s2, 's%d' % i, lltype.malloc(S1)) + s2ref = lltype.cast_opaque_ptr(llmemory.GCREF, s2) + # + self.interpret(ops, [s2ref]) + gc_ll_descr = cpu.gc_ll_descr + gc_ll_descr.check_nothing_in_nursery() + assert gc_ll_descr.calls == [40] + # check the returned pointers + for i in range(16): + s1ref = self.cpu.get_latest_value_ref(i) + s1 = lltype.cast_opaque_ptr(lltype.Ptr(S1), s1ref) + assert s1 == getattr(s2, 's%d' % i) + + +class MockShadowStackRootMap(MockGcRootMap): + is_shadow_stack = True + MARKER_FRAME = 88 # this marker follows the frame addr + S1 = lltype.GcStruct('S1') + + def __init__(self): + self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 20, + flavor='raw') + # root_stack_top + self.addrs[0] = rffi.cast(lltype.Signed, self.addrs) + 3*WORD + # random stuff + self.addrs[1] = 123456 + self.addrs[2] = 654321 + self.check_initial_and_final_state() + self.callshapes = {} + self.should_see = [] + + def check_initial_and_final_state(self): + assert self.addrs[0] == rffi.cast(lltype.Signed, self.addrs) + 3*WORD + assert self.addrs[1] == 123456 + assert self.addrs[2] == 654321 + + def get_root_stack_top_addr(self): + return rffi.cast(lltype.Signed, 
self.addrs) + + def compress_callshape(self, shape, datablockwrapper): + assert shape[0] == 'shape' + return ['compressed'] + shape[1:] + + def write_callshape(self, mark, force_index): + assert mark[0] == 'compressed' + assert force_index not in self.callshapes + assert force_index == 42 + len(self.callshapes) + self.callshapes[force_index] = mark + + def hook_malloc_slowpath(self): + num_entries = self.addrs[0] - rffi.cast(lltype.Signed, self.addrs) + assert num_entries == 5*WORD # 3 initially, plus 2 by the asm frame + assert self.addrs[1] == 123456 # unchanged + assert self.addrs[2] == 654321 # unchanged + frame_addr = self.addrs[3] # pushed by the asm frame + assert self.addrs[4] == self.MARKER_FRAME # pushed by the asm frame + # + from pypy.jit.backend.x86.arch import FORCE_INDEX_OFS + addr = rffi.cast(rffi.CArrayPtr(lltype.Signed), + frame_addr + FORCE_INDEX_OFS) + force_index = addr[0] + assert force_index == 43 # in this test: the 2nd call_malloc_nursery + # + # The callshapes[43] saved above should list addresses both in the + # COPY_AREA and in the "normal" stack, where all the 16 values p1-p16 + # of test_save_regs_at_correct_place should have been stored. Here + # we replace them with new addresses, to emulate a moving GC. + shape = self.callshapes[force_index] + assert len(shape[1:]) == len(self.should_see) + new_objects = [None] * len(self.should_see) + for ofs in shape[1:]: + assert isinstance(ofs, int) # not a register at all here + addr = rffi.cast(rffi.CArrayPtr(lltype.Signed), frame_addr + ofs) + contains = addr[0] + for j in range(len(self.should_see)): + obj = self.should_see[j] + if contains == rffi.cast(lltype.Signed, obj): + assert new_objects[j] is None # duplicate? + break + else: + assert 0 # the value read from the stack looks random? 
+ new_objects[j] = lltype.malloc(self.S1) + addr[0] = rffi.cast(lltype.Signed, new_objects[j]) + self.should_see[:] = new_objects + + +class TestMallocShadowStack(BaseTestRegalloc): + + def setup_method(self, method): + cpu = CPU(None, None) + cpu.gc_ll_descr = GCDescrFastpathMalloc() + cpu.gc_ll_descr.gcrootmap = MockShadowStackRootMap() + cpu.setup_once() + for i in range(42): + cpu.reserve_some_free_fail_descr_number() + self.cpu = cpu + + def test_save_regs_at_correct_place(self): + cpu = self.cpu + gc_ll_descr = cpu.gc_ll_descr + S1 = gc_ll_descr.gcrootmap.S1 + S2 = lltype.GcStruct('S2', ('s0', lltype.Ptr(S1)), + ('s1', lltype.Ptr(S1)), + ('s2', lltype.Ptr(S1)), + ('s3', lltype.Ptr(S1)), + ('s4', lltype.Ptr(S1)), + ('s5', lltype.Ptr(S1)), + ('s6', lltype.Ptr(S1)), + ('s7', lltype.Ptr(S1)), + ('s8', lltype.Ptr(S1)), + ('s9', lltype.Ptr(S1)), + ('s10', lltype.Ptr(S1)), + ('s11', lltype.Ptr(S1)), + ('s12', lltype.Ptr(S1)), + ('s13', lltype.Ptr(S1)), + ('s14', lltype.Ptr(S1)), + ('s15', lltype.Ptr(S1))) + self.namespace = self.namespace.copy() + for i in range(16): + self.namespace['ds%i' % i] = cpu.fielddescrof(S2, 's%d' % i) + ops = ''' + [p0] + p1 = getfield_gc(p0, descr=ds0) + p2 = getfield_gc(p0, descr=ds1) + p3 = getfield_gc(p0, descr=ds2) + p4 = getfield_gc(p0, descr=ds3) + p5 = getfield_gc(p0, descr=ds4) + p6 = getfield_gc(p0, descr=ds5) + p7 = getfield_gc(p0, descr=ds6) + p8 = getfield_gc(p0, descr=ds7) + p9 = getfield_gc(p0, descr=ds8) + p10 = getfield_gc(p0, descr=ds9) + p11 = getfield_gc(p0, descr=ds10) + p12 = getfield_gc(p0, descr=ds11) + p13 = getfield_gc(p0, descr=ds12) + p14 = getfield_gc(p0, descr=ds13) + p15 = getfield_gc(p0, descr=ds14) + p16 = getfield_gc(p0, descr=ds15) + # + # now all registers are in use + p17 = call_malloc_nursery(40) + p18 = call_malloc_nursery(40) # overflow + # + finish(p1, p2, p3, p4, p5, p6, p7, p8, \ + p9, p10, p11, p12, p13, p14, p15, p16) + ''' + s2 = lltype.malloc(S2) + for i in range(16): + s1 = lltype.malloc(S1) 
+ setattr(s2, 's%d' % i, s1) + gc_ll_descr.gcrootmap.should_see.append(s1) + s2ref = lltype.cast_opaque_ptr(llmemory.GCREF, s2) + # + self.interpret(ops, [s2ref]) + gc_ll_descr.check_nothing_in_nursery() + assert gc_ll_descr.calls == [40] + gc_ll_descr.gcrootmap.check_initial_and_final_state() + # check the returned pointers + for i in range(16): + s1ref = self.cpu.get_latest_value_ref(i) + s1 = lltype.cast_opaque_ptr(lltype.Ptr(S1), s1ref) + for j in range(16): + assert s1 != getattr(s2, 's%d' % j) + assert s1 == gc_ll_descr.gcrootmap.should_see[i] diff --git a/pypy/jit/backend/x86/test/test_ztranslation.py b/pypy/jit/backend/x86/test/test_ztranslation.py --- a/pypy/jit/backend/x86/test/test_ztranslation.py +++ b/pypy/jit/backend/x86/test/test_ztranslation.py @@ -52,6 +52,7 @@ set_param(jitdriver, "trace_eagerness", 2) total = 0 frame = Frame(i) + j = float(j) while frame.i > 3: jitdriver.can_enter_jit(frame=frame, total=total, j=j) jitdriver.jit_merge_point(frame=frame, total=total, j=j) diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -289,8 +289,21 @@ assert isinstance(token, TargetToken) assert token.original_jitcell_token is None token.original_jitcell_token = trace.original_jitcell_token - - + + +def do_compile_loop(metainterp_sd, inputargs, operations, looptoken, + log=True, name=''): + metainterp_sd.logger_ops.log_loop(inputargs, operations, -2, + 'compiling', name=name) + return metainterp_sd.cpu.compile_loop(inputargs, operations, looptoken, + log=log, name=name) + +def do_compile_bridge(metainterp_sd, faildescr, inputargs, operations, + original_loop_token, log=True): + metainterp_sd.logger_ops.log_bridge(inputargs, operations, -2) + return metainterp_sd.cpu.compile_bridge(faildescr, inputargs, operations, + original_loop_token, log=log) + def send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, type): vinfo = 
jitdriver_sd.virtualizable_info if vinfo is not None: @@ -319,9 +332,9 @@ metainterp_sd.profiler.start_backend() debug_start("jit-backend") try: - asminfo = metainterp_sd.cpu.compile_loop(loop.inputargs, operations, - original_jitcell_token, - name=loopname) + asminfo = do_compile_loop(metainterp_sd, loop.inputargs, + operations, original_jitcell_token, + name=loopname) finally: debug_stop("jit-backend") metainterp_sd.profiler.end_backend() @@ -333,7 +346,6 @@ metainterp_sd.stats.compiled() metainterp_sd.log("compiled new " + type) # - loopname = jitdriver_sd.warmstate.get_location_str(greenkey) if asminfo is not None: ops_offset = asminfo.ops_offset else: @@ -365,9 +377,9 @@ metainterp_sd.profiler.start_backend() debug_start("jit-backend") try: - asminfo = metainterp_sd.cpu.compile_bridge(faildescr, inputargs, - operations, - original_loop_token) + asminfo = do_compile_bridge(metainterp_sd, faildescr, inputargs, + operations, + original_loop_token) finally: debug_stop("jit-backend") metainterp_sd.profiler.end_backend() diff --git a/pypy/jit/metainterp/logger.py b/pypy/jit/metainterp/logger.py --- a/pypy/jit/metainterp/logger.py +++ b/pypy/jit/metainterp/logger.py @@ -18,6 +18,10 @@ debug_start("jit-log-noopt-loop") logops = self._log_operations(inputargs, operations, ops_offset) debug_stop("jit-log-noopt-loop") + elif number == -2: + debug_start("jit-log-compiling-loop") + logops = self._log_operations(inputargs, operations, ops_offset) + debug_stop("jit-log-compiling-loop") else: debug_start("jit-log-opt-loop") debug_print("# Loop", number, '(%s)' % name , ":", type, @@ -31,6 +35,10 @@ debug_start("jit-log-noopt-bridge") logops = self._log_operations(inputargs, operations, ops_offset) debug_stop("jit-log-noopt-bridge") + elif number == -2: + debug_start("jit-log-compiling-bridge") + logops = self._log_operations(inputargs, operations, ops_offset) + debug_stop("jit-log-compiling-bridge") else: debug_start("jit-log-opt-bridge") debug_print("# bridge out of Guard", 
number, diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -567,7 +567,7 @@ assert isinstance(descr, compile.ResumeGuardDescr) modifier = resume.ResumeDataVirtualAdder(descr, self.resumedata_memo) try: - newboxes = modifier.finish(self.values, self.pendingfields) + newboxes = modifier.finish(self, self.pendingfields) if len(newboxes) > self.metainterp_sd.options.failargs_limit: raise resume.TagOverflow except resume.TagOverflow: diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py --- a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -398,6 +398,40 @@ with raises(InvalidLoop): self.optimize_loop(ops, ops) + def test_issue1045(self): + ops = """ + [i55] + i73 = int_mod(i55, 2) + i75 = int_rshift(i73, 63) + i76 = int_and(2, i75) + i77 = int_add(i73, i76) + i81 = int_eq(i77, 1) + i0 = int_ge(i55, 1) + guard_true(i0) [] + label(i55) + i3 = int_mod(i55, 2) + i5 = int_rshift(i3, 63) + i6 = int_and(2, i5) + i7 = int_add(i3, i6) + i8 = int_eq(i7, 1) + escape(i8) + jump(i55) + """ + expected = """ + [i55] + i73 = int_mod(i55, 2) + i75 = int_rshift(i73, 63) + i76 = int_and(2, i75) + i77 = int_add(i73, i76) + i81 = int_eq(i77, 1) + i0 = int_ge(i55, 1) + guard_true(i0) [] + label(i55, i81) + escape(i81) + jump(i55, i81) + """ + self.optimize_loop(ops, expected) + class OptRenameStrlen(Optimization): def propagate_forward(self, op): dispatch_opt(self, op) @@ -423,7 +457,7 @@ metainterp_sd = FakeMetaInterpStaticData(self.cpu) optimize_unroll(metainterp_sd, loop, [OptRenameStrlen(), OptPure()], True) - def test_optimizer_renaming_boxes(self): + def test_optimizer_renaming_boxes1(self): ops = """ [p1] i1 = strlen(p1) @@ -457,7 +491,6 @@ jump(p1, i11) """ self.optimize_loop(ops, 
expected) - class TestLLtype(OptimizeoptTestMultiLabel, LLtypeMixin): diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -7796,6 +7796,23 @@ """ self.optimize_loop(ops, expected) + def test_issue1048_ok(self): + ops = """ + [p1, i2, i3] + p16 = getfield_gc(p1, descr=nextdescr) + call(p16, descr=nonwritedescr) + guard_true(i2) [p16] + setfield_gc(p1, ConstPtr(myptr), descr=nextdescr) + jump(p1, i3, i2) + """ + expected = """ + [p1, i3] + call(ConstPtr(myptr), descr=nonwritedescr) + guard_true(i3) [] + jump(p1, 1) + """ + self.optimize_loop(ops, expected) + class TestLLtype(OptimizeOptTest, LLtypeMixin): pass diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -260,7 +260,7 @@ if op and op.result: preamble_value = exported_state.exported_values[op.result] value = self.optimizer.getvalue(op.result) - if not value.is_virtual(): + if not value.is_virtual() and not value.is_constant(): imp = ValueImporter(self, preamble_value, op) self.optimizer.importable_values[value] = imp newvalue = self.optimizer.getvalue(op.result) @@ -268,7 +268,9 @@ # note that emitting here SAME_AS should not happen, but # in case it does, we would prefer to be suboptimal in asm # to a fatal RPython exception. 
- if newresult is not op.result and not newvalue.is_constant(): + if newresult is not op.result and \ + not self.short_boxes.has_producer(newresult) and \ + not newvalue.is_constant(): op = ResOperation(rop.SAME_AS, [op.result], newresult) self.optimizer._newoperations.append(op) if self.optimizer.loop.logops: diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -2349,7 +2349,7 @@ # warmstate.py. virtualizable_box = self.virtualizable_boxes[-1] virtualizable = vinfo.unwrap_virtualizable_box(virtualizable_box) - assert not vinfo.gettoken(virtualizable) + assert not vinfo.is_token_nonnull_gcref(virtualizable) # fill the virtualizable with the local boxes self.synchronize_virtualizable() # diff --git a/pypy/jit/metainterp/resume.py b/pypy/jit/metainterp/resume.py --- a/pypy/jit/metainterp/resume.py +++ b/pypy/jit/metainterp/resume.py @@ -182,23 +182,22 @@ # env numbering - def number(self, values, snapshot): + def number(self, optimizer, snapshot): if snapshot is None: return lltype.nullptr(NUMBERING), {}, 0 if snapshot in self.numberings: numb, liveboxes, v = self.numberings[snapshot] return numb, liveboxes.copy(), v - numb1, liveboxes, v = self.number(values, snapshot.prev) + numb1, liveboxes, v = self.number(optimizer, snapshot.prev) n = len(liveboxes)-v boxes = snapshot.boxes length = len(boxes) numb = lltype.malloc(NUMBERING, length) for i in range(length): box = boxes[i] - value = values.get(box, None) - if value is not None: - box = value.get_key_box() + value = optimizer.getvalue(box) + box = value.get_key_box() if isinstance(box, Const): tagged = self.getconst(box) @@ -318,14 +317,14 @@ _, tagbits = untag(tagged) return tagbits == TAGVIRTUAL - def finish(self, values, pending_setfields=[]): + def finish(self, optimizer, pending_setfields=[]): # compute the numbering storage = self.storage # make sure that nobody attached resume data to this guard yet assert 
not storage.rd_numb snapshot = storage.rd_snapshot assert snapshot is not None # is that true? - numb, liveboxes_from_env, v = self.memo.number(values, snapshot) + numb, liveboxes_from_env, v = self.memo.number(optimizer, snapshot) self.liveboxes_from_env = liveboxes_from_env self.liveboxes = {} storage.rd_numb = numb @@ -341,23 +340,23 @@ liveboxes[i] = box else: assert tagbits == TAGVIRTUAL - value = values[box] + value = optimizer.getvalue(box) value.get_args_for_fail(self) for _, box, fieldbox, _ in pending_setfields: self.register_box(box) self.register_box(fieldbox) - value = values[fieldbox] + value = optimizer.getvalue(fieldbox) value.get_args_for_fail(self) - self._number_virtuals(liveboxes, values, v) + self._number_virtuals(liveboxes, optimizer, v) self._add_pending_fields(pending_setfields) storage.rd_consts = self.memo.consts dump_storage(storage, liveboxes) return liveboxes[:] - def _number_virtuals(self, liveboxes, values, num_env_virtuals): + def _number_virtuals(self, liveboxes, optimizer, num_env_virtuals): # !! 'liveboxes' is a list that is extend()ed in-place !! memo = self.memo new_liveboxes = [None] * memo.num_cached_boxes() @@ -397,7 +396,7 @@ memo.nvholes += length - len(vfieldboxes) for virtualbox, fieldboxes in vfieldboxes.iteritems(): num, _ = untag(self.liveboxes[virtualbox]) - value = values[virtualbox] + value = optimizer.getvalue(virtualbox) fieldnums = [self._gettagged(box) for box in fieldboxes] vinfo = value.make_virtual_info(self, fieldnums) @@ -1102,14 +1101,14 @@ virtualizable = self.decode_ref(numb.nums[index]) if self.resume_after_guard_not_forced == 1: # in the middle of handle_async_forcing() - assert vinfo.gettoken(virtualizable) - vinfo.settoken(virtualizable, vinfo.TOKEN_NONE) + assert vinfo.is_token_nonnull_gcref(virtualizable) + vinfo.reset_token_gcref(virtualizable) else: # just jumped away from assembler (case 4 in the comment in # virtualizable.py) into tracing (case 2); check that vable_token # is and stays 0. 
Note the call to reset_vable_token() in # warmstate.py. - assert not vinfo.gettoken(virtualizable) + assert not vinfo.is_token_nonnull_gcref(virtualizable) return vinfo.write_from_resume_data_partial(virtualizable, self, numb) def load_value_of_type(self, TYPE, tagged): diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -144,7 +144,7 @@ 'int_mul': 1, 'guard_true': 2, 'int_sub': 2}) - def test_loop_invariant_mul_ovf(self): + def test_loop_invariant_mul_ovf1(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) def f(x, y): res = 0 @@ -235,6 +235,65 @@ 'guard_true': 4, 'int_sub': 4, 'jump': 3, 'int_mul': 3, 'int_add': 4}) + def test_loop_invariant_mul_ovf2(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) + def f(x, y): + res = 0 + while y > 0: + myjitdriver.can_enter_jit(x=x, y=y, res=res) + myjitdriver.jit_merge_point(x=x, y=y, res=res) + b = y * 2 + try: + res += ovfcheck(x * x) + b + except OverflowError: + res += 1 + y -= 1 + return res + res = self.meta_interp(f, [sys.maxint, 7]) + assert res == f(sys.maxint, 7) + self.check_trace_count(1) + res = self.meta_interp(f, [6, 7]) + assert res == 308 + + def test_loop_invariant_mul_bridge_ovf1(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x1', 'x2']) + def f(x1, x2, y): + res = 0 + while y > 0: + myjitdriver.can_enter_jit(x1=x1, x2=x2, y=y, res=res) + myjitdriver.jit_merge_point(x1=x1, x2=x2, y=y, res=res) + try: + res += ovfcheck(x1 * x1) + except OverflowError: + res += 1 + if y<32 and (y>>2)&1==0: + x1, x2 = x2, x1 + y -= 1 + return res + res = self.meta_interp(f, [6, sys.maxint, 48]) + assert res == f(6, sys.maxint, 48) + + def test_loop_invariant_mul_bridge_ovf2(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x1', 'x2', 'n']) + def f(x1, x2, n, y): + res = 0 + while y > 0: + myjitdriver.can_enter_jit(x1=x1, 
x2=x2, y=y, res=res, n=n) + myjitdriver.jit_merge_point(x1=x1, x2=x2, y=y, res=res, n=n) + try: + res += ovfcheck(x1 * x1) + except OverflowError: + res += 1 + y -= 1 + if y&4 == 0: + x1, x2 = x2, x1 + return res + res = self.meta_interp(f, [sys.maxint, 6, 32, 48]) + assert res == f(sys.maxint, 6, 32, 48) + res = self.meta_interp(f, [6, sys.maxint, 32, 48]) + assert res == f(6, sys.maxint, 32, 48) + + def test_loop_invariant_intbox(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) class I: @@ -2943,11 +3002,18 @@ self.check_resops(arraylen_gc=3) def test_ulonglong_mod(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'sa', 'i']) + myjitdriver = JitDriver(greens = [], reds = ['n', 'a']) + class A: + pass def f(n): sa = i = rffi.cast(rffi.ULONGLONG, 1) + a = A() while i < rffi.cast(rffi.ULONGLONG, n): - myjitdriver.jit_merge_point(sa=sa, n=n, i=i) + a.sa = sa + a.i = i + myjitdriver.jit_merge_point(n=n, a=a) + sa = a.sa + i = a.i sa += sa % i i += 1 res = self.meta_interp(f, [32]) diff --git a/pypy/jit/metainterp/test/test_quasiimmut.py b/pypy/jit/metainterp/test/test_quasiimmut.py --- a/pypy/jit/metainterp/test/test_quasiimmut.py +++ b/pypy/jit/metainterp/test/test_quasiimmut.py @@ -8,7 +8,7 @@ from pypy.jit.metainterp.quasiimmut import get_current_qmut_instance from pypy.jit.metainterp.test.support import LLJitMixin from pypy.jit.codewriter.policy import StopAtXPolicy -from pypy.rlib.jit import JitDriver, dont_look_inside +from pypy.rlib.jit import JitDriver, dont_look_inside, unroll_safe def test_get_current_qmut_instance(): @@ -480,6 +480,32 @@ assert res == 1 self.check_jitcell_token_count(2) + def test_for_loop_array(self): + myjitdriver = JitDriver(greens=[], reds=["n", "i"]) + class Foo(object): + _immutable_fields_ = ["x?[*]"] + def __init__(self, x): + self.x = x + f = Foo([1, 3, 5, 6]) + @unroll_safe + def g(v): + for x in f.x: + if x & 1 == 0: + v += 1 + return v + def main(n): + i = 0 + while i < n: + 
myjitdriver.jit_merge_point(n=n, i=i) + i = g(i) + return i + res = self.meta_interp(main, [10]) + assert res == 10 + self.check_resops({ + "int_add": 2, "int_lt": 2, "jump": 1, "guard_true": 2, + "guard_not_invalidated": 2 + }) + class TestLLtypeGreenFieldsTests(QuasiImmutTests, LLJitMixin): pass diff --git a/pypy/jit/metainterp/test/test_resume.py b/pypy/jit/metainterp/test/test_resume.py --- a/pypy/jit/metainterp/test/test_resume.py +++ b/pypy/jit/metainterp/test/test_resume.py @@ -18,6 +18,19 @@ rd_virtuals = None rd_pendingfields = None + +class FakeOptimizer(object): + def __init__(self, values): + self.values = values + + def getvalue(self, box): + try: + value = self.values[box] + except KeyError: + value = self.values[box] = OptValue(box) + return value + + def test_tag(): assert tag(3, 1) == rffi.r_short(3<<2|1) assert tag(-3, 2) == rffi.r_short(-3<<2|2) @@ -500,7 +513,7 @@ capture_resumedata(fs, None, [], storage) memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) modifier = ResumeDataVirtualAdder(storage, memo) - liveboxes = modifier.finish({}) + liveboxes = modifier.finish(FakeOptimizer({})) metainterp = MyMetaInterp() b1t, b2t, b3t = [BoxInt(), BoxPtr(), BoxInt()] @@ -524,7 +537,7 @@ capture_resumedata(fs, [b4], [], storage) memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) modifier = ResumeDataVirtualAdder(storage, memo) - liveboxes = modifier.finish({}) + liveboxes = modifier.finish(FakeOptimizer({})) metainterp = MyMetaInterp() b1t, b2t, b3t, b4t = [BoxInt(), BoxPtr(), BoxInt(), BoxPtr()] @@ -553,10 +566,10 @@ memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) modifier = ResumeDataVirtualAdder(storage, memo) - liveboxes = modifier.finish({}) + liveboxes = modifier.finish(FakeOptimizer({})) modifier = ResumeDataVirtualAdder(storage2, memo) - liveboxes2 = modifier.finish({}) + liveboxes2 = modifier.finish(FakeOptimizer({})) metainterp = MyMetaInterp() @@ -617,7 +630,7 @@ memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) values = {b2: 
virtual_value(b2, b5, c4)} modifier = ResumeDataVirtualAdder(storage, memo) - liveboxes = modifier.finish(values) + liveboxes = modifier.finish(FakeOptimizer(values)) assert len(storage.rd_virtuals) == 1 assert storage.rd_virtuals[0].fieldnums == [tag(-1, TAGBOX), tag(0, TAGCONST)] @@ -628,7 +641,7 @@ values = {b2: virtual_value(b2, b4, v6), b6: v6} memo.clear_box_virtual_numbers() modifier = ResumeDataVirtualAdder(storage2, memo) - liveboxes2 = modifier.finish(values) + liveboxes2 = modifier.finish(FakeOptimizer(values)) assert len(storage2.rd_virtuals) == 2 assert storage2.rd_virtuals[0].fieldnums == [tag(len(liveboxes2)-1, TAGBOX), tag(-1, TAGVIRTUAL)] @@ -674,7 +687,7 @@ memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) values = {b2: virtual_value(b2, b5, c4)} modifier = ResumeDataVirtualAdder(storage, memo) - liveboxes = modifier.finish(values) + liveboxes = modifier.finish(FakeOptimizer(values)) assert len(storage.rd_virtuals) == 1 assert storage.rd_virtuals[0].fieldnums == [tag(-1, TAGBOX), tag(0, TAGCONST)] @@ -684,7 +697,7 @@ capture_resumedata(fs, None, [], storage2) values[b4] = virtual_value(b4, b6, c4) modifier = ResumeDataVirtualAdder(storage2, memo) - liveboxes = modifier.finish(values) + liveboxes = modifier.finish(FakeOptimizer(values)) assert len(storage2.rd_virtuals) == 2 assert storage2.rd_virtuals[1].fieldnums == storage.rd_virtuals[0].fieldnums assert storage2.rd_virtuals[1] is storage.rd_virtuals[0] @@ -703,7 +716,7 @@ v1.setfield(LLtypeMixin.nextdescr, v2) values = {b1: v1, b2: v2} modifier = ResumeDataVirtualAdder(storage, memo) - liveboxes = modifier.finish(values) + liveboxes = modifier.finish(FakeOptimizer(values)) assert liveboxes == [b3] assert len(storage.rd_virtuals) == 2 assert storage.rd_virtuals[0].fieldnums == [tag(-1, TAGBOX), @@ -776,7 +789,7 @@ memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) - numb, liveboxes, v = memo.number({}, snap1) + numb, liveboxes, v = memo.number(FakeOptimizer({}), snap1) assert v == 0 assert 
liveboxes == {b1: tag(0, TAGBOX), b2: tag(1, TAGBOX), @@ -788,7 +801,7 @@ tag(0, TAGBOX), tag(2, TAGINT)] assert not numb.prev.prev - numb2, liveboxes2, v = memo.number({}, snap2) + numb2, liveboxes2, v = memo.number(FakeOptimizer({}), snap2) assert v == 0 assert liveboxes2 == {b1: tag(0, TAGBOX), b2: tag(1, TAGBOX), @@ -813,7 +826,8 @@ return self.virt # renamed - numb3, liveboxes3, v = memo.number({b3: FakeValue(False, c4)}, snap3) + numb3, liveboxes3, v = memo.number(FakeOptimizer({b3: FakeValue(False, c4)}), + snap3) assert v == 0 assert liveboxes3 == {b1: tag(0, TAGBOX), b2: tag(1, TAGBOX)} @@ -825,7 +839,8 @@ env4 = [c3, b4, b1, c3] snap4 = Snapshot(snap, env4) - numb4, liveboxes4, v = memo.number({b4: FakeValue(True, b4)}, snap4) + numb4, liveboxes4, v = memo.number(FakeOptimizer({b4: FakeValue(True, b4)}), + snap4) assert v == 1 assert liveboxes4 == {b1: tag(0, TAGBOX), b2: tag(1, TAGBOX), @@ -837,8 +852,9 @@ env5 = [b1, b4, b5] snap5 = Snapshot(snap4, env5) - numb5, liveboxes5, v = memo.number({b4: FakeValue(True, b4), - b5: FakeValue(True, b5)}, snap5) + numb5, liveboxes5, v = memo.number(FakeOptimizer({b4: FakeValue(True, b4), + b5: FakeValue(True, b5)}), + snap5) assert v == 2 assert liveboxes5 == {b1: tag(0, TAGBOX), b2: tag(1, TAGBOX), @@ -940,7 +956,7 @@ storage = make_storage(b1s, b2s, b3s) memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) modifier = ResumeDataVirtualAdder(storage, memo) - liveboxes = modifier.finish({}) + liveboxes = modifier.finish(FakeOptimizer({})) assert storage.rd_snapshot is None cpu = MyCPU([]) reader = ResumeDataDirectReader(MyMetaInterp(cpu), storage) @@ -954,14 +970,14 @@ storage = make_storage(b1s, b2s, b3s) memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) modifier = ResumeDataVirtualAdder(storage, memo) - modifier.finish({}) + modifier.finish(FakeOptimizer({})) assert len(memo.consts) == 2 assert storage.rd_consts is memo.consts b1s, b2s, b3s = [ConstInt(sys.maxint), ConstInt(2**17), ConstInt(-65)] storage2 = 
make_storage(b1s, b2s, b3s) modifier2 = ResumeDataVirtualAdder(storage2, memo) - modifier2.finish({}) + modifier2.finish(FakeOptimizer({})) assert len(memo.consts) == 3 assert storage2.rd_consts is memo.consts @@ -1022,7 +1038,7 @@ val = FakeValue() values = {b1s: val, b2s: val} - liveboxes = modifier.finish(values) + liveboxes = modifier.finish(FakeOptimizer(values)) assert storage.rd_snapshot is None b1t, b3t = [BoxInt(11), BoxInt(33)] newboxes = _resume_remap(liveboxes, [b1_2, b3s], b1t, b3t) @@ -1043,7 +1059,7 @@ storage = make_storage(b1s, b2s, b3s) memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) modifier = ResumeDataVirtualAdder(storage, memo) - liveboxes = modifier.finish({}) + liveboxes = modifier.finish(FakeOptimizer({})) b2t, b3t = [BoxPtr(demo55o), BoxInt(33)] newboxes = _resume_remap(liveboxes, [b2s, b3s], b2t, b3t) metainterp = MyMetaInterp() @@ -1086,7 +1102,7 @@ values = {b2s: v2, b4s: v4} liveboxes = [] - modifier._number_virtuals(liveboxes, values, 0) + modifier._number_virtuals(liveboxes, FakeOptimizer(values), 0) storage.rd_consts = memo.consts[:] storage.rd_numb = None # resume @@ -1156,7 +1172,7 @@ modifier.register_virtual_fields(b2s, [b4s, c1s]) liveboxes = [] values = {b2s: v2} - modifier._number_virtuals(liveboxes, values, 0) + modifier._number_virtuals(liveboxes, FakeOptimizer(values), 0) dump_storage(storage, liveboxes) storage.rd_consts = memo.consts[:] storage.rd_numb = None @@ -1203,7 +1219,7 @@ v2.setfield(LLtypeMixin.bdescr, OptValue(b4s)) modifier.register_virtual_fields(b2s, [c1s, b4s]) liveboxes = [] - modifier._number_virtuals(liveboxes, {b2s: v2}, 0) + modifier._number_virtuals(liveboxes, FakeOptimizer({b2s: v2}), 0) dump_storage(storage, liveboxes) storage.rd_consts = memo.consts[:] storage.rd_numb = None @@ -1249,7 +1265,7 @@ values = {b4s: v4, b2s: v2} liveboxes = [] - modifier._number_virtuals(liveboxes, values, 0) + modifier._number_virtuals(liveboxes, FakeOptimizer(values), 0) assert liveboxes == [b2s, b4s] or 
liveboxes == [b4s, b2s] modifier._add_pending_fields([(LLtypeMixin.nextdescr, b2s, b4s, -1)]) storage.rd_consts = memo.consts[:] diff --git a/pypy/jit/metainterp/virtualizable.py b/pypy/jit/metainterp/virtualizable.py --- a/pypy/jit/metainterp/virtualizable.py +++ b/pypy/jit/metainterp/virtualizable.py @@ -262,15 +262,15 @@ force_now._dont_inline_ = True self.force_now = force_now - def gettoken(virtualizable): + def is_token_nonnull_gcref(virtualizable): virtualizable = cast_gcref_to_vtype(virtualizable) - return virtualizable.vable_token - self.gettoken = gettoken + return bool(virtualizable.vable_token) + self.is_token_nonnull_gcref = is_token_nonnull_gcref - def settoken(virtualizable, token): + def reset_token_gcref(virtualizable): virtualizable = cast_gcref_to_vtype(virtualizable) - virtualizable.vable_token = token - self.settoken = settoken + virtualizable.vable_token = VirtualizableInfo.TOKEN_NONE + self.reset_token_gcref = reset_token_gcref def _freeze_(self): return True diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -453,7 +453,7 @@ if sys.stdout == sys.__stdout__: import pdb; pdb.post_mortem(tb) raise e.__class__, e, tb - fatalerror('~~~ Crash in JIT! %s' % (e,), traceback=True) + fatalerror('~~~ Crash in JIT! 
%s' % (e,)) crash_in_jit._dont_inline_ = True if self.translator.rtyper.type_system.name == 'lltypesystem': diff --git a/pypy/jit/tl/tinyframe/tinyframe.py b/pypy/jit/tl/tinyframe/tinyframe.py --- a/pypy/jit/tl/tinyframe/tinyframe.py +++ b/pypy/jit/tl/tinyframe/tinyframe.py @@ -210,7 +210,7 @@ def repr(self): return "" % (self.outer.repr(), self.inner.repr()) -driver = JitDriver(greens = ['code', 'i'], reds = ['self'], +driver = JitDriver(greens = ['i', 'code'], reds = ['self'], virtualizables = ['self']) class Frame(object): diff --git a/pypy/module/_demo/test/test_sieve.py b/pypy/module/_demo/test/test_sieve.py new file mode 100644 --- /dev/null +++ b/pypy/module/_demo/test/test_sieve.py @@ -0,0 +1,12 @@ +from pypy.conftest import gettestobjspace + + +class AppTestSieve: + def setup_class(cls): + cls.space = gettestobjspace(usemodules=('_demo',)) + + def test_sieve(self): + import _demo + lst = _demo.sieve(100) + assert lst == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, + 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97] diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -5,14 +5,13 @@ from pypy.rlib import streamio from pypy.rlib.rarithmetic import r_longlong from pypy.rlib.rstring import StringBuilder -from pypy.module._file.interp_stream import (W_AbstractStream, StreamErrors, - wrap_streamerror, wrap_oserror_as_ioerror) +from pypy.module._file.interp_stream import W_AbstractStream, StreamErrors from pypy.module.posix.interp_posix import dispatch_filename from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.typedef import (TypeDef, GetSetProperty, interp_attrproperty, make_weakref_descr, interp_attrproperty_w) from pypy.interpreter.gateway import interp2app, unwrap_spec - +from pypy.interpreter.streamutil import wrap_streamerror, wrap_oserror_as_ioerror class W_File(W_AbstractStream): """An interp-level file 
object. This implements the same interface than diff --git a/pypy/module/_file/interp_stream.py b/pypy/module/_file/interp_stream.py --- a/pypy/module/_file/interp_stream.py +++ b/pypy/module/_file/interp_stream.py @@ -2,27 +2,13 @@ from pypy.rlib import streamio from pypy.rlib.streamio import StreamErrors -from pypy.interpreter.error import OperationError, wrap_oserror2 +from pypy.interpreter.error import OperationError from pypy.interpreter.baseobjspace import ObjSpace, Wrappable from pypy.interpreter.typedef import TypeDef from pypy.interpreter.gateway import interp2app +from pypy.interpreter.streamutil import wrap_streamerror, wrap_oserror_as_ioerror -def wrap_streamerror(space, e, w_filename=None): - if isinstance(e, streamio.StreamError): - return OperationError(space.w_ValueError, - space.wrap(e.message)) - elif isinstance(e, OSError): - return wrap_oserror_as_ioerror(space, e, w_filename) - else: - # should not happen: wrap_streamerror() is only called when - # StreamErrors = (OSError, StreamError) are raised - return OperationError(space.w_IOError, space.w_None) - -def wrap_oserror_as_ioerror(space, e, w_filename=None): - return wrap_oserror2(space, e, w_filename, - w_exception_class=space.w_IOError) - class W_AbstractStream(Wrappable): """Base class for interp-level objects that expose streams to app-level""" slock = None diff --git a/pypy/module/_io/__init__.py b/pypy/module/_io/__init__.py --- a/pypy/module/_io/__init__.py +++ b/pypy/module/_io/__init__.py @@ -28,6 +28,7 @@ } def init(self, space): + MixedModule.init(self, space) w_UnsupportedOperation = space.call_function( space.w_type, space.wrap('UnsupportedOperation'), @@ -35,3 +36,9 @@ space.newdict()) space.setattr(self, space.wrap('UnsupportedOperation'), w_UnsupportedOperation) + + def shutdown(self, space): + # at shutdown, flush all open streams. Ignore I/O errors. 
+ from pypy.module._io.interp_iobase import get_autoflushher + get_autoflushher(space).flush_all(space) + diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py --- a/pypy/module/_io/interp_iobase.py +++ b/pypy/module/_io/interp_iobase.py @@ -5,6 +5,8 @@ from pypy.interpreter.gateway import interp2app from pypy.interpreter.error import OperationError, operationerrfmt from pypy.rlib.rstring import StringBuilder +from pypy.rlib import rweakref + DEFAULT_BUFFER_SIZE = 8192 @@ -43,6 +45,8 @@ self.space = space self.w_dict = space.newdict() self.__IOBase_closed = False + self.streamholder = None # needed by AutoFlusher + get_autoflushher(space).add(self) def getdict(self, space): return self.w_dict @@ -98,6 +102,7 @@ space.call_method(self, "flush") finally: self.__IOBase_closed = True + get_autoflushher(space).remove(self) def flush_w(self, space): if self._CLOSED(): @@ -303,3 +308,60 @@ read = interp2app(W_RawIOBase.read_w), readall = interp2app(W_RawIOBase.readall_w), ) + + +# ------------------------------------------------------------ +# functions to make sure that all streams are flushed on exit +# ------------------------------------------------------------ + +class StreamHolder(object): + + def __init__(self, w_iobase): + self.w_iobase_ref = rweakref.ref(w_iobase) + w_iobase.autoflusher = self + + def autoflush(self, space): + w_iobase = self.w_iobase_ref() + if w_iobase is not None: + try: + space.call_method(w_iobase, 'flush') + except OperationError, e: + # if it's an IOError or ValueError, ignore it (ValueError is + # raised if by chance we are trying to flush a file which has + # already been closed) + if not (e.match(space, space.w_IOError) or + e.match(space, space.w_ValueError)): + raise + + +class AutoFlusher(object): + + def __init__(self, space): + self.streams = {} + + def add(self, w_iobase): + assert w_iobase.streamholder is None + holder = StreamHolder(w_iobase) + w_iobase.streamholder = holder + self.streams[holder] = 
None + + def remove(self, w_iobase): + holder = w_iobase.streamholder + if holder is not None: + del self.streams[holder] + + def flush_all(self, space): + while self.streams: + for streamholder in self.streams.keys(): + try: + del self.streams[streamholder] + except KeyError: + pass # key was removed in the meantime + else: + streamholder.autoflush(space) + + +def get_autoflushher(space): + return space.fromcache(AutoFlusher) + + diff --git a/pypy/module/_io/test/test_fileio.py b/pypy/module/_io/test/test_fileio.py --- a/pypy/module/_io/test/test_fileio.py +++ b/pypy/module/_io/test/test_fileio.py @@ -160,3 +160,42 @@ f.close() assert repr(f) == "<_io.FileIO [closed]>" +def test_flush_at_exit(): + from pypy import conftest + from pypy.tool.option import make_config, make_objspace + from pypy.tool.udir import udir + + tmpfile = udir.join('test_flush_at_exit') + config = make_config(conftest.option) + space = make_objspace(config) + space.appexec([space.wrap(str(tmpfile))], """(tmpfile): + import io + f = io.open(tmpfile, 'w', encoding='ascii') + f.write('42') + # no flush() and no close() + import sys; sys._keepalivesomewhereobscure = f + """) + space.finish() + assert tmpfile.read() == '42' + +def test_flush_at_exit_IOError_and_ValueError(): + from pypy import conftest + from pypy.tool.option import make_config, make_objspace + + config = make_config(conftest.option) + space = make_objspace(config) + space.appexec([], """(): + import io + class MyStream(io.IOBase): + def flush(self): + raise IOError + + class MyStream2(io.IOBase): + def flush(self): + raise ValueError + + s = MyStream() + s2 = MyStream2() + import sys; sys._keepalivesomewhereobscure = s + """) + space.finish() # the IOError has been ignored diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -385,6 +385,7 @@ "Tuple": "space.w_tuple", "List": "space.w_list", "Set": "space.w_set", + "FrozenSet": "space.w_frozenset", 
"Int": "space.w_int", "Bool": "space.w_bool", "Float": "space.w_float", @@ -406,7 +407,7 @@ }.items(): GLOBALS['Py%s_Type#' % (cpyname, )] = ('PyTypeObject*', pypyexpr) - for cpyname in 'Method List Int Long Dict Tuple Class'.split(): + for cpyname in 'Method List Long Dict Tuple Class'.split(): FORWARD_DECLS.append('typedef struct { PyObject_HEAD } ' 'Py%sObject' % (cpyname, )) build_exported_objects() diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py --- a/pypy/module/cpyext/dictobject.py +++ b/pypy/module/cpyext/dictobject.py @@ -184,8 +184,10 @@ w_item = space.call_method(w_iter, "next") w_key, w_value = space.fixedview(w_item, 2) state = space.fromcache(RefcountState) - pkey[0] = state.make_borrowed(w_dict, w_key) - pvalue[0] = state.make_borrowed(w_dict, w_value) + if pkey: + pkey[0] = state.make_borrowed(w_dict, w_key) + if pvalue: + pvalue[0] = state.make_borrowed(w_dict, w_value) ppos[0] += 1 except OperationError, e: if not e.match(space, space.w_StopIteration): diff --git a/pypy/module/cpyext/eval.py b/pypy/module/cpyext/eval.py --- a/pypy/module/cpyext/eval.py +++ b/pypy/module/cpyext/eval.py @@ -1,16 +1,24 @@ from pypy.interpreter.error import OperationError +from pypy.interpreter.astcompiler import consts from pypy.rpython.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( cpython_api, CANNOT_FAIL, CONST_STRING, FILEP, fread, feof, Py_ssize_tP, cpython_struct) from pypy.module.cpyext.pyobject import PyObject, borrow_from from pypy.module.cpyext.pyerrors import PyErr_SetFromErrno +from pypy.module.cpyext.funcobject import PyCodeObject from pypy.module.__builtin__ import compiling PyCompilerFlags = cpython_struct( - "PyCompilerFlags", ()) + "PyCompilerFlags", (("cf_flags", rffi.INT),)) PyCompilerFlagsPtr = lltype.Ptr(PyCompilerFlags) +PyCF_MASK = (consts.CO_FUTURE_DIVISION | + consts.CO_FUTURE_ABSOLUTE_IMPORT | + consts.CO_FUTURE_WITH_STATEMENT | + consts.CO_FUTURE_PRINT_FUNCTION | + 
consts.CO_FUTURE_UNICODE_LITERALS) + @cpython_api([PyObject, PyObject, PyObject], PyObject) def PyEval_CallObjectWithKeywords(space, w_obj, w_arg, w_kwds): return space.call(w_obj, w_arg, w_kwds) @@ -48,6 +56,17 @@ return None return borrow_from(None, caller.w_globals) + at cpython_api([PyCodeObject, PyObject, PyObject], PyObject) +def PyEval_EvalCode(space, w_code, w_globals, w_locals): + """This is a simplified interface to PyEval_EvalCodeEx(), with just + the code object, and the dictionaries of global and local variables. + The other arguments are set to NULL.""" + if w_globals is None: + w_globals = space.w_None + if w_locals is None: + w_locals = space.w_None + return compiling.eval(space, w_code, w_globals, w_locals) + @cpython_api([PyObject, PyObject], PyObject) def PyObject_CallObject(space, w_obj, w_arg): """ @@ -74,7 +93,7 @@ Py_file_input = 257 Py_eval_input = 258 -def compile_string(space, source, filename, start): +def compile_string(space, source, filename, start, flags=0): w_source = space.wrap(source) start = rffi.cast(lltype.Signed, start) if start == Py_file_input: @@ -86,7 +105,7 @@ else: raise OperationError(space.w_ValueError, space.wrap( "invalid mode parameter for compilation")) - return compiling.compile(space, w_source, filename, mode) + return compiling.compile(space, w_source, filename, mode, flags) def run_string(space, source, filename, start, w_globals, w_locals): w_code = compile_string(space, source, filename, start) @@ -109,6 +128,24 @@ filename = "" return run_string(space, source, filename, start, w_globals, w_locals) + at cpython_api([rffi.CCHARP, rffi.INT_real, PyObject, PyObject, + PyCompilerFlagsPtr], PyObject) +def PyRun_StringFlags(space, source, start, w_globals, w_locals, flagsptr): + """Execute Python source code from str in the context specified by the + dictionaries globals and locals with the compiler flags specified by + flags. 
The parameter start specifies the start token that should be used to + parse the source code. + + Returns the result of executing the code as a Python object, or NULL if an + exception was raised.""" + source = rffi.charp2str(source) + if flagsptr: + flags = rffi.cast(lltype.Signed, flagsptr.c_cf_flags) + else: + flags = 0 + w_code = compile_string(space, source, "", start, flags) + return compiling.eval(space, w_code, w_globals, w_locals) + @cpython_api([FILEP, CONST_STRING, rffi.INT_real, PyObject, PyObject], PyObject) def PyRun_File(space, fp, filename, start, w_globals, w_locals): """This is a simplified interface to PyRun_FileExFlags() below, leaving @@ -150,7 +187,7 @@ @cpython_api([rffi.CCHARP, rffi.CCHARP, rffi.INT_real, PyCompilerFlagsPtr], PyObject) -def Py_CompileStringFlags(space, source, filename, start, flags): +def Py_CompileStringFlags(space, source, filename, start, flagsptr): """Parse and compile the Python source code in str, returning the resulting code object. The start token is given by start; this can be used to constrain the code which can be compiled and should @@ -160,7 +197,30 @@ returns NULL if the code cannot be parsed or compiled.""" source = rffi.charp2str(source) filename = rffi.charp2str(filename) - if flags: - raise OperationError(space.w_NotImplementedError, space.wrap( - "cpyext Py_CompileStringFlags does not accept flags")) - return compile_string(space, source, filename, start) + if flagsptr: + flags = rffi.cast(lltype.Signed, flagsptr.c_cf_flags) + else: + flags = 0 + return compile_string(space, source, filename, start, flags) + + at cpython_api([PyCompilerFlagsPtr], rffi.INT_real, error=CANNOT_FAIL) +def PyEval_MergeCompilerFlags(space, cf): + """This function changes the flags of the current evaluation + frame, and returns true on success, false on failure.""" + flags = rffi.cast(lltype.Signed, cf.c_cf_flags) + result = flags != 0 + current_frame = space.getexecutioncontext().gettopframe_nohidden() + if current_frame: + 
codeflags = current_frame.pycode.co_flags + compilerflags = codeflags & PyCF_MASK + if compilerflags: + result = 1 + flags |= compilerflags + # No future keyword at the moment + # if codeflags & CO_GENERATOR_ALLOWED: + # result = 1 + # flags |= CO_GENERATOR_ALLOWED + cf.c_cf_flags = rffi.cast(rffi.INT, flags) + return result + + diff --git a/pypy/module/cpyext/funcobject.py b/pypy/module/cpyext/funcobject.py --- a/pypy/module/cpyext/funcobject.py +++ b/pypy/module/cpyext/funcobject.py @@ -1,6 +1,6 @@ from pypy.rpython.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( - PyObjectFields, generic_cpy_call, CONST_STRING, + PyObjectFields, generic_cpy_call, CONST_STRING, CANNOT_FAIL, cpython_api, bootstrap_function, cpython_struct, build_type_checkers) from pypy.module.cpyext.pyobject import ( PyObject, make_ref, from_ref, Py_DecRef, make_typedescr, borrow_from) @@ -48,6 +48,7 @@ PyFunction_Check, PyFunction_CheckExact = build_type_checkers("Function", Function) PyMethod_Check, PyMethod_CheckExact = build_type_checkers("Method", Method) +PyCode_Check, PyCode_CheckExact = build_type_checkers("Code", PyCode) def function_attach(space, py_obj, w_obj): py_func = rffi.cast(PyFunctionObject, py_obj) @@ -167,3 +168,9 @@ freevars=[], cellvars=[])) + at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) +def PyCode_GetNumFree(space, w_co): + """Return the number of free variables in co.""" + co = space.interp_w(PyCode, w_co) + return len(co.co_freevars) + diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h --- a/pypy/module/cpyext/include/Python.h +++ b/pypy/module/cpyext/include/Python.h @@ -113,6 +113,7 @@ #include "compile.h" #include "frameobject.h" #include "eval.h" +#include "pymath.h" #include "pymem.h" #include "pycobject.h" #include "pycapsule.h" diff --git a/pypy/module/cpyext/include/code.h b/pypy/module/cpyext/include/code.h --- a/pypy/module/cpyext/include/code.h +++ b/pypy/module/cpyext/include/code.h @@ 
-13,13 +13,19 @@ /* Masks for co_flags above */ /* These values are also in funcobject.py */ -#define CO_OPTIMIZED 0x0001 -#define CO_NEWLOCALS 0x0002 -#define CO_VARARGS 0x0004 -#define CO_VARKEYWORDS 0x0008 +#define CO_OPTIMIZED 0x0001 +#define CO_NEWLOCALS 0x0002 +#define CO_VARARGS 0x0004 +#define CO_VARKEYWORDS 0x0008 #define CO_NESTED 0x0010 #define CO_GENERATOR 0x0020 +#define CO_FUTURE_DIVISION 0x02000 +#define CO_FUTURE_ABSOLUTE_IMPORT 0x04000 +#define CO_FUTURE_WITH_STATEMENT 0x08000 +#define CO_FUTURE_PRINT_FUNCTION 0x10000 +#define CO_FUTURE_UNICODE_LITERALS 0x20000 + #ifdef __cplusplus } #endif diff --git a/pypy/module/cpyext/include/intobject.h b/pypy/module/cpyext/include/intobject.h --- a/pypy/module/cpyext/include/intobject.h +++ b/pypy/module/cpyext/include/intobject.h @@ -7,6 +7,11 @@ extern "C" { #endif +typedef struct { + PyObject_HEAD + long ob_ival; +} PyIntObject; + #ifdef __cplusplus } #endif diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h --- a/pypy/module/cpyext/include/object.h +++ b/pypy/module/cpyext/include/object.h @@ -56,6 +56,8 @@ #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) +#define _Py_ForgetReference(ob) /* nothing */ + #define Py_None (&_Py_NoneStruct) /* diff --git a/pypy/module/cpyext/include/pymath.h b/pypy/module/cpyext/include/pymath.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/pymath.h @@ -0,0 +1,20 @@ +#ifndef Py_PYMATH_H +#define Py_PYMATH_H + +/************************************************************************** +Symbols and macros to supply platform-independent interfaces to mathematical +functions and constants +**************************************************************************/ + +/* HUGE_VAL is supposed to expand to a positive double infinity. Python + * uses Py_HUGE_VAL instead because some platforms are broken in this + * respect. 
We used to embed code in pyport.h to try to worm around that, + * but different platforms are broken in conflicting ways. If you're on + * a platform where HUGE_VAL is defined incorrectly, fiddle your Python + * config to #define Py_HUGE_VAL to something that works on your platform. + */ +#ifndef Py_HUGE_VAL +#define Py_HUGE_VAL HUGE_VAL +#endif + +#endif /* Py_PYMATH_H */ diff --git a/pypy/module/cpyext/include/pythonrun.h b/pypy/module/cpyext/include/pythonrun.h --- a/pypy/module/cpyext/include/pythonrun.h +++ b/pypy/module/cpyext/include/pythonrun.h @@ -19,6 +19,14 @@ int cf_flags; /* bitmask of CO_xxx flags relevant to future */ } PyCompilerFlags; +#define PyCF_MASK (CO_FUTURE_DIVISION | CO_FUTURE_ABSOLUTE_IMPORT | \ + CO_FUTURE_WITH_STATEMENT | CO_FUTURE_PRINT_FUNCTION | \ + CO_FUTURE_UNICODE_LITERALS) +#define PyCF_MASK_OBSOLETE (CO_NESTED) +#define PyCF_SOURCE_IS_UTF8 0x0100 +#define PyCF_DONT_IMPLY_DEDENT 0x0200 +#define PyCF_ONLY_AST 0x0400 + #define Py_CompileString(str, filename, start) Py_CompileStringFlags(str, filename, start, NULL) #ifdef __cplusplus diff --git a/pypy/module/cpyext/intobject.py b/pypy/module/cpyext/intobject.py --- a/pypy/module/cpyext/intobject.py +++ b/pypy/module/cpyext/intobject.py @@ -2,11 +2,37 @@ from pypy.rpython.lltypesystem import rffi, lltype from pypy.interpreter.error import OperationError from pypy.module.cpyext.api import ( - cpython_api, build_type_checkers, PyObject, - CONST_STRING, CANNOT_FAIL, Py_ssize_t) + cpython_api, cpython_struct, build_type_checkers, bootstrap_function, + PyObject, PyObjectFields, CONST_STRING, CANNOT_FAIL, Py_ssize_t) +from pypy.module.cpyext.pyobject import ( + make_typedescr, track_reference, RefcountState, from_ref) from pypy.rlib.rarithmetic import r_uint, intmask, LONG_TEST +from pypy.objspace.std.intobject import W_IntObject import sys +PyIntObjectStruct = lltype.ForwardReference() +PyIntObject = lltype.Ptr(PyIntObjectStruct) +PyIntObjectFields = PyObjectFields + \ + (("ob_ival", 
rffi.LONG),) +cpython_struct("PyIntObject", PyIntObjectFields, PyIntObjectStruct) + + at bootstrap_function +def init_intobject(space): + "Type description of PyIntObject" + make_typedescr(space.w_int.instancetypedef, + basestruct=PyIntObject.TO, + realize=int_realize) + +def int_realize(space, obj): + intval = rffi.cast(lltype.Signed, rffi.cast(PyIntObject, obj).c_ob_ival) + w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type)) + w_obj = space.allocate_instance(W_IntObject, w_type) + w_obj.__init__(intval) + track_reference(space, obj, w_obj) + state = space.fromcache(RefcountState) + state.set_lifeline(w_obj, obj) + return w_obj + PyInt_Check, PyInt_CheckExact = build_type_checkers("Int") @cpython_api([], lltype.Signed, error=CANNOT_FAIL) diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -193,7 +193,7 @@ if not obj: PyErr_NoMemory(space) obj.c_ob_type = type - _Py_NewReference(space, obj) + obj.c_ob_refcnt = 1 return obj @cpython_api([PyVarObject, PyTypeObjectPtr, Py_ssize_t], PyObject) diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -17,6 +17,7 @@ class BaseCpyTypedescr(object): basestruct = PyObject.TO + W_BaseObject = W_ObjectObject def get_dealloc(self, space): from pypy.module.cpyext.typeobject import subtype_dealloc @@ -51,10 +52,14 @@ def attach(self, space, pyobj, w_obj): pass - def realize(self, space, ref): - # For most types, a reference cannot exist without - # a real interpreter object - raise InvalidPointerException(str(ref)) + def realize(self, space, obj): + w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type)) + w_obj = space.allocate_instance(self.W_BaseObject, w_type) + track_reference(space, obj, w_obj) + if w_type is not space.gettypefor(self.W_BaseObject): + state = space.fromcache(RefcountState) + state.set_lifeline(w_obj, obj) 
+ return w_obj typedescr_cache = {} @@ -369,13 +374,7 @@ obj.c_ob_refcnt = 1 w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type)) assert isinstance(w_type, W_TypeObject) - if w_type.is_cpytype(): - w_obj = space.allocate_instance(W_ObjectObject, w_type) - track_reference(space, obj, w_obj) - state = space.fromcache(RefcountState) - state.set_lifeline(w_obj, obj) - else: - assert False, "Please add more cases in _Py_NewReference()" + get_typedescr(w_type.instancetypedef).realize(space, obj) def _Py_Dealloc(space, obj): from pypy.module.cpyext.api import generic_cpy_call_dont_decref diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py --- a/pypy/module/cpyext/stubs.py +++ b/pypy/module/cpyext/stubs.py @@ -182,16 +182,6 @@ used as the positional and keyword parameters to the object's constructor.""" raise NotImplementedError - at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) -def PyCode_Check(space, co): - """Return true if co is a code object""" - raise NotImplementedError - - at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) -def PyCode_GetNumFree(space, co): - """Return the number of free variables in co.""" - raise NotImplementedError - @cpython_api([PyObject], rffi.INT_real, error=-1) def PyCodec_Register(space, search_function): """Register a new codec search function. @@ -1293,28 +1283,6 @@ that haven't been explicitly destroyed at that point.""" raise NotImplementedError - at cpython_api([rffi.VOIDP], lltype.Void) -def Py_AddPendingCall(space, func): - """Post a notification to the Python main thread. If successful, func will - be called with the argument arg at the earliest convenience. func will be - called having the global interpreter lock held and can thus use the full - Python API and can take any action such as setting object attributes to - signal IO completion. It must return 0 on success, or -1 signalling an - exception. 
The notification function won't be interrupted to perform another - asynchronous notification recursively, but it can still be interrupted to - switch threads if the global interpreter lock is released, for example, if it - calls back into Python code. - - This function returns 0 on success in which case the notification has been - scheduled. Otherwise, for example if the notification buffer is full, it - returns -1 without setting any exception. - - This function can be called on any thread, be it a Python thread or some - other system thread. If it is a Python thread, it doesn't matter if it holds - the global interpreter lock or not. - """ - raise NotImplementedError - @cpython_api([Py_tracefunc, PyObject], lltype.Void) def PyEval_SetProfile(space, func, obj): """Set the profiler function to func. The obj parameter is passed to the @@ -1875,26 +1843,6 @@ """ raise NotImplementedError - at cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) -def Py_UNICODE_ISTITLE(space, ch): - """Return 1 or 0 depending on whether ch is a titlecase character.""" - raise NotImplementedError - - at cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) -def Py_UNICODE_ISDIGIT(space, ch): - """Return 1 or 0 depending on whether ch is a digit character.""" - raise NotImplementedError - - at cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) -def Py_UNICODE_ISNUMERIC(space, ch): - """Return 1 or 0 depending on whether ch is a numeric character.""" - raise NotImplementedError - - at cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) -def Py_UNICODE_ISALPHA(space, ch): - """Return 1 or 0 depending on whether ch is an alphabetic character.""" - raise NotImplementedError - @cpython_api([rffi.CCHARP], PyObject) def PyUnicode_FromFormat(space, format): """Take a C printf()-style format string and a variable number of @@ -2339,17 +2287,6 @@ use the default error handling.""" raise NotImplementedError - at cpython_api([PyObject, PyObject, Py_ssize_t, 
Py_ssize_t, rffi.INT_real], rffi.INT_real, error=-1) -def PyUnicode_Tailmatch(space, str, substr, start, end, direction): - """Return 1 if substr matches str*[*start:end] at the given tail end - (direction == -1 means to do a prefix match, direction == 1 a suffix match), - 0 otherwise. Return -1 if an error occurred. - - This function used an int type for start and end. This - might require changes in your code for properly supporting 64-bit - systems.""" - raise NotImplementedError - @cpython_api([PyObject, PyObject, Py_ssize_t, Py_ssize_t, rffi.INT_real], Py_ssize_t, error=-2) def PyUnicode_Find(space, str, substr, start, end, direction): """Return the first position of substr in str*[*start:end] using the given @@ -2373,16 +2310,6 @@ properly supporting 64-bit systems.""" raise NotImplementedError - at cpython_api([PyObject, PyObject, PyObject, Py_ssize_t], PyObject) -def PyUnicode_Replace(space, str, substr, replstr, maxcount): - """Replace at most maxcount occurrences of substr in str with replstr and - return the resulting Unicode object. maxcount == -1 means replace all - occurrences. - - This function used an int type for maxcount. This might - require changes in your code for properly supporting 64-bit systems.""" - raise NotImplementedError - @cpython_api([PyObject, PyObject, rffi.INT_real], PyObject) def PyUnicode_RichCompare(space, left, right, op): """Rich compare two unicode strings and return one of the following: @@ -2556,17 +2483,6 @@ source code is read from fp instead of an in-memory string.""" raise NotImplementedError - at cpython_api([rffi.CCHARP, rffi.INT_real, PyObject, PyObject, PyCompilerFlags], PyObject) -def PyRun_StringFlags(space, str, start, globals, locals, flags): - """Execute Python source code from str in the context specified by the - dictionaries globals and locals with the compiler flags specified by - flags. The parameter start specifies the start token that should be used to - parse the source code. 
- - Returns the result of executing the code as a Python object, or NULL if an - exception was raised.""" - raise NotImplementedError - @cpython_api([FILE, rffi.CCHARP, rffi.INT_real, PyObject, PyObject, rffi.INT_real], PyObject) def PyRun_FileEx(space, fp, filename, start, globals, locals, closeit): """This is a simplified interface to PyRun_FileExFlags() below, leaving @@ -2587,13 +2503,6 @@ returns.""" raise NotImplementedError - at cpython_api([PyCodeObject, PyObject, PyObject], PyObject) -def PyEval_EvalCode(space, co, globals, locals): - """This is a simplified interface to PyEval_EvalCodeEx(), with just - the code object, and the dictionaries of global and local variables. - The other arguments are set to NULL.""" - raise NotImplementedError - @cpython_api([PyCodeObject, PyObject, PyObject, PyObjectP, rffi.INT_real, PyObjectP, rffi.INT_real, PyObjectP, rffi.INT_real, PyObject], PyObject) def PyEval_EvalCodeEx(space, co, globals, locals, args, argcount, kws, kwcount, defs, defcount, closure): """Evaluate a precompiled code object, given a particular environment for its @@ -2618,12 +2527,6 @@ throw() methods of generator objects.""" raise NotImplementedError - at cpython_api([PyCompilerFlags], rffi.INT_real, error=CANNOT_FAIL) -def PyEval_MergeCompilerFlags(space, cf): - """This function changes the flags of the current evaluation frame, and returns - true on success, false on failure.""" - raise NotImplementedError - @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) def PyWeakref_Check(space, ob): """Return true if ob is either a reference or proxy object. 
diff --git a/pypy/module/cpyext/stubsactive.py b/pypy/module/cpyext/stubsactive.py --- a/pypy/module/cpyext/stubsactive.py +++ b/pypy/module/cpyext/stubsactive.py @@ -38,3 +38,31 @@ def Py_MakePendingCalls(space): return 0 +pending_call = lltype.Ptr(lltype.FuncType([rffi.VOIDP], rffi.INT_real)) + at cpython_api([pending_call, rffi.VOIDP], rffi.INT_real, error=-1) +def Py_AddPendingCall(space, func, arg): + """Post a notification to the Python main thread. If successful, + func will be called with the argument arg at the earliest + convenience. func will be called having the global interpreter + lock held and can thus use the full Python API and can take any + action such as setting object attributes to signal IO completion. + It must return 0 on success, or -1 signalling an exception. The + notification function won't be interrupted to perform another + asynchronous notification recursively, but it can still be + interrupted to switch threads if the global interpreter lock is + released, for example, if it calls back into Python code. + + This function returns 0 on success in which case the notification + has been scheduled. Otherwise, for example if the notification + buffer is full, it returns -1 without setting any exception. + + This function can be called on any thread, be it a Python thread + or some other system thread. If it is a Python thread, it doesn't + matter if it holds the global interpreter lock or not. 
+ """ + return -1 + +thread_func = lltype.Ptr(lltype.FuncType([rffi.VOIDP], lltype.Void)) + at cpython_api([thread_func, rffi.VOIDP], rffi.INT_real, error=-1) +def PyThread_start_new_thread(space, func, arg): + return -1 diff --git a/pypy/module/cpyext/test/test_dictobject.py b/pypy/module/cpyext/test/test_dictobject.py --- a/pypy/module/cpyext/test/test_dictobject.py +++ b/pypy/module/cpyext/test/test_dictobject.py @@ -112,6 +112,37 @@ assert space.eq_w(space.len(w_copy), space.len(w_dict)) assert space.eq_w(w_copy, w_dict) + def test_iterkeys(self, space, api): + w_dict = space.sys.getdict(space) + py_dict = make_ref(space, w_dict) + + ppos = lltype.malloc(Py_ssize_tP.TO, 1, flavor='raw') + pkey = lltype.malloc(PyObjectP.TO, 1, flavor='raw') + pvalue = lltype.malloc(PyObjectP.TO, 1, flavor='raw') + + keys_w = [] + values_w = [] + try: + ppos[0] = 0 + while api.PyDict_Next(w_dict, ppos, pkey, None): + w_key = from_ref(space, pkey[0]) + keys_w.append(w_key) + ppos[0] = 0 + while api.PyDict_Next(w_dict, ppos, None, pvalue): + w_value = from_ref(space, pvalue[0]) + values_w.append(w_value) + finally: + lltype.free(ppos, flavor='raw') + lltype.free(pkey, flavor='raw') + lltype.free(pvalue, flavor='raw') + + api.Py_DecRef(py_dict) # release borrowed references + + assert space.eq_w(space.newlist(keys_w), + space.call_method(w_dict, "keys")) + assert space.eq_w(space.newlist(values_w), + space.call_method(w_dict, "values")) + def test_dictproxy(self, space, api): w_dict = space.sys.get('modules') w_proxy = api.PyDictProxy_New(w_dict) diff --git a/pypy/module/cpyext/test/test_eval.py b/pypy/module/cpyext/test/test_eval.py --- a/pypy/module/cpyext/test/test_eval.py +++ b/pypy/module/cpyext/test/test_eval.py @@ -2,9 +2,10 @@ from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.eval import ( - Py_single_input, Py_file_input, Py_eval_input) + Py_single_input, 
Py_file_input, Py_eval_input, PyCompilerFlags) from pypy.module.cpyext.api import fopen, fclose, fileno, Py_ssize_tP from pypy.interpreter.gateway import interp2app +from pypy.interpreter.astcompiler import consts from pypy.tool.udir import udir import sys, os @@ -63,6 +64,22 @@ assert space.int_w(w_res) == 10 + def test_evalcode(self, space, api): + w_f = space.appexec([], """(): + def f(*args): + assert isinstance(args, tuple) + return len(args) + 8 + return f + """) + + w_t = space.newtuple([space.wrap(1), space.wrap(2)]) + w_globals = space.newdict() + w_locals = space.newdict() + space.setitem(w_locals, space.wrap("args"), w_t) + w_res = api.PyEval_EvalCode(w_f.code, w_globals, w_locals) + + assert space.int_w(w_res) == 10 + def test_run_simple_string(self, space, api): def run(code): buf = rffi.str2charp(code) @@ -96,6 +113,20 @@ assert 42 * 43 == space.unwrap( api.PyObject_GetItem(w_globals, space.wrap("a"))) + def test_run_string_flags(self, space, api): + flags = lltype.malloc(PyCompilerFlags, flavor='raw') + flags.c_cf_flags = rffi.cast(rffi.INT, consts.PyCF_SOURCE_IS_UTF8) + w_globals = space.newdict() + buf = rffi.str2charp("a = u'caf\xc3\xa9'") + try: + api.PyRun_StringFlags(buf, Py_single_input, + w_globals, w_globals, flags) + finally: + rffi.free_charp(buf) + w_a = space.getitem(w_globals, space.wrap("a")) + assert space.unwrap(w_a) == u'caf\xe9' + lltype.free(flags, flavor='raw') + def test_run_file(self, space, api): filepath = udir / "cpyext_test_runfile.py" filepath.write("raise ZeroDivisionError") @@ -256,3 +287,21 @@ print dir(mod) print mod.__dict__ assert mod.f(42) == 47 + + def test_merge_compiler_flags(self): + module = self.import_extension('foo', [ + ("get_flags", "METH_NOARGS", + """ + PyCompilerFlags flags; + flags.cf_flags = 0; + int result = PyEval_MergeCompilerFlags(&flags); + return Py_BuildValue("ii", result, flags.cf_flags); + """), + ]) + assert module.get_flags() == (0, 0) + + ns = {'module':module} + exec """from __future__ 
import division \nif 1: + def nested_flags(): + return module.get_flags()""" in ns + assert ns['nested_flags']() == (1, 0x2000) # CO_FUTURE_DIVISION diff --git a/pypy/module/cpyext/test/test_funcobject.py b/pypy/module/cpyext/test/test_funcobject.py --- a/pypy/module/cpyext/test/test_funcobject.py +++ b/pypy/module/cpyext/test/test_funcobject.py @@ -81,6 +81,14 @@ rffi.free_charp(filename) rffi.free_charp(funcname) + def test_getnumfree(self, space, api): + w_function = space.appexec([], """(): + a = 5 + def method(x): return a, x + return method + """) + assert api.PyCode_GetNumFree(w_function.code) == 1 + def test_classmethod(self, space, api): w_function = space.appexec([], """(): def method(x): return x diff --git a/pypy/module/cpyext/test/test_intobject.py b/pypy/module/cpyext/test/test_intobject.py --- a/pypy/module/cpyext/test/test_intobject.py +++ b/pypy/module/cpyext/test/test_intobject.py @@ -65,4 +65,97 @@ values = module.values() types = [type(x) for x in values] assert types == [int, long, int, int] - + + def test_int_subtype(self): + module = self.import_extension( + 'foo', [ + ("newEnum", "METH_VARARGS", + """ + EnumObject *enumObj; + long intval; + PyObject *name; + + if (!PyArg_ParseTuple(args, "Oi", &name, &intval)) + return NULL; + + PyType_Ready(&Enum_Type); + enumObj = PyObject_New(EnumObject, &Enum_Type); + if (!enumObj) { + return NULL; + } + + enumObj->ob_ival = intval; + Py_INCREF(name); + enumObj->ob_name = name; + + return (PyObject *)enumObj; + """), + ], + prologue=""" + typedef struct + { + PyObject_HEAD + long ob_ival; + PyObject* ob_name; + } EnumObject; + + static void + enum_dealloc(EnumObject *op) + { + Py_DECREF(op->ob_name); + Py_TYPE(op)->tp_free((PyObject *)op); + } + + static PyMemberDef enum_members[] = { + {"name", T_OBJECT, offsetof(EnumObject, ob_name), 0, NULL}, + {NULL} /* Sentinel */ + }; + + PyTypeObject Enum_Type = { + PyObject_HEAD_INIT(0) + /*ob_size*/ 0, + /*tp_name*/ "Enum", + /*tp_basicsize*/ sizeof(EnumObject), 
+ /*tp_itemsize*/ 0, + /*tp_dealloc*/ enum_dealloc, + /*tp_print*/ 0, + /*tp_getattr*/ 0, + /*tp_setattr*/ 0, + /*tp_compare*/ 0, + /*tp_repr*/ 0, + /*tp_as_number*/ 0, + /*tp_as_sequence*/ 0, + /*tp_as_mapping*/ 0, + /*tp_hash*/ 0, + /*tp_call*/ 0, + /*tp_str*/ 0, + /*tp_getattro*/ 0, + /*tp_setattro*/ 0, + /*tp_as_buffer*/ 0, + /*tp_flags*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, + /*tp_doc*/ 0, + /*tp_traverse*/ 0, + /*tp_clear*/ 0, + /*tp_richcompare*/ 0, + /*tp_weaklistoffset*/ 0, + /*tp_iter*/ 0, + /*tp_iternext*/ 0, + /*tp_methods*/ 0, + /*tp_members*/ enum_members, + /*tp_getset*/ 0, + /*tp_base*/ &PyInt_Type, + /*tp_dict*/ 0, + /*tp_descr_get*/ 0, + /*tp_descr_set*/ 0, + /*tp_dictoffset*/ 0, + /*tp_init*/ 0, + /*tp_alloc*/ 0, + /*tp_new*/ 0 + }; + """) + + a = module.newEnum("ULTIMATE_ANSWER", 42) + assert type(a).__name__ == "Enum" + assert isinstance(a, int) + assert a == int(a) == 42 + assert a.name == "ULTIMATE_ANSWER" diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py --- a/pypy/module/cpyext/test/test_unicodeobject.py +++ b/pypy/module/cpyext/test/test_unicodeobject.py @@ -204,8 +204,18 @@ assert api.Py_UNICODE_ISSPACE(unichr(char)) assert not api.Py_UNICODE_ISSPACE(u'a') + assert api.Py_UNICODE_ISALPHA(u'a') + assert not api.Py_UNICODE_ISALPHA(u'0') + assert api.Py_UNICODE_ISALNUM(u'a') + assert api.Py_UNICODE_ISALNUM(u'0') + assert not api.Py_UNICODE_ISALNUM(u'+') + assert api.Py_UNICODE_ISDECIMAL(u'\u0660') assert not api.Py_UNICODE_ISDECIMAL(u'a') + assert api.Py_UNICODE_ISDIGIT(u'9') + assert not api.Py_UNICODE_ISDIGIT(u'@') + assert api.Py_UNICODE_ISNUMERIC(u'9') + assert not api.Py_UNICODE_ISNUMERIC(u'@') for char in [0x0a, 0x0d, 0x1c, 0x1d, 0x1e, 0x85, 0x2028, 0x2029]: assert api.Py_UNICODE_ISLINEBREAK(unichr(char)) @@ -216,6 +226,9 @@ assert not api.Py_UNICODE_ISUPPER(u'a') assert not api.Py_UNICODE_ISLOWER(u'�') assert api.Py_UNICODE_ISUPPER(u'�') + assert not 
api.Py_UNICODE_ISTITLE(u'A') + assert api.Py_UNICODE_ISTITLE( + u'\N{LATIN CAPITAL LETTER L WITH SMALL LETTER J}') def test_TOLOWER(self, space, api): assert api.Py_UNICODE_TOLOWER(u'�') == u'�' @@ -429,3 +442,18 @@ w_char = api.PyUnicode_FromOrdinal(0xFFFF) assert space.unwrap(w_char) == u'\uFFFF' + def test_replace(self, space, api): + w_str = space.wrap(u"abababab") + w_substr = space.wrap(u"a") + w_replstr = space.wrap(u"z") + assert u"zbzbabab" == space.unwrap( + api.PyUnicode_Replace(w_str, w_substr, w_replstr, 2)) + assert u"zbzbzbzb" == space.unwrap( + api.PyUnicode_Replace(w_str, w_substr, w_replstr, -1)) + + def test_tailmatch(self, space, api): + w_str = space.wrap(u"abcdef") + assert api.PyUnicode_Tailmatch(w_str, space.wrap("cde"), 2, 10, 1) == 1 + assert api.PyUnicode_Tailmatch(w_str, space.wrap("cde"), 1, 5, -1) == 1 + self.raises(space, api, TypeError, + api.PyUnicode_Tailmatch, w_str, space.wrap(3), 2, 10, 1) diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -12,7 +12,7 @@ make_typedescr, get_typedescr) from pypy.module.cpyext.stringobject import PyString_Check from pypy.module.sys.interp_encoding import setdefaultencoding -from pypy.objspace.std import unicodeobject, unicodetype +from pypy.objspace.std import unicodeobject, unicodetype, stringtype from pypy.rlib import runicode from pypy.tool.sourcetools import func_renamer import sys @@ -89,6 +89,11 @@ return unicodedb.isspace(ord(ch)) @cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) +def Py_UNICODE_ISALPHA(space, ch): + """Return 1 or 0 depending on whether ch is an alphabetic character.""" + return unicodedb.isalpha(ord(ch)) + + at cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) def Py_UNICODE_ISALNUM(space, ch): """Return 1 or 0 depending on whether ch is an alphanumeric character.""" return unicodedb.isalnum(ord(ch)) @@ -104,6 +109,16 @@ return 
unicodedb.isdecimal(ord(ch)) @cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) +def Py_UNICODE_ISDIGIT(space, ch): + """Return 1 or 0 depending on whether ch is a digit character.""" + return unicodedb.isdigit(ord(ch)) + + at cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) +def Py_UNICODE_ISNUMERIC(space, ch): + """Return 1 or 0 depending on whether ch is a numeric character.""" + return unicodedb.isnumeric(ord(ch)) + + at cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) def Py_UNICODE_ISLOWER(space, ch): """Return 1 or 0 depending on whether ch is a lowercase character.""" return unicodedb.islower(ord(ch)) @@ -113,6 +128,11 @@ """Return 1 or 0 depending on whether ch is an uppercase character.""" return unicodedb.isupper(ord(ch)) + at cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) +def Py_UNICODE_ISTITLE(space, ch): + """Return 1 or 0 depending on whether ch is a titlecase character.""" + return unicodedb.istitle(ord(ch)) + @cpython_api([Py_UNICODE], Py_UNICODE, error=CANNOT_FAIL) def Py_UNICODE_TOLOWER(space, ch): """Return the character ch converted to lower case.""" @@ -155,6 +175,11 @@ except KeyError: return -1.0 + at cpython_api([], Py_UNICODE, error=CANNOT_FAIL) +def PyUnicode_GetMax(space): + """Get the maximum ordinal for a Unicode character.""" + return unichr(runicode.MAXUNICODE) + @cpython_api([PyObject], rffi.CCHARP, error=CANNOT_FAIL) def PyUnicode_AS_DATA(space, ref): """Return a pointer to the internal buffer of the object. 
o has to be a @@ -548,6 +573,28 @@ @cpython_api([PyObject, PyObject], PyObject) def PyUnicode_Join(space, w_sep, w_seq): - """Join a sequence of strings using the given separator and return the resulting - Unicode string.""" + """Join a sequence of strings using the given separator and return + the resulting Unicode string.""" return space.call_method(w_sep, 'join', w_seq) + + at cpython_api([PyObject, PyObject, PyObject, Py_ssize_t], PyObject) +def PyUnicode_Replace(space, w_str, w_substr, w_replstr, maxcount): + """Replace at most maxcount occurrences of substr in str with replstr and + return the resulting Unicode object. maxcount == -1 means replace all + occurrences.""" + return space.call_method(w_str, "replace", w_substr, w_replstr, + space.wrap(maxcount)) + + at cpython_api([PyObject, PyObject, Py_ssize_t, Py_ssize_t, rffi.INT_real], + rffi.INT_real, error=-1) +def PyUnicode_Tailmatch(space, w_str, w_substr, start, end, direction): + """Return 1 if substr matches str[start:end] at the given tail end + (direction == -1 means to do a prefix match, direction == 1 a + suffix match), 0 otherwise. 
Return -1 if an error occurred.""" + str = space.unicode_w(w_str) + substr = space.unicode_w(w_substr) + if rffi.cast(lltype.Signed, direction) >= 0: + return stringtype.stringstartswith(str, substr, start, end) + else: + return stringtype.stringendswith(str, substr, start, end) + diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -1,10 +1,11 @@ from pypy.module.imp import importing from pypy.module._file.interp_file import W_File from pypy.rlib import streamio +from pypy.rlib.streamio import StreamErrors from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.module import Module from pypy.interpreter.gateway import unwrap_spec -from pypy.module._file.interp_stream import StreamErrors, wrap_streamerror +from pypy.interpreter.streamutil import wrap_streamerror def get_suffixes(space): diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -357,7 +357,7 @@ def test_cannot_write_pyc(self): import sys, os - p = os.path.join(sys.path[-1], 'readonly') + p = os.path.join(sys.path[0], 'readonly') try: os.chmod(p, 0555) except: diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -67,10 +67,12 @@ ("arccos", "arccos"), ("arcsin", "arcsin"), ("arctan", "arctan"), + ("arccosh", "arccosh"), ("arcsinh", "arcsinh"), ("arctanh", "arctanh"), ("copysign", "copysign"), ("cos", "cos"), + ("cosh", "cosh"), ("divide", "divide"), ("true_divide", "true_divide"), ("equal", "equal"), @@ -90,9 +92,11 @@ ("reciprocal", "reciprocal"), ("sign", "sign"), ("sin", "sin"), + ("sinh", "sinh"), ("subtract", "subtract"), ('sqrt', 'sqrt'), ("tan", "tan"), + ("tanh", "tanh"), ('bitwise_and', 'bitwise_and'), ('bitwise_or', 'bitwise_or'), 
('bitwise_xor', 'bitwise_xor'), diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -1,6 +1,6 @@ from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.error import operationerrfmt -from pypy.interpreter.gateway import interp2app +from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef from pypy.objspace.std.floattype import float_typedef from pypy.objspace.std.inttype import int_typedef @@ -29,7 +29,6 @@ def convert_to(self, dtype): return dtype.box(self.value) - class W_GenericBox(Wrappable): _attrs_ = () @@ -39,10 +38,10 @@ ) def descr_str(self, space): - return self.descr_repr(space) + return space.wrap(self.get_dtype(space).itemtype.str_format(self)) - def descr_repr(self, space): - return space.wrap(self.get_dtype(space).itemtype.str_format(self)) + def descr_format(self, space, w_spec): + return space.format(self.item(space), w_spec) def descr_int(self, space): box = self.convert_to(W_LongBox.get_dtype(space)) @@ -187,6 +186,10 @@ descr__new__, get_dtype = new_dtype_getter("float64") + at unwrap_spec(self=W_GenericBox) +def descr_index(space, self): + return space.index(self.item(space)) + W_GenericBox.typedef = TypeDef("generic", __module__ = "numpypy", @@ -194,7 +197,8 @@ __new__ = interp2app(W_GenericBox.descr__new__.im_func), __str__ = interp2app(W_GenericBox.descr_str), - __repr__ = interp2app(W_GenericBox.descr_repr), + __repr__ = interp2app(W_GenericBox.descr_str), + __format__ = interp2app(W_GenericBox.descr_format), __int__ = interp2app(W_GenericBox.descr_int), __float__ = interp2app(W_GenericBox.descr_float), __nonzero__ = interp2app(W_GenericBox.descr_nonzero), @@ -245,6 +249,8 @@ W_BoolBox.typedef = TypeDef("bool_", W_GenericBox.typedef, __module__ = "numpypy", __new__ = interp2app(W_BoolBox.descr__new__.im_func), + + __index__ = 
interp2app(descr_index), ) W_NumberBox.typedef = TypeDef("number", W_GenericBox.typedef, @@ -266,36 +272,43 @@ W_Int8Box.typedef = TypeDef("int8", W_SignedIntegerBox.typedef, __module__ = "numpypy", __new__ = interp2app(W_Int8Box.descr__new__.im_func), + __index__ = interp2app(descr_index), ) W_UInt8Box.typedef = TypeDef("uint8", W_UnsignedIntegerBox.typedef, __module__ = "numpypy", __new__ = interp2app(W_UInt8Box.descr__new__.im_func), + __index__ = interp2app(descr_index), ) W_Int16Box.typedef = TypeDef("int16", W_SignedIntegerBox.typedef, __module__ = "numpypy", __new__ = interp2app(W_Int16Box.descr__new__.im_func), + __index__ = interp2app(descr_index), ) W_UInt16Box.typedef = TypeDef("uint16", W_UnsignedIntegerBox.typedef, __module__ = "numpypy", __new__ = interp2app(W_UInt16Box.descr__new__.im_func), + __index__ = interp2app(descr_index), ) W_Int32Box.typedef = TypeDef("int32", (W_SignedIntegerBox.typedef,) + MIXIN_32, __module__ = "numpypy", __new__ = interp2app(W_Int32Box.descr__new__.im_func), + __index__ = interp2app(descr_index), ) W_UInt32Box.typedef = TypeDef("uint32", W_UnsignedIntegerBox.typedef, __module__ = "numpypy", __new__ = interp2app(W_UInt32Box.descr__new__.im_func), + __index__ = interp2app(descr_index), ) W_Int64Box.typedef = TypeDef("int64", (W_SignedIntegerBox.typedef,) + MIXIN_64, __module__ = "numpypy", __new__ = interp2app(W_Int64Box.descr__new__.im_func), + __index__ = interp2app(descr_index), ) if LONG_BIT == 32: @@ -308,6 +321,7 @@ W_UInt64Box.typedef = TypeDef("uint64", W_UnsignedIntegerBox.typedef, __module__ = "numpypy", __new__ = interp2app(W_UInt64Box.descr__new__.im_func), + __index__ = interp2app(descr_index), ) W_InexactBox.typedef = TypeDef("inexact", W_NumberBox.typedef, diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -779,8 +779,6 @@ """ Intermediate class for performing 
binary operations. """ - _immutable_fields_ = ['left', 'right'] - def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, left, right): VirtualArray.__init__(self, name, shape, res_dtype) self.ufunc = ufunc @@ -856,8 +854,6 @@ self.right.create_sig(), done_func) class AxisReduce(Call2): - _immutable_fields_ = ['left', 'right'] - def __init__(self, ufunc, name, identity, shape, dtype, left, right, dim): Call2.__init__(self, ufunc, name, shape, dtype, dtype, left, right) diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -3,7 +3,7 @@ from pypy.rpython.lltypesystem import lltype, rffi from pypy.module.micronumpy import interp_dtype from pypy.objspace.std.strutil import strip_spaces - +from pypy.rlib import jit FLOAT_SIZE = rffi.sizeof(lltype.Float) @@ -72,11 +72,20 @@ "string is smaller than requested size")) a = W_NDimArray(count, [count], dtype=dtype) - for i in range(count): + fromstring_loop(a, count, dtype, itemsize, s) + return space.wrap(a) + +fromstring_driver = jit.JitDriver(greens=[], reds=['count', 'i', 'itemsize', + 'dtype', 's', 'a']) + +def fromstring_loop(a, count, dtype, itemsize, s): + i = 0 + while i < count: + fromstring_driver.jit_merge_point(a=a, count=count, dtype=dtype, + itemsize=itemsize, s=s, i=i) val = dtype.itemtype.runpack_str(s[i*itemsize:i*itemsize + itemsize]) a.dtype.setitem(a.storage, i, val) - - return space.wrap(a) + i += 1 @unwrap_spec(s=str, count=int, sep=str) def fromstring(space, s, w_dtype=None, count=-1, sep=''): diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -435,7 +435,11 @@ ("arcsin", "arcsin", 1, {"promote_to_float": True}), ("arccos", "arccos", 1, {"promote_to_float": True}), ("arctan", "arctan", 1, {"promote_to_float": True}), + 
("sinh", "sinh", 1, {"promote_to_float": True}), + ("cosh", "cosh", 1, {"promote_to_float": True}), + ("tanh", "tanh", 1, {"promote_to_float": True}), ("arcsinh", "arcsinh", 1, {"promote_to_float": True}), + ("arccosh", "arccosh", 1, {"promote_to_float": True}), ("arctanh", "arctanh", 1, {"promote_to_float": True}), ]: self.add_ufunc(space, *ufunc_def) diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -371,6 +371,8 @@ assert type(a[1]) is numpy.float64 assert numpy.dtype(float).type is numpy.float64 + assert "{:3f}".format(numpy.float64(3)) == "3.000000" + assert numpy.float64(2.0) == 2.0 assert numpy.float64('23.4') == numpy.float64(23.4) raises(ValueError, numpy.float64, '23.2df') @@ -387,9 +389,9 @@ assert b.m() == 12 def test_long_as_index(self): - skip("waiting for removal of multimethods of __index__") - from _numpypy import int_ + from _numpypy import int_, float64 assert (1, 2, 3)[int_(1)] == 2 + raises(TypeError, lambda: (1, 2, 3)[float64(1)]) def test_int(self): import sys diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -310,6 +310,33 @@ b = arctan(a) assert math.isnan(b[0]) + def test_sinh(self): + import math + from _numpypy import array, sinh + + a = array([-1, 0, 1, float('inf'), float('-inf')]) + b = sinh(a) + for i in range(len(a)): + assert b[i] == math.sinh(a[i]) + + def test_cosh(self): + import math + from _numpypy import array, cosh + + a = array([-1, 0, 1, float('inf'), float('-inf')]) + b = cosh(a) + for i in range(len(a)): + assert b[i] == math.cosh(a[i]) + + def test_tanh(self): + import math + from _numpypy import array, tanh + + a = array([-1, 0, 1, float('inf'), float('-inf')]) + b = tanh(a) + for i in range(len(a)): + assert b[i] == 
math.tanh(a[i]) + def test_arcsinh(self): import math from _numpypy import arcsinh @@ -318,6 +345,15 @@ assert math.asinh(v) == arcsinh(v) assert math.isnan(arcsinh(float("nan"))) + def test_arccosh(self): + import math + from _numpypy import arccosh + + for v in [1.0, 1.1, 2]: + assert math.acosh(v) == arccosh(v) + for v in [-1.0, 0, .99]: + assert math.isnan(arccosh(v)) + def test_arctanh(self): import math from _numpypy import arctanh diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -479,38 +479,3 @@ 'int_sub': 3, 'jump': 1, 'setinteriorfield_raw': 1}) - - -class TestNumpyOld(LLJitMixin): - def setup_class(cls): - py.test.skip("old") - from pypy.module.micronumpy.compile import FakeSpace - from pypy.module.micronumpy.interp_dtype import get_dtype_cache - - cls.space = FakeSpace() - cls.float64_dtype = get_dtype_cache(cls.space).w_float64dtype - - def test_int32_sum(self): - py.test.skip("pypy/jit/backend/llimpl.py needs to be changed to " - "deal correctly with int dtypes for this test to " - "work. 
skip for now until someone feels up to the task") - space = self.space - float64_dtype = self.float64_dtype - int32_dtype = self.int32_dtype - - def f(n): - if NonConstant(False): - dtype = float64_dtype - else: - dtype = int32_dtype - ar = W_NDimArray(n, [n], dtype=dtype) - i = 0 - while i < n: - ar.get_concrete().setitem(i, int32_dtype.box(7)) - i += 1 - v = ar.descr_add(space, ar).descr_sum(space) - assert isinstance(v, IntObject) - return v.intval - - result = self.meta_interp(f, [5], listops=True, backendopt=True) - assert result == f(5) diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -489,10 +489,28 @@ return math.atan(v) @simple_unary_op + def sinh(self, v): + return math.sinh(v) + + @simple_unary_op + def cosh(self, v): + return math.cosh(v) + + @simple_unary_op + def tanh(self, v): + return math.tanh(v) + + @simple_unary_op def arcsinh(self, v): return math.asinh(v) @simple_unary_op + def arccosh(self, v): + if v < 1.0: + return rfloat.NAN + return math.acosh(v) + + @simple_unary_op def arctanh(self, v): if v == 1.0 or v == -1.0: return math.copysign(rfloat.INFINITY, v) diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -60,6 +60,9 @@ stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = pipe.communicate() + if getattr(pipe, 'returncode', 0) < 0: + raise IOError("subprocess was killed by signal %d" % ( + pipe.returncode,)) if stderr.startswith('SKIP:'): py.test.skip(stderr) if stderr.startswith('debug_alloc.h:'): # lldebug builds diff --git a/pypy/module/pypyjit/test_pypy_c/test_alloc.py b/pypy/module/pypyjit/test_pypy_c/test_alloc.py new file mode 100644 --- /dev/null +++ b/pypy/module/pypyjit/test_pypy_c/test_alloc.py @@ -0,0 +1,26 @@ +import py, sys +from 
pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC + +class TestAlloc(BaseTestPyPyC): + + SIZES = dict.fromkeys([2 ** n for n in range(26)] + # up to 32MB + [2 ** n - 1 for n in range(26)]) + + def test_newstr_constant_size(self): + for size in TestAlloc.SIZES: + yield self.newstr_constant_size, size + + def newstr_constant_size(self, size): + src = """if 1: + N = %(size)d + part_a = 'a' * N + part_b = 'b' * N + for i in xrange(20): + ao = '%%s%%s' %% (part_a, part_b) + def main(): + return 42 +""" % {'size': size} + log = self.run(src, [], threshold=10) + assert log.result == 42 + loop, = log.loops_by_filename(self.filepath) + # assert did not crash diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -201,3 +201,28 @@ loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("compare", "") # optimized away + def test_super(self): + def main(): + class A(object): + def m(self, x): + return x + 1 + class B(A): + def m(self, x): + return super(B, self).m(x) + i = 0 + while i < 300: + i = B().m(i) + return i + + log = self.run(main, []) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i78 = int_lt(i72, 300) + guard_true(i78, descr=...) + guard_not_invalidated(descr=...) + i79 = force_token() + i80 = force_token() + i81 = int_add(i72, 1) + --TICK-- + jump(..., descr=...) 
+ """) diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_fastpath.py b/pypy/module/test_lib_pypy/ctypes_tests/test_fastpath.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_fastpath.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_fastpath.py @@ -97,6 +97,16 @@ tf_b.errcheck = errcheck assert tf_b(-126) == 'hello' + def test_array_to_ptr(self): + ARRAY = c_int * 8 + func = dll._testfunc_ai8 + func.restype = POINTER(c_int) + func.argtypes = [ARRAY] + array = ARRAY(1, 2, 3, 4, 5, 6, 7, 8) + ptr = func(array) + assert ptr[0] == 1 + assert ptr[7] == 8 + class TestFallbackToSlowpath(BaseCTypesTestChecker): diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_prototypes.py b/pypy/module/test_lib_pypy/ctypes_tests/test_prototypes.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_prototypes.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_prototypes.py @@ -246,6 +246,14 @@ def func(): pass CFUNCTYPE(None, c_int * 3)(func) + def test_array_to_ptr_wrongtype(self): + ARRAY = c_byte * 8 + func = testdll._testfunc_ai8 + func.restype = POINTER(c_int) + func.argtypes = [c_int * 8] + array = ARRAY(1, 2, 3, 4, 5, 6, 7, 8) + py.test.raises(ArgumentError, "func(array)") + ################################################################ if __name__ == '__main__': diff --git a/pypy/module/test_lib_pypy/test_collections.py b/pypy/module/test_lib_pypy/test_collections.py --- a/pypy/module/test_lib_pypy/test_collections.py +++ b/pypy/module/test_lib_pypy/test_collections.py @@ -6,7 +6,7 @@ from pypy.conftest import gettestobjspace -class AppTestcStringIO: +class AppTestCollections: def test_copy(self): import _collections def f(): diff --git a/pypy/module/test_lib_pypy/test_datetime.py b/pypy/module/test_lib_pypy/test_datetime.py --- a/pypy/module/test_lib_pypy/test_datetime.py +++ b/pypy/module/test_lib_pypy/test_datetime.py @@ -3,7 +3,7 @@ import py import time -import datetime +from lib_pypy import datetime import copy import os @@ -43,4 +43,4 @@ dt = 
datetime.datetime.utcnow() assert type(dt.microsecond) is int - copy.copy(dt) \ No newline at end of file + copy.copy(dt) diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -142,6 +142,17 @@ else: return result + def popitem(self, w_dict): + # this is a bad implementation: if we call popitem() repeatedly, + # it ends up taking n**2 time, because the next() calls below + # will take longer and longer. But all interesting strategies + # provide a better one. + space = self.space + iterator = self.iter(w_dict) + w_key, w_value = iterator.next() + self.delitem(w_dict, w_key) + return (w_key, w_value) + def clear(self, w_dict): strategy = self.space.fromcache(EmptyDictStrategy) storage = strategy.get_empty_storage() diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py --- a/pypy/objspace/std/dictproxyobject.py +++ b/pypy/objspace/std/dictproxyobject.py @@ -3,7 +3,7 @@ from pypy.objspace.std.dictmultiobject import W_DictMultiObject, IteratorImplementation from pypy.objspace.std.dictmultiobject import DictStrategy from pypy.objspace.std.typeobject import unwrap_cell -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, operationerrfmt from pypy.rlib import rerased @@ -44,7 +44,8 @@ raise if not w_type.is_cpytype(): raise - # xxx obscure workaround: allow cpyext to write to type->tp_dict. + # xxx obscure workaround: allow cpyext to write to type->tp_dict + # xxx even in the case of a builtin type. # xxx like CPython, we assume that this is only done early after # xxx the type is created, and we don't invalidate any cache. 
w_type.dict_w[key] = w_value @@ -86,8 +87,14 @@ for (key, w_value) in self.unerase(w_dict.dstorage).dict_w.iteritems()] def clear(self, w_dict): - self.unerase(w_dict.dstorage).dict_w.clear() - self.unerase(w_dict.dstorage).mutated(None) + space = self.space + w_type = self.unerase(w_dict.dstorage) + if (not space.config.objspace.std.mutable_builtintypes + and not w_type.is_heaptype()): + msg = "can't clear dictionary of type '%s'" + raise operationerrfmt(space.w_TypeError, msg, w_type.name) + w_type.dict_w.clear() + w_type.mutated(None) class DictProxyIteratorImplementation(IteratorImplementation): def __init__(self, space, strategy, dictimplementation): diff --git a/pypy/objspace/std/test/test_dictproxy.py b/pypy/objspace/std/test/test_dictproxy.py --- a/pypy/objspace/std/test/test_dictproxy.py +++ b/pypy/objspace/std/test/test_dictproxy.py @@ -22,6 +22,9 @@ assert NotEmpty.string == 1 raises(TypeError, 'NotEmpty.__dict__.setdefault(15, 1)') + key, value = NotEmpty.__dict__.popitem() + assert (key == 'a' and value == 1) or (key == 'b' and value == 4) + def test_dictproxyeq(self): class a(object): pass @@ -43,6 +46,11 @@ assert s1 == s2 assert s1.startswith('{') and s1.endswith('}') + def test_immutable_dict_on_builtin_type(self): + raises(TypeError, "int.__dict__['a'] = 1") + raises(TypeError, int.__dict__.popitem) + raises(TypeError, int.__dict__.clear) + class AppTestUserObjectMethodCache(AppTestUserObject): def setup_class(cls): cls.space = gettestobjspace( diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py --- a/pypy/objspace/std/test/test_typeobject.py +++ b/pypy/objspace/std/test/test_typeobject.py @@ -993,7 +993,9 @@ raises(TypeError, setattr, list, 'append', 42) raises(TypeError, setattr, list, 'foobar', 42) raises(TypeError, delattr, dict, 'keys') - + raises(TypeError, 'int.__dict__["a"] = 1') + raises(TypeError, 'int.__dict__.clear()') + def test_nontype_in_mro(self): class OldStyle: pass diff --git 
a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -103,6 +103,7 @@ 'terminator', '_version_tag?', 'name?', + 'mro_w?[*]', ] # for config.objspace.std.getattributeshortcut @@ -345,9 +346,9 @@ return w_self._lookup_where(name) + @unroll_safe def lookup_starting_at(w_self, w_starttype, name): space = w_self.space - # XXX Optimize this with method cache look = False for w_class in w_self.mro_w: if w_class is w_starttype: diff --git a/pypy/rlib/debug.py b/pypy/rlib/debug.py --- a/pypy/rlib/debug.py +++ b/pypy/rlib/debug.py @@ -19,14 +19,24 @@ hop.exception_cannot_occur() hop.genop('debug_assert', vlist) -def fatalerror(msg, traceback=False): +def fatalerror(msg): + # print the RPython traceback and abort with a fatal error from pypy.rpython.lltypesystem import lltype from pypy.rpython.lltypesystem.lloperation import llop - if traceback: - llop.debug_print_traceback(lltype.Void) + llop.debug_print_traceback(lltype.Void) llop.debug_fatalerror(lltype.Void, msg) fatalerror._dont_inline_ = True -fatalerror._annspecialcase_ = 'specialize:arg(1)' +fatalerror._jit_look_inside_ = False +fatalerror._annenforceargs_ = [str] + +def fatalerror_notb(msg): + # a variant of fatalerror() that doesn't print the RPython traceback + from pypy.rpython.lltypesystem import lltype + from pypy.rpython.lltypesystem.lloperation import llop + llop.debug_fatalerror(lltype.Void, msg) +fatalerror_notb._dont_inline_ = True +fatalerror_notb._jit_look_inside_ = False +fatalerror_notb._annenforceargs_ = [str] class DebugLog(list): diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -450,6 +450,7 @@ assert v in self.reds self._alllivevars = dict.fromkeys( [name for name in self.greens + self.reds if '.' 
not in name]) + self._heuristic_order = {} # check if 'reds' and 'greens' are ordered self._make_extregistryentries() self.get_jitcell_at = get_jitcell_at self.set_jitcell_at = set_jitcell_at @@ -461,13 +462,61 @@ def _freeze_(self): return True + def _check_arguments(self, livevars): + assert dict.fromkeys(livevars) == self._alllivevars + # check heuristically that 'reds' and 'greens' are ordered as + # the JIT will need them to be: first INTs, then REFs, then + # FLOATs. + if len(self._heuristic_order) < len(livevars): + from pypy.rlib.rarithmetic import (r_singlefloat, r_longlong, + r_ulonglong, r_uint) + added = False + for var, value in livevars.items(): + if var not in self._heuristic_order: + if (r_ulonglong is not r_uint and + isinstance(value, (r_longlong, r_ulonglong))): + assert 0, ("should not pass a r_longlong argument for " + "now, because on 32-bit machines it needs " + "to be ordered as a FLOAT but on 64-bit " + "machines as an INT") + elif isinstance(value, (int, long, r_singlefloat)): + kind = '1:INT' + elif isinstance(value, float): + kind = '3:FLOAT' + elif isinstance(value, (str, unicode)) and len(value) != 1: + kind = '2:REF' + elif isinstance(value, (list, dict)): + kind = '2:REF' + elif (hasattr(value, '__class__') + and value.__class__.__module__ != '__builtin__'): + if hasattr(value, '_freeze_'): + continue # value._freeze_() is better not called + elif getattr(value, '_alloc_flavor_', 'gc') == 'gc': + kind = '2:REF' + else: + kind = '1:INT' + else: + continue + self._heuristic_order[var] = kind + added = True + if added: + for color in ('reds', 'greens'): + lst = getattr(self, color) + allkinds = [self._heuristic_order.get(name, '?') + for name in lst] + kinds = [k for k in allkinds if k != '?'] + assert kinds == sorted(kinds), ( + "bad order of %s variables in the jitdriver: " + "must be INTs, REFs, FLOATs; got %r" % + (color, allkinds)) + def jit_merge_point(_self, **livevars): # special-cased by ExtRegistryEntry - assert 
dict.fromkeys(livevars) == _self._alllivevars + _self._check_arguments(livevars) def can_enter_jit(_self, **livevars): # special-cased by ExtRegistryEntry - assert dict.fromkeys(livevars) == _self._alllivevars + _self._check_arguments(livevars) def loop_header(self): # special-cased by ExtRegistryEntry diff --git a/pypy/rlib/objectmodel.py b/pypy/rlib/objectmodel.py --- a/pypy/rlib/objectmodel.py +++ b/pypy/rlib/objectmodel.py @@ -23,9 +23,11 @@ class _Specialize(object): def memo(self): - """ Specialize functions based on argument values. All arguments has - to be constant at the compile time. The whole function call is replaced - by a call result then. + """ Specialize the function based on argument values. All arguments + have to be either constants or PBCs (i.e. instances of classes with a + _freeze_ method returning True). The function call is replaced by + just its result, or in case several PBCs are used, by some fast + look-up of the result. """ def decorated_func(func): func._annspecialcase_ = 'specialize:memo' @@ -33,8 +35,8 @@ return decorated_func def arg(self, *args): - """ Specialize function based on values of given positions of arguments. - They must be compile-time constants in order to work. + """ Specialize the function based on the values of given positions + of arguments. They must be compile-time constants in order to work. There will be a copy of provided function for each combination of given arguments on positions in args (that can lead to @@ -82,8 +84,7 @@ return decorated_func def ll_and_arg(self, *args): - """ This is like ll(), but instead of specializing on all arguments, - specializes on only the arguments at the given positions + """ This is like ll(), and additionally like arg(...). 
""" def decorated_func(func): func._annspecialcase_ = 'specialize:ll_and_arg' + self._wrap(args) diff --git a/pypy/rlib/test/test_jit.py b/pypy/rlib/test/test_jit.py --- a/pypy/rlib/test/test_jit.py +++ b/pypy/rlib/test/test_jit.py @@ -2,6 +2,7 @@ from pypy.conftest import option from pypy.rlib.jit import hint, we_are_jitted, JitDriver, elidable_promote from pypy.rlib.jit import JitHintError, oopspec, isconstant +from pypy.rlib.rarithmetic import r_uint from pypy.translator.translator import TranslationContext, graphof from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin from pypy.rpython.lltypesystem import lltype @@ -146,6 +147,43 @@ res = self.interpret(f, [-234]) assert res == 1 + def test_argument_order_ok(self): + myjitdriver = JitDriver(greens=['i1', 'r1', 'f1'], reds=[]) + class A(object): + pass + myjitdriver.jit_merge_point(i1=42, r1=A(), f1=3.5) + # assert did not raise + + def test_argument_order_wrong(self): + myjitdriver = JitDriver(greens=['r1', 'i1', 'f1'], reds=[]) + class A(object): + pass + e = raises(AssertionError, + myjitdriver.jit_merge_point, i1=42, r1=A(), f1=3.5) + + def test_argument_order_more_precision_later(self): + myjitdriver = JitDriver(greens=['r1', 'i1', 'r2', 'f1'], reds=[]) + class A(object): + pass + myjitdriver.jit_merge_point(i1=42, r1=None, r2=None, f1=3.5) + e = raises(AssertionError, + myjitdriver.jit_merge_point, i1=42, r1=A(), r2=None, f1=3.5) + assert "got ['2:REF', '1:INT', '?', '3:FLOAT']" in repr(e.value) + + def test_argument_order_more_precision_later_2(self): + myjitdriver = JitDriver(greens=['r1', 'i1', 'r2', 'f1'], reds=[]) + class A(object): + pass + myjitdriver.jit_merge_point(i1=42, r1=None, r2=A(), f1=3.5) + e = raises(AssertionError, + myjitdriver.jit_merge_point, i1=42, r1=A(), r2=None, f1=3.5) + assert "got ['2:REF', '1:INT', '2:REF', '3:FLOAT']" in repr(e.value) + + def test_argument_order_accept_r_uint(self): + # this used to fail on 64-bit, because r_uint == r_ulonglong + 
myjitdriver = JitDriver(greens=['i1'], reds=[]) + myjitdriver.jit_merge_point(i1=r_uint(42)) + class TestJITLLtype(BaseTestJIT, LLRtypeMixin): pass diff --git a/pypy/rpython/lltypesystem/rlist.py b/pypy/rpython/lltypesystem/rlist.py --- a/pypy/rpython/lltypesystem/rlist.py +++ b/pypy/rpython/lltypesystem/rlist.py @@ -392,7 +392,11 @@ ('list', r_list.lowleveltype), ('index', Signed))) self.ll_listiter = ll_listiter - self.ll_listnext = ll_listnext + if (isinstance(r_list, FixedSizeListRepr) + and not r_list.listitem.mutated): + self.ll_listnext = ll_listnext_foldable + else: + self.ll_listnext = ll_listnext self.ll_getnextindex = ll_getnextindex def ll_listiter(ITERPTR, lst): @@ -409,5 +413,14 @@ iter.index = index + 1 # cannot overflow because index < l.length return l.ll_getitem_fast(index) +def ll_listnext_foldable(iter): + from pypy.rpython.rlist import ll_getitem_foldable_nonneg + l = iter.list + index = iter.index + if index >= l.ll_length(): + raise StopIteration + iter.index = index + 1 # cannot overflow because index < l.length + return ll_getitem_foldable_nonneg(l, index) + def ll_getnextindex(iter): return iter.index diff --git a/pypy/rpython/memory/gc/generation.py b/pypy/rpython/memory/gc/generation.py --- a/pypy/rpython/memory/gc/generation.py +++ b/pypy/rpython/memory/gc/generation.py @@ -41,8 +41,8 @@ # the following values override the default arguments of __init__ when # translating to a real backend. - TRANSLATION_PARAMS = {'space_size': 8*1024*1024, # XXX adjust - 'nursery_size': 896*1024, + TRANSLATION_PARAMS = {'space_size': 8*1024*1024, # 8 MB + 'nursery_size': 3*1024*1024, # 3 MB 'min_nursery_size': 48*1024, 'auto_nursery_size': True} @@ -92,8 +92,9 @@ # the GC is fully setup now. The rest can make use of it. if self.auto_nursery_size: newsize = nursery_size_from_env() - if newsize <= 0: - newsize = env.estimate_best_nursery_size() + #if newsize <= 0: + # ---disabled--- just use the default value. 
+ # newsize = env.estimate_best_nursery_size() if newsize > 0: self.set_nursery_size(newsize) diff --git a/pypy/rpython/memory/gc/minimark.py b/pypy/rpython/memory/gc/minimark.py --- a/pypy/rpython/memory/gc/minimark.py +++ b/pypy/rpython/memory/gc/minimark.py @@ -608,6 +608,11 @@ specified as 0 if the object is not varsized. The returned object is fully initialized and zero-filled.""" # + # Here we really need a valid 'typeid', not 0 (as the JIT might + # try to send us if there is still a bug). + ll_assert(bool(self.combine(typeid, 0)), + "external_malloc: typeid == 0") + # # Compute the total size, carefully checking for overflows. size_gc_header = self.gcheaderbuilder.size_gc_header nonvarsize = size_gc_header + self.fixed_size(typeid) diff --git a/pypy/rpython/test/test_rlist.py b/pypy/rpython/test/test_rlist.py --- a/pypy/rpython/test/test_rlist.py +++ b/pypy/rpython/test/test_rlist.py @@ -8,6 +8,7 @@ from pypy.rpython.rlist import * from pypy.rpython.lltypesystem.rlist import ListRepr, FixedSizeListRepr, ll_newlist, ll_fixed_newlist from pypy.rpython.lltypesystem import rlist as ll_rlist +from pypy.rpython.llinterp import LLException from pypy.rpython.ootypesystem import rlist as oo_rlist from pypy.rpython.rint import signed_repr from pypy.objspace.flow.model import Constant, Variable @@ -1477,6 +1478,80 @@ assert func1.oopspec == 'list.getitem_foldable(l, index)' assert not hasattr(func2, 'oopspec') + def test_iterate_over_immutable_list(self): + from pypy.rpython import rlist + class MyException(Exception): + pass + lst = list('abcdef') + def dummyfn(): + total = 0 + for c in lst: + total += ord(c) + return total + # + prev = rlist.ll_getitem_foldable_nonneg + try: + def seen_ok(l, index): + if index == 5: + raise KeyError # expected case + return prev(l, index) + rlist.ll_getitem_foldable_nonneg = seen_ok + e = raises(LLException, self.interpret, dummyfn, []) + assert 'KeyError' in str(e.value) + finally: + rlist.ll_getitem_foldable_nonneg = prev + + def 
test_iterate_over_immutable_list_quasiimmut_attr(self): + from pypy.rpython import rlist + class MyException(Exception): + pass + class Foo: + _immutable_fields_ = ['lst?[*]'] + lst = list('abcdef') + foo = Foo() + def dummyfn(): + total = 0 + for c in foo.lst: + total += ord(c) + return total + # + prev = rlist.ll_getitem_foldable_nonneg + try: + def seen_ok(l, index): + if index == 5: + raise KeyError # expected case + return prev(l, index) + rlist.ll_getitem_foldable_nonneg = seen_ok + e = raises(LLException, self.interpret, dummyfn, []) + assert 'KeyError' in str(e.value) + finally: + rlist.ll_getitem_foldable_nonneg = prev + + def test_iterate_over_mutable_list(self): + from pypy.rpython import rlist + class MyException(Exception): + pass + lst = list('abcdef') + def dummyfn(): + total = 0 + for c in lst: + total += ord(c) + lst[0] = 'x' + return total + # + prev = rlist.ll_getitem_foldable_nonneg + try: + def seen_ok(l, index): + if index == 5: + raise KeyError # expected case + return prev(l, index) + rlist.ll_getitem_foldable_nonneg = seen_ok + res = self.interpret(dummyfn, []) + assert res == sum(map(ord, 'abcdef')) + finally: + rlist.ll_getitem_foldable_nonneg = prev + + class TestOOtype(BaseTestRlist, OORtypeMixin): rlist = oo_rlist type_system = 'ootype' diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -387,7 +387,7 @@ m = re.search('guard \d+', comm) name = m.group(0) else: - name = comm[2:comm.find(':')-1] + name = " ".join(comm[2:].split(" ", 2)[:2]) if name in dumps: bname, start_ofs, dump = dumps[name] loop.force_asm = (lambda dump=dump, start_ofs=start_ofs, diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -82,6 +82,9 @@ for file in ['LICENSE', 'README']: shutil.copy(str(basedir.join(file)), str(pypydir)) pypydir.ensure('include', dir=True) + 
if sys.platform == 'win32': + shutil.copyfile(str(pypy_c.dirpath().join("libpypy-c.lib")), + str(pypydir.join('include/python27.lib'))) # we want to put there all *.h and *.inl from trunk/include # and from pypy/_interfaces includedir = basedir.join('include') diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -472,7 +472,7 @@ IGNORE_OPS_WITH_PREFIXES = dict.fromkeys([ 'cmp', 'test', 'set', 'sahf', 'lahf', 'cld', 'std', - 'rep', 'movs', 'lods', 'stos', 'scas', 'cwde', 'prefetch', + 'rep', 'movs', 'movhp', 'lods', 'stos', 'scas', 'cwde', 'prefetch', # floating-point operations cannot produce GC pointers 'f', 'cvt', 'ucomi', 'comi', 'subs', 'subp' , 'adds', 'addp', 'xorp', @@ -484,7 +484,7 @@ 'shl', 'shr', 'sal', 'sar', 'rol', 'ror', 'mul', 'imul', 'div', 'idiv', 'bswap', 'bt', 'rdtsc', 'punpck', 'pshufd', 'pcmp', 'pand', 'psllw', 'pslld', 'psllq', - 'paddq', 'pinsr', + 'paddq', 'pinsr', 'pmul', 'psrl', # sign-extending moves should not produce GC pointers 'cbtw', 'cwtl', 'cwtd', 'cltd', 'cltq', 'cqto', # zero-extending moves should not produce GC pointers diff --git a/pypy/translator/c/src/asm_gcc_x86.h b/pypy/translator/c/src/asm_gcc_x86.h --- a/pypy/translator/c/src/asm_gcc_x86.h +++ b/pypy/translator/c/src/asm_gcc_x86.h @@ -102,6 +102,12 @@ #endif /* !PYPY_CPU_HAS_STANDARD_PRECISION */ +#ifdef PYPY_X86_CHECK_SSE2 +#define PYPY_X86_CHECK_SSE2_DEFINED +extern void pypy_x86_check_sse2(void); +#endif + + /* implementations */ #ifndef PYPY_NOT_MAIN_FILE @@ -113,4 +119,25 @@ } # endif +# ifdef PYPY_X86_CHECK_SSE2 +void pypy_x86_check_sse2(void) +{ + //Read the CPU features. 
+ int features; + asm("mov $1, %%eax\n" + "cpuid\n" + "mov %%edx, %0" + : "=g"(features) : : "eax", "ebx", "edx", "ecx"); + + //Check bits 25 and 26, this indicates SSE2 support + if (((features & (1 << 25)) == 0) || ((features & (1 << 26)) == 0)) + { + fprintf(stderr, "Old CPU with no SSE2 support, cannot continue.\n" + "You need to re-translate with " + "'--jit-backend=x86-without-sse2'\n"); + abort(); + } +} +# endif + #endif diff --git a/pypy/translator/c/src/debug_print.c b/pypy/translator/c/src/debug_print.c --- a/pypy/translator/c/src/debug_print.c +++ b/pypy/translator/c/src/debug_print.c @@ -1,3 +1,4 @@ +#define PYPY_NOT_MAIN_FILE #include #include diff --git a/pypy/translator/c/src/dtoa.c b/pypy/translator/c/src/dtoa.c --- a/pypy/translator/c/src/dtoa.c +++ b/pypy/translator/c/src/dtoa.c @@ -46,13 +46,13 @@ * of return type *Bigint all return NULL to indicate a malloc failure. * Similarly, rv_alloc and nrv_alloc (return type char *) return NULL on * failure. bigcomp now has return type int (it used to be void) and - * returns -1 on failure and 0 otherwise. _Py_dg_dtoa returns NULL - * on failure. _Py_dg_strtod indicates failure due to malloc failure + * returns -1 on failure and 0 otherwise. __Py_dg_dtoa returns NULL + * on failure. __Py_dg_strtod indicates failure due to malloc failure * by returning -1.0, setting errno=ENOMEM and *se to s00. * * 4. The static variable dtoa_result has been removed. Callers of - * _Py_dg_dtoa are expected to call _Py_dg_freedtoa to free - * the memory allocated by _Py_dg_dtoa. + * __Py_dg_dtoa are expected to call __Py_dg_freedtoa to free + * the memory allocated by __Py_dg_dtoa. * * 5. The code has been reformatted to better fit with Python's * C style guide (PEP 7). @@ -61,7 +61,7 @@ * that hasn't been MALLOC'ed, private_mem should only be used when k <= * Kmax. * - * 7. _Py_dg_strtod has been modified so that it doesn't accept strings with + * 7. 
__Py_dg_strtod has been modified so that it doesn't accept strings with * leading whitespace. * ***************************************************************/ @@ -283,7 +283,7 @@ #define Big0 (Frac_mask1 | Exp_msk1*(DBL_MAX_EXP+Bias-1)) #define Big1 0xffffffff -/* struct BCinfo is used to pass information from _Py_dg_strtod to bigcomp */ +/* struct BCinfo is used to pass information from __Py_dg_strtod to bigcomp */ typedef struct BCinfo BCinfo; struct @@ -494,7 +494,7 @@ /* convert a string s containing nd decimal digits (possibly containing a decimal separator at position nd0, which is ignored) to a Bigint. This - function carries on where the parsing code in _Py_dg_strtod leaves off: on + function carries on where the parsing code in __Py_dg_strtod leaves off: on entry, y9 contains the result of converting the first 9 digits. Returns NULL on failure. */ @@ -1050,7 +1050,7 @@ } /* Convert a scaled double to a Bigint plus an exponent. Similar to d2b, - except that it accepts the scale parameter used in _Py_dg_strtod (which + except that it accepts the scale parameter used in __Py_dg_strtod (which should be either 0 or 2*P), and the normalization for the return value is different (see below). On input, d should be finite and nonnegative, and d / 2**scale should be exactly representable as an IEEE 754 double. @@ -1351,9 +1351,9 @@ /* The bigcomp function handles some hard cases for strtod, for inputs with more than STRTOD_DIGLIM digits. It's called once an initial estimate for the double corresponding to the input string has - already been obtained by the code in _Py_dg_strtod. + already been obtained by the code in __Py_dg_strtod. - The bigcomp function is only called after _Py_dg_strtod has found a + The bigcomp function is only called after __Py_dg_strtod has found a double value rv such that either rv or rv + 1ulp represents the correctly rounded value corresponding to the original string. 
It determines which of these two values is the correct one by @@ -1368,12 +1368,12 @@ s0 points to the first significant digit of the input string. rv is a (possibly scaled) estimate for the closest double value to the - value represented by the original input to _Py_dg_strtod. If + value represented by the original input to __Py_dg_strtod. If bc->scale is nonzero, then rv/2^(bc->scale) is the approximation to the input value. bc is a struct containing information gathered during the parsing and - estimation steps of _Py_dg_strtod. Description of fields follows: + estimation steps of __Py_dg_strtod. Description of fields follows: bc->e0 gives the exponent of the input value, such that dv = (integer given by the bd->nd digits of s0) * 10**e0 @@ -1505,7 +1505,7 @@ } static double -_Py_dg_strtod(const char *s00, char **se) +__Py_dg_strtod(const char *s00, char **se) { int bb2, bb5, bbe, bd2, bd5, bs2, c, dsign, e, e1, error; int esign, i, j, k, lz, nd, nd0, odd, sign; @@ -1849,7 +1849,7 @@ for(;;) { - /* This is the main correction loop for _Py_dg_strtod. + /* This is the main correction loop for __Py_dg_strtod. We've got a decimal value tdv, and a floating-point approximation srv=rv/2^bc.scale to tdv. The aim is to determine whether srv is @@ -2283,7 +2283,7 @@ */ static void -_Py_dg_freedtoa(char *s) +__Py_dg_freedtoa(char *s) { Bigint *b = (Bigint *)((int *)s - 1); b->maxwds = 1 << (b->k = *(int*)b); @@ -2325,11 +2325,11 @@ */ /* Additional notes (METD): (1) returns NULL on failure. (2) to avoid memory - leakage, a successful call to _Py_dg_dtoa should always be matched by a - call to _Py_dg_freedtoa. */ + leakage, a successful call to __Py_dg_dtoa should always be matched by a + call to __Py_dg_freedtoa. 
*/ static char * -_Py_dg_dtoa(double dd, int mode, int ndigits, +__Py_dg_dtoa(double dd, int mode, int ndigits, int *decpt, int *sign, char **rve) { /* Arguments ndigits, decpt, sign are similar to those @@ -2926,7 +2926,7 @@ if (b) Bfree(b); if (s0) - _Py_dg_freedtoa(s0); + __Py_dg_freedtoa(s0); return NULL; } @@ -2947,7 +2947,7 @@ _PyPy_SET_53BIT_PRECISION_HEADER; _PyPy_SET_53BIT_PRECISION_START; - result = _Py_dg_strtod(s00, se); + result = __Py_dg_strtod(s00, se); _PyPy_SET_53BIT_PRECISION_END; return result; } @@ -2959,14 +2959,14 @@ _PyPy_SET_53BIT_PRECISION_HEADER; _PyPy_SET_53BIT_PRECISION_START; - result = _Py_dg_dtoa(dd, mode, ndigits, decpt, sign, rve); + result = __Py_dg_dtoa(dd, mode, ndigits, decpt, sign, rve); _PyPy_SET_53BIT_PRECISION_END; return result; } void _PyPy_dg_freedtoa(char *s) { - _Py_dg_freedtoa(s); + __Py_dg_freedtoa(s); } /* End PYPY hacks */ diff --git a/pypy/translator/c/src/main.h b/pypy/translator/c/src/main.h --- a/pypy/translator/c/src/main.h +++ b/pypy/translator/c/src/main.h @@ -36,6 +36,9 @@ RPyListOfString *list; pypy_asm_stack_bottom(); +#ifdef PYPY_X86_CHECK_SSE2_DEFINED + pypy_x86_check_sse2(); +#endif instrument_setup(); if (sizeof(void*) != SIZEOF_LONG) { diff --git a/pypy/translator/driver.py b/pypy/translator/driver.py --- a/pypy/translator/driver.py +++ b/pypy/translator/driver.py @@ -559,6 +559,9 @@ newsoname = newexename.new(basename=soname.basename) shutil.copy(str(soname), str(newsoname)) self.log.info("copied: %s" % (newsoname,)) + if sys.platform == 'win32': + shutil.copyfile(str(soname.new(ext='lib')), + str(newsoname.new(ext='lib'))) self.c_entryp = newexename self.log.info('usession directory: %s' % (udir,)) self.log.info("created: %s" % (self.c_entryp,)) diff --git a/pypy/translator/sandbox/test/test_sandbox.py b/pypy/translator/sandbox/test/test_sandbox.py --- a/pypy/translator/sandbox/test/test_sandbox.py +++ b/pypy/translator/sandbox/test/test_sandbox.py @@ -145,9 +145,9 @@ g = pipe.stdin f = pipe.stdout 
expect(f, g, "ll_os.ll_os_getenv", ("PYPY_GENERATIONGC_NURSERY",), None) - if sys.platform.startswith('linux'): # on Mac, uses another (sandboxsafe) approach - expect(f, g, "ll_os.ll_os_open", ("/proc/cpuinfo", 0, 420), - OSError(5232, "xyz")) + #if sys.platform.startswith('linux'): + # expect(f, g, "ll_os.ll_os_open", ("/proc/cpuinfo", 0, 420), + # OSError(5232, "xyz")) expect(f, g, "ll_os.ll_os_getenv", ("PYPY_GC_DEBUG",), None) g.close() tail = f.read() From noreply at buildbot.pypy.org Thu Mar 1 09:07:34 2012 From: noreply at buildbot.pypy.org (wlav) Date: Thu, 1 Mar 2012 09:07:34 +0100 (CET) Subject: [pypy-commit] pypy reflex-support: merge default into branch Message-ID: <20120301080734.7EF658204C@wyvern.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r53036:3847eddabfa1 Date: 2012-02-28 10:35 -0800 http://bitbucket.org/pypy/pypy/changeset/3847eddabfa1/ Log: merge default into branch diff --git a/lib-python/modified-2.7/ctypes/test/test_arrays.py b/lib-python/modified-2.7/ctypes/test/test_arrays.py --- a/lib-python/modified-2.7/ctypes/test/test_arrays.py +++ b/lib-python/modified-2.7/ctypes/test/test_arrays.py @@ -1,12 +1,23 @@ import unittest from ctypes import * +from test.test_support import impl_detail formats = "bBhHiIlLqQfd" +# c_longdouble commented out for PyPy, look at the commend in test_longdouble formats = c_byte, c_ubyte, c_short, c_ushort, c_int, c_uint, \ - c_long, c_ulonglong, c_float, c_double, c_longdouble + c_long, c_ulonglong, c_float, c_double #, c_longdouble class ArrayTestCase(unittest.TestCase): + + @impl_detail('long double not supported by PyPy', pypy=False) + def test_longdouble(self): + """ + This test is empty. It's just here to remind that we commented out + c_longdouble in "formats". If pypy will ever supports c_longdouble, we + should kill this test and uncomment c_longdouble inside formats. 
+ """ + def test_simple(self): # create classes holding simple numeric types, and check # various properties. diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -13,7 +13,7 @@ and not p.basename.startswith('test')] essential_modules = dict.fromkeys( - ["exceptions", "_file", "sys", "__builtin__", "posix"] + ["exceptions", "_file", "sys", "__builtin__", "posix", "_warnings"] ) default_modules = essential_modules.copy() diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1471,8 +1471,8 @@ def warn(self, msg, w_warningcls): self.appexec([self.wrap(msg), w_warningcls], """(msg, warningcls): - import warnings - warnings.warn(msg, warningcls, stacklevel=2) + import _warnings + _warnings.warn(msg, warningcls, stacklevel=2) """) def resolve_target(self, w_obj): diff --git a/pypy/interpreter/pyparser/parsestring.py b/pypy/interpreter/pyparser/parsestring.py --- a/pypy/interpreter/pyparser/parsestring.py +++ b/pypy/interpreter/pyparser/parsestring.py @@ -1,5 +1,6 @@ from pypy.interpreter.error import OperationError from pypy.interpreter import unicodehelper +from pypy.rlib.rstring import StringBuilder def parsestr(space, encoding, s, unicode_literals=False): # compiler.transformer.Transformer.decode_literal depends on what @@ -115,21 +116,23 @@ the string is UTF-8 encoded and should be re-encoded in the specified encoding. """ - lis = [] + builder = StringBuilder(len(s)) ps = 0 end = len(s) - while ps < end: - if s[ps] != '\\': - # note that the C code has a label here. - # the logic is the same. + while 1: + ps2 = ps + while ps < end and s[ps] != '\\': if recode_encoding and ord(s[ps]) & 0x80: w, ps = decode_utf8(space, s, ps, end, recode_encoding) - # Append bytes to output buffer. 
- lis.append(w) + builder.append(w) + ps2 = ps else: - lis.append(s[ps]) ps += 1 - continue + if ps > ps2: + builder.append_slice(s, ps2, ps) + if ps == end: + break + ps += 1 if ps == end: raise_app_valueerror(space, 'Trailing \\ in string') @@ -140,25 +143,25 @@ if ch == '\n': pass elif ch == '\\': - lis.append('\\') + builder.append('\\') elif ch == "'": - lis.append("'") + builder.append("'") elif ch == '"': - lis.append('"') + builder.append('"') elif ch == 'b': - lis.append("\010") + builder.append("\010") elif ch == 'f': - lis.append('\014') # FF + builder.append('\014') # FF elif ch == 't': - lis.append('\t') + builder.append('\t') elif ch == 'n': - lis.append('\n') + builder.append('\n') elif ch == 'r': - lis.append('\r') + builder.append('\r') elif ch == 'v': - lis.append('\013') # VT + builder.append('\013') # VT elif ch == 'a': - lis.append('\007') # BEL, not classic C + builder.append('\007') # BEL, not classic C elif ch in '01234567': # Look for up to two more octal digits span = ps @@ -168,13 +171,13 @@ # emulate a strange wrap-around behavior of CPython: # \400 is the same as \000 because 0400 == 256 num = int(octal, 8) & 0xFF - lis.append(chr(num)) + builder.append(chr(num)) ps = span elif ch == 'x': if ps+2 <= end and isxdigit(s[ps]) and isxdigit(s[ps + 1]): hexa = s[ps : ps + 2] num = int(hexa, 16) - lis.append(chr(num)) + builder.append(chr(num)) ps += 2 else: raise_app_valueerror(space, 'invalid \\x escape') @@ -184,13 +187,13 @@ # this was not an escape, so the backslash # has to be added, and we start over in # non-escape mode. - lis.append('\\') + builder.append('\\') ps -= 1 assert ps >= 0 continue # an arbitry number of unescaped UTF-8 bytes may follow. 
- buf = ''.join(lis) + buf = builder.build() return buf diff --git a/pypy/interpreter/streamutil.py b/pypy/interpreter/streamutil.py new file mode 100644 --- /dev/null +++ b/pypy/interpreter/streamutil.py @@ -0,0 +1,17 @@ +from pypy.rlib.streamio import StreamError +from pypy.interpreter.error import OperationError, wrap_oserror2 + +def wrap_streamerror(space, e, w_filename=None): + if isinstance(e, StreamError): + return OperationError(space.w_ValueError, + space.wrap(e.message)) + elif isinstance(e, OSError): + return wrap_oserror_as_ioerror(space, e, w_filename) + else: + # should not happen: wrap_streamerror() is only called when + # StreamErrors = (OSError, StreamError) are raised + return OperationError(space.w_IOError, space.w_None) + +def wrap_oserror_as_ioerror(space, e, w_filename=None): + return wrap_oserror2(space, e, w_filename, + w_exception_class=space.w_IOError) diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -769,11 +769,19 @@ self.generate_function('malloc_unicode', malloc_unicode, [lltype.Signed]) - # Rarely called: allocate a fixed-size amount of bytes, but - # not in the nursery, because it is too big. Implemented like - # malloc_nursery_slowpath() above. - self.generate_function('malloc_fixedsize', malloc_nursery_slowpath, - [lltype.Signed]) + # Never called as far as I can tell, but there for completeness: + # allocate a fixed-size object, but not in the nursery, because + # it is too big. 
+ def malloc_big_fixedsize(size, tid): + if self.DEBUG: + self._random_usage_of_xmm_registers() + type_id = llop.extract_ushort(llgroup.HALFWORD, tid) + check_typeid(type_id) + return llop1.do_malloc_fixedsize_clear(llmemory.GCREF, + type_id, size, + False, False, False) + self.generate_function('malloc_big_fixedsize', malloc_big_fixedsize, + [lltype.Signed] * 2) def _bh_malloc(self, sizedescr): from pypy.rpython.memory.gctypelayout import check_typeid diff --git a/pypy/jit/backend/llsupport/rewrite.py b/pypy/jit/backend/llsupport/rewrite.py --- a/pypy/jit/backend/llsupport/rewrite.py +++ b/pypy/jit/backend/llsupport/rewrite.py @@ -96,8 +96,10 @@ def handle_new_fixedsize(self, descr, op): assert isinstance(descr, SizeDescr) size = descr.size - self.gen_malloc_nursery(size, op.result) - self.gen_initialize_tid(op.result, descr.tid) + if self.gen_malloc_nursery(size, op.result): + self.gen_initialize_tid(op.result, descr.tid) + else: + self.gen_malloc_fixedsize(size, descr.tid, op.result) def handle_new_array(self, arraydescr, op): v_length = op.getarg(0) @@ -112,8 +114,8 @@ pass # total_size is still -1 elif arraydescr.itemsize == 0: total_size = arraydescr.basesize - if 0 <= total_size <= 0xffffff: # up to 16MB, arbitrarily - self.gen_malloc_nursery(total_size, op.result) + if (total_size >= 0 and + self.gen_malloc_nursery(total_size, op.result)): self.gen_initialize_tid(op.result, arraydescr.tid) self.gen_initialize_len(op.result, v_length, arraydescr.lendescr) elif self.gc_ll_descr.kind == 'boehm': @@ -147,13 +149,22 @@ # mark 'v_result' as freshly malloced self.recent_mallocs[v_result] = None - def gen_malloc_fixedsize(self, size, v_result): - """Generate a CALL_MALLOC_GC(malloc_fixedsize_fn, Const(size)). - Note that with the framework GC, this should be called very rarely. + def gen_malloc_fixedsize(self, size, typeid, v_result): + """Generate a CALL_MALLOC_GC(malloc_fixedsize_fn, ...). + Used on Boehm, and on the framework GC for large fixed-size + mallocs. 
(For all I know this latter case never occurs in + practice, but better safe than sorry.) """ - addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_fixedsize') - self._gen_call_malloc_gc([ConstInt(addr), ConstInt(size)], v_result, - self.gc_ll_descr.malloc_fixedsize_descr) + if self.gc_ll_descr.fielddescr_tid is not None: # framework GC + assert (size & (WORD-1)) == 0, "size not aligned?" + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_big_fixedsize') + args = [ConstInt(addr), ConstInt(size), ConstInt(typeid)] + descr = self.gc_ll_descr.malloc_big_fixedsize_descr + else: # Boehm + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_fixedsize') + args = [ConstInt(addr), ConstInt(size)] + descr = self.gc_ll_descr.malloc_fixedsize_descr + self._gen_call_malloc_gc(args, v_result, descr) def gen_boehm_malloc_array(self, arraydescr, v_num_elem, v_result): """Generate a CALL_MALLOC_GC(malloc_array_fn, ...) for Boehm.""" @@ -211,8 +222,7 @@ """ size = self.round_up_for_allocation(size) if not self.gc_ll_descr.can_use_nursery_malloc(size): - self.gen_malloc_fixedsize(size, v_result) - return + return False # op = None if self._op_malloc_nursery is not None: @@ -238,6 +248,7 @@ self._previous_size = size self._v_last_malloced_nursery = v_result self.recent_mallocs[v_result] = None + return True def gen_initialize_tid(self, v_newgcobj, tid): if self.gc_ll_descr.fielddescr_tid is not None: diff --git a/pypy/jit/backend/llsupport/test/test_rewrite.py b/pypy/jit/backend/llsupport/test/test_rewrite.py --- a/pypy/jit/backend/llsupport/test/test_rewrite.py +++ b/pypy/jit/backend/llsupport/test/test_rewrite.py @@ -119,12 +119,19 @@ jump() """, """ [] - p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ - %(adescr.basesize + 10 * adescr.itemsize)d, \ - descr=malloc_fixedsize_descr) - setfield_gc(p0, 10, descr=alendescr) + p0 = call_malloc_gc(ConstClass(malloc_array), \ + %(adescr.basesize)d, \ + 10, \ + %(adescr.itemsize)d, \ + %(adescr.lendescr.offset)d, \ + 
descr=malloc_array_descr) jump() """) +## should ideally be: +## p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ +## %(adescr.basesize + 10 * adescr.itemsize)d, \ +## descr=malloc_fixedsize_descr) +## setfield_gc(p0, 10, descr=alendescr) def test_new_array_variable(self): self.check_rewrite(""" @@ -178,13 +185,20 @@ jump() """, """ [i1] - p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ - %(unicodedescr.basesize + \ - 10 * unicodedescr.itemsize)d, \ - descr=malloc_fixedsize_descr) - setfield_gc(p0, 10, descr=unicodelendescr) + p0 = call_malloc_gc(ConstClass(malloc_array), \ + %(unicodedescr.basesize)d, \ + 10, \ + %(unicodedescr.itemsize)d, \ + %(unicodelendescr.offset)d, \ + descr=malloc_array_descr) jump() """) +## should ideally be: +## p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ +## %(unicodedescr.basesize + \ +## 10 * unicodedescr.itemsize)d, \ +## descr=malloc_fixedsize_descr) +## setfield_gc(p0, 10, descr=unicodelendescr) class TestFramework(RewriteTests): @@ -203,7 +217,7 @@ # class FakeCPU(object): def sizeof(self, STRUCT): - descr = SizeDescrWithVTable(102) + descr = SizeDescrWithVTable(104) descr.tid = 9315 return descr self.cpu = FakeCPU() @@ -368,11 +382,9 @@ jump() """, """ [] - p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ - %(bdescr.basesize + 104)d, \ - descr=malloc_fixedsize_descr) - setfield_gc(p0, 8765, descr=tiddescr) - setfield_gc(p0, 103, descr=blendescr) + p0 = call_malloc_gc(ConstClass(malloc_array), 1, \ + %(bdescr.tid)d, 103, \ + descr=malloc_array_descr) jump() """) @@ -435,9 +447,8 @@ jump() """, """ [p1] - p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 104, \ - descr=malloc_fixedsize_descr) - setfield_gc(p0, 9315, descr=tiddescr) + p0 = call_malloc_gc(ConstClass(malloc_big_fixedsize), 104, 9315, \ + descr=malloc_big_fixedsize_descr) setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) jump() """) diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py 
--- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -184,6 +184,8 @@ self.addrs[1] = self.addrs[0] + 64 self.calls = [] def malloc_slowpath(size): + if self.gcrootmap is not None: # hook + self.gcrootmap.hook_malloc_slowpath() self.calls.append(size) # reset the nursery nadr = rffi.cast(lltype.Signed, self.nursery) @@ -257,3 +259,218 @@ assert gc_ll_descr.addrs[0] == nurs_adr + 24 # this should call slow path once assert gc_ll_descr.calls == [24] + + def test_save_regs_around_malloc(self): + S1 = lltype.GcStruct('S1') + S2 = lltype.GcStruct('S2', ('s0', lltype.Ptr(S1)), + ('s1', lltype.Ptr(S1)), + ('s2', lltype.Ptr(S1)), + ('s3', lltype.Ptr(S1)), + ('s4', lltype.Ptr(S1)), + ('s5', lltype.Ptr(S1)), + ('s6', lltype.Ptr(S1)), + ('s7', lltype.Ptr(S1)), + ('s8', lltype.Ptr(S1)), + ('s9', lltype.Ptr(S1)), + ('s10', lltype.Ptr(S1)), + ('s11', lltype.Ptr(S1)), + ('s12', lltype.Ptr(S1)), + ('s13', lltype.Ptr(S1)), + ('s14', lltype.Ptr(S1)), + ('s15', lltype.Ptr(S1))) + cpu = self.cpu + self.namespace = self.namespace.copy() + for i in range(16): + self.namespace['ds%i' % i] = cpu.fielddescrof(S2, 's%d' % i) + ops = ''' + [p0] + p1 = getfield_gc(p0, descr=ds0) + p2 = getfield_gc(p0, descr=ds1) + p3 = getfield_gc(p0, descr=ds2) + p4 = getfield_gc(p0, descr=ds3) + p5 = getfield_gc(p0, descr=ds4) + p6 = getfield_gc(p0, descr=ds5) + p7 = getfield_gc(p0, descr=ds6) + p8 = getfield_gc(p0, descr=ds7) + p9 = getfield_gc(p0, descr=ds8) + p10 = getfield_gc(p0, descr=ds9) + p11 = getfield_gc(p0, descr=ds10) + p12 = getfield_gc(p0, descr=ds11) + p13 = getfield_gc(p0, descr=ds12) + p14 = getfield_gc(p0, descr=ds13) + p15 = getfield_gc(p0, descr=ds14) + p16 = getfield_gc(p0, descr=ds15) + # + # now all registers are in use + p17 = call_malloc_nursery(40) + p18 = call_malloc_nursery(40) # overflow + # + finish(p1, p2, p3, p4, p5, p6, p7, p8, \ + p9, p10, p11, p12, p13, p14, p15, p16) + ''' + s2 = lltype.malloc(S2) + for i in 
range(16): + setattr(s2, 's%d' % i, lltype.malloc(S1)) + s2ref = lltype.cast_opaque_ptr(llmemory.GCREF, s2) + # + self.interpret(ops, [s2ref]) + gc_ll_descr = cpu.gc_ll_descr + gc_ll_descr.check_nothing_in_nursery() + assert gc_ll_descr.calls == [40] + # check the returned pointers + for i in range(16): + s1ref = self.cpu.get_latest_value_ref(i) + s1 = lltype.cast_opaque_ptr(lltype.Ptr(S1), s1ref) + assert s1 == getattr(s2, 's%d' % i) + + +class MockShadowStackRootMap(MockGcRootMap): + is_shadow_stack = True + MARKER_FRAME = 88 # this marker follows the frame addr + S1 = lltype.GcStruct('S1') + + def __init__(self): + self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 20, + flavor='raw') + # root_stack_top + self.addrs[0] = rffi.cast(lltype.Signed, self.addrs) + 3*WORD + # random stuff + self.addrs[1] = 123456 + self.addrs[2] = 654321 + self.check_initial_and_final_state() + self.callshapes = {} + self.should_see = [] + + def check_initial_and_final_state(self): + assert self.addrs[0] == rffi.cast(lltype.Signed, self.addrs) + 3*WORD + assert self.addrs[1] == 123456 + assert self.addrs[2] == 654321 + + def get_root_stack_top_addr(self): + return rffi.cast(lltype.Signed, self.addrs) + + def compress_callshape(self, shape, datablockwrapper): + assert shape[0] == 'shape' + return ['compressed'] + shape[1:] + + def write_callshape(self, mark, force_index): + assert mark[0] == 'compressed' + assert force_index not in self.callshapes + assert force_index == 42 + len(self.callshapes) + self.callshapes[force_index] = mark + + def hook_malloc_slowpath(self): + num_entries = self.addrs[0] - rffi.cast(lltype.Signed, self.addrs) + assert num_entries == 5*WORD # 3 initially, plus 2 by the asm frame + assert self.addrs[1] == 123456 # unchanged + assert self.addrs[2] == 654321 # unchanged + frame_addr = self.addrs[3] # pushed by the asm frame + assert self.addrs[4] == self.MARKER_FRAME # pushed by the asm frame + # + from pypy.jit.backend.x86.arch import FORCE_INDEX_OFS + addr 
= rffi.cast(rffi.CArrayPtr(lltype.Signed), + frame_addr + FORCE_INDEX_OFS) + force_index = addr[0] + assert force_index == 43 # in this test: the 2nd call_malloc_nursery + # + # The callshapes[43] saved above should list addresses both in the + # COPY_AREA and in the "normal" stack, where all the 16 values p1-p16 + # of test_save_regs_at_correct_place should have been stored. Here + # we replace them with new addresses, to emulate a moving GC. + shape = self.callshapes[force_index] + assert len(shape[1:]) == len(self.should_see) + new_objects = [None] * len(self.should_see) + for ofs in shape[1:]: + assert isinstance(ofs, int) # not a register at all here + addr = rffi.cast(rffi.CArrayPtr(lltype.Signed), frame_addr + ofs) + contains = addr[0] + for j in range(len(self.should_see)): + obj = self.should_see[j] + if contains == rffi.cast(lltype.Signed, obj): + assert new_objects[j] is None # duplicate? + break + else: + assert 0 # the value read from the stack looks random? + new_objects[j] = lltype.malloc(self.S1) + addr[0] = rffi.cast(lltype.Signed, new_objects[j]) + self.should_see[:] = new_objects + + +class TestMallocShadowStack(BaseTestRegalloc): + + def setup_method(self, method): + cpu = CPU(None, None) + cpu.gc_ll_descr = GCDescrFastpathMalloc() + cpu.gc_ll_descr.gcrootmap = MockShadowStackRootMap() + cpu.setup_once() + for i in range(42): + cpu.reserve_some_free_fail_descr_number() + self.cpu = cpu + + def test_save_regs_at_correct_place(self): + cpu = self.cpu + gc_ll_descr = cpu.gc_ll_descr + S1 = gc_ll_descr.gcrootmap.S1 + S2 = lltype.GcStruct('S2', ('s0', lltype.Ptr(S1)), + ('s1', lltype.Ptr(S1)), + ('s2', lltype.Ptr(S1)), + ('s3', lltype.Ptr(S1)), + ('s4', lltype.Ptr(S1)), + ('s5', lltype.Ptr(S1)), + ('s6', lltype.Ptr(S1)), + ('s7', lltype.Ptr(S1)), + ('s8', lltype.Ptr(S1)), + ('s9', lltype.Ptr(S1)), + ('s10', lltype.Ptr(S1)), + ('s11', lltype.Ptr(S1)), + ('s12', lltype.Ptr(S1)), + ('s13', lltype.Ptr(S1)), + ('s14', lltype.Ptr(S1)), + ('s15', 
lltype.Ptr(S1))) + self.namespace = self.namespace.copy() + for i in range(16): + self.namespace['ds%i' % i] = cpu.fielddescrof(S2, 's%d' % i) + ops = ''' + [p0] + p1 = getfield_gc(p0, descr=ds0) + p2 = getfield_gc(p0, descr=ds1) + p3 = getfield_gc(p0, descr=ds2) + p4 = getfield_gc(p0, descr=ds3) + p5 = getfield_gc(p0, descr=ds4) + p6 = getfield_gc(p0, descr=ds5) + p7 = getfield_gc(p0, descr=ds6) + p8 = getfield_gc(p0, descr=ds7) + p9 = getfield_gc(p0, descr=ds8) + p10 = getfield_gc(p0, descr=ds9) + p11 = getfield_gc(p0, descr=ds10) + p12 = getfield_gc(p0, descr=ds11) + p13 = getfield_gc(p0, descr=ds12) + p14 = getfield_gc(p0, descr=ds13) + p15 = getfield_gc(p0, descr=ds14) + p16 = getfield_gc(p0, descr=ds15) + # + # now all registers are in use + p17 = call_malloc_nursery(40) + p18 = call_malloc_nursery(40) # overflow + # + finish(p1, p2, p3, p4, p5, p6, p7, p8, \ + p9, p10, p11, p12, p13, p14, p15, p16) + ''' + s2 = lltype.malloc(S2) + for i in range(16): + s1 = lltype.malloc(S1) + setattr(s2, 's%d' % i, s1) + gc_ll_descr.gcrootmap.should_see.append(s1) + s2ref = lltype.cast_opaque_ptr(llmemory.GCREF, s2) + # + self.interpret(ops, [s2ref]) + gc_ll_descr.check_nothing_in_nursery() + assert gc_ll_descr.calls == [40] + gc_ll_descr.gcrootmap.check_initial_and_final_state() + # check the returned pointers + for i in range(16): + s1ref = self.cpu.get_latest_value_ref(i) + s1 = lltype.cast_opaque_ptr(lltype.Ptr(S1), s1ref) + for j in range(16): + assert s1 != getattr(s2, 's%d' % j) + assert s1 == gc_ll_descr.gcrootmap.should_see[i] diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -289,8 +289,21 @@ assert isinstance(token, TargetToken) assert token.original_jitcell_token is None token.original_jitcell_token = trace.original_jitcell_token - - + + +def do_compile_loop(metainterp_sd, inputargs, operations, looptoken, + log=True, name=''): + 
metainterp_sd.logger_ops.log_loop(inputargs, operations, -2, + 'compiling', name=name) + return metainterp_sd.cpu.compile_loop(inputargs, operations, looptoken, + log=log, name=name) + +def do_compile_bridge(metainterp_sd, faildescr, inputargs, operations, + original_loop_token, log=True): + metainterp_sd.logger_ops.log_bridge(inputargs, operations, -2) + return metainterp_sd.cpu.compile_bridge(faildescr, inputargs, operations, + original_loop_token, log=log) + def send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, type): vinfo = jitdriver_sd.virtualizable_info if vinfo is not None: @@ -319,9 +332,9 @@ metainterp_sd.profiler.start_backend() debug_start("jit-backend") try: - asminfo = metainterp_sd.cpu.compile_loop(loop.inputargs, operations, - original_jitcell_token, - name=loopname) + asminfo = do_compile_loop(metainterp_sd, loop.inputargs, + operations, original_jitcell_token, + name=loopname) finally: debug_stop("jit-backend") metainterp_sd.profiler.end_backend() @@ -333,7 +346,6 @@ metainterp_sd.stats.compiled() metainterp_sd.log("compiled new " + type) # - loopname = jitdriver_sd.warmstate.get_location_str(greenkey) if asminfo is not None: ops_offset = asminfo.ops_offset else: @@ -365,9 +377,9 @@ metainterp_sd.profiler.start_backend() debug_start("jit-backend") try: - asminfo = metainterp_sd.cpu.compile_bridge(faildescr, inputargs, - operations, - original_loop_token) + asminfo = do_compile_bridge(metainterp_sd, faildescr, inputargs, + operations, + original_loop_token) finally: debug_stop("jit-backend") metainterp_sd.profiler.end_backend() diff --git a/pypy/jit/metainterp/logger.py b/pypy/jit/metainterp/logger.py --- a/pypy/jit/metainterp/logger.py +++ b/pypy/jit/metainterp/logger.py @@ -18,6 +18,10 @@ debug_start("jit-log-noopt-loop") logops = self._log_operations(inputargs, operations, ops_offset) debug_stop("jit-log-noopt-loop") + elif number == -2: + debug_start("jit-log-compiling-loop") + logops = self._log_operations(inputargs, 
operations, ops_offset) + debug_stop("jit-log-compiling-loop") else: debug_start("jit-log-opt-loop") debug_print("# Loop", number, '(%s)' % name , ":", type, @@ -31,6 +35,10 @@ debug_start("jit-log-noopt-bridge") logops = self._log_operations(inputargs, operations, ops_offset) debug_stop("jit-log-noopt-bridge") + elif number == -2: + debug_start("jit-log-compiling-bridge") + logops = self._log_operations(inputargs, operations, ops_offset) + debug_stop("jit-log-compiling-bridge") else: debug_start("jit-log-opt-bridge") debug_print("# bridge out of Guard", number, diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py --- a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -398,6 +398,50 @@ with raises(InvalidLoop): self.optimize_loop(ops, ops) + def test_maybe_issue1045_related(self): + ops = """ + [p8] + p54 = getfield_gc(p8, descr=valuedescr) + mark_opaque_ptr(p54) + i55 = getfield_gc(p54, descr=nextdescr) + p57 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p57, i55, descr=otherdescr) + p69 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p69, i55, descr=otherdescr) + i71 = int_eq(i55, -9223372036854775808) + guard_false(i71) [] + i73 = int_mod(i55, 2) + i75 = int_rshift(i73, 63) + i76 = int_and(2, i75) + i77 = int_add(i73, i76) + p79 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p79, i77, descr=otherdescr) + i81 = int_eq(i77, 1) + guard_false(i81) [] + i0 = int_ge(i55, 1) + guard_true(i0) [] + label(p57) + jump(p57) + """ + expected = """ + [p8] + p54 = getfield_gc(p8, descr=valuedescr) + i55 = getfield_gc(p54, descr=nextdescr) + i71 = int_eq(i55, -9223372036854775808) + guard_false(i71) [] + i73 = int_mod(i55, 2) + i75 = int_rshift(i73, 63) + i76 = int_and(2, i75) + i77 = int_add(i73, i76) + i81 = int_eq(i77, 1) + guard_false(i81) [] + i0 = int_ge(i55, 1) + guard_true(i0) [] + label(i55) + 
jump(i55) + """ + self.optimize_loop(ops, expected) + class OptRenameStrlen(Optimization): def propagate_forward(self, op): dispatch_opt(self, op) @@ -457,7 +501,6 @@ jump(p1, i11) """ self.optimize_loop(ops, expected) - class TestLLtype(OptimizeoptTestMultiLabel, LLtypeMixin): diff --git a/pypy/jit/metainterp/test/test_quasiimmut.py b/pypy/jit/metainterp/test/test_quasiimmut.py --- a/pypy/jit/metainterp/test/test_quasiimmut.py +++ b/pypy/jit/metainterp/test/test_quasiimmut.py @@ -8,7 +8,7 @@ from pypy.jit.metainterp.quasiimmut import get_current_qmut_instance from pypy.jit.metainterp.test.support import LLJitMixin from pypy.jit.codewriter.policy import StopAtXPolicy -from pypy.rlib.jit import JitDriver, dont_look_inside +from pypy.rlib.jit import JitDriver, dont_look_inside, unroll_safe def test_get_current_qmut_instance(): @@ -480,6 +480,32 @@ assert res == 1 self.check_jitcell_token_count(2) + def test_for_loop_array(self): + myjitdriver = JitDriver(greens=[], reds=["n", "i"]) + class Foo(object): + _immutable_fields_ = ["x?[*]"] + def __init__(self, x): + self.x = x + f = Foo([1, 3, 5, 6]) + @unroll_safe + def g(v): + for x in f.x: + if x & 1 == 0: + v += 1 + return v + def main(n): + i = 0 + while i < n: + myjitdriver.jit_merge_point(n=n, i=i) + i = g(i) + return i + res = self.meta_interp(main, [10]) + assert res == 10 + self.check_resops({ + "int_add": 2, "int_lt": 2, "jump": 1, "guard_true": 2, + "guard_not_invalidated": 2 + }) + class TestLLtypeGreenFieldsTests(QuasiImmutTests, LLJitMixin): pass diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -5,14 +5,13 @@ from pypy.rlib import streamio from pypy.rlib.rarithmetic import r_longlong from pypy.rlib.rstring import StringBuilder -from pypy.module._file.interp_stream import (W_AbstractStream, StreamErrors, - wrap_streamerror, wrap_oserror_as_ioerror) +from pypy.module._file.interp_stream import 
W_AbstractStream, StreamErrors from pypy.module.posix.interp_posix import dispatch_filename from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.typedef import (TypeDef, GetSetProperty, interp_attrproperty, make_weakref_descr, interp_attrproperty_w) from pypy.interpreter.gateway import interp2app, unwrap_spec - +from pypy.interpreter.streamutil import wrap_streamerror, wrap_oserror_as_ioerror class W_File(W_AbstractStream): """An interp-level file object. This implements the same interface than diff --git a/pypy/module/_file/interp_stream.py b/pypy/module/_file/interp_stream.py --- a/pypy/module/_file/interp_stream.py +++ b/pypy/module/_file/interp_stream.py @@ -2,27 +2,13 @@ from pypy.rlib import streamio from pypy.rlib.streamio import StreamErrors -from pypy.interpreter.error import OperationError, wrap_oserror2 +from pypy.interpreter.error import OperationError from pypy.interpreter.baseobjspace import ObjSpace, Wrappable from pypy.interpreter.typedef import TypeDef from pypy.interpreter.gateway import interp2app +from pypy.interpreter.streamutil import wrap_streamerror, wrap_oserror_as_ioerror -def wrap_streamerror(space, e, w_filename=None): - if isinstance(e, streamio.StreamError): - return OperationError(space.w_ValueError, - space.wrap(e.message)) - elif isinstance(e, OSError): - return wrap_oserror_as_ioerror(space, e, w_filename) - else: - # should not happen: wrap_streamerror() is only called when - # StreamErrors = (OSError, StreamError) are raised - return OperationError(space.w_IOError, space.w_None) - -def wrap_oserror_as_ioerror(space, e, w_filename=None): - return wrap_oserror2(space, e, w_filename, - w_exception_class=space.w_IOError) - class W_AbstractStream(Wrappable): """Base class for interp-level objects that expose streams to app-level""" slock = None diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h --- a/pypy/module/cpyext/include/object.h +++ 
b/pypy/module/cpyext/include/object.h @@ -56,6 +56,8 @@ #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) +#define _Py_ForgetReference(ob) /* nothing */ + #define Py_None (&_Py_NoneStruct) /* diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -1,10 +1,11 @@ from pypy.module.imp import importing from pypy.module._file.interp_file import W_File from pypy.rlib import streamio +from pypy.rlib.streamio import StreamErrors from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.module import Module from pypy.interpreter.gateway import unwrap_spec -from pypy.module._file.interp_stream import StreamErrors, wrap_streamerror +from pypy.interpreter.streamutil import wrap_streamerror def get_suffixes(space): diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -357,7 +357,7 @@ def test_cannot_write_pyc(self): import sys, os - p = os.path.join(sys.path[-1], 'readonly') + p = os.path.join(sys.path[0], 'readonly') try: os.chmod(p, 0555) except: diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -779,8 +779,6 @@ """ Intermediate class for performing binary operations. 
""" - _immutable_fields_ = ['left', 'right'] - def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, left, right): VirtualArray.__init__(self, name, shape, res_dtype) self.ufunc = ufunc @@ -856,8 +854,6 @@ self.right.create_sig(), done_func) class AxisReduce(Call2): - _immutable_fields_ = ['left', 'right'] - def __init__(self, ufunc, name, identity, shape, dtype, left, right, dim): Call2.__init__(self, ufunc, name, shape, dtype, dtype, left, right) diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -60,6 +60,9 @@ stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = pipe.communicate() + if getattr(pipe, 'returncode', 0) < 0: + raise IOError("subprocess was killed by signal %d" % ( + pipe.returncode,)) if stderr.startswith('SKIP:'): py.test.skip(stderr) if stderr.startswith('debug_alloc.h:'): # lldebug builds diff --git a/pypy/module/pypyjit/test_pypy_c/test_alloc.py b/pypy/module/pypyjit/test_pypy_c/test_alloc.py new file mode 100644 --- /dev/null +++ b/pypy/module/pypyjit/test_pypy_c/test_alloc.py @@ -0,0 +1,26 @@ +import py, sys +from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC + +class TestAlloc(BaseTestPyPyC): + + SIZES = dict.fromkeys([2 ** n for n in range(26)] + # up to 32MB + [2 ** n - 1 for n in range(26)]) + + def test_newstr_constant_size(self): + for size in TestAlloc.SIZES: + yield self.newstr_constant_size, size + + def newstr_constant_size(self, size): + src = """if 1: + N = %(size)d + part_a = 'a' * N + part_b = 'b' * N + for i in xrange(20): + ao = '%%s%%s' %% (part_a, part_b) + def main(): + return 42 +""" % {'size': size} + log = self.run(src, [], threshold=10) + assert log.result == 42 + loop, = log.loops_by_filename(self.filepath) + # assert did not crash diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py 
b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -201,3 +201,28 @@ loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("compare", "") # optimized away + def test_super(self): + def main(): + class A(object): + def m(self, x): + return x + 1 + class B(A): + def m(self, x): + return super(B, self).m(x) + i = 0 + while i < 300: + i = B().m(i) + return i + + log = self.run(main, []) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i78 = int_lt(i72, 300) + guard_true(i78, descr=...) + guard_not_invalidated(descr=...) + i79 = force_token() + i80 = force_token() + i81 = int_add(i72, 1) + --TICK-- + jump(..., descr=...) + """) diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -103,6 +103,7 @@ 'terminator', '_version_tag?', 'name?', + 'mro_w?[*]', ] # for config.objspace.std.getattributeshortcut @@ -345,9 +346,9 @@ return w_self._lookup_where(name) + @unroll_safe def lookup_starting_at(w_self, w_starttype, name): space = w_self.space - # XXX Optimize this with method cache look = False for w_class in w_self.mro_w: if w_class is w_starttype: diff --git a/pypy/rpython/lltypesystem/rlist.py b/pypy/rpython/lltypesystem/rlist.py --- a/pypy/rpython/lltypesystem/rlist.py +++ b/pypy/rpython/lltypesystem/rlist.py @@ -392,7 +392,11 @@ ('list', r_list.lowleveltype), ('index', Signed))) self.ll_listiter = ll_listiter - self.ll_listnext = ll_listnext + if (isinstance(r_list, FixedSizeListRepr) + and not r_list.listitem.mutated): + self.ll_listnext = ll_listnext_foldable + else: + self.ll_listnext = ll_listnext self.ll_getnextindex = ll_getnextindex def ll_listiter(ITERPTR, lst): @@ -409,5 +413,14 @@ iter.index = index + 1 # cannot overflow because index < l.length return l.ll_getitem_fast(index) +def ll_listnext_foldable(iter): 
+ from pypy.rpython.rlist import ll_getitem_foldable_nonneg + l = iter.list + index = iter.index + if index >= l.ll_length(): + raise StopIteration + iter.index = index + 1 # cannot overflow because index < l.length + return ll_getitem_foldable_nonneg(l, index) + def ll_getnextindex(iter): return iter.index diff --git a/pypy/rpython/memory/gc/minimark.py b/pypy/rpython/memory/gc/minimark.py --- a/pypy/rpython/memory/gc/minimark.py +++ b/pypy/rpython/memory/gc/minimark.py @@ -608,6 +608,11 @@ specified as 0 if the object is not varsized. The returned object is fully initialized and zero-filled.""" # + # Here we really need a valid 'typeid', not 0 (as the JIT might + # try to send us if there is still a bug). + ll_assert(bool(self.combine(typeid, 0)), + "external_malloc: typeid == 0") + # # Compute the total size, carefully checking for overflows. size_gc_header = self.gcheaderbuilder.size_gc_header nonvarsize = size_gc_header + self.fixed_size(typeid) diff --git a/pypy/rpython/test/test_rlist.py b/pypy/rpython/test/test_rlist.py --- a/pypy/rpython/test/test_rlist.py +++ b/pypy/rpython/test/test_rlist.py @@ -8,6 +8,7 @@ from pypy.rpython.rlist import * from pypy.rpython.lltypesystem.rlist import ListRepr, FixedSizeListRepr, ll_newlist, ll_fixed_newlist from pypy.rpython.lltypesystem import rlist as ll_rlist +from pypy.rpython.llinterp import LLException from pypy.rpython.ootypesystem import rlist as oo_rlist from pypy.rpython.rint import signed_repr from pypy.objspace.flow.model import Constant, Variable @@ -1477,6 +1478,80 @@ assert func1.oopspec == 'list.getitem_foldable(l, index)' assert not hasattr(func2, 'oopspec') + def test_iterate_over_immutable_list(self): + from pypy.rpython import rlist + class MyException(Exception): + pass + lst = list('abcdef') + def dummyfn(): + total = 0 + for c in lst: + total += ord(c) + return total + # + prev = rlist.ll_getitem_foldable_nonneg + try: + def seen_ok(l, index): + if index == 5: + raise KeyError # expected case + 
return prev(l, index) + rlist.ll_getitem_foldable_nonneg = seen_ok + e = raises(LLException, self.interpret, dummyfn, []) + assert 'KeyError' in str(e.value) + finally: + rlist.ll_getitem_foldable_nonneg = prev + + def test_iterate_over_immutable_list_quasiimmut_attr(self): + from pypy.rpython import rlist + class MyException(Exception): + pass + class Foo: + _immutable_fields_ = ['lst?[*]'] + lst = list('abcdef') + foo = Foo() + def dummyfn(): + total = 0 + for c in foo.lst: + total += ord(c) + return total + # + prev = rlist.ll_getitem_foldable_nonneg + try: + def seen_ok(l, index): + if index == 5: + raise KeyError # expected case + return prev(l, index) + rlist.ll_getitem_foldable_nonneg = seen_ok + e = raises(LLException, self.interpret, dummyfn, []) + assert 'KeyError' in str(e.value) + finally: + rlist.ll_getitem_foldable_nonneg = prev + + def test_iterate_over_mutable_list(self): + from pypy.rpython import rlist + class MyException(Exception): + pass + lst = list('abcdef') + def dummyfn(): + total = 0 + for c in lst: + total += ord(c) + lst[0] = 'x' + return total + # + prev = rlist.ll_getitem_foldable_nonneg + try: + def seen_ok(l, index): + if index == 5: + raise KeyError # expected case + return prev(l, index) + rlist.ll_getitem_foldable_nonneg = seen_ok + res = self.interpret(dummyfn, []) + assert res == sum(map(ord, 'abcdef')) + finally: + rlist.ll_getitem_foldable_nonneg = prev + + class TestOOtype(BaseTestRlist, OORtypeMixin): rlist = oo_rlist type_system = 'ootype' From noreply at buildbot.pypy.org Thu Mar 1 09:07:35 2012 From: noreply at buildbot.pypy.org (wlav) Date: Thu, 1 Mar 2012 09:07:35 +0100 (CET) Subject: [pypy-commit] pypy reflex-support: access to global builtin objects Message-ID: <20120301080735.B3DFE8204C@wyvern.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r53037:95808653de2b Date: 2012-02-29 14:47 -0800 http://bitbucket.org/pypy/pypy/changeset/95808653de2b/ Log: access to global builtin objects diff 
--git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py --- a/pypy/module/cppyy/pythonify.py +++ b/pypy/module/cppyy/pythonify.py @@ -67,7 +67,7 @@ return method -def make_datamember(cppdm): +def make_data_member(cppdm): rettype = cppdm.get_returntype() if not rettype: # return builtin type cppclass = None @@ -103,7 +103,7 @@ # static ones also to the meta class (needed for property setters) for dm in cppns.get_data_member_names(): cppdm = cppns.get_data_member(dm) - pydm = make_datamember(cppdm) + pydm = make_data_member(cppdm) nsdct[dm] = pydm setattr(metans, dm, pydm) @@ -178,7 +178,7 @@ # static ones also to the meta class (needed for property setters) for dm_name in cpptype.get_data_member_names(): cppdm = cpptype.get_data_member(dm_name) - pydm = make_datamember(cppdm) + pydm = make_data_member(cppdm) setattr(pycpptype, dm_name, pydm) if cppdm.is_static(): @@ -199,23 +199,35 @@ scope = gbl fullname = name - # lookup class ... + # lookup if already created (e.g. as a function return type) try: return _existing_cppitems[fullname] except KeyError: pass - # ... if lookup failed, create (classes, templates, functions) + # ... if lookup failed, create as appropriate pycppitem = None + # namespaces are "open"; TODO: classes are too (template methods, inner classes ...) 
+ if isinstance(scope, CppyyNamespaceMeta): + global _loaded_dictionaries_isdirty + if _loaded_dictionaries_isdirty: # TODO: this should be per namespace + if not scope._cpp_proxy: + scope._cpp_proxy = cppyy._type_byname(scope.__name__) + scope._cpp_proxy.update() # TODO: this is currently quadratic + _loaded_dictionaries_isdirty = False + + # classes cppitem = cppyy._type_byname(fullname) if cppitem: if cppitem.is_namespace(): pycppitem = make_cppnamespace(fullname, cppitem) else: pycppitem = make_cppclass(fullname, cppitem) + _existing_cppitems[fullname] = pycppitem scope.__dict__[name] = pycppitem + # templates if not cppitem: cppitem = cppyy._template_byname(fullname) if cppitem: @@ -223,18 +235,29 @@ _existing_cppitems[fullname] = pycppitem scope.__dict__[name] = pycppitem - if not cppitem and isinstance(scope, CppyyNamespaceMeta): - global _loaded_dictionaries_isdirty - if _loaded_dictionaries_isdirty: # TODO: this should've been per namespace - scope._cpp_proxy.update() # TODO: this is currently quadratic - cppitem = scope._cpp_proxy.get_overload(name) - pycppitem = make_static_function(scope._cpp_proxy, name, cppitem) - setattr(scope.__class__, name, pycppitem) - pycppitem = getattr(scope, name) - _loaded_dictionaries_isdirty = False + # functions + if not cppitem: + try: + cppitem = scope._cpp_proxy.get_overload(name) + pycppitem = make_static_function(scope._cpp_proxy, name, cppitem) + setattr(scope.__class__, name, pycppitem) + pycppitem = getattr(scope, name) # binds function as needed + except AttributeError: + pass + + # data + if not cppitem: + try: + cppitem = scope._cpp_proxy.get_data_member(name) + pycppitem = make_data_member(cppitem) + setattr(scope, name, pycppitem) + if cppitem.is_static(): + setattr(scope.__class__, name, pycppitem) + pycppitem = getattr(scope, name) # gets actual property value + except AttributeError: + pass if pycppitem: - _existing_cppitems[fullname] = pycppitem return pycppitem raise AttributeError("'%s' has no attribute 
'%s'", (str(scope), name)) diff --git a/pypy/module/cppyy/test/datatypes.cxx b/pypy/module/cppyy/test/datatypes.cxx --- a/pypy/module/cppyy/test/datatypes.cxx +++ b/pypy/module/cppyy/test/datatypes.cxx @@ -1,6 +1,7 @@ #include "datatypes.h" +//=========================================================================== cppyy_test_data::cppyy_test_data() : m_owns_arrays(false) { m_bool = false; @@ -72,7 +73,7 @@ } } -// getters +//- getters ----------------------------------------------------------------- bool cppyy_test_data::get_bool() { return m_bool; } char cppyy_test_data::get_char() { return m_char; } unsigned char cppyy_test_data::get_uchar() { return m_uchar; } @@ -103,7 +104,7 @@ double* cppyy_test_data::get_double_array() { return m_double_array; } double* cppyy_test_data::get_double_array2() { return m_double_array2; } -// setters +//- setters ----------------------------------------------------------------- void cppyy_test_data::set_bool(bool b) { m_bool = b; } void cppyy_test_data::set_char(char c) { m_char = c; } void cppyy_test_data::set_uchar(unsigned char uc) { m_uchar = uc; } @@ -127,6 +128,8 @@ float cppyy_test_data::s_float = -404.f; double cppyy_test_data::s_double = -505.; + +//= global functions ======================================================== long get_pod_address(cppyy_test_data& c) { return (long)&c.m_pod; @@ -141,3 +144,28 @@ { return (long)&c.m_pod.m_double; } + +//= global variables/pointers =============================================== +int g_int = 42; + +void set_global_int(int i) { + g_int = i; +} + +int get_global_int() { + return g_int; +} + +cppyy_test_pod* g_pod = (cppyy_test_pod*)0; + +bool is_global_pod(cppyy_test_pod* t) { + return t == g_pod; +} + +void set_global_pod(cppyy_test_pod* t) { + g_pod = t; +} + +cppyy_test_pod* get_global_pod() { + return g_pod; +} diff --git a/pypy/module/cppyy/test/datatypes.h b/pypy/module/cppyy/test/datatypes.h --- a/pypy/module/cppyy/test/datatypes.h +++ 
b/pypy/module/cppyy/test/datatypes.h @@ -114,7 +114,18 @@ }; -// global functions +//= global functions ======================================================== long get_pod_address(cppyy_test_data& c); long get_int_address(cppyy_test_data& c); long get_double_address(cppyy_test_data& c); + + +//= global variables/pointers =============================================== +extern int g_int; +void set_global_int(int i); +int get_global_int(); + +extern cppyy_test_pod* g_pod; +bool is_global_pod(cppyy_test_pod* t); +void set_global_pod(cppyy_test_pod* t); +cppyy_test_pod* get_global_pod(); diff --git a/pypy/module/cppyy/test/datatypes.xml b/pypy/module/cppyy/test/datatypes.xml --- a/pypy/module/cppyy/test/datatypes.xml +++ b/pypy/module/cppyy/test/datatypes.xml @@ -3,7 +3,12 @@ + + + + + diff --git a/pypy/module/cppyy/test/test_advancedcpp.py b/pypy/module/cppyy/test/test_advancedcpp.py --- a/pypy/module/cppyy/test/test_advancedcpp.py +++ b/pypy/module/cppyy/test/test_advancedcpp.py @@ -233,7 +233,7 @@ assert b.m_a == 11 assert b.m_da == 11.11 assert b.m_b == 22 - # assert b.get_value() == 22 + assert b.get_value() == 22 b.m_db = 22.22 assert b.m_db == 22.22 @@ -257,7 +257,7 @@ assert c1.m_a == 11 assert c1.m_b == 22 assert c1.m_c == 33 - # assert c1.get_value() == 33 + assert c1.get_value() == 33 c1.destruct() @@ -285,7 +285,7 @@ assert d.m_b == 22 assert d.m_c == 33 assert d.m_d == 44 - # assert d.get_value() == 44 + assert d.get_value() == 44 d.destruct() diff --git a/pypy/module/cppyy/test/test_cppyy.py b/pypy/module/cppyy/test/test_cppyy.py --- a/pypy/module/cppyy/test/test_cppyy.py +++ b/pypy/module/cppyy/test/test_cppyy.py @@ -114,7 +114,6 @@ e2.destruct() assert t.get_overload("getCount").call(None, None) == 0 - raises(TypeError, t.get_overload("addDataToInt").call, 41, None, 4) def test05_memory(self): diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ 
b/pypy/module/cppyy/test/test_datatypes.py @@ -332,3 +332,21 @@ raises(TypeError, c.m_int, 1.) c.destruct() + + def test09_global_builtin_type(self): + """Test access to a global builtin type""" + + import cppyy + gbl = cppyy.gbl + + import pprint + pprint.pprint(dir(gbl)) + assert gbl.g_int == gbl.get_global_int() + + gbl.set_global_int(32) + assert gbl.get_global_int() == 32 + assert gbl.g_int == 32 + + gbl.g_int = 22 + assert gbl.get_global_int() == 22 + assert gbl.g_int == 22 From noreply at buildbot.pypy.org Thu Mar 1 09:07:36 2012 From: noreply at buildbot.pypy.org (wlav) Date: Thu, 1 Mar 2012 09:07:36 +0100 (CET) Subject: [pypy-commit] pypy reflex-support: support for global pointer types Message-ID: <20120301080736.EAC2E8204C@wyvern.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r53038:239a04b34d26 Date: 2012-02-29 16:44 -0800 http://bitbucket.org/pypy/pypy/changeset/239a04b34d26/ Log: support for global pointer types diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -16,8 +16,19 @@ from pypy.module.cppyy.interp_cppyy import W_CPPInstance cppinstance = space.interp_w(W_CPPInstance, w_obj, can_be_None=True) if cppinstance: - assert lltype.typeOf(cppinstance.rawobject) == capi.C_OBJECT - return cppinstance.rawobject + rawobject = cppinstance.get_rawobject() + assert lltype.typeOf(rawobject) == capi.C_OBJECT + return rawobject + return capi.C_NULL_OBJECT + +def get_rawobject_nonnull(space, w_obj): + from pypy.module.cppyy.interp_cppyy import W_CPPInstance + cppinstance = space.interp_w(W_CPPInstance, w_obj, can_be_None=True) + if cppinstance: + cppinstance._nullcheck() + rawobject = cppinstance.get_rawobject() + assert lltype.typeOf(rawobject) == capi.C_OBJECT + return rawobject return capi.C_NULL_OBJECT @@ -31,7 +42,7 @@ pass def _get_raw_address(self, space, w_obj, offset): - rawobject = get_rawobject(space, w_obj) + 
rawobject = get_rawobject_nonnull(space, w_obj) assert lltype.typeOf(rawobject) == capi.C_OBJECT if rawobject: fieldptr = capi.direct_ptradd(rawobject, offset) @@ -123,7 +134,7 @@ def to_memory(self, space, w_obj, w_value, offset): # copy only the pointer value - rawobject = get_rawobject(space, w_obj) + rawobject = get_rawobject_nonnull(space, w_obj) byteptr = rffi.cast(rffi.CCHARPP, capi.direct_ptradd(rawobject, offset)) buf = space.buffer_w(w_value) try: @@ -500,9 +511,10 @@ obj = space.interpclass_w(w_obj) if isinstance(obj, W_CPPInstance): if capi.c_is_subtype(obj.cppclass.handle, self.cpptype.handle): + rawobject = obj.get_rawobject() offset = capi.c_base_offset( - obj.cppclass.handle, self.cpptype.handle, obj.rawobject) - obj_address = capi.direct_ptradd(obj.rawobject, offset) + obj.cppclass.handle, self.cpptype.handle, rawobject) + obj_address = capi.direct_ptradd(rawobject, offset) return rffi.cast(capi.C_OBJECT, obj_address) raise TypeError("cannot pass %s as %s" % (space.type(w_obj).getname(space, "?"), self.cpptype.name)) @@ -517,6 +529,14 @@ def convert_argument_libffi(self, space, w_obj, argchain): argchain.arg(self._unwrap_object(space, w_obj)) + def from_memory(self, space, w_obj, w_type, offset): + address = rffi.cast(capi.C_OBJECT, self._get_raw_address(space, w_obj, offset)) + from pypy.module.cppyy import interp_cppyy + return interp_cppyy.new_instance(space, w_type, self.cpptype, address, True, False) + + def to_memory(self, space, w_obj, w_value, offset): + address = rffi.cast(rffi.VOIDPP, self._get_raw_address(space, w_obj, offset)) + address[0] = rffi.cast(rffi.VOIDP, self._unwrap_object(space, w_value)) class InstanceConverter(InstancePtrConverter): _immutable_ = True @@ -524,7 +544,10 @@ def from_memory(self, space, w_obj, w_type, offset): address = rffi.cast(capi.C_OBJECT, self._get_raw_address(space, w_obj, offset)) from pypy.module.cppyy import interp_cppyy - return interp_cppyy.new_instance(space, w_type, self.cpptype, address, False) 
+ return interp_cppyy.new_instance(space, w_type, self.cpptype, address, False, False) + + def to_memory(self, space, w_obj, w_value, offset): + self._is_abstract(space) class StdStringConverter(InstanceConverter): diff --git a/pypy/module/cppyy/executor.py b/pypy/module/cppyy/executor.py --- a/pypy/module/cppyy/executor.py +++ b/pypy/module/cppyy/executor.py @@ -248,12 +248,12 @@ from pypy.module.cppyy import interp_cppyy long_result = capi.c_call_l(cppmethod, cppthis, num_args, args) ptr_result = rffi.cast(capi.C_OBJECT, long_result) - return interp_cppyy.new_instance(space, w_returntype, self.cpptype, ptr_result, False) + return interp_cppyy.new_instance(space, w_returntype, self.cpptype, ptr_result, False, False) def execute_libffi(self, space, w_returntype, libffifunc, argchain): from pypy.module.cppyy import interp_cppyy ptr_result = rffi.cast(capi.C_OBJECT, libffifunc.call(argchain, rffi.VOIDP)) - return interp_cppyy.new_instance(space, w_returntype, self.cpptype, ptr_result, False) + return interp_cppyy.new_instance(space, w_returntype, self.cpptype, ptr_result, False, False) class InstanceExecutor(InstancePtrExecutor): @@ -263,7 +263,7 @@ from pypy.module.cppyy import interp_cppyy long_result = capi.c_call_o(cppmethod, cppthis, num_args, args, self.cpptype.handle) ptr_result = rffi.cast(capi.C_OBJECT, long_result) - return interp_cppyy.new_instance(space, w_returntype, self.cpptype, ptr_result, True) + return interp_cppyy.new_instance(space, w_returntype, self.cpptype, ptr_result, False, True) def execute_libffi(self, space, w_returntype, libffifunc, argchain): from pypy.module.cppyy.interp_cppyy import FastCallNotPossible diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -225,7 +225,7 @@ except Exception: capi.c_deallocate(self.cpptype.handle, newthis) raise - return new_instance(self.space, w_type, self.cpptype, newthis, True) + return 
new_instance(self.space, w_type, self.cpptype, newthis, False, True) class W_CPPOverload(Wrappable): @@ -300,7 +300,7 @@ if cppinstance: assert lltype.typeOf(cppinstance.cppclass.handle) == lltype.typeOf(self.scope_handle) offset = self.offset + capi.c_base_offset( - cppinstance.cppclass.handle, self.scope_handle, cppinstance.rawobject) + cppinstance.cppclass.handle, self.scope_handle, cppinstance.get_rawobject()) else: offset = self.offset return offset @@ -310,7 +310,7 @@ offset = self._get_offset(cppinstance) try: return self.converter.from_memory(self.space, w_cppinstance, w_type, offset) - except Exception, e: + except TypeError, e: raise OperationError(self.space.w_TypeError, self.space.wrap(str(e))) except ValueError, e: raise OperationError(self.space.w_ValueError, self.space.wrap(str(e))) @@ -490,7 +490,7 @@ def get_cppthis(self, cppinstance, scope_handle): assert self.handle == cppinstance.cppclass.handle - return cppinstance.rawobject + return cppinstance.get_rawobject() def is_namespace(self): return self.space.w_False @@ -521,8 +521,8 @@ def get_cppthis(self, cppinstance, scope_handle): assert self.handle == cppinstance.cppclass.handle - offset = capi.c_base_offset(self.handle, scope_handle, cppinstance.rawobject) - return capi.direct_ptradd(cppinstance.rawobject, offset) + offset = capi.c_base_offset(self.handle, scope_handle, cppinstance.get_rawobject()) + return capi.direct_ptradd(cppinstance.get_rawobject(), offset) W_ComplexCPPType.typedef = TypeDef( 'ComplexCPPType', @@ -559,24 +559,34 @@ class W_CPPInstance(Wrappable): - _immutable_fields_ = ["cppclass"] + _immutable_fields_ = ["cppclass", "isref"] - def __init__(self, space, cppclass, rawobject, python_owns): + def __init__(self, space, cppclass, rawobject, isref, python_owns): self.space = space assert isinstance(cppclass, W_CPPType) self.cppclass = cppclass assert lltype.typeOf(rawobject) == capi.C_OBJECT - self.rawobject = rawobject + assert not isref or rawobject + self._rawobject = 
rawobject + assert not isref or not python_owns + self.isref = isref self.python_owns = python_owns def _nullcheck(self): - if not self.rawobject: + if not self._rawobject or (self.isref and not self.get_rawobject()): raise OperationError(self.space.w_ReferenceError, self.space.wrap("trying to access a NULL pointer")) + def get_rawobject(self): + if not self.isref: + return self._rawobject + else: + ptrptr = rffi.cast(rffi.VOIDPP, self._rawobject) + return rffi.cast(capi.C_OBJECT, ptrptr[0]) + def instance__eq__(self, w_other): other = self.space.interp_w(W_CPPInstance, w_other, can_be_None=False) - iseq = self.rawobject == other.rawobject + iseq = self._rawobject == other._rawobject return self.space.wrap(iseq) def instance__ne__(self, w_other): @@ -584,17 +594,16 @@ def destruct(self): assert isinstance(self, W_CPPInstance) - if self.rawobject: + if self._rawobject and not self.isref: memory_regulator.unregister(self) - capi.c_destruct(self.cppclass.handle, self.rawobject) - self.rawobject = capi.C_NULL_OBJECT + capi.c_destruct(self.cppclass.handle, self._rawobject) + self._rawobject = capi.C_NULL_OBJECT def __del__(self): if self.python_owns: self.enqueue_for_destruction(self.space, W_CPPInstance.destruct, '__del__() method of ') - W_CPPInstance.typedef = TypeDef( 'CPPInstance', cppclass = interp_attrproperty('cppclass', cls=W_CPPInstance), @@ -617,11 +626,11 @@ self.objects = rweakref.RWeakValueDictionary(int, W_CPPInstance) def register(self, obj): - int_address = int(rffi.cast(rffi.LONG, obj.rawobject)) + int_address = int(rffi.cast(rffi.LONG, obj._rawobject)) self.objects.set(int_address, obj) def unregister(self, obj): - int_address = int(rffi.cast(rffi.LONG, obj.rawobject)) + int_address = int(rffi.cast(rffi.LONG, obj._rawobject)) self.objects.set(int_address, None) def retrieve(self, address): @@ -631,19 +640,19 @@ memory_regulator = MemoryRegulator() -def new_instance(space, w_type, cpptype, rawobject, python_owns): +def new_instance(space, w_type, 
cpptype, rawobject, isref, python_owns): obj = memory_regulator.retrieve(rawobject) if obj and obj.cppclass == cpptype: return obj w_cppinstance = space.allocate_instance(W_CPPInstance, w_type) cppinstance = space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=False) - W_CPPInstance.__init__(cppinstance, space, cpptype, rawobject, python_owns) + W_CPPInstance.__init__(cppinstance, space, cpptype, rawobject, isref, python_owns) memory_regulator.register(cppinstance) return w_cppinstance @unwrap_spec(cppinstance=W_CPPInstance) def addressof(space, cppinstance): - address = rffi.cast(rffi.LONG, cppinstance.rawobject) + address = rffi.cast(rffi.LONG, cppinstance.get_rawobject()) return space.wrap(address) @unwrap_spec(address=int, owns=bool) @@ -656,4 +665,4 @@ if obj and obj.cppclass == cpptype: return obj - return new_instance(space, w_type, cpptype, rawobject, owns) + return new_instance(space, w_type, cpptype, rawobject, False, owns) diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/cppyy/test/test_datatypes.py @@ -339,8 +339,6 @@ import cppyy gbl = cppyy.gbl - import pprint - pprint.pprint(dir(gbl)) assert gbl.g_int == gbl.get_global_int() gbl.set_global_int(32) @@ -350,3 +348,34 @@ gbl.g_int = 22 assert gbl.get_global_int() == 22 assert gbl.g_int == 22 + + def test10_global_ptr(self): + """Test access of global objects through a pointer""" + + import cppyy + gbl = cppyy.gbl + + raises(ReferenceError, 'gbl.g_pod.m_int') + + c = gbl.cppyy_test_pod() + c.m_int = 42 + c.m_double = 3.14 + + gbl.set_global_pod(c) + assert gbl.is_global_pod(c) + assert gbl.g_pod.m_int == 42 + assert gbl.g_pod.m_double == 3.14 + + d = gbl.get_global_pod() + assert gbl.is_global_pod(d) + assert c == d + assert id(c) == id(d) + + e = gbl.cppyy_test_pod() + e.m_int = 43 + e.m_double = 2.14 + + gbl.g_pod = e + assert gbl.is_global_pod(e) + assert gbl.g_pod.m_int == 43 + assert 
gbl.g_pod.m_double == 2.14 From noreply at buildbot.pypy.org Thu Mar 1 09:07:38 2012 From: noreply at buildbot.pypy.org (wlav) Date: Thu, 1 Mar 2012 09:07:38 +0100 (CET) Subject: [pypy-commit] pypy reflex-support: global variables and pointers for CINT backend Message-ID: <20120301080738.26A0E8204C@wyvern.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r53039:33cb5ec4fc46 Date: 2012-02-29 23:08 -0800 http://bitbucket.org/pypy/pypy/changeset/33cb5ec4fc46/ Log: global variables and pointers for CINT backend diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py --- a/pypy/module/cppyy/pythonify.py +++ b/pypy/module/cppyy/pythonify.py @@ -91,30 +91,25 @@ return property(binder, setter) -def update_cppnamespace(nsdct, metans): - cppns = nsdct["_cpp_proxy"] - - # insert static methods into the "namespace" dictionary - for func_name in cppns.get_method_names(): - cppol = cppns.get_overload(func_name) - nsdct[func_name] = make_static_function(cppns, func_name, cppol) - - # add all data members to the dictionary of the class to be created, and - # static ones also to the meta class (needed for property setters) - for dm in cppns.get_data_member_names(): - cppdm = cppns.get_data_member(dm) - pydm = make_data_member(cppdm) - nsdct[dm] = pydm - setattr(metans, dm, pydm) - -def make_cppnamespace(namespace_name, cppns, update=True): +def make_cppnamespace(namespace_name, cppns, build_in_full=True): nsdct = {"_cpp_proxy" : cppns } # create a meta class to allow properties (for static data write access) metans = type(CppyyNamespaceMeta)(namespace_name+'_meta', (CppyyNamespaceMeta,), {}) - if update: - update_cppnamespace(nsdct, metans) + if build_in_full: # if False, rely on lazy build-up + # insert static methods into the "namespace" dictionary + for func_name in cppns.get_method_names(): + cppol = cppns.get_overload(func_name) + nsdct[func_name] = make_static_function(cppns, func_name, cppol) + + # add all data members to the 
dictionary of the class to be created, and + # static ones also to the meta class (needed for property setters) + for dm in cppns.get_data_member_names(): + cppdm = cppns.get_data_member(dm) + pydm = make_data_member(cppdm) + nsdct[dm] = pydm + setattr(metans, dm, pydm) # create the python-side C++ namespace representation pycppns = metans(namespace_name, (object,), nsdct) @@ -316,7 +311,6 @@ # cause the creation of classes in the global namespace, so gbl must exist at # that point to cache them) gbl = make_cppnamespace("::", cppyy._type_byname(""), False) # global C++ namespace -update_cppnamespace(gbl.__dict__, type(gbl)) # mostly for the benefit of CINT, which treats std as special gbl.std = make_cppnamespace("std", cppyy._type_byname("std"), False) diff --git a/pypy/module/cppyy/src/cintcwrapper.cxx b/pypy/module/cppyy/src/cintcwrapper.cxx --- a/pypy/module/cppyy/src/cintcwrapper.cxx +++ b/pypy/module/cppyy/src/cintcwrapper.cxx @@ -17,12 +17,13 @@ #include "TClassEdit.h" #include "TClassRef.h" #include "TDataMember.h" +#include "TFunction.h" +#include "TGlobal.h" #include "TMethod.h" #include "TMethodArg.h" #include #include -#include #include #include #include @@ -45,10 +46,14 @@ class ClassRefsInit { public: - ClassRefsInit() { // setup dummy holder for global namespace + ClassRefsInit() { // setup dummy holders for global and std namespaces assert(g_classrefs.size() == (ClassRefs_t::size_type)GLOBAL_HANDLE); g_classref_indices[""] = (ClassRefs_t::size_type)GLOBAL_HANDLE; g_classrefs.push_back(TClassRef("")); + g_classref_indices["std"] = g_classrefs.size(); + g_classrefs.push_back(TClassRef("")); // CINT ignores std + g_classref_indices["::std"] = g_classrefs.size(); + g_classrefs.push_back(TClassRef("")); // id. 
} }; static ClassRefsInit _classrefs_init; @@ -56,6 +61,9 @@ typedef std::vector GlobalFuncs_t; static GlobalFuncs_t g_globalfuncs; +typedef std::vector GlobalVars_t; +static GlobalVars_t g_globalvars; + /* initialization of th ROOT system (debatable ... ) ---------------------- */ namespace { @@ -230,7 +238,7 @@ } void cppyy_call_v(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { - cppyy_call_T(method, self, nargs, args); + cppyy_call_T(method, self, nargs, args); } int cppyy_call_b(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { @@ -277,9 +285,9 @@ G__value result = cppyy_call_T(method, self, nargs, args); G__pop_tempobject_nodel(); if (result.ref && *(long*)result.ref) { - char* charp = cppstring_to_cstring(*(std::string*)result.ref); - delete (std::string*)result.ref; - return charp; + char* charp = cppstring_to_cstring(*(std::string*)result.ref); + delete (std::string*)result.ref; + return charp; } return cppstring_to_cstring(""); } @@ -404,7 +412,7 @@ /* method/function reflection information --------------------------------- */ int cppyy_num_methods(cppyy_scope_t handle) { TClassRef cr = type_from_handle(handle); - if (cr.GetClass() && cr->GetListOfMethods()) + if (cr.GetClass() && cr->GetListOfMethods()) return cr->GetListOfMethods()->GetSize(); else if (strcmp(cr.GetClassName(), "") == 0) { // NOTE: the updated list of global funcs grows with 5 "G__ateval"'s just @@ -452,8 +460,8 @@ } char* cppyy_method_arg_default(cppyy_scope_t, int, int) { -/* unused: libffi does not work with CINT back-end */ - return cppstring_to_cstring(""); + /* unused: libffi does not work with CINT back-end */ + return cppstring_to_cstring(""); } cppyy_method_t cppyy_get_method(cppyy_scope_t handle, int method_index) { @@ -480,48 +488,80 @@ int cppyy_num_data_members(cppyy_scope_t handle) { TClassRef cr = type_from_handle(handle); if (cr.GetClass() && cr->GetListOfDataMembers()) - return cr->GetListOfDataMembers()->GetSize(); + return 
cr->GetListOfDataMembers()->GetSize(); + else if (strcmp(cr.GetClassName(), "") == 0) { + TCollection* vars = gROOT->GetListOfGlobals(kTRUE); + if (g_globalvars.size() != (GlobalVars_t::size_type)vars->GetSize()) { + g_globalvars.clear(); + g_globalvars.reserve(vars->GetSize()); + + TIter ivar(vars); + + TGlobal* var = 0; + while ((var = (TGlobal*)ivar.Next())) + g_globalvars.push_back(var); + } + return (int)g_globalvars.size(); + } return 0; } char* cppyy_data_member_name(cppyy_scope_t handle, int data_member_index) { TClassRef cr = type_from_handle(handle); - TDataMember* m = (TDataMember*)cr->GetListOfDataMembers()->At(data_member_index); - return cppstring_to_cstring(m->GetName()); + if (cr.GetClass()) { + TDataMember* m = (TDataMember*)cr->GetListOfDataMembers()->At(data_member_index); + return cppstring_to_cstring(m->GetName()); + } + TGlobal* gbl = g_globalvars[data_member_index]; + return cppstring_to_cstring(gbl->GetName()); } char* cppyy_data_member_type(cppyy_scope_t handle, int data_member_index) { TClassRef cr = type_from_handle(handle); - TDataMember* m = (TDataMember*)cr->GetListOfDataMembers()->At(data_member_index); - std::string fullType = m->GetFullTypeName(); - if ((int)m->GetArrayDim() > 1 || (!m->IsBasic() && m->IsaPointer())) - fullType.append("*"); - else if ((int)m->GetArrayDim() == 1) { - std::ostringstream s; - s << '[' << m->GetMaxIndex(0) << ']' << std::ends; - fullType.append(s.str()); + if (cr.GetClass()) { + TDataMember* m = (TDataMember*)cr->GetListOfDataMembers()->At(data_member_index); + std::string fullType = m->GetFullTypeName(); + if ((int)m->GetArrayDim() > 1 || (!m->IsBasic() && m->IsaPointer())) + fullType.append("*"); + else if ((int)m->GetArrayDim() == 1) { + std::ostringstream s; + s << '[' << m->GetMaxIndex(0) << ']' << std::ends; + fullType.append(s.str()); + } + return cppstring_to_cstring(fullType); } - return cppstring_to_cstring(fullType); + TGlobal* gbl = g_globalvars[data_member_index]; + return 
cppstring_to_cstring(gbl->GetFullTypeName()); } size_t cppyy_data_member_offset(cppyy_scope_t handle, int data_member_index) { TClassRef cr = type_from_handle(handle); - TDataMember* m = (TDataMember*)cr->GetListOfDataMembers()->At(data_member_index); - return m->GetOffsetCint(); + if (cr.GetClass()) { + TDataMember* m = (TDataMember*)cr->GetListOfDataMembers()->At(data_member_index); + return (size_t)m->GetOffsetCint(); + } + TGlobal* gbl = g_globalvars[data_member_index]; + return (size_t)gbl->GetAddress(); } /* data member properties ------------------------------------------------ */ int cppyy_is_publicdata(cppyy_scope_t handle, int data_member_index) { TClassRef cr = type_from_handle(handle); - TDataMember* m = (TDataMember*)cr->GetListOfDataMembers()->At(data_member_index); - return m->Property() & G__BIT_ISPUBLIC; + if (cr.GetClass()) { + TDataMember* m = (TDataMember*)cr->GetListOfDataMembers()->At(data_member_index); + return m->Property() & G__BIT_ISPUBLIC; + } + return 1; // global data is always public } int cppyy_is_staticdata(cppyy_scope_t handle, int data_member_index) { TClassRef cr = type_from_handle(handle); - TDataMember* m = (TDataMember*)cr->GetListOfDataMembers()->At(data_member_index); - return m->Property() & G__BIT_ISSTATIC; + if (cr.GetClass()) { + TDataMember* m = (TDataMember*)cr->GetListOfDataMembers()->At(data_member_index); + return m->Property() & G__BIT_ISSTATIC; + } + return 1; // global data is always static } @@ -539,11 +579,11 @@ } cppyy_object_t cppyy_charp2stdstring(const char* str) { - return (cppyy_object_t)new std::string(str); + return (cppyy_object_t)new std::string(str); } cppyy_object_t cppyy_stdstring2stdstring(cppyy_object_t ptr) { - return (cppyy_object_t)new std::string(*(std::string*)ptr); + return (cppyy_object_t)new std::string(*(std::string*)ptr); } void cppyy_free_stdstring(cppyy_object_t ptr) { @@ -551,7 +591,7 @@ } void* cppyy_load_dictionary(const char* lib_name) { - if (0 <= gSystem->Load(lib_name)) - 
return (void*)1; - return (void*)0; + if (0 <= gSystem->Load(lib_name)) + return (void*)1; + return (void*)0; } diff --git a/pypy/module/cppyy/src/reflexcwrapper.cxx b/pypy/module/cppyy/src/reflexcwrapper.cxx --- a/pypy/module/cppyy/src/reflexcwrapper.cxx +++ b/pypy/module/cppyy/src/reflexcwrapper.cxx @@ -121,7 +121,7 @@ } void* cppyy_call_r(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { - return (void*)cppyy_call_T(method, self, nargs, args); + return (void*)cppyy_call_T(method, self, nargs, args); } char* cppyy_call_s(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { @@ -134,7 +134,7 @@ cppyy_object_t cppyy_call_o(cppyy_method_t method, cppyy_object_t self, int nargs, void* args, cppyy_type_t result_type) { - void* result = (void*)cppyy_allocate(result_type); + void* result = (void*)cppyy_allocate(result_type); std::vector arguments = build_args(nargs, args); Reflex::StubFunction stub = (Reflex::StubFunction)method; stub(result, (void*)self, arguments, NULL /* stub context */); @@ -390,11 +390,11 @@ } cppyy_object_t cppyy_charp2stdstring(const char* str) { - return (cppyy_object_t)new std::string(str); + return (cppyy_object_t)new std::string(str); } cppyy_object_t cppyy_stdstring2stdstring(cppyy_object_t ptr) { - return (cppyy_object_t)new std::string(*(std::string*)ptr); + return (cppyy_object_t)new std::string(*(std::string*)ptr); } void cppyy_free_stdstring(cppyy_object_t ptr) { diff --git a/pypy/module/cppyy/test/datatypes_LinkDef.h b/pypy/module/cppyy/test/datatypes_LinkDef.h --- a/pypy/module/cppyy/test/datatypes_LinkDef.h +++ b/pypy/module/cppyy/test/datatypes_LinkDef.h @@ -6,9 +6,19 @@ #pragma link C++ struct cppyy_test_pod; #pragma link C++ class cppyy_test_data; + #pragma link C++ function get_pod_address(cppyy_test_data&); #pragma link C++ function get_int_address(cppyy_test_data&); #pragma link C++ function get_double_address(cppyy_test_data&); -#pragma link C++ variable N; +#pragma link C++ function 
set_global_int(int); +#pragma link C++ function get_global_int(); + +#pragma link C++ function is_global_pod(cppyy_test_pod*); +#pragma link C++ function set_global_pod(cppyy_test_pod*); +#pragma link C++ function get_global_pod(); + +#pragma link C++ global N; +#pragma link C++ global g_int; +#pragma link C++ global g_pod; #endif diff --git a/pypy/module/cppyy/test/stltypes_LinkDef.h b/pypy/module/cppyy/test/stltypes_LinkDef.h --- a/pypy/module/cppyy/test/stltypes_LinkDef.h +++ b/pypy/module/cppyy/test/stltypes_LinkDef.h @@ -4,9 +4,6 @@ #pragma link off all classes; #pragma link off all functions; -//#pragma link C++ class std::vector; -//#pragma link C++ class std::vector::iterator; -//#pragma link C++ class std::vector::const_iterator; #pragma link C++ class std::vector; #pragma link C++ class std::vector::iterator; #pragma link C++ class std::vector::const_iterator; From noreply at buildbot.pypy.org Thu Mar 1 09:07:39 2012 From: noreply at buildbot.pypy.org (wlav) Date: Thu, 1 Mar 2012 09:07:39 +0100 (CET) Subject: [pypy-commit] pypy reflex-support: fix typo Message-ID: <20120301080739.528078204C@wyvern.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r53040:0dc4a4c9163a Date: 2012-03-01 00:02 -0800 http://bitbucket.org/pypy/pypy/changeset/0dc4a4c9163a/ Log: fix typo diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py --- a/pypy/module/cppyy/pythonify.py +++ b/pypy/module/cppyy/pythonify.py @@ -255,7 +255,7 @@ if pycppitem: return pycppitem - raise AttributeError("'%s' has no attribute '%s'", (str(scope), name)) + raise AttributeError("'%s' has no attribute '%s'" % (str(scope), name)) get_cppclass = get_cppitem # TODO: restrict to classes only (?) 
From noreply at buildbot.pypy.org Thu Mar 1 09:07:40 2012 From: noreply at buildbot.pypy.org (wlav) Date: Thu, 1 Mar 2012 09:07:40 +0100 (CET) Subject: [pypy-commit] pypy reflex-support: merge default Message-ID: <20120301080740.921688204C@wyvern.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r53041:d33d193ce67a Date: 2012-03-01 00:05 -0800 http://bitbucket.org/pypy/pypy/changeset/d33d193ce67a/ Log: merge default diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py --- a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -398,48 +398,38 @@ with raises(InvalidLoop): self.optimize_loop(ops, ops) - def test_maybe_issue1045_related(self): + def test_issue1045(self): ops = """ - [p8] - p54 = getfield_gc(p8, descr=valuedescr) - mark_opaque_ptr(p54) - i55 = getfield_gc(p54, descr=nextdescr) - p57 = new_with_vtable(ConstClass(node_vtable)) - setfield_gc(p57, i55, descr=otherdescr) - p69 = new_with_vtable(ConstClass(node_vtable)) - setfield_gc(p69, i55, descr=otherdescr) - i71 = int_eq(i55, -9223372036854775808) - guard_false(i71) [] - i73 = int_mod(i55, 2) - i75 = int_rshift(i73, 63) - i76 = int_and(2, i75) - i77 = int_add(i73, i76) - p79 = new_with_vtable(ConstClass(node_vtable)) - setfield_gc(p79, i77, descr=otherdescr) - i81 = int_eq(i77, 1) - guard_false(i81) [] - i0 = int_ge(i55, 1) - guard_true(i0) [] - label(p57) - jump(p57) - """ - expected = """ - [p8] - p54 = getfield_gc(p8, descr=valuedescr) - i55 = getfield_gc(p54, descr=nextdescr) - i71 = int_eq(i55, -9223372036854775808) - guard_false(i71) [] + [i55] i73 = int_mod(i55, 2) i75 = int_rshift(i73, 63) i76 = int_and(2, i75) i77 = int_add(i73, i76) i81 = int_eq(i77, 1) - guard_false(i81) [] i0 = int_ge(i55, 1) guard_true(i0) [] label(i55) + i3 = int_mod(i55, 2) + i5 = int_rshift(i3, 63) + i6 = int_and(2, i5) + i7 = int_add(i3, i6) + i8 = 
int_eq(i7, 1) + escape(i8) jump(i55) """ + expected = """ + [i55] + i73 = int_mod(i55, 2) + i75 = int_rshift(i73, 63) + i76 = int_and(2, i75) + i77 = int_add(i73, i76) + i81 = int_eq(i77, 1) + i0 = int_ge(i55, 1) + guard_true(i0) [] + label(i55, i81) + escape(i81) + jump(i55, i81) + """ self.optimize_loop(ops, expected) class OptRenameStrlen(Optimization): @@ -467,7 +457,7 @@ metainterp_sd = FakeMetaInterpStaticData(self.cpu) optimize_unroll(metainterp_sd, loop, [OptRenameStrlen(), OptPure()], True) - def test_optimizer_renaming_boxes(self): + def test_optimizer_renaming_boxes1(self): ops = """ [p1] i1 = strlen(p1) diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -260,7 +260,7 @@ if op and op.result: preamble_value = exported_state.exported_values[op.result] value = self.optimizer.getvalue(op.result) - if not value.is_virtual(): + if not value.is_virtual() and not value.is_constant(): imp = ValueImporter(self, preamble_value, op) self.optimizer.importable_values[value] = imp newvalue = self.optimizer.getvalue(op.result) @@ -268,7 +268,9 @@ # note that emitting here SAME_AS should not happen, but # in case it does, we would prefer to be suboptimal in asm # to a fatal RPython exception. - if newresult is not op.result and not newvalue.is_constant(): + if newresult is not op.result and \ + not self.short_boxes.has_producer(newresult) and \ + not newvalue.is_constant(): op = ResOperation(rop.SAME_AS, [op.result], newresult) self.optimizer._newoperations.append(op) if self.optimizer.loop.logops: diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -2349,7 +2349,7 @@ # warmstate.py. 
virtualizable_box = self.virtualizable_boxes[-1] virtualizable = vinfo.unwrap_virtualizable_box(virtualizable_box) - assert not vinfo.gettoken(virtualizable) + assert not vinfo.is_token_nonnull_gcref(virtualizable) # fill the virtualizable with the local boxes self.synchronize_virtualizable() # diff --git a/pypy/jit/metainterp/resume.py b/pypy/jit/metainterp/resume.py --- a/pypy/jit/metainterp/resume.py +++ b/pypy/jit/metainterp/resume.py @@ -1101,14 +1101,14 @@ virtualizable = self.decode_ref(numb.nums[index]) if self.resume_after_guard_not_forced == 1: # in the middle of handle_async_forcing() - assert vinfo.gettoken(virtualizable) - vinfo.settoken(virtualizable, vinfo.TOKEN_NONE) + assert vinfo.is_token_nonnull_gcref(virtualizable) + vinfo.reset_token_gcref(virtualizable) else: # just jumped away from assembler (case 4 in the comment in # virtualizable.py) into tracing (case 2); check that vable_token # is and stays 0. Note the call to reset_vable_token() in # warmstate.py. - assert not vinfo.gettoken(virtualizable) + assert not vinfo.is_token_nonnull_gcref(virtualizable) return vinfo.write_from_resume_data_partial(virtualizable, self, numb) def load_value_of_type(self, TYPE, tagged): diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -144,7 +144,7 @@ 'int_mul': 1, 'guard_true': 2, 'int_sub': 2}) - def test_loop_invariant_mul_ovf(self): + def test_loop_invariant_mul_ovf1(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) def f(x, y): res = 0 @@ -235,6 +235,65 @@ 'guard_true': 4, 'int_sub': 4, 'jump': 3, 'int_mul': 3, 'int_add': 4}) + def test_loop_invariant_mul_ovf2(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) + def f(x, y): + res = 0 + while y > 0: + myjitdriver.can_enter_jit(x=x, y=y, res=res) + myjitdriver.jit_merge_point(x=x, y=y, res=res) + b = y * 2 + try: + res += ovfcheck(x * x) + 
b + except OverflowError: + res += 1 + y -= 1 + return res + res = self.meta_interp(f, [sys.maxint, 7]) + assert res == f(sys.maxint, 7) + self.check_trace_count(1) + res = self.meta_interp(f, [6, 7]) + assert res == 308 + + def test_loop_invariant_mul_bridge_ovf1(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x1', 'x2']) + def f(x1, x2, y): + res = 0 + while y > 0: + myjitdriver.can_enter_jit(x1=x1, x2=x2, y=y, res=res) + myjitdriver.jit_merge_point(x1=x1, x2=x2, y=y, res=res) + try: + res += ovfcheck(x1 * x1) + except OverflowError: + res += 1 + if y<32 and (y>>2)&1==0: + x1, x2 = x2, x1 + y -= 1 + return res + res = self.meta_interp(f, [6, sys.maxint, 48]) + assert res == f(6, sys.maxint, 48) + + def test_loop_invariant_mul_bridge_ovf2(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x1', 'x2', 'n']) + def f(x1, x2, n, y): + res = 0 + while y > 0: + myjitdriver.can_enter_jit(x1=x1, x2=x2, y=y, res=res, n=n) + myjitdriver.jit_merge_point(x1=x1, x2=x2, y=y, res=res, n=n) + try: + res += ovfcheck(x1 * x1) + except OverflowError: + res += 1 + y -= 1 + if y&4 == 0: + x1, x2 = x2, x1 + return res + res = self.meta_interp(f, [sys.maxint, 6, 32, 48]) + assert res == f(sys.maxint, 6, 32, 48) + res = self.meta_interp(f, [6, sys.maxint, 32, 48]) + assert res == f(6, sys.maxint, 32, 48) + + def test_loop_invariant_intbox(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) class I: diff --git a/pypy/jit/metainterp/virtualizable.py b/pypy/jit/metainterp/virtualizable.py --- a/pypy/jit/metainterp/virtualizable.py +++ b/pypy/jit/metainterp/virtualizable.py @@ -262,15 +262,15 @@ force_now._dont_inline_ = True self.force_now = force_now - def gettoken(virtualizable): + def is_token_nonnull_gcref(virtualizable): virtualizable = cast_gcref_to_vtype(virtualizable) - return virtualizable.vable_token - self.gettoken = gettoken + return bool(virtualizable.vable_token) + self.is_token_nonnull_gcref = is_token_nonnull_gcref - def 
settoken(virtualizable, token): + def reset_token_gcref(virtualizable): virtualizable = cast_gcref_to_vtype(virtualizable) - virtualizable.vable_token = token - self.settoken = settoken + virtualizable.vable_token = VirtualizableInfo.TOKEN_NONE + self.reset_token_gcref = reset_token_gcref def _freeze_(self): return True diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py --- a/pypy/module/_io/interp_iobase.py +++ b/pypy/module/_io/interp_iobase.py @@ -326,8 +326,11 @@ try: space.call_method(w_iobase, 'flush') except OperationError, e: - # if it's an IOError, ignore it - if not e.match(space, space.w_IOError): + # if it's an IOError or ValueError, ignore it (ValueError is + # raised if by chance we are trying to flush a file which has + # already been closed) + if not (e.match(space, space.w_IOError) or + e.match(space, space.w_ValueError)): raise diff --git a/pypy/module/_io/test/test_fileio.py b/pypy/module/_io/test/test_fileio.py --- a/pypy/module/_io/test/test_fileio.py +++ b/pypy/module/_io/test/test_fileio.py @@ -178,7 +178,7 @@ space.finish() assert tmpfile.read() == '42' -def test_flush_at_exit_IOError(): +def test_flush_at_exit_IOError_and_ValueError(): from pypy import conftest from pypy.tool.option import make_config, make_objspace @@ -190,7 +190,12 @@ def flush(self): raise IOError + class MyStream2(io.IOBase): + def flush(self): + raise ValueError + s = MyStream() + s2 = MyStream2() import sys; sys._keepalivesomewhereobscure = s """) space.finish() # the IOError has been ignored diff --git a/pypy/module/test_lib_pypy/test_collections.py b/pypy/module/test_lib_pypy/test_collections.py --- a/pypy/module/test_lib_pypy/test_collections.py +++ b/pypy/module/test_lib_pypy/test_collections.py @@ -6,7 +6,7 @@ from pypy.conftest import gettestobjspace -class AppTestcStringIO: +class AppTestCollections: def test_copy(self): import _collections def f(): From noreply at buildbot.pypy.org Thu Mar 1 13:08:41 2012 From: noreply at 
buildbot.pypy.org (hager) Date: Thu, 1 Mar 2012 13:08:41 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: call _emit_guard in emit_guard_class Message-ID: <20120301120841.62A538204C@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r53042:a701f03c5ad4 Date: 2012-02-29 03:22 -0800 http://bitbucket.org/pypy/pypy/changeset/a701f03c5ad4/ Log: call _emit_guard in emit_guard_class diff --git a/pypy/jit/backend/ppc/opassembler.py b/pypy/jit/backend/ppc/opassembler.py --- a/pypy/jit/backend/ppc/opassembler.py +++ b/pypy/jit/backend/ppc/opassembler.py @@ -257,6 +257,7 @@ def emit_guard_class(self, op, arglocs, regalloc): self._cmp_guard_class(op, arglocs, regalloc) + self._emit_guard(op, arglocs[3:], c.NE, save_exc=False) def emit_guard_nonnull_class(self, op, arglocs, regalloc): offset = self.cpu.vtable_offset From noreply at buildbot.pypy.org Thu Mar 1 13:08:42 2012 From: noreply at buildbot.pypy.org (hager) Date: Thu, 1 Mar 2012 13:08:42 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: (bivab, hager): implement card marking Message-ID: <20120301120842.9184D820D1@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r53043:e55880d705f1 Date: 2012-03-01 03:18 -0800 http://bitbucket.org/pypy/pypy/changeset/e55880d705f1/ Log: (bivab, hager): implement card marking diff --git a/pypy/jit/backend/ppc/opassembler.py b/pypy/jit/backend/ppc/opassembler.py --- a/pypy/jit/backend/ppc/opassembler.py +++ b/pypy/jit/backend/ppc/opassembler.py @@ -892,14 +892,15 @@ if opnum == rop.COND_CALL_GC_WB: N = 2 addr = descr.get_write_barrier_fn(self.cpu) + card_marking = False elif opnum == rop.COND_CALL_GC_WB_ARRAY: N = 3 addr = descr.get_write_barrier_from_array_fn(self.cpu) assert addr != 0 + card_marking = descr.jit_wb_cards_set != 0 else: raise AssertionError(opnum) loc_base = arglocs[0] - with scratch_reg(self.mc): self.mc.load(r.SCRATCH.value, loc_base.value, 0) @@ -922,6 +923,33 @@ jz_location = self.mc.currpos() 
self.mc.nop() + # for cond_call_gc_wb_array, also add another fast path: + # if GCFLAG_CARDS_SET, then we can just set one bit and be done + if card_marking: + with scratch_reg(self.mc): + self.mc.load(r.SCRATCH.value, loc_base.value, 0) + + # get the position of the bit we want to test + bitpos = descr.jit_wb_cards_set_bitpos + + if IS_PPC_32: + # put this bit to the rightmost bitposition of r0 + if bitpos > 0: + self.mc.rlwinm(r.SCRATCH.value, r.SCRATCH.value, + 32 - bitpos, 31, 31) + else: + if bitpos > 0: + self.mc.rldicl(r.SCRATCH.value, r.SCRATCH.value, + 64 - bitpos, 63) + + # test whether this bit is set + self.mc.cmp_op(0, r.SCRATCH.value, 1, imm=True) + + jnz_location = self.mc.currpos() + self.mc.nop() + else: + jnz_location = 0 + # the following is supposed to be the slow path, so whenever possible # we choose the most compact encoding over the most efficient one. with Saved_Volatiles(self.mc): @@ -936,6 +964,57 @@ # is not going to call anything more. self.mc.call(func) + # if GCFLAG_CARDS_SET, then we can do the whole thing that would + # be done in the CALL above with just four instructions, so here + # is an inline copy of them + if card_marking: + with scratch_reg(self.mc): + jmp_location = self.mc.currpos() + self.mc.nop() # jump to the exit, patched later + # patch the JNZ above + offset = self.mc.currpos() + pmc = OverwritingBuilder(self.mc, jnz_location, 1) + pmc.bc(12, 2, offset - jnz_location) # jump on equality + pmc.overwrite() + # + loc_index = arglocs[1] + assert loc_index.is_reg() + tmp1 = arglocs[-2] + tmp2 = arglocs[-1] + #byteofs + s = 3 + descr.jit_wb_card_page_shift + + # use r20 as temporay register, save it in FORCE INDEX slot + temp_reg = r.r20 + ENCODING_AREA = len(r.MANAGED_REGS) * WORD + self.mc.store(temp_reg.value, r.SPP.value, ENCODING_AREA) + + self.mc.srli_op(temp_reg.value, loc_index.value, s) + self.mc.not_(temp_reg.value, temp_reg.value) + + # byte_index + self.mc.li(r.SCRATCH.value, 7) + 
self.mc.srli_op(loc_index.value, loc_index.value, + descr.jit_wb_card_page_shift) + self.mc.and_(tmp1.value, r.SCRATCH.value, loc_index.value) + + # set the bit + self.mc.li(tmp2.value, 1) + self.mc.lbzx(r.SCRATCH.value, loc_base.value, temp_reg.value) + self.mc.sl_op(tmp2.value, tmp2.value, tmp1.value) + self.mc.or_(r.SCRATCH.value, r.SCRATCH.value, tmp2.value) + self.mc.stbx(r.SCRATCH.value, loc_base.value, temp_reg.value) + # done + + # patch the JMP above + offset = self.mc.currpos() + pmc = OverwritingBuilder(self.mc, jmp_location, 1) + pmc.b(offset - jmp_location) + pmc.overwrite() + + # restore temporary register r20 + self.mc.load(temp_reg.value, r.SPP.value, ENCODING_AREA) + # patch the JZ above offset = self.mc.currpos() - jz_location pmc = OverwritingBuilder(self.mc, jz_location, 1) From noreply at buildbot.pypy.org Thu Mar 1 13:08:47 2012 From: noreply at buildbot.pypy.org (hager) Date: Thu, 1 Mar 2012 13:08:47 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: (bivab, hager): remove comment, don't call assemble Message-ID: <20120301120847.5ED7E8236F@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r53047:fc6ad30e4bbf Date: 2012-03-01 04:04 -0800 http://bitbucket.org/pypy/pypy/changeset/fc6ad30e4bbf/ Log: (bivab, hager): remove comment, don't call assemble diff --git a/pypy/jit/backend/ppc/codebuilder.py b/pypy/jit/backend/ppc/codebuilder.py --- a/pypy/jit/backend/ppc/codebuilder.py +++ b/pypy/jit/backend/ppc/codebuilder.py @@ -1098,10 +1098,9 @@ self.sld(target_reg, from_reg, numbit_reg) def prepare_insts_blocks(self, show=False): - self.assemble(show) insts = self.insts for inst in insts: - self.write32(inst)#.assemble()) + self.write32(inst) def _dump_trace(self, addr, name, formatter=-1): if not we_are_translated(): From noreply at buildbot.pypy.org Thu Mar 1 13:08:48 2012 From: noreply at buildbot.pypy.org (hager) Date: Thu, 1 Mar 2012 13:08:48 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: (bivab, 
hager): add XXX comment Message-ID: <20120301120848.9111D8244B@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r53048:6bd5237a7e4d Date: 2012-03-01 04:06 -0800 http://bitbucket.org/pypy/pypy/changeset/6bd5237a7e4d/ Log: (bivab, hager): add XXX comment diff --git a/pypy/jit/backend/ppc/assembler.py b/pypy/jit/backend/ppc/assembler.py --- a/pypy/jit/backend/ppc/assembler.py +++ b/pypy/jit/backend/ppc/assembler.py @@ -38,6 +38,7 @@ def get_number_of_ops(self): return len(self.insts) + # XXX don't need multiplication def get_rel_pos(self): return 4 * len(self.insts) From noreply at buildbot.pypy.org Thu Mar 1 13:08:43 2012 From: noreply at buildbot.pypy.org (hager) Date: Thu, 1 Mar 2012 13:08:43 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: (bivab, hager): forgot to commit this the last time Message-ID: <20120301120843.BF0D68236C@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r53044:5632f53a7d1e Date: 2012-03-01 03:51 -0800 http://bitbucket.org/pypy/pypy/changeset/5632f53a7d1e/ Log: (bivab, hager): forgot to commit this the last time diff --git a/pypy/jit/backend/ppc/codebuilder.py b/pypy/jit/backend/ppc/codebuilder.py --- a/pypy/jit/backend/ppc/codebuilder.py +++ b/pypy/jit/backend/ppc/codebuilder.py @@ -1085,6 +1085,18 @@ else: self.stdx(from_reg, base_reg, offset_reg) + def srli_op(self, target_reg, from_reg, numbits): + if IS_PPC_32: + self.srwi(target_reg, from_reg, numbits) + else: + self.srdi(target_reg, from_reg, numbits) + + def sl_op(self, target_reg, from_reg, numbit_reg): + if IS_PPC_32: + self.slw(target_reg, from_reg, numbit_reg) + else: + self.sld(target_reg, from_reg, numbit_reg) + def prepare_insts_blocks(self, show=False): self.assemble(show) insts = self.insts From noreply at buildbot.pypy.org Thu Mar 1 13:08:49 2012 From: noreply at buildbot.pypy.org (hager) Date: Thu, 1 Mar 2012 13:08:49 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: merge Message-ID: 
<20120301120849.CC1328204C@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r53049:36ea49e24efc Date: 2012-03-01 04:07 -0800 http://bitbucket.org/pypy/pypy/changeset/36ea49e24efc/ Log: merge diff --git a/pypy/jit/backend/ppc/ppc_assembler.py b/pypy/jit/backend/ppc/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppc_assembler.py @@ -317,7 +317,7 @@ for _ in range(6): mc.write32(0) frame_size = (# add space for floats later - + BACKCHAIN_SIZE * WORD) + + (BACKCHAIN_SIZE + MAX_REG_PARAMS) * WORD) if IS_PPC_32: mc.stwu(r.SP.value, r.SP.value, -frame_size) mc.mflr(r.SCRATCH.value) From noreply at buildbot.pypy.org Thu Mar 1 13:08:44 2012 From: noreply at buildbot.pypy.org (hager) Date: Thu, 1 Mar 2012 13:08:44 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: also test card marking Message-ID: <20120301120844.EF2EB8236D@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r53045:1221d2e20ecc Date: 2012-03-01 03:53 -0800 http://bitbucket.org/pypy/pypy/changeset/1221d2e20ecc/ Log: also test card marking diff --git a/pypy/jit/backend/ppc/test/test_runner.py b/pypy/jit/backend/ppc/test/test_runner.py --- a/pypy/jit/backend/ppc/test/test_runner.py +++ b/pypy/jit/backend/ppc/test/test_runner.py @@ -23,9 +23,6 @@ cls.cpu = PPC_64_CPU(rtyper=None, stats=FakeStats()) cls.cpu.setup_once() - def test_cond_call_gc_wb_array_card_marking_fast_path(self): - py.test.skip("unsure what to do here") - def test_compile_loop_many_int_args(self): for numargs in range(2, 16): for _ in range(numargs): From noreply at buildbot.pypy.org Thu Mar 1 13:08:46 2012 From: noreply at buildbot.pypy.org (hager) Date: Thu, 1 Mar 2012 13:08:46 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: (bivab, hager): port fixes to GUARD CLASS from ARM backend Message-ID: <20120301120846.287988236E@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r53046:921d7eb791cc Date: 
2012-03-01 03:55 -0800 http://bitbucket.org/pypy/pypy/changeset/921d7eb791cc/ Log: (bivab, hager): port fixes to GUARD CLASS from ARM backend diff --git a/pypy/jit/backend/ppc/opassembler.py b/pypy/jit/backend/ppc/opassembler.py --- a/pypy/jit/backend/ppc/opassembler.py +++ b/pypy/jit/backend/ppc/opassembler.py @@ -241,32 +241,31 @@ emit_guard_nonnull = emit_guard_true emit_guard_isnull = emit_guard_false - def _cmp_guard_class(self, op, locs, regalloc): - offset = locs[2] - if offset is not None: - with scratch_reg(self.mc): - if offset.is_imm(): - self.mc.load(r.SCRATCH.value, locs[0].value, offset.value) - else: - assert offset.is_reg() - self.mc.loadx(r.SCRATCH.value, locs[0].value, offset.value) - self.mc.cmp_op(0, r.SCRATCH.value, locs[1].value) - else: - assert 0, "not implemented yet" - self._emit_guard(op, locs[3:], c.NE) - def emit_guard_class(self, op, arglocs, regalloc): self._cmp_guard_class(op, arglocs, regalloc) self._emit_guard(op, arglocs[3:], c.NE, save_exc=False) def emit_guard_nonnull_class(self, op, arglocs, regalloc): - offset = self.cpu.vtable_offset - self.mc.cmp_op(0, arglocs[0].value, 0, imm=True) + self.mc.cmp_op(0, arglocs[0].value, 1, imm=True, signed=False) + patch_pos = self.mc.currpos() + self.mc.nop() + self._cmp_guard_class(op, arglocs, regalloc) + pmc = OverwritingBuilder(self.mc, patch_pos, 1) + pmc.bc(12, 0, self.mc.currpos() - patch_pos) + pmc.overwrite() + self._emit_guard(op, arglocs[3:], c.NE, save_exc=False) + + def _cmp_guard_class(self, op, locs, regalloc): + offset = locs[2] if offset is not None: - self._emit_guard(op, arglocs[3:], c.EQ) + #self.mc.LDR_ri(r.ip.value, locs[0].value, offset.value, cond=fcond) + #self.mc.CMP_rr(r.ip.value, locs[1].value, cond=fcond) + with scratch_reg(self.mc): + self.mc.load(r.SCRATCH.value, locs[0].value, offset.value) + self.mc.cmp_op(0, r.SCRATCH.value, locs[1].value) else: raise NotImplementedError - self._cmp_guard_class(op, arglocs, regalloc) + # XXX port from x86 backend once gc 
support is in place def emit_guard_not_invalidated(self, op, locs, regalloc): return self._emit_guard(op, locs, c.EQ, is_guard_not_invalidated=True) diff --git a/pypy/jit/backend/ppc/regalloc.py b/pypy/jit/backend/ppc/regalloc.py --- a/pypy/jit/backend/ppc/regalloc.py +++ b/pypy/jit/backend/ppc/regalloc.py @@ -456,14 +456,18 @@ def prepare_guard_class(self, op): assert isinstance(op.getarg(0), Box) boxes = op.getarglist() + x = self._ensure_value_is_boxed(boxes[0], boxes) - y = self.get_scratch_reg(REF, forbidden_vars=boxes) + y = self.get_scratch_reg(INT, forbidden_vars=boxes) y_val = rffi.cast(lltype.Signed, op.getarg(1).getint()) self.assembler.load(y, imm(y_val)) + offset = self.cpu.vtable_offset assert offset is not None - offset_loc = self._ensure_value_is_boxed(ConstInt(offset), boxes) + assert _check_imm_arg(offset) + offset_loc = imm(offset) arglocs = self._prepare_guard(op, [x, y, offset_loc]) + return arglocs prepare_guard_nonnull_class = prepare_guard_class From noreply at buildbot.pypy.org Thu Mar 1 13:56:29 2012 From: noreply at buildbot.pypy.org (hager) Date: Thu, 1 Mar 2012 13:56:29 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: merge Message-ID: <20120301125629.B20E38204C@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r53050:be58b4073bf7 Date: 2012-03-01 04:09 -0800 http://bitbucket.org/pypy/pypy/changeset/be58b4073bf7/ Log: merge diff --git a/ctypes_configure/cbuild.py b/ctypes_configure/cbuild.py --- a/ctypes_configure/cbuild.py +++ b/ctypes_configure/cbuild.py @@ -206,8 +206,9 @@ cfiles += eci.separate_module_files include_dirs = list(eci.include_dirs) library_dirs = list(eci.library_dirs) - if sys.platform == 'darwin': # support Fink & Darwinports - for s in ('/sw/', '/opt/local/'): + if (sys.platform == 'darwin' or # support Fink & Darwinports + sys.platform.startswith('freebsd')): + for s in ('/sw/', '/opt/local/', '/usr/local/'): if s + 'include' not in include_dirs and \ os.path.exists(s + 
'include'): include_dirs.append(s + 'include') @@ -380,9 +381,9 @@ self.link_extra += ['-pthread'] if sys.platform == 'win32': self.link_extra += ['/DEBUG'] # generate .pdb file - if sys.platform == 'darwin': - # support Fink & Darwinports - for s in ('/sw/', '/opt/local/'): + if (sys.platform == 'darwin' or # support Fink & Darwinports + sys.platform.startswith('freebsd')): + for s in ('/sw/', '/opt/local/', '/usr/local/'): if s + 'include' not in self.include_dirs and \ os.path.exists(s + 'include'): self.include_dirs.append(s + 'include') @@ -395,7 +396,6 @@ self.outputfilename = py.path.local(cfilenames[0]).new(ext=ext) else: self.outputfilename = py.path.local(outputfilename) - self.eci = eci def build(self, noerr=False): basename = self.outputfilename.new(ext='') @@ -436,7 +436,7 @@ old = cfile.dirpath().chdir() try: res = compiler.compile([cfile.basename], - include_dirs=self.eci.include_dirs, + include_dirs=self.include_dirs, extra_preargs=self.compile_extra) assert len(res) == 1 cobjfile = py.path.local(res[0]) @@ -445,9 +445,9 @@ finally: old.chdir() compiler.link_executable(objects, str(self.outputfilename), - libraries=self.eci.libraries, + libraries=self.libraries, extra_preargs=self.link_extra, - library_dirs=self.eci.library_dirs) + library_dirs=self.library_dirs) def build_executable(*args, **kwds): noerr = kwds.pop('noerr', False) diff --git a/lib-python/modified-2.7/UserDict.py b/lib-python/modified-2.7/UserDict.py --- a/lib-python/modified-2.7/UserDict.py +++ b/lib-python/modified-2.7/UserDict.py @@ -85,8 +85,12 @@ def __iter__(self): return iter(self.data) -import _abcoll -_abcoll.MutableMapping.register(IterableUserDict) +try: + import _abcoll +except ImportError: + pass # e.g. 
no '_weakref' module on this pypy +else: + _abcoll.MutableMapping.register(IterableUserDict) class DictMixin: diff --git a/lib_pypy/_subprocess.py b/lib_pypy/_subprocess.py --- a/lib_pypy/_subprocess.py +++ b/lib_pypy/_subprocess.py @@ -87,7 +87,7 @@ # Now the _subprocess module implementation -from ctypes import c_int as _c_int, byref as _byref +from ctypes import c_int as _c_int, byref as _byref, WinError as _WinError class _handle: def __init__(self, handle): @@ -116,7 +116,7 @@ res = _CreatePipe(_byref(read), _byref(write), None, size) if not res: - raise WindowsError("Error") + raise _WinError() return _handle(read.value), _handle(write.value) @@ -132,7 +132,7 @@ access, inherit, options) if not res: - raise WindowsError("Error") + raise _WinError() return _handle(target.value) DUPLICATE_SAME_ACCESS = 2 @@ -165,7 +165,7 @@ start_dir, _byref(si), _byref(pi)) if not res: - raise WindowsError("Error") + raise _WinError() return _handle(pi.hProcess), _handle(pi.hThread), pi.dwProcessID, pi.dwThreadID STARTF_USESHOWWINDOW = 0x001 @@ -178,7 +178,7 @@ res = _WaitForSingleObject(int(handle), milliseconds) if res < 0: - raise WindowsError("Error") + raise _WinError() return res INFINITE = 0xffffffff @@ -190,7 +190,7 @@ res = _GetExitCodeProcess(int(handle), _byref(code)) if not res: - raise WindowsError("Error") + raise _WinError() return code.value @@ -198,7 +198,7 @@ res = _TerminateProcess(int(handle), exitcode) if not res: - raise WindowsError("Error") + raise _WinError() def GetStdHandle(stdhandle): res = _GetStdHandle(stdhandle) diff --git a/lib_pypy/ctypes_config_cache/pyexpat.ctc.py b/lib_pypy/ctypes_config_cache/pyexpat.ctc.py deleted file mode 100644 --- a/lib_pypy/ctypes_config_cache/pyexpat.ctc.py +++ /dev/null @@ -1,45 +0,0 @@ -""" -'ctypes_configure' source for pyexpat.py. -Run this to rebuild _pyexpat_cache.py. 
-""" - -import ctypes -from ctypes import c_char_p, c_int, c_void_p, c_char -from ctypes_configure import configure -import dumpcache - - -class CConfigure: - _compilation_info_ = configure.ExternalCompilationInfo( - includes = ['expat.h'], - libraries = ['expat'], - pre_include_lines = [ - '#define XML_COMBINED_VERSION (10000*XML_MAJOR_VERSION+100*XML_MINOR_VERSION+XML_MICRO_VERSION)'], - ) - - XML_Char = configure.SimpleType('XML_Char', c_char) - XML_COMBINED_VERSION = configure.ConstantInteger('XML_COMBINED_VERSION') - for name in ['XML_PARAM_ENTITY_PARSING_NEVER', - 'XML_PARAM_ENTITY_PARSING_UNLESS_STANDALONE', - 'XML_PARAM_ENTITY_PARSING_ALWAYS']: - locals()[name] = configure.ConstantInteger(name) - - XML_Encoding = configure.Struct('XML_Encoding',[ - ('data', c_void_p), - ('convert', c_void_p), - ('release', c_void_p), - ('map', c_int * 256)]) - XML_Content = configure.Struct('XML_Content',[ - ('numchildren', c_int), - ('children', c_void_p), - ('name', c_char_p), - ('type', c_int), - ('quant', c_int), - ]) - # this is insanely stupid - XML_FALSE = configure.ConstantInteger('XML_FALSE') - XML_TRUE = configure.ConstantInteger('XML_TRUE') - -config = configure.configure(CConfigure) - -dumpcache.dumpcache2('pyexpat', config) diff --git a/lib_pypy/ctypes_config_cache/test/test_cache.py b/lib_pypy/ctypes_config_cache/test/test_cache.py --- a/lib_pypy/ctypes_config_cache/test/test_cache.py +++ b/lib_pypy/ctypes_config_cache/test/test_cache.py @@ -39,10 +39,6 @@ d = run('resource.ctc.py', '_resource_cache.py') assert 'RLIM_NLIMITS' in d -def test_pyexpat(): - d = run('pyexpat.ctc.py', '_pyexpat_cache.py') - assert 'XML_COMBINED_VERSION' in d - def test_locale(): d = run('locale.ctc.py', '_locale_cache.py') assert 'LC_ALL' in d diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py --- a/lib_pypy/datetime.py +++ b/lib_pypy/datetime.py @@ -271,8 +271,9 @@ raise ValueError("%s()=%d, must be in -1439..1439" % (name, offset)) def _check_date_fields(year, month, day): - 
if not isinstance(year, (int, long)): - raise TypeError('int expected') + for value in [year, day]: + if not isinstance(value, (int, long)): + raise TypeError('int expected') if not MINYEAR <= year <= MAXYEAR: raise ValueError('year must be in %d..%d' % (MINYEAR, MAXYEAR), year) if not 1 <= month <= 12: @@ -282,8 +283,9 @@ raise ValueError('day must be in 1..%d' % dim, day) def _check_time_fields(hour, minute, second, microsecond): - if not isinstance(hour, (int, long)): - raise TypeError('int expected') + for value in [hour, minute, second, microsecond]: + if not isinstance(value, (int, long)): + raise TypeError('int expected') if not 0 <= hour <= 23: raise ValueError('hour must be in 0..23', hour) if not 0 <= minute <= 59: @@ -1520,7 +1522,7 @@ def utcfromtimestamp(cls, t): "Construct a UTC datetime from a POSIX timestamp (like time.time())." t, frac = divmod(t, 1.0) - us = round(frac * 1e6) + us = int(round(frac * 1e6)) # If timestamp is less than one microsecond smaller than a # full second, us can be rounded up to 1000000. 
In this case, diff --git a/lib_pypy/pyexpat.py b/lib_pypy/pyexpat.py deleted file mode 100644 --- a/lib_pypy/pyexpat.py +++ /dev/null @@ -1,448 +0,0 @@ - -import ctypes -import ctypes.util -from ctypes import c_char_p, c_int, c_void_p, POINTER, c_char, c_wchar_p -import sys - -# load the platform-specific cache made by running pyexpat.ctc.py -from ctypes_config_cache._pyexpat_cache import * - -try: from __pypy__ import builtinify -except ImportError: builtinify = lambda f: f - - -lib = ctypes.CDLL(ctypes.util.find_library('expat')) - - -XML_Content.children = POINTER(XML_Content) -XML_Parser = ctypes.c_void_p # an opaque pointer -assert XML_Char is ctypes.c_char # this assumption is everywhere in -# cpython's expat, let's explode - -def declare_external(name, args, res): - func = getattr(lib, name) - func.args = args - func.restype = res - globals()[name] = func - -declare_external('XML_ParserCreate', [c_char_p], XML_Parser) -declare_external('XML_ParserCreateNS', [c_char_p, c_char], XML_Parser) -declare_external('XML_Parse', [XML_Parser, c_char_p, c_int, c_int], c_int) -currents = ['CurrentLineNumber', 'CurrentColumnNumber', - 'CurrentByteIndex'] -for name in currents: - func = getattr(lib, 'XML_Get' + name) - func.args = [XML_Parser] - func.restype = c_int - -declare_external('XML_SetReturnNSTriplet', [XML_Parser, c_int], None) -declare_external('XML_GetSpecifiedAttributeCount', [XML_Parser], c_int) -declare_external('XML_SetParamEntityParsing', [XML_Parser, c_int], None) -declare_external('XML_GetErrorCode', [XML_Parser], c_int) -declare_external('XML_StopParser', [XML_Parser, c_int], None) -declare_external('XML_ErrorString', [c_int], c_char_p) -declare_external('XML_SetBase', [XML_Parser, c_char_p], None) -if XML_COMBINED_VERSION >= 19505: - declare_external('XML_UseForeignDTD', [XML_Parser, c_int], None) - -declare_external('XML_SetUnknownEncodingHandler', [XML_Parser, c_void_p, - c_void_p], None) -declare_external('XML_FreeContentModel', [XML_Parser, 
POINTER(XML_Content)], - None) -declare_external('XML_ExternalEntityParserCreate', [XML_Parser,c_char_p, - c_char_p], - XML_Parser) - -handler_names = [ - 'StartElement', - 'EndElement', - 'ProcessingInstruction', - 'CharacterData', - 'UnparsedEntityDecl', - 'NotationDecl', - 'StartNamespaceDecl', - 'EndNamespaceDecl', - 'Comment', - 'StartCdataSection', - 'EndCdataSection', - 'Default', - 'DefaultHandlerExpand', - 'NotStandalone', - 'ExternalEntityRef', - 'StartDoctypeDecl', - 'EndDoctypeDecl', - 'EntityDecl', - 'XmlDecl', - 'ElementDecl', - 'AttlistDecl', - ] -if XML_COMBINED_VERSION >= 19504: - handler_names.append('SkippedEntity') -setters = {} - -for name in handler_names: - if name == 'DefaultHandlerExpand': - newname = 'XML_SetDefaultHandlerExpand' - else: - name += 'Handler' - newname = 'XML_Set' + name - cfunc = getattr(lib, newname) - cfunc.args = [XML_Parser, ctypes.c_void_p] - cfunc.result = ctypes.c_int - setters[name] = cfunc - -class ExpatError(Exception): - def __str__(self): - return self.s - -error = ExpatError - -class XMLParserType(object): - specified_attributes = 0 - ordered_attributes = 0 - returns_unicode = 1 - encoding = 'utf-8' - def __init__(self, encoding, namespace_separator, _hook_external_entity=False): - self.returns_unicode = 1 - if encoding: - self.encoding = encoding - if not _hook_external_entity: - if namespace_separator is None: - self.itself = XML_ParserCreate(encoding) - else: - self.itself = XML_ParserCreateNS(encoding, ord(namespace_separator)) - if not self.itself: - raise RuntimeError("Creating parser failed") - self._set_unknown_encoding_handler() - self.storage = {} - self.buffer = None - self.buffer_size = 8192 - self.character_data_handler = None - self.intern = {} - self.__exc_info = None - - def _flush_character_buffer(self): - if not self.buffer: - return - res = self._call_character_handler(''.join(self.buffer)) - self.buffer = [] - return res - - def _call_character_handler(self, buf): - if 
self.character_data_handler: - self.character_data_handler(buf) - - def _set_unknown_encoding_handler(self): - def UnknownEncoding(encodingData, name, info_p): - info = info_p.contents - s = ''.join([chr(i) for i in range(256)]) - u = s.decode(self.encoding, 'replace') - for i in range(len(u)): - if u[i] == u'\xfffd': - info.map[i] = -1 - else: - info.map[i] = ord(u[i]) - info.data = None - info.convert = None - info.release = None - return 1 - - CB = ctypes.CFUNCTYPE(c_int, c_void_p, c_char_p, POINTER(XML_Encoding)) - cb = CB(UnknownEncoding) - self._unknown_encoding_handler = (cb, UnknownEncoding) - XML_SetUnknownEncodingHandler(self.itself, cb, None) - - def _set_error(self, code): - e = ExpatError() - e.code = code - lineno = lib.XML_GetCurrentLineNumber(self.itself) - colno = lib.XML_GetCurrentColumnNumber(self.itself) - e.offset = colno - e.lineno = lineno - err = XML_ErrorString(code)[:200] - e.s = "%s: line: %d, column: %d" % (err, lineno, colno) - e.message = e.s - self._error = e - - def Parse(self, data, is_final=0): - res = XML_Parse(self.itself, data, len(data), is_final) - if res == 0: - self._set_error(XML_GetErrorCode(self.itself)) - if self.__exc_info: - exc_info = self.__exc_info - self.__exc_info = None - raise exc_info[0], exc_info[1], exc_info[2] - else: - raise self._error - self._flush_character_buffer() - return res - - def _sethandler(self, name, real_cb): - setter = setters[name] - try: - cb = self.storage[(name, real_cb)] - except KeyError: - cb = getattr(self, 'get_cb_for_%s' % name)(real_cb) - self.storage[(name, real_cb)] = cb - except TypeError: - # weellll... 
- cb = getattr(self, 'get_cb_for_%s' % name)(real_cb) - setter(self.itself, cb) - - def _wrap_cb(self, cb): - def f(*args): - try: - return cb(*args) - except: - self.__exc_info = sys.exc_info() - XML_StopParser(self.itself, XML_FALSE) - return f - - def get_cb_for_StartElementHandler(self, real_cb): - def StartElement(unused, name, attrs): - # unpack name and attrs - conv = self.conv - self._flush_character_buffer() - if self.specified_attributes: - max = XML_GetSpecifiedAttributeCount(self.itself) - else: - max = 0 - while attrs[max]: - max += 2 # copied - if self.ordered_attributes: - res = [attrs[i] for i in range(max)] - else: - res = {} - for i in range(0, max, 2): - res[conv(attrs[i])] = conv(attrs[i + 1]) - real_cb(conv(name), res) - StartElement = self._wrap_cb(StartElement) - CB = ctypes.CFUNCTYPE(None, c_void_p, c_char_p, POINTER(c_char_p)) - return CB(StartElement) - - def get_cb_for_ExternalEntityRefHandler(self, real_cb): - def ExternalEntity(unused, context, base, sysId, pubId): - self._flush_character_buffer() - conv = self.conv - res = real_cb(conv(context), conv(base), conv(sysId), - conv(pubId)) - if res is None: - return 0 - return res - ExternalEntity = self._wrap_cb(ExternalEntity) - CB = ctypes.CFUNCTYPE(c_int, c_void_p, *([c_char_p] * 4)) - return CB(ExternalEntity) - - def get_cb_for_CharacterDataHandler(self, real_cb): - def CharacterData(unused, s, lgt): - if self.buffer is None: - self._call_character_handler(self.conv(s[:lgt])) - else: - if len(self.buffer) + lgt > self.buffer_size: - self._flush_character_buffer() - if self.character_data_handler is None: - return - if lgt >= self.buffer_size: - self._call_character_handler(s[:lgt]) - self.buffer = [] - else: - self.buffer.append(s[:lgt]) - CharacterData = self._wrap_cb(CharacterData) - CB = ctypes.CFUNCTYPE(None, c_void_p, POINTER(c_char), c_int) - return CB(CharacterData) - - def get_cb_for_NotStandaloneHandler(self, real_cb): - def NotStandaloneHandler(unused): - return real_cb() - 
NotStandaloneHandler = self._wrap_cb(NotStandaloneHandler) - CB = ctypes.CFUNCTYPE(c_int, c_void_p) - return CB(NotStandaloneHandler) - - def get_cb_for_EntityDeclHandler(self, real_cb): - def EntityDecl(unused, ename, is_param, value, value_len, base, - system_id, pub_id, not_name): - self._flush_character_buffer() - if not value: - value = None - else: - value = value[:value_len] - args = [ename, is_param, value, base, system_id, - pub_id, not_name] - args = [self.conv(arg) for arg in args] - real_cb(*args) - EntityDecl = self._wrap_cb(EntityDecl) - CB = ctypes.CFUNCTYPE(None, c_void_p, c_char_p, c_int, c_char_p, - c_int, c_char_p, c_char_p, c_char_p, c_char_p) - return CB(EntityDecl) - - def _conv_content_model(self, model): - children = tuple([self._conv_content_model(model.children[i]) - for i in range(model.numchildren)]) - return (model.type, model.quant, self.conv(model.name), - children) - - def get_cb_for_ElementDeclHandler(self, real_cb): - def ElementDecl(unused, name, model): - self._flush_character_buffer() - modelobj = self._conv_content_model(model[0]) - real_cb(name, modelobj) - XML_FreeContentModel(self.itself, model) - - ElementDecl = self._wrap_cb(ElementDecl) - CB = ctypes.CFUNCTYPE(None, c_void_p, c_char_p, POINTER(XML_Content)) - return CB(ElementDecl) - - def _new_callback_for_string_len(name, sign): - def get_callback_for_(self, real_cb): - def func(unused, s, len): - self._flush_character_buffer() - arg = self.conv(s[:len]) - real_cb(arg) - func.func_name = name - func = self._wrap_cb(func) - CB = ctypes.CFUNCTYPE(*sign) - return CB(func) - get_callback_for_.func_name = 'get_cb_for_' + name - return get_callback_for_ - - for name in ['DefaultHandlerExpand', - 'DefaultHandler']: - sign = [None, c_void_p, POINTER(c_char), c_int] - name = 'get_cb_for_' + name - locals()[name] = _new_callback_for_string_len(name, sign) - - def _new_callback_for_starargs(name, sign): - def get_callback_for_(self, real_cb): - def func(unused, *args): - 
self._flush_character_buffer() - args = [self.conv(arg) for arg in args] - real_cb(*args) - func.func_name = name - func = self._wrap_cb(func) - CB = ctypes.CFUNCTYPE(*sign) - return CB(func) - get_callback_for_.func_name = 'get_cb_for_' + name - return get_callback_for_ - - for name, num_or_sign in [ - ('EndElementHandler', 1), - ('ProcessingInstructionHandler', 2), - ('UnparsedEntityDeclHandler', 5), - ('NotationDeclHandler', 4), - ('StartNamespaceDeclHandler', 2), - ('EndNamespaceDeclHandler', 1), - ('CommentHandler', 1), - ('StartCdataSectionHandler', 0), - ('EndCdataSectionHandler', 0), - ('StartDoctypeDeclHandler', [None, c_void_p] + [c_char_p] * 3 + [c_int]), - ('XmlDeclHandler', [None, c_void_p, c_char_p, c_char_p, c_int]), - ('AttlistDeclHandler', [None, c_void_p] + [c_char_p] * 4 + [c_int]), - ('EndDoctypeDeclHandler', 0), - ('SkippedEntityHandler', [None, c_void_p, c_char_p, c_int]), - ]: - if isinstance(num_or_sign, int): - sign = [None, c_void_p] + [c_char_p] * num_or_sign - else: - sign = num_or_sign - name = 'get_cb_for_' + name - locals()[name] = _new_callback_for_starargs(name, sign) - - def conv_unicode(self, s): - if s is None or isinstance(s, int): - return s - return s.decode(self.encoding, "strict") - - def __setattr__(self, name, value): - # forest of ifs... 
- if name in ['ordered_attributes', - 'returns_unicode', 'specified_attributes']: - if value: - if name == 'returns_unicode': - self.conv = self.conv_unicode - self.__dict__[name] = 1 - else: - if name == 'returns_unicode': - self.conv = lambda s: s - self.__dict__[name] = 0 - elif name == 'buffer_text': - if value: - self.buffer = [] - else: - self._flush_character_buffer() - self.buffer = None - elif name == 'buffer_size': - if not isinstance(value, int): - raise TypeError("Expected int") - if value <= 0: - raise ValueError("Expected positive int") - self.__dict__[name] = value - elif name == 'namespace_prefixes': - XML_SetReturnNSTriplet(self.itself, int(bool(value))) - elif name in setters: - if name == 'CharacterDataHandler': - # XXX we need to flush buffer here - self._flush_character_buffer() - self.character_data_handler = value - #print name - #print value - #print - self._sethandler(name, value) - else: - self.__dict__[name] = value - - def SetParamEntityParsing(self, arg): - XML_SetParamEntityParsing(self.itself, arg) - - if XML_COMBINED_VERSION >= 19505: - def UseForeignDTD(self, arg=True): - if arg: - flag = XML_TRUE - else: - flag = XML_FALSE - XML_UseForeignDTD(self.itself, flag) - - def __getattr__(self, name): - if name == 'buffer_text': - return self.buffer is not None - elif name in currents: - return getattr(lib, 'XML_Get' + name)(self.itself) - elif name == 'ErrorColumnNumber': - return lib.XML_GetCurrentColumnNumber(self.itself) - elif name == 'ErrorLineNumber': - return lib.XML_GetCurrentLineNumber(self.itself) - return self.__dict__[name] - - def ParseFile(self, file): - return self.Parse(file.read(), False) - - def SetBase(self, base): - XML_SetBase(self.itself, base) - - def ExternalEntityParserCreate(self, context, encoding=None): - """ExternalEntityParserCreate(context[, encoding]) - Create a parser for parsing an external entity based on the - information passed to the ExternalEntityRefHandler.""" - new_parser = XMLParserType(encoding, 
None, True) - new_parser.itself = XML_ExternalEntityParserCreate(self.itself, - context, encoding) - new_parser._set_unknown_encoding_handler() - return new_parser - - at builtinify -def ErrorString(errno): - return XML_ErrorString(errno)[:200] - - at builtinify -def ParserCreate(encoding=None, namespace_separator=None, intern=None): - if (not isinstance(encoding, str) and - not encoding is None): - raise TypeError("ParserCreate() argument 1 must be string or None, not %s" % encoding.__class__.__name__) - if (not isinstance(namespace_separator, str) and - not namespace_separator is None): - raise TypeError("ParserCreate() argument 2 must be string or None, not %s" % namespace_separator.__class__.__name__) - if namespace_separator is not None: - if len(namespace_separator) > 1: - raise ValueError('namespace_separator must be at most one character, omitted, or None') - if len(namespace_separator) == 0: - namespace_separator = None - return XMLParserType(encoding, namespace_separator) diff --git a/lib_pypy/pypy_test/test_pyexpat.py b/lib_pypy/pypy_test/test_pyexpat.py deleted file mode 100644 --- a/lib_pypy/pypy_test/test_pyexpat.py +++ /dev/null @@ -1,665 +0,0 @@ -# XXX TypeErrors on calling handlers, or on bad return values from a -# handler, are obscure and unhelpful. 
- -from __future__ import absolute_import -import StringIO, sys -import unittest, py - -from lib_pypy.ctypes_config_cache import rebuild -rebuild.rebuild_one('pyexpat.ctc.py') - -from lib_pypy import pyexpat -#from xml.parsers import expat -expat = pyexpat - -from test.test_support import sortdict, run_unittest - - -class TestSetAttribute: - def setup_method(self, meth): - self.parser = expat.ParserCreate(namespace_separator='!') - self.set_get_pairs = [ - [0, 0], - [1, 1], - [2, 1], - [0, 0], - ] - - def test_returns_unicode(self): - for x, y in self.set_get_pairs: - self.parser.returns_unicode = x - assert self.parser.returns_unicode == y - - def test_ordered_attributes(self): - for x, y in self.set_get_pairs: - self.parser.ordered_attributes = x - assert self.parser.ordered_attributes == y - - def test_specified_attributes(self): - for x, y in self.set_get_pairs: - self.parser.specified_attributes = x - assert self.parser.specified_attributes == y - - -data = '''\ - - - - - - - - - -%unparsed_entity; -]> - - - - Contents of subelements - - -&external_entity; -&skipped_entity; - -''' - - -# Produce UTF-8 output -class TestParse: - class Outputter: - def __init__(self): - self.out = [] - - def StartElementHandler(self, name, attrs): - self.out.append('Start element: ' + repr(name) + ' ' + - sortdict(attrs)) - - def EndElementHandler(self, name): - self.out.append('End element: ' + repr(name)) - - def CharacterDataHandler(self, data): - data = data.strip() - if data: - self.out.append('Character data: ' + repr(data)) - - def ProcessingInstructionHandler(self, target, data): - self.out.append('PI: ' + repr(target) + ' ' + repr(data)) - - def StartNamespaceDeclHandler(self, prefix, uri): - self.out.append('NS decl: ' + repr(prefix) + ' ' + repr(uri)) - - def EndNamespaceDeclHandler(self, prefix): - self.out.append('End of NS decl: ' + repr(prefix)) - - def StartCdataSectionHandler(self): - self.out.append('Start of CDATA section') - - def 
EndCdataSectionHandler(self): - self.out.append('End of CDATA section') - - def CommentHandler(self, text): - self.out.append('Comment: ' + repr(text)) - - def NotationDeclHandler(self, *args): - name, base, sysid, pubid = args - self.out.append('Notation declared: %s' %(args,)) - - def UnparsedEntityDeclHandler(self, *args): - entityName, base, systemId, publicId, notationName = args - self.out.append('Unparsed entity decl: %s' %(args,)) - - def NotStandaloneHandler(self): - self.out.append('Not standalone') - return 1 - - def ExternalEntityRefHandler(self, *args): - context, base, sysId, pubId = args - self.out.append('External entity ref: %s' %(args[1:],)) - return 1 - - def StartDoctypeDeclHandler(self, *args): - self.out.append(('Start doctype', args)) - return 1 - - def EndDoctypeDeclHandler(self): - self.out.append("End doctype") - return 1 - - def EntityDeclHandler(self, *args): - self.out.append(('Entity declaration', args)) - return 1 - - def XmlDeclHandler(self, *args): - self.out.append(('XML declaration', args)) - return 1 - - def ElementDeclHandler(self, *args): - self.out.append(('Element declaration', args)) - return 1 - - def AttlistDeclHandler(self, *args): - self.out.append(('Attribute list declaration', args)) - return 1 - - def SkippedEntityHandler(self, *args): - self.out.append(("Skipped entity", args)) - return 1 - - def DefaultHandler(self, userData): - pass - - def DefaultHandlerExpand(self, userData): - pass - - handler_names = [ - 'StartElementHandler', 'EndElementHandler', 'CharacterDataHandler', - 'ProcessingInstructionHandler', 'UnparsedEntityDeclHandler', - 'NotationDeclHandler', 'StartNamespaceDeclHandler', - 'EndNamespaceDeclHandler', 'CommentHandler', - 'StartCdataSectionHandler', 'EndCdataSectionHandler', 'DefaultHandler', - 'DefaultHandlerExpand', 'NotStandaloneHandler', - 'ExternalEntityRefHandler', 'StartDoctypeDeclHandler', - 'EndDoctypeDeclHandler', 'EntityDeclHandler', 'XmlDeclHandler', - 'ElementDeclHandler', 
'AttlistDeclHandler', 'SkippedEntityHandler', - ] - - def test_utf8(self): - - out = self.Outputter() - parser = expat.ParserCreate(namespace_separator='!') - for name in self.handler_names: - setattr(parser, name, getattr(out, name)) - parser.returns_unicode = 0 - parser.Parse(data, 1) - - # Verify output - operations = out.out - expected_operations = [ - ('XML declaration', (u'1.0', u'iso-8859-1', 0)), - 'PI: \'xml-stylesheet\' \'href="stylesheet.css"\'', - "Comment: ' comment data '", - "Not standalone", - ("Start doctype", ('quotations', 'quotations.dtd', None, 1)), - ('Element declaration', (u'root', (2, 0, None, ()))), - ('Attribute list declaration', ('root', 'attr1', 'CDATA', None, - 1)), - ('Attribute list declaration', ('root', 'attr2', 'CDATA', None, - 0)), - "Notation declared: ('notation', None, 'notation.jpeg', None)", - ('Entity declaration', ('acirc', 0, '\xc3\xa2', None, None, None, None)), - ('Entity declaration', ('external_entity', 0, None, None, - 'entity.file', None, None)), - "Unparsed entity decl: ('unparsed_entity', None, 'entity.file', None, 'notation')", - "Not standalone", - "End doctype", - "Start element: 'root' {'attr1': 'value1', 'attr2': 'value2\\xe1\\xbd\\x80'}", - "NS decl: 'myns' 'http://www.python.org/namespace'", - "Start element: 'http://www.python.org/namespace!subelement' {}", - "Character data: 'Contents of subelements'", - "End element: 'http://www.python.org/namespace!subelement'", - "End of NS decl: 'myns'", - "Start element: 'sub2' {}", - 'Start of CDATA section', - "Character data: 'contents of CDATA section'", - 'End of CDATA section', - "End element: 'sub2'", - "External entity ref: (None, 'entity.file', None)", - ('Skipped entity', ('skipped_entity', 0)), - "End element: 'root'", - ] - for operation, expected_operation in zip(operations, expected_operations): - assert operation == expected_operation - - def test_unicode(self): - # Try the parse again, this time producing Unicode output - out = self.Outputter() - 
parser = expat.ParserCreate(namespace_separator='!') - parser.returns_unicode = 1 - for name in self.handler_names: - setattr(parser, name, getattr(out, name)) - - parser.Parse(data, 1) - - operations = out.out - expected_operations = [ - ('XML declaration', (u'1.0', u'iso-8859-1', 0)), - 'PI: u\'xml-stylesheet\' u\'href="stylesheet.css"\'', - "Comment: u' comment data '", - "Not standalone", - ("Start doctype", ('quotations', 'quotations.dtd', None, 1)), - ('Element declaration', (u'root', (2, 0, None, ()))), - ('Attribute list declaration', ('root', 'attr1', 'CDATA', None, - 1)), - ('Attribute list declaration', ('root', 'attr2', 'CDATA', None, - 0)), - "Notation declared: (u'notation', None, u'notation.jpeg', None)", - ('Entity declaration', (u'acirc', 0, u'\xe2', None, None, None, - None)), - ('Entity declaration', (u'external_entity', 0, None, None, - u'entity.file', None, None)), - "Unparsed entity decl: (u'unparsed_entity', None, u'entity.file', None, u'notation')", - "Not standalone", - "End doctype", - "Start element: u'root' {u'attr1': u'value1', u'attr2': u'value2\\u1f40'}", - "NS decl: u'myns' u'http://www.python.org/namespace'", - "Start element: u'http://www.python.org/namespace!subelement' {}", - "Character data: u'Contents of subelements'", - "End element: u'http://www.python.org/namespace!subelement'", - "End of NS decl: u'myns'", - "Start element: u'sub2' {}", - 'Start of CDATA section', - "Character data: u'contents of CDATA section'", - 'End of CDATA section', - "End element: u'sub2'", - "External entity ref: (None, u'entity.file', None)", - ('Skipped entity', ('skipped_entity', 0)), - "End element: u'root'", - ] - for operation, expected_operation in zip(operations, expected_operations): - assert operation == expected_operation - - def test_parse_file(self): - # Try parsing a file - out = self.Outputter() - parser = expat.ParserCreate(namespace_separator='!') - parser.returns_unicode = 1 - for name in self.handler_names: - setattr(parser, name, 
getattr(out, name)) - file = StringIO.StringIO(data) - - parser.ParseFile(file) - - operations = out.out - expected_operations = [ - ('XML declaration', (u'1.0', u'iso-8859-1', 0)), - 'PI: u\'xml-stylesheet\' u\'href="stylesheet.css"\'', - "Comment: u' comment data '", - "Not standalone", - ("Start doctype", ('quotations', 'quotations.dtd', None, 1)), - ('Element declaration', (u'root', (2, 0, None, ()))), - ('Attribute list declaration', ('root', 'attr1', 'CDATA', None, - 1)), - ('Attribute list declaration', ('root', 'attr2', 'CDATA', None, - 0)), - "Notation declared: (u'notation', None, u'notation.jpeg', None)", - ('Entity declaration', ('acirc', 0, u'\xe2', None, None, None, None)), - ('Entity declaration', (u'external_entity', 0, None, None, u'entity.file', None, None)), - "Unparsed entity decl: (u'unparsed_entity', None, u'entity.file', None, u'notation')", - "Not standalone", - "End doctype", - "Start element: u'root' {u'attr1': u'value1', u'attr2': u'value2\\u1f40'}", - "NS decl: u'myns' u'http://www.python.org/namespace'", - "Start element: u'http://www.python.org/namespace!subelement' {}", - "Character data: u'Contents of subelements'", - "End element: u'http://www.python.org/namespace!subelement'", - "End of NS decl: u'myns'", - "Start element: u'sub2' {}", - 'Start of CDATA section', - "Character data: u'contents of CDATA section'", - 'End of CDATA section', - "End element: u'sub2'", - "External entity ref: (None, u'entity.file', None)", - ('Skipped entity', ('skipped_entity', 0)), - "End element: u'root'", - ] - for operation, expected_operation in zip(operations, expected_operations): - assert operation == expected_operation - - -class TestNamespaceSeparator: - def test_legal(self): - # Tests that make sure we get errors when the namespace_separator value - # is illegal, and that we don't for good values: - expat.ParserCreate() - expat.ParserCreate(namespace_separator=None) - expat.ParserCreate(namespace_separator=' ') - - def test_illegal(self): - 
try: - expat.ParserCreate(namespace_separator=42) - raise AssertionError - except TypeError, e: - assert str(e) == ( - 'ParserCreate() argument 2 must be string or None, not int') - - try: - expat.ParserCreate(namespace_separator='too long') - raise AssertionError - except ValueError, e: - assert str(e) == ( - 'namespace_separator must be at most one character, omitted, or None') - - def test_zero_length(self): - # ParserCreate() needs to accept a namespace_separator of zero length - # to satisfy the requirements of RDF applications that are required - # to simply glue together the namespace URI and the localname. Though - # considered a wart of the RDF specifications, it needs to be supported. - # - # See XML-SIG mailing list thread starting with - # http://mail.python.org/pipermail/xml-sig/2001-April/005202.html - # - expat.ParserCreate(namespace_separator='') # too short - - -class TestInterning: - def test(self): - py.test.skip("Not working") - # Test the interning machinery. - p = expat.ParserCreate() - L = [] - def collector(name, *args): - L.append(name) - p.StartElementHandler = collector - p.EndElementHandler = collector - p.Parse(" ", 1) - tag = L[0] - assert len(L) == 6 - for entry in L: - # L should have the same string repeated over and over. 
- assert tag is entry - - -class TestBufferText: - def setup_method(self, meth): - self.stuff = [] - self.parser = expat.ParserCreate() - self.parser.buffer_text = 1 - self.parser.CharacterDataHandler = self.CharacterDataHandler - - def check(self, expected, label): - assert self.stuff == expected, ( - "%s\nstuff = %r\nexpected = %r" - % (label, self.stuff, map(unicode, expected))) - - def CharacterDataHandler(self, text): - self.stuff.append(text) - - def StartElementHandler(self, name, attrs): - self.stuff.append("<%s>" % name) - bt = attrs.get("buffer-text") - if bt == "yes": - self.parser.buffer_text = 1 - elif bt == "no": - self.parser.buffer_text = 0 - - def EndElementHandler(self, name): - self.stuff.append("" % name) - - def CommentHandler(self, data): - self.stuff.append("" % data) - - def setHandlers(self, handlers=[]): - for name in handlers: - setattr(self.parser, name, getattr(self, name)) - - def test_default_to_disabled(self): - parser = expat.ParserCreate() - assert not parser.buffer_text - - def test_buffering_enabled(self): - # Make sure buffering is turned on - assert self.parser.buffer_text - self.parser.Parse("123", 1) - assert self.stuff == ['123'], ( - "buffered text not properly collapsed") - - def test1(self): - # XXX This test exposes more detail of Expat's text chunking than we - # XXX like, but it tests what we need to concisely. 
- self.setHandlers(["StartElementHandler"]) - self.parser.Parse("12\n34\n5", 1) - assert self.stuff == ( - ["", "1", "", "2", "\n", "3", "", "4\n5"]), ( - "buffering control not reacting as expected") - - def test2(self): - self.parser.Parse("1<2> \n 3", 1) - assert self.stuff == ["1<2> \n 3"], ( - "buffered text not properly collapsed") - - def test3(self): - self.setHandlers(["StartElementHandler"]) - self.parser.Parse("123", 1) - assert self.stuff == ["", "1", "", "2", "", "3"], ( - "buffered text not properly split") - - def test4(self): - self.setHandlers(["StartElementHandler", "EndElementHandler"]) - self.parser.CharacterDataHandler = None - self.parser.Parse("123", 1) - assert self.stuff == ( - ["", "", "", "", "", ""]) - - def test5(self): - self.setHandlers(["StartElementHandler", "EndElementHandler"]) - self.parser.Parse("123", 1) - assert self.stuff == ( - ["", "1", "", "", "2", "", "", "3", ""]) - - def test6(self): - self.setHandlers(["CommentHandler", "EndElementHandler", - "StartElementHandler"]) - self.parser.Parse("12345 ", 1) - assert self.stuff == ( - ["", "1", "", "", "2", "", "", "345", ""]), ( - "buffered text not properly split") - - def test7(self): - self.setHandlers(["CommentHandler", "EndElementHandler", - "StartElementHandler"]) - self.parser.Parse("12345 ", 1) - assert self.stuff == ( - ["", "1", "", "", "2", "", "", "3", - "", "4", "", "5", ""]), ( - "buffered text not properly split") - - -# Test handling of exception from callback: -class TestHandlerException: - def StartElementHandler(self, name, attrs): - raise RuntimeError(name) - - def test(self): - parser = expat.ParserCreate() - parser.StartElementHandler = self.StartElementHandler - try: - parser.Parse("", 1) - raise AssertionError - except RuntimeError, e: - assert e.args[0] == 'a', ( - "Expected RuntimeError for element 'a', but" + \ - " found %r" % e.args[0]) - - -# Test Current* members: -class TestPosition: - def StartElementHandler(self, name, attrs): - 
self.check_pos('s') - - def EndElementHandler(self, name): - self.check_pos('e') - - def check_pos(self, event): - pos = (event, - self.parser.CurrentByteIndex, - self.parser.CurrentLineNumber, - self.parser.CurrentColumnNumber) - assert self.upto < len(self.expected_list) - expected = self.expected_list[self.upto] - assert pos == expected, ( - 'Expected position %s, got position %s' %(pos, expected)) - self.upto += 1 - - def test(self): - self.parser = expat.ParserCreate() - self.parser.StartElementHandler = self.StartElementHandler - self.parser.EndElementHandler = self.EndElementHandler - self.upto = 0 - self.expected_list = [('s', 0, 1, 0), ('s', 5, 2, 1), ('s', 11, 3, 2), - ('e', 15, 3, 6), ('e', 17, 4, 1), ('e', 22, 5, 0)] - - xml = '\n \n \n \n' - self.parser.Parse(xml, 1) - - -class Testsf1296433: - def test_parse_only_xml_data(self): - # http://python.org/sf/1296433 - # - xml = "%s" % ('a' * 1025) - # this one doesn't crash - #xml = "%s" % ('a' * 10000) - - class SpecificException(Exception): - pass - - def handler(text): - raise SpecificException - - parser = expat.ParserCreate() - parser.CharacterDataHandler = handler - - py.test.raises(Exception, parser.Parse, xml) - -class TestChardataBuffer: - """ - test setting of chardata buffer size - """ - - def test_1025_bytes(self): - assert self.small_buffer_test(1025) == 2 - - def test_1000_bytes(self): - assert self.small_buffer_test(1000) == 1 - - def test_wrong_size(self): - parser = expat.ParserCreate() - parser.buffer_text = 1 - def f(size): - parser.buffer_size = size - - py.test.raises(TypeError, f, sys.maxint+1) - py.test.raises(ValueError, f, -1) - py.test.raises(ValueError, f, 0) - - def test_unchanged_size(self): - xml1 = ("%s" % ('a' * 512)) - xml2 = 'a'*512 + '' - parser = expat.ParserCreate() - parser.CharacterDataHandler = self.counting_handler - parser.buffer_size = 512 - parser.buffer_text = 1 - - # Feed 512 bytes of character data: the handler should be called - # once. 
- self.n = 0 - parser.Parse(xml1) - assert self.n == 1 - - # Reassign to buffer_size, but assign the same size. - parser.buffer_size = parser.buffer_size - assert self.n == 1 - - # Try parsing rest of the document - parser.Parse(xml2) - assert self.n == 2 - - - def test_disabling_buffer(self): - xml1 = "%s" % ('a' * 512) - xml2 = ('b' * 1024) - xml3 = "%s" % ('c' * 1024) - parser = expat.ParserCreate() - parser.CharacterDataHandler = self.counting_handler - parser.buffer_text = 1 - parser.buffer_size = 1024 - assert parser.buffer_size == 1024 - - # Parse one chunk of XML - self.n = 0 - parser.Parse(xml1, 0) - assert parser.buffer_size == 1024 - assert self.n == 1 - - # Turn off buffering and parse the next chunk. - parser.buffer_text = 0 - assert not parser.buffer_text - assert parser.buffer_size == 1024 - for i in range(10): - parser.Parse(xml2, 0) - assert self.n == 11 - - parser.buffer_text = 1 - assert parser.buffer_text - assert parser.buffer_size == 1024 - parser.Parse(xml3, 1) - assert self.n == 12 - - - - def make_document(self, bytes): - return ("" + bytes * 'a' + '') - - def counting_handler(self, text): - self.n += 1 - - def small_buffer_test(self, buffer_len): - xml = "%s" % ('a' * buffer_len) - parser = expat.ParserCreate() - parser.CharacterDataHandler = self.counting_handler - parser.buffer_size = 1024 - parser.buffer_text = 1 - - self.n = 0 - parser.Parse(xml) - return self.n - - def test_change_size_1(self): - xml1 = "%s" % ('a' * 1024) - xml2 = "aaa%s" % ('a' * 1025) - parser = expat.ParserCreate() - parser.CharacterDataHandler = self.counting_handler - parser.buffer_text = 1 - parser.buffer_size = 1024 - assert parser.buffer_size == 1024 - - self.n = 0 - parser.Parse(xml1, 0) - parser.buffer_size *= 2 - assert parser.buffer_size == 2048 - parser.Parse(xml2, 1) - assert self.n == 2 - - def test_change_size_2(self): - xml1 = "a%s" % ('a' * 1023) - xml2 = "aaa%s" % ('a' * 1025) - parser = expat.ParserCreate() - parser.CharacterDataHandler = 
self.counting_handler - parser.buffer_text = 1 - parser.buffer_size = 2048 - assert parser.buffer_size == 2048 - - self.n=0 - parser.Parse(xml1, 0) - parser.buffer_size /= 2 - assert parser.buffer_size == 1024 - parser.Parse(xml2, 1) - assert self.n == 4 - - def test_segfault(self): - py.test.raises(TypeError, expat.ParserCreate, 1234123123) - -def test_invalid_data(): - parser = expat.ParserCreate() - parser.Parse('invalid.xml', 0) - try: - parser.Parse("", 1) - except expat.ExpatError, e: - assert e.code == 2 # XXX is this reliable? - assert e.lineno == 1 - assert e.message.startswith('syntax error') - else: - py.test.fail("Did not raise") - diff --git a/pypy/config/translationoption.py b/pypy/config/translationoption.py --- a/pypy/config/translationoption.py +++ b/pypy/config/translationoption.py @@ -106,7 +106,8 @@ BoolOption("sandbox", "Produce a fully-sandboxed executable", default=False, cmdline="--sandbox", requires=[("translation.thread", False)], - suggests=[("translation.gc", "generation")]), + suggests=[("translation.gc", "generation"), + ("translation.gcrootfinder", "shadowstack")]), BoolOption("rweakref", "The backend supports RPython-level weakrefs", default=True), diff --git a/pypy/conftest.py b/pypy/conftest.py --- a/pypy/conftest.py +++ b/pypy/conftest.py @@ -539,6 +539,7 @@ def _spawn(self, *args, **kwds): import pexpect + kwds.setdefault('timeout', 600) child = pexpect.spawn(*args, **kwds) child.logfile = sys.stdout return child diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -388,7 +388,9 @@ In a few cases (e.g. hash table manipulation), we need machine-sized unsigned arithmetic. For these cases there is the r_uint class, which is a pure Python implementation of word-sized unsigned integers that silently wrap - around. The purpose of this class (as opposed to helper functions as above) + around. 
("word-sized" and "machine-sized" are used equivalently and mean + the native size, which you get using "unsigned long" in C.) + The purpose of this class (as opposed to helper functions as above) is consistent typing: both Python and the annotator will propagate r_uint instances in the program and interpret all the operations between them as unsigned. Instances of r_uint are special-cased by the code generators to diff --git a/pypy/doc/config/objspace.usemodules.pyexpat.txt b/pypy/doc/config/objspace.usemodules.pyexpat.txt --- a/pypy/doc/config/objspace.usemodules.pyexpat.txt +++ b/pypy/doc/config/objspace.usemodules.pyexpat.txt @@ -1,2 +1,1 @@ -Use (experimental) pyexpat module written in RPython, instead of CTypes -version which is used by default. +Use the pyexpat module, written in RPython. diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -103,18 +103,22 @@ executable. The executable behaves mostly like a normal Python interpreter:: $ ./pypy-c - Python 2.7.0 (61ef2a11b56a, Mar 02 2011, 03:00:11) - [PyPy 1.6.0 with GCC 4.4.3] on linux2 + Python 2.7.2 (0e28b379d8b3, Feb 09 2012, 19:41:03) + [PyPy 1.8.0 with GCC 4.4.3] on linux2 Type "help", "copyright", "credits" or "license" for more information. And now for something completely different: ``this sentence is false'' >>>> 46 - 4 42 >>>> from test import pystone >>>> pystone.main() - Pystone(1.1) time for 50000 passes = 0.280017 - This machine benchmarks at 178561 pystones/second - >>>> + Pystone(1.1) time for 50000 passes = 0.220015 + This machine benchmarks at 227257 pystones/second + >>>> pystone.main() + Pystone(1.1) time for 50000 passes = 0.060004 + This machine benchmarks at 833278 pystones/second + >>>> +Note that pystone gets faster as the JIT kicks in. This executable can be moved around or copied on other machines; see Installation_ below. 
diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -53,14 +53,15 @@ PyPy is ready to be executed as soon as you unpack the tarball or the zip file, with no need to install it in any specific location:: - $ tar xf pypy-1.7-linux.tar.bz2 - - $ ./pypy-1.7/bin/pypy - Python 2.7.1 (?, Apr 27 2011, 12:44:21) - [PyPy 1.7.0 with GCC 4.4.3] on linux2 + $ tar xf pypy-1.8-linux.tar.bz2 + $ ./pypy-1.8/bin/pypy + Python 2.7.2 (0e28b379d8b3, Feb 09 2012, 19:41:03) + [PyPy 1.8.0 with GCC 4.4.3] on linux2 Type "help", "copyright", "credits" or "license" for more information. - And now for something completely different: ``implementing LOGO in LOGO: - "turtles all the way down"'' + And now for something completely different: ``it seems to me that once you + settle on an execution / object model and / or bytecode format, you've already + decided what languages (where the 's' seems superfluous) support is going to be + first class for'' >>>> If you want to make PyPy available system-wide, you can put a symlink to the @@ -75,14 +76,14 @@ $ curl -O https://raw.github.com/pypa/pip/master/contrib/get-pip.py - $ ./pypy-1.7/bin/pypy distribute_setup.py + $ ./pypy-1.8/bin/pypy distribute_setup.py - $ ./pypy-1.7/bin/pypy get-pip.py + $ ./pypy-1.8/bin/pypy get-pip.py - $ ./pypy-1.7/bin/pip install pygments # for example + $ ./pypy-1.8/bin/pip install pygments # for example -3rd party libraries will be installed in ``pypy-1.7/site-packages``, and -the scripts in ``pypy-1.7/bin``. +3rd party libraries will be installed in ``pypy-1.8/site-packages``, and +the scripts in ``pypy-1.8/bin``. Installing using virtualenv --------------------------- diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -15,7 +15,7 @@ * `FAQ`_: some frequently asked questions. 
-* `Release 1.7`_: the latest official release +* `Release 1.8`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -75,7 +75,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 1.7`: http://pypy.org/download.html +.. _`Release 1.8`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html @@ -120,9 +120,9 @@ Windows, on top of .NET, and on top of Java. To dig into PyPy it is recommended to try out the current Mercurial default branch, which is always working or mostly working, -instead of the latest release, which is `1.7`__. +instead of the latest release, which is `1.8`__. -.. __: release-1.7.0.html +.. __: release-1.8.0.html PyPy is mainly developed on Linux and Mac OS X. Windows is supported, but platform-specific bugs tend to take longer before we notice and fix diff --git a/pypy/doc/release-1.8.0.rst b/pypy/doc/release-1.8.0.rst --- a/pypy/doc/release-1.8.0.rst +++ b/pypy/doc/release-1.8.0.rst @@ -2,16 +2,21 @@ PyPy 1.8 - business as usual ============================ -We're pleased to announce the 1.8 release of PyPy. As has become a habit, this -release brings a lot of bugfixes, and performance and memory improvements over -the 1.7 release. The main highlight of the release is the introduction of -list strategies which makes homogenous lists more efficient both in terms -of performance and memory. This release also upgrades us from Python 2.7.1 compatibility to 2.7.2. Otherwise it's "business as usual" in the sense -that performance improved roughly 10% on average since the previous release. -You can download the PyPy 1.8 release here: +We're pleased to announce the 1.8 release of PyPy. As habitual this +release brings a lot of bugfixes, together with performance and memory +improvements over the 1.7 release. 
The main highlight of the release +is the introduction of `list strategies`_ which makes homogenous lists +more efficient both in terms of performance and memory. This release +also upgrades us from Python 2.7.1 compatibility to 2.7.2. Otherwise +it's "business as usual" in the sense that performance improved +roughly 10% on average since the previous release. + +you can download the PyPy 1.8 release here: http://pypy.org/download.html +.. _`list strategies`: http://morepypy.blogspot.com/2011/10/more-compact-lists-with-list-strategies.html + What is PyPy? ============= @@ -60,13 +65,6 @@ * New JIT hooks that allow you to hook into the JIT process from your python program. There is a `brief overview`_ of what they offer. -* Since the last release there was a significant breakthrough in PyPy's - fundraising. We now have enough funds to work on first stages of `numpypy`_ - and `py3k`_. We would like to thank again to everyone who donated. - - It's also probably worth noting, we're considering donations for the STM - project. - * Standard library upgrade from 2.7.1 to 2.7.2. Ongoing work @@ -82,7 +80,15 @@ * More numpy work -* Software Transactional Memory, you can read more about `our plans`_ +* Since the last release there was a significant breakthrough in PyPy's + fundraising. We now have enough funds to work on first stages of `numpypy`_ + and `py3k`_. We would like to thank again to everyone who donated. + +* It's also probably worth noting, we're considering donations for the + Software Transactional Memory project. You can read more about `our plans`_ + +Cheers, +The PyPy Team .. _`brief overview`: http://doc.pypy.org/en/latest/jit-hooks.html .. 
_`numpy status page`: http://buildbot.pypy.org/numpy-status/latest.html diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -60,11 +60,10 @@ self.pycode = code eval.Frame.__init__(self, space, w_globals) self.locals_stack_w = [None] * (code.co_nlocals + code.co_stacksize) - self.nlocals = code.co_nlocals self.valuestackdepth = code.co_nlocals self.lastblock = None make_sure_not_resized(self.locals_stack_w) - check_nonneg(self.nlocals) + check_nonneg(self.valuestackdepth) # if space.config.objspace.honor__builtins__: self.builtin = space.builtin.pick_builtin(w_globals) @@ -144,8 +143,8 @@ def execute_frame(self, w_inputvalue=None, operr=None): """Execute this frame. Main entry point to the interpreter. The optional arguments are there to handle a generator's frame: - w_inputvalue is for generator.send()) and operr is for - generator.throw()). + w_inputvalue is for generator.send() and operr is for + generator.throw(). 
""" # the following 'assert' is an annotation hint: it hides from # the annotator all methods that are defined in PyFrame but @@ -195,7 +194,7 @@ def popvalue(self): depth = self.valuestackdepth - 1 - assert depth >= self.nlocals, "pop from empty value stack" + assert depth >= self.pycode.co_nlocals, "pop from empty value stack" w_object = self.locals_stack_w[depth] self.locals_stack_w[depth] = None self.valuestackdepth = depth @@ -223,7 +222,7 @@ def peekvalues(self, n): values_w = [None] * n base = self.valuestackdepth - n - assert base >= self.nlocals + assert base >= self.pycode.co_nlocals while True: n -= 1 if n < 0: @@ -235,7 +234,8 @@ def dropvalues(self, n): n = hint(n, promote=True) finaldepth = self.valuestackdepth - n - assert finaldepth >= self.nlocals, "stack underflow in dropvalues()" + assert finaldepth >= self.pycode.co_nlocals, ( + "stack underflow in dropvalues()") while True: n -= 1 if n < 0: @@ -267,13 +267,15 @@ # Contrast this with CPython where it's PEEK(-1). index_from_top = hint(index_from_top, promote=True) index = self.valuestackdepth + ~index_from_top - assert index >= self.nlocals, "peek past the bottom of the stack" + assert index >= self.pycode.co_nlocals, ( + "peek past the bottom of the stack") return self.locals_stack_w[index] def settopvalue(self, w_object, index_from_top=0): index_from_top = hint(index_from_top, promote=True) index = self.valuestackdepth + ~index_from_top - assert index >= self.nlocals, "settop past the bottom of the stack" + assert index >= self.pycode.co_nlocals, ( + "settop past the bottom of the stack") self.locals_stack_w[index] = w_object @jit.unroll_safe @@ -320,12 +322,13 @@ else: f_lineno = self.f_lineno - values_w = self.locals_stack_w[self.nlocals:self.valuestackdepth] + nlocals = self.pycode.co_nlocals + values_w = self.locals_stack_w[nlocals:self.valuestackdepth] w_valuestack = maker.slp_into_tuple_with_nulls(space, values_w) w_blockstack = nt([block._get_state_(space) for block in 
self.get_blocklist()]) w_fastlocals = maker.slp_into_tuple_with_nulls( - space, self.locals_stack_w[:self.nlocals]) + space, self.locals_stack_w[:nlocals]) if self.last_exception is None: w_exc_value = space.w_None w_tb = space.w_None @@ -442,7 +445,7 @@ """Initialize the fast locals from a list of values, where the order is according to self.pycode.signature().""" scope_len = len(scope_w) - if scope_len > self.nlocals: + if scope_len > self.pycode.co_nlocals: raise ValueError, "new fastscope is longer than the allocated area" # don't assign directly to 'locals_stack_w[:scope_len]' to be # virtualizable-friendly @@ -456,7 +459,7 @@ pass def getfastscopelength(self): - return self.nlocals + return self.pycode.co_nlocals def getclosure(self): return None diff --git a/pypy/jit/backend/arm/opassembler.py b/pypy/jit/backend/arm/opassembler.py --- a/pypy/jit/backend/arm/opassembler.py +++ b/pypy/jit/backend/arm/opassembler.py @@ -16,6 +16,7 @@ gen_emit_unary_float_op, saved_registers, count_reg_args) +from pypy.jit.backend.arm.helper.regalloc import check_imm_arg from pypy.jit.backend.arm.codebuilder import ARMv7Builder, OverwritingBuilder from pypy.jit.backend.arm.jump import remap_frame_layout from pypy.jit.backend.arm.regalloc import TempInt, TempPtr @@ -261,7 +262,6 @@ def emit_op_guard_overflow(self, op, arglocs, regalloc, fcond): return self._emit_guard(op, arglocs, c.VS, save_exc=False) - # from ../x86/assembler.py:1265 def emit_op_guard_class(self, op, arglocs, regalloc, fcond): self._cmp_guard_class(op, arglocs, regalloc, fcond) self._emit_guard(op, arglocs[3:], c.EQ, save_exc=False) @@ -279,8 +279,12 @@ self.mc.LDR_ri(r.ip.value, locs[0].value, offset.value, cond=fcond) self.mc.CMP_rr(r.ip.value, locs[1].value, cond=fcond) else: - raise NotImplementedError - # XXX port from x86 backend once gc support is in place + typeid = locs[1] + self.mc.LDRH_ri(r.ip.value, locs[0].value, cond=fcond) + if typeid.is_imm(): + self.mc.CMP_ri(r.ip.value, typeid.value, 
cond=fcond) + else: + self.mc.CMP_rr(r.ip.value, typeid.value, cond=fcond) def emit_op_guard_not_invalidated(self, op, locs, regalloc, fcond): return self._emit_guard(op, locs, fcond, save_exc=False, @@ -531,16 +535,10 @@ else: raise AssertionError(opnum) loc_base = arglocs[0] - self.mc.LDR_ri(r.ip.value, loc_base.value) - # calculate the shift value to rotate the ofs according to the ARM - # shifted imm values - # (4 - 0) * 4 & 0xF = 0 - # (4 - 1) * 4 & 0xF = 12 - # (4 - 2) * 4 & 0xF = 8 - # (4 - 3) * 4 & 0xF = 4 - ofs = (((4 - descr.jit_wb_if_flag_byteofs) * 4) & 0xF) << 8 - ofs |= descr.jit_wb_if_flag_singlebyte - self.mc.TST_ri(r.ip.value, imm=ofs) + assert check_imm_arg(descr.jit_wb_if_flag_byteofs) + assert check_imm_arg(descr.jit_wb_if_flag_singlebyte) + self.mc.LDRB_ri(r.ip.value, loc_base.value, imm=descr.jit_wb_if_flag_byteofs) + self.mc.TST_ri(r.ip.value, imm=descr.jit_wb_if_flag_singlebyte) jz_location = self.mc.currpos() self.mc.BKPT() @@ -548,11 +546,10 @@ # for cond_call_gc_wb_array, also add another fast path: # if GCFLAG_CARDS_SET, then we can just set one bit and be done if card_marking: - # calculate the shift value to rotate the ofs according to the ARM - # shifted imm values - ofs = (((4 - descr.jit_wb_cards_set_byteofs) * 4) & 0xF) << 8 - ofs |= descr.jit_wb_cards_set_singlebyte - self.mc.TST_ri(r.ip.value, imm=ofs) + assert check_imm_arg(descr.jit_wb_cards_set_byteofs) + assert check_imm_arg(descr.jit_wb_cards_set_singlebyte) + self.mc.LDRB_ri(r.ip.value, loc_base.value, imm=descr.jit_wb_cards_set_byteofs) + self.mc.TST_ri(r.ip.value, imm=descr.jit_wb_cards_set_singlebyte) # jnz_location = self.mc.currpos() self.mc.BKPT() diff --git a/pypy/jit/backend/arm/regalloc.py b/pypy/jit/backend/arm/regalloc.py --- a/pypy/jit/backend/arm/regalloc.py +++ b/pypy/jit/backend/arm/regalloc.py @@ -22,7 +22,8 @@ from pypy.jit.metainterp.resoperation import rop from pypy.jit.backend.llsupport.descr import ArrayDescr from pypy.jit.backend.llsupport import 
symbolic -from pypy.rpython.lltypesystem import lltype, rffi, rstr +from pypy.rpython.lltypesystem import lltype, rffi, rstr, llmemory +from pypy.rpython.lltypesystem.lloperation import llop from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.backend.llsupport.descr import unpack_arraydescr from pypy.jit.backend.llsupport.descr import unpack_fielddescr @@ -653,15 +654,44 @@ boxes = op.getarglist() x = self._ensure_value_is_boxed(boxes[0], boxes) - y = self.get_scratch_reg(INT, forbidden_vars=boxes) y_val = rffi.cast(lltype.Signed, op.getarg(1).getint()) - self.assembler.load(y, imm(y_val)) + + arglocs = [x, None, None] offset = self.cpu.vtable_offset - assert offset is not None - assert check_imm_arg(offset) - offset_loc = imm(offset) - arglocs = self._prepare_guard(op, [x, y, offset_loc]) + if offset is not None: + y = self.get_scratch_reg(INT, forbidden_vars=boxes) + self.assembler.load(y, imm(y_val)) + + assert check_imm_arg(offset) + offset_loc = imm(offset) + + arglocs[1] = y + arglocs[2] = offset_loc + else: + # XXX hard-coded assumption: to go from an object to its class + # we use the following algorithm: + # - read the typeid from mem(locs[0]), i.e. 
at offset 0 + # - keep the lower 16 bits read there + # - multiply by 4 and use it as an offset in type_info_group + # - add 16 bytes, to go past the TYPE_INFO structure + classptr = y_val + # here, we have to go back from 'classptr' to the value expected + # from reading the 16 bits in the object header + from pypy.rpython.memory.gctypelayout import GCData + sizeof_ti = rffi.sizeof(GCData.TYPE_INFO) + type_info_group = llop.gc_get_type_info_group(llmemory.Address) + type_info_group = rffi.cast(lltype.Signed, type_info_group) + expected_typeid = classptr - sizeof_ti - type_info_group + expected_typeid >>= 2 + if check_imm_arg(expected_typeid): + arglocs[1] = imm(expected_typeid) + else: + y = self.get_scratch_reg(INT, forbidden_vars=boxes) + self.assembler.load(y, imm(expected_typeid)) + arglocs[1] = y + + return self._prepare_guard(op, arglocs) return arglocs @@ -978,7 +1008,7 @@ prepare_op_debug_merge_point = void prepare_op_jit_debug = void - prepare_keepalive = void + prepare_op_keepalive = void def prepare_op_cond_call_gc_wb(self, op, fcond): assert op.result is None diff --git a/pypy/jit/backend/arm/runner.py b/pypy/jit/backend/arm/runner.py --- a/pypy/jit/backend/arm/runner.py +++ b/pypy/jit/backend/arm/runner.py @@ -15,9 +15,6 @@ gcdescr=None): if gcdescr is not None: gcdescr.force_index_ofs = FORCE_INDEX_OFS - # XXX for now the arm backend does not support the gcremovetypeptr - # translation option - assert gcdescr.config.translation.gcremovetypeptr is False AbstractLLCPU.__init__(self, rtyper, stats, opts, translate_support_code, gcdescr) diff --git a/pypy/jit/backend/arm/test/test_ztranslation.py b/pypy/jit/backend/arm/test/test_ztranslation.py --- a/pypy/jit/backend/arm/test/test_ztranslation.py +++ b/pypy/jit/backend/arm/test/test_ztranslation.py @@ -173,3 +173,87 @@ bound = res & ~255 assert 1024 <= bound <= 131072 assert bound & (bound-1) == 0 # a power of two + +class TestTranslationRemoveTypePtrARM(CCompiledMixin): + CPUClass = getcpuclass() + + 
def _get_TranslationContext(self): + t = TranslationContext() + t.config.translation.gc = DEFL_GC # 'hybrid' or 'minimark' + t.config.translation.gcrootfinder = 'shadowstack' + t.config.translation.list_comprehension_operations = True + t.config.translation.gcremovetypeptr = True + return t + + def test_external_exception_handling_translates(self): + jitdriver = JitDriver(greens = [], reds = ['n', 'total']) + + class ImDone(Exception): + def __init__(self, resvalue): + self.resvalue = resvalue + + @dont_look_inside + def f(x, total): + if x <= 30: + raise ImDone(total * 10) + if x > 200: + return 2 + raise ValueError + @dont_look_inside + def g(x): + if x > 150: + raise ValueError + return 2 + class Base: + def meth(self): + return 2 + class Sub(Base): + def meth(self): + return 1 + @dont_look_inside + def h(x): + if x < 20000: + return Sub() + else: + return Base() + def myportal(i): + set_param(jitdriver, "threshold", 3) + set_param(jitdriver, "trace_eagerness", 2) + total = 0 + n = i + while True: + jitdriver.can_enter_jit(n=n, total=total) + jitdriver.jit_merge_point(n=n, total=total) + try: + total += f(n, total) + except ValueError: + total += 1 + try: + total += g(n) + except ValueError: + total -= 1 + n -= h(n).meth() # this is to force a GUARD_CLASS + def main(i): + try: + myportal(i) + except ImDone, e: + return e.resvalue + + # XXX custom fishing, depends on the exact env var and format + logfile = udir.join('test_ztranslation.log') + os.environ['PYPYLOG'] = 'jit-log-opt:%s' % (logfile,) + try: + res = self.meta_interp(main, [400]) + assert res == main(400) + finally: + del os.environ['PYPYLOG'] + + guard_class = 0 + for line in open(str(logfile)): + if 'guard_class' in line: + guard_class += 1 + # if we get many more guard_classes, it means that we generate + # guards that always fail (the following assert's original purpose + # is to catch the following case: each GUARD_CLASS is misgenerated + # and always fails with "gcremovetypeptr") + assert 0 < 
guard_class < 10 diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -20,6 +20,7 @@ from pypy.jit.metainterp.resoperation import rop from pypy.jit.backend.llgraph import symbolic from pypy.jit.codewriter import longlong +from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.rlib import libffi, clibffi from pypy.rlib.objectmodel import ComputedIntSymbolic, we_are_translated @@ -929,6 +930,11 @@ raise NotImplementedError def op_call(self, calldescr, func, *args): + effectinfo = calldescr.get_extra_info() + if effectinfo is not None: + oopspecindex = effectinfo.oopspecindex + if oopspecindex == EffectInfo.OS_MATH_SQRT: + return do_math_sqrt(args[0]) return self._do_call(calldescr, func, args, call_with_llptr=False) def op_call_release_gil(self, calldescr, func, *args): @@ -1626,6 +1632,12 @@ assert 0 <= dststart <= dststart + length <= len(dst.chars) rstr.copy_unicode_contents(src, dst, srcstart, dststart, length) +def do_math_sqrt(value): + import math + y = cast_from_floatstorage(lltype.Float, value) + x = math.sqrt(y) + return cast_to_floatstorage(x) + # ---------- call ---------- _call_args_i = [] diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -179,6 +179,8 @@ def _compile_operations(self, c, operations, var2index, clt): for op in operations: + if op.getopnum() == -124: # force_spill + continue llimpl.compile_add(c, op.getopnum()) descr = op.getdescr() if isinstance(descr, Descr): diff --git a/pypy/jit/backend/model.py b/pypy/jit/backend/model.py --- a/pypy/jit/backend/model.py +++ b/pypy/jit/backend/model.py @@ -22,7 +22,7 @@ total_freed_bridges = 0 # for heaptracker - _all_size_descrs_with_vtable = None + # _all_size_descrs_with_vtable = None _vtable_to_descr_dict = None diff --git 
a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -2443,6 +2443,35 @@ print 'step 4 ok' print '-'*79 + def test_guard_not_invalidated_and_label(self): + # test that the guard_not_invalidated reserves enough room before + # the label. If it doesn't, then in this example after we invalidate + # the guard, jumping to the label will hit the invalidation code too + cpu = self.cpu + i0 = BoxInt() + faildescr = BasicFailDescr(1) + labeldescr = TargetToken() + ops = [ + ResOperation(rop.GUARD_NOT_INVALIDATED, [], None, descr=faildescr), + ResOperation(rop.LABEL, [i0], None, descr=labeldescr), + ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(3)), + ] + ops[0].setfailargs([]) + looptoken = JitCellToken() + self.cpu.compile_loop([i0], ops, looptoken) + # mark as failing + self.cpu.invalidate_loop(looptoken) + # attach a bridge + i2 = BoxInt() + ops = [ + ResOperation(rop.JUMP, [ConstInt(333)], None, descr=labeldescr), + ] + self.cpu.compile_bridge(faildescr, [], ops, looptoken) + # run: must not be caught in an infinite loop + fail = self.cpu.execute_token(looptoken, 16) + assert fail.identifier == 3 + assert self.cpu.get_latest_value_int(0) == 333 + # pure do_ / descr features def test_do_operations(self): diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -168,7 +168,6 @@ self.jump_target_descr = None self.close_stack_struct = 0 self.final_jump_op = None - self.min_bytes_before_label = 0 def _prepare(self, inputargs, operations, allgcrefs): self.fm = X86FrameManager() @@ -205,8 +204,13 @@ operations = self._prepare(inputargs, operations, allgcrefs) self._update_bindings(arglocs, inputargs) self.param_depth = prev_depths[1] + self.min_bytes_before_label = 0 return operations + def ensure_next_label_is_at_least_at_position(self, 
at_least_position): + self.min_bytes_before_label = max(self.min_bytes_before_label, + at_least_position) + def reserve_param(self, n): self.param_depth = max(self.param_depth, n) @@ -464,7 +468,11 @@ self.assembler.mc.mark_op(None) # end of the loop def flush_loop(self): - # rare case: if the loop is too short, pad with NOPs + # rare case: if the loop is too short, or if we are just after + # a GUARD_NOT_INVALIDATED, pad with NOPs. Important! This must + # be called to ensure that there are enough bytes produced, + # because GUARD_NOT_INVALIDATED or redirect_call_assembler() + # will maybe overwrite them. mc = self.assembler.mc while mc.get_relative_pos() < self.min_bytes_before_label: mc.NOP() @@ -500,7 +508,15 @@ def consider_guard_no_exception(self, op): self.perform_guard(op, [], None) - consider_guard_not_invalidated = consider_guard_no_exception + def consider_guard_not_invalidated(self, op): + mc = self.assembler.mc + n = mc.get_relative_pos() + self.perform_guard(op, [], None) + assert n == mc.get_relative_pos() + # ensure that the next label is at least 5 bytes farther than + # the current position. Otherwise, when invalidating the guard, + # we would overwrite randomly the next label's position. 
+ self.ensure_next_label_is_at_least_at_position(n + 5) def consider_guard_exception(self, op): loc = self.rm.make_sure_var_in_reg(op.getarg(0)) diff --git a/pypy/jit/codewriter/heaptracker.py b/pypy/jit/codewriter/heaptracker.py --- a/pypy/jit/codewriter/heaptracker.py +++ b/pypy/jit/codewriter/heaptracker.py @@ -89,7 +89,7 @@ except AttributeError: pass assert lltype.typeOf(vtable) == VTABLETYPE - if cpu._all_size_descrs_with_vtable is None: + if not hasattr(cpu, '_all_size_descrs_with_vtable'): cpu._all_size_descrs_with_vtable = [] cpu._vtable_to_descr_dict = None cpu._all_size_descrs_with_vtable.append(sizedescr) @@ -97,7 +97,7 @@ def finish_registering(cpu): # annotation hack for small examples which have no vtable at all - if cpu._all_size_descrs_with_vtable is None: + if not hasattr(cpu, '_all_size_descrs_with_vtable'): vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) register_known_gctype(cpu, vtable, rclass.OBJECT) @@ -108,7 +108,6 @@ # Build the dict {vtable: sizedescr} at runtime. # This is necessary because the 'vtables' are just pointers to # static data, so they can't be used as keys in prebuilt dicts. - assert cpu._all_size_descrs_with_vtable is not None d = cpu._vtable_to_descr_dict if d is None: d = cpu._vtable_to_descr_dict = {} @@ -130,4 +129,3 @@ vtable = descr.as_vtable_size_descr()._corresponding_vtable vtable = llmemory.cast_ptr_to_adr(vtable) return adr2int(vtable) - diff --git a/pypy/jit/codewriter/test/test_call.py b/pypy/jit/codewriter/test/test_call.py --- a/pypy/jit/codewriter/test/test_call.py +++ b/pypy/jit/codewriter/test/test_call.py @@ -195,7 +195,14 @@ def test_random_effects_on_stacklet_switch(): from pypy.jit.backend.llgraph.runner import LLtypeCPU - from pypy.rlib._rffi_stacklet import switch, thread_handle, handle + from pypy.translator.platform import CompilationError + try: + from pypy.rlib._rffi_stacklet import switch, thread_handle, handle + except CompilationError as e: + if "Unsupported platform!" 
in e.out: + py.test.skip("Unsupported platform!") + else: + raise e @jit.dont_look_inside def f(): switch(rffi.cast(thread_handle, 0), rffi.cast(handle, 0)) diff --git a/pypy/module/_io/test/test_fileio.py b/pypy/module/_io/test/test_fileio.py --- a/pypy/module/_io/test/test_fileio.py +++ b/pypy/module/_io/test/test_fileio.py @@ -134,7 +134,10 @@ assert a == 'a\nbxxxxxxx' def test_nonblocking_read(self): - import os, fcntl + try: + import os, fcntl + except ImportError: + skip("need fcntl to set nonblocking mode") r_fd, w_fd = os.pipe() # set nonblocking fcntl.fcntl(r_fd, fcntl.F_SETFL, os.O_NONBLOCK) diff --git a/pypy/module/_minimal_curses/test/test_curses.py b/pypy/module/_minimal_curses/test/test_curses.py --- a/pypy/module/_minimal_curses/test/test_curses.py +++ b/pypy/module/_minimal_curses/test/test_curses.py @@ -18,6 +18,7 @@ """ def _spawn(self, *args, **kwds): import pexpect + kwds.setdefault('timeout', 600) print 'SPAWN:', args, kwds child = pexpect.spawn(*args, **kwds) child.logfile = sys.stdout diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -23,6 +23,7 @@ from pypy.interpreter.function import StaticMethod from pypy.objspace.std.sliceobject import W_SliceObject from pypy.module.__builtin__.descriptor import W_Property +from pypy.module.__builtin__.interp_classobj import W_ClassObject from pypy.module.__builtin__.interp_memoryview import W_MemoryView from pypy.rlib.entrypoint import entrypoint from pypy.rlib.unroll import unrolling_iterable @@ -383,6 +384,7 @@ "Dict": "space.w_dict", "Tuple": "space.w_tuple", "List": "space.w_list", + "Set": "space.w_set", "Int": "space.w_int", "Bool": "space.w_bool", "Float": "space.w_float", @@ -397,6 +399,7 @@ 'Module': 'space.gettypeobject(Module.typedef)', 'Property': 'space.gettypeobject(W_Property.typedef)', 'Slice': 'space.gettypeobject(W_SliceObject.typedef)', + 'Class': 'space.gettypeobject(W_ClassObject.typedef)', 
'StaticMethod': 'space.gettypeobject(StaticMethod.typedef)', 'CFunction': 'space.gettypeobject(cpyext.methodobject.W_PyCFunctionObject.typedef)', 'WrapperDescr': 'space.gettypeobject(cpyext.methodobject.W_PyCMethodObject.typedef)' @@ -432,16 +435,16 @@ ('buf', rffi.VOIDP), ('obj', PyObject), ('len', Py_ssize_t), - # ('itemsize', Py_ssize_t), + ('itemsize', Py_ssize_t), - # ('readonly', lltype.Signed), - # ('ndim', lltype.Signed), - # ('format', rffi.CCHARP), - # ('shape', Py_ssize_tP), - # ('strides', Py_ssize_tP), - # ('suboffets', Py_ssize_tP), - # ('smalltable', rffi.CFixedArray(Py_ssize_t, 2)), - # ('internal', rffi.VOIDP) + ('readonly', lltype.Signed), + ('ndim', lltype.Signed), + ('format', rffi.CCHARP), + ('shape', Py_ssize_tP), + ('strides', Py_ssize_tP), + ('suboffsets', Py_ssize_tP), + #('smalltable', rffi.CFixedArray(Py_ssize_t, 2)), + ('internal', rffi.VOIDP) )) @specialize.memo() diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py --- a/pypy/module/cpyext/dictobject.py +++ b/pypy/module/cpyext/dictobject.py @@ -6,6 +6,7 @@ from pypy.module.cpyext.pyobject import RefcountState from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall from pypy.interpreter.error import OperationError +from pypy.rlib.objectmodel import specialize @cpython_api([], PyObject) def PyDict_New(space): @@ -191,3 +192,24 @@ raise return 0 return 1 + + at specialize.memo() +def make_frozendict(space): + return space.appexec([], '''(): + import collections + class FrozenDict(collections.Mapping): + def __init__(self, *args, **kwargs): + self._d = dict(*args, **kwargs) + def __iter__(self): + return iter(self._d) + def __len__(self): + return len(self._d) + def __getitem__(self, key): + return self._d[key] + return FrozenDict''') + + at cpython_api([PyObject], PyObject) +def PyDictProxy_New(space, w_dict): + w_frozendict = make_frozendict(space) + return space.call_function(w_frozendict, w_dict) + diff --git 
a/pypy/module/cpyext/include/methodobject.h b/pypy/module/cpyext/include/methodobject.h --- a/pypy/module/cpyext/include/methodobject.h +++ b/pypy/module/cpyext/include/methodobject.h @@ -26,6 +26,7 @@ PyObject_HEAD PyMethodDef *m_ml; /* Description of the C function to call */ PyObject *m_self; /* Passed as 'self' arg to the C func, can be NULL */ + PyObject *m_module; /* The __module__ attribute, can be anything */ } PyCFunctionObject; /* Flag passed to newmethodobject */ diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h --- a/pypy/module/cpyext/include/object.h +++ b/pypy/module/cpyext/include/object.h @@ -131,18 +131,18 @@ /* This is Py_ssize_t so it can be pointed to by strides in simple case.*/ - /* Py_ssize_t itemsize; */ - /* int readonly; */ - /* int ndim; */ - /* char *format; */ - /* Py_ssize_t *shape; */ - /* Py_ssize_t *strides; */ - /* Py_ssize_t *suboffsets; */ + Py_ssize_t itemsize; + int readonly; + int ndim; + char *format; + Py_ssize_t *shape; + Py_ssize_t *strides; + Py_ssize_t *suboffsets; /* static store for shape and strides of mono-dimensional buffers. 
*/ /* Py_ssize_t smalltable[2]; */ - /* void *internal; */ + void *internal; } Py_buffer; diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -21,12 +21,12 @@ /* Version parsed out into numeric values */ #define PY_MAJOR_VERSION 2 #define PY_MINOR_VERSION 7 -#define PY_MICRO_VERSION 1 +#define PY_MICRO_VERSION 2 #define PY_RELEASE_LEVEL PY_RELEASE_LEVEL_FINAL #define PY_RELEASE_SERIAL 0 /* Version as a string */ -#define PY_VERSION "2.7.1" +#define PY_VERSION "2.7.2" /* PyPy version as a string */ #define PYPY_VERSION "1.8.1" diff --git a/pypy/module/cpyext/include/pystate.h b/pypy/module/cpyext/include/pystate.h --- a/pypy/module/cpyext/include/pystate.h +++ b/pypy/module/cpyext/include/pystate.h @@ -10,6 +10,7 @@ typedef struct _ts { PyInterpreterState *interp; + PyObject *dict; /* Stores per-thread state */ } PyThreadState; #define Py_BEGIN_ALLOW_THREADS { \ @@ -24,4 +25,6 @@ enum {PyGILState_LOCKED, PyGILState_UNLOCKED} PyGILState_STATE; +#define PyThreadState_GET() PyThreadState_Get() + #endif /* !Py_PYSTATE_H */ diff --git a/pypy/module/cpyext/include/pythread.h b/pypy/module/cpyext/include/pythread.h --- a/pypy/module/cpyext/include/pythread.h +++ b/pypy/module/cpyext/include/pythread.h @@ -1,6 +1,8 @@ #ifndef Py_PYTHREAD_H #define Py_PYTHREAD_H +#define WITH_THREAD + typedef void *PyThread_type_lock; #define WAIT_LOCK 1 #define NOWAIT_LOCK 0 diff --git a/pypy/module/cpyext/include/structmember.h b/pypy/module/cpyext/include/structmember.h --- a/pypy/module/cpyext/include/structmember.h +++ b/pypy/module/cpyext/include/structmember.h @@ -20,7 +20,7 @@ } PyMemberDef; -/* Types */ +/* Types. These constants are also in structmemberdefs.py. */ #define T_SHORT 0 #define T_INT 1 #define T_LONG 2 @@ -42,9 +42,12 @@ #define T_LONGLONG 17 #define T_ULONGLONG 18 -/* Flags */ +/* Flags. 
These constants are also in structmemberdefs.py. */ #define READONLY 1 #define RO READONLY /* Shorthand */ +#define READ_RESTRICTED 2 +#define PY_WRITE_RESTRICTED 4 +#define RESTRICTED (READ_RESTRICTED | PY_WRITE_RESTRICTED) #ifdef __cplusplus diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py --- a/pypy/module/cpyext/methodobject.py +++ b/pypy/module/cpyext/methodobject.py @@ -32,6 +32,7 @@ PyObjectFields + ( ('m_ml', lltype.Ptr(PyMethodDef)), ('m_self', PyObject), + ('m_module', PyObject), )) PyCFunctionObject = lltype.Ptr(PyCFunctionObjectStruct) @@ -47,11 +48,13 @@ assert isinstance(w_obj, W_PyCFunctionObject) py_func.c_m_ml = w_obj.ml py_func.c_m_self = make_ref(space, w_obj.w_self) + py_func.c_m_module = make_ref(space, w_obj.w_module) @cpython_api([PyObject], lltype.Void, external=False) def cfunction_dealloc(space, py_obj): py_func = rffi.cast(PyCFunctionObject, py_obj) Py_DecRef(space, py_func.c_m_self) + Py_DecRef(space, py_func.c_m_module) from pypy.module.cpyext.object import PyObject_dealloc PyObject_dealloc(space, py_obj) diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -381,6 +381,15 @@ This is the equivalent of the Python expression hash(o).""" return space.int_w(space.hash(w_obj)) + at cpython_api([PyObject], PyObject) +def PyObject_Dir(space, w_o): + """This is equivalent to the Python expression dir(o), returning a (possibly + empty) list of strings appropriate for the object argument, or NULL if there + was an error. 
If the argument is NULL, this is like the Python dir(), + returning the names of the current locals; in this case, if no execution frame + is active then NULL is returned but PyErr_Occurred() will return false.""" + return space.call_function(space.builtin.get('dir'), w_o) + @cpython_api([PyObject, rffi.CCHARPP, Py_ssize_tP], rffi.INT_real, error=-1) def PyObject_AsCharBuffer(space, obj, bufferp, sizep): """Returns a pointer to a read-only memory location usable as @@ -430,6 +439,8 @@ return 0 +PyBUF_WRITABLE = 0x0001 # Copied from object.h + @cpython_api([lltype.Ptr(Py_buffer), PyObject, rffi.VOIDP, Py_ssize_t, lltype.Signed, lltype.Signed], rffi.INT, error=CANNOT_FAIL) def PyBuffer_FillInfo(space, view, obj, buf, length, readonly, flags): @@ -445,6 +456,18 @@ view.c_len = length view.c_obj = obj Py_IncRef(space, obj) + view.c_itemsize = 1 + if flags & PyBUF_WRITABLE: + rffi.setintfield(view, 'c_readonly', 0) + else: + rffi.setintfield(view, 'c_readonly', 1) + rffi.setintfield(view, 'c_ndim', 0) + view.c_format = lltype.nullptr(rffi.CCHARP.TO) + view.c_shape = lltype.nullptr(Py_ssize_tP.TO) + view.c_strides = lltype.nullptr(Py_ssize_tP.TO) + view.c_suboffsets = lltype.nullptr(Py_ssize_tP.TO) + view.c_internal = lltype.nullptr(rffi.VOIDP.TO) + return 0 diff --git a/pypy/module/cpyext/pyfile.py b/pypy/module/cpyext/pyfile.py --- a/pypy/module/cpyext/pyfile.py +++ b/pypy/module/cpyext/pyfile.py @@ -1,7 +1,8 @@ from pypy.rpython.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( - cpython_api, CONST_STRING, FILEP, build_type_checkers) + cpython_api, CANNOT_FAIL, CONST_STRING, FILEP, build_type_checkers) from pypy.module.cpyext.pyobject import PyObject, borrow_from +from pypy.module.cpyext.object import Py_PRINT_RAW from pypy.interpreter.error import OperationError from pypy.module._file.interp_file import W_File @@ -61,11 +62,49 @@ def PyFile_WriteString(space, s, w_p): """Write string s to file object p. 
Return 0 on success or -1 on failure; the appropriate exception will be set.""" - w_s = space.wrap(rffi.charp2str(s)) - space.call_method(w_p, "write", w_s) + w_str = space.wrap(rffi.charp2str(s)) + space.call_method(w_p, "write", w_str) + return 0 + + at cpython_api([PyObject, PyObject, rffi.INT_real], rffi.INT_real, error=-1) +def PyFile_WriteObject(space, w_obj, w_p, flags): + """ + Write object obj to file object p. The only supported flag for flags is + Py_PRINT_RAW; if given, the str() of the object is written + instead of the repr(). Return 0 on success or -1 on failure; the + appropriate exception will be set.""" + if rffi.cast(lltype.Signed, flags) & Py_PRINT_RAW: + w_str = space.str(w_obj) + else: + w_str = space.repr(w_obj) + space.call_method(w_p, "write", w_str) return 0 @cpython_api([PyObject], PyObject) def PyFile_Name(space, w_p): """Return the name of the file specified by p as a string object.""" - return borrow_from(w_p, space.getattr(w_p, space.wrap("name"))) \ No newline at end of file + return borrow_from(w_p, space.getattr(w_p, space.wrap("name"))) + + at cpython_api([PyObject, rffi.INT_real], rffi.INT_real, error=CANNOT_FAIL) +def PyFile_SoftSpace(space, w_p, newflag): + """ + This function exists for internal use by the interpreter. Set the + softspace attribute of p to newflag and return the previous value. + p does not have to be a file object for this function to work + properly; any object is supported (thought its only interesting if + the softspace attribute can be set). This function clears any + errors, and will return 0 as the previous value if the attribute + either does not exist or if there were errors in retrieving it. 
+ There is no way to detect errors from this function, but doing so + should not be needed.""" + try: + if rffi.cast(lltype.Signed, newflag): + w_newflag = space.w_True + else: + w_newflag = space.w_False + oldflag = space.int_w(space.getattr(w_p, space.wrap("softspace"))) + space.setattr(w_p, space.wrap("softspace"), w_newflag) + return oldflag + except OperationError, e: + return 0 + diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -1,12 +1,19 @@ from pypy.module.cpyext.api import ( cpython_api, generic_cpy_call, CANNOT_FAIL, CConfig, cpython_struct) +from pypy.module.cpyext.pyobject import PyObject, Py_DecRef, make_ref from pypy.rpython.lltypesystem import rffi, lltype PyInterpreterStateStruct = lltype.ForwardReference() PyInterpreterState = lltype.Ptr(PyInterpreterStateStruct) cpython_struct( - "PyInterpreterState", [('next', PyInterpreterState)], PyInterpreterStateStruct) -PyThreadState = lltype.Ptr(cpython_struct("PyThreadState", [('interp', PyInterpreterState)])) + "PyInterpreterState", + [('next', PyInterpreterState)], + PyInterpreterStateStruct) +PyThreadState = lltype.Ptr(cpython_struct( + "PyThreadState", + [('interp', PyInterpreterState), + ('dict', PyObject), + ])) @cpython_api([], PyThreadState, error=CANNOT_FAIL) def PyEval_SaveThread(space): @@ -38,41 +45,49 @@ return 1 # XXX: might be generally useful -def encapsulator(T, flavor='raw'): +def encapsulator(T, flavor='raw', dealloc=None): class MemoryCapsule(object): - def __init__(self, alloc=True): - if alloc: + def __init__(self, space): + self.space = space + if space is not None: self.memory = lltype.malloc(T, flavor=flavor) else: self.memory = lltype.nullptr(T) def __del__(self): if self.memory: + if dealloc and self.space: + dealloc(self.memory, self.space) lltype.free(self.memory, flavor=flavor) return MemoryCapsule -ThreadStateCapsule = encapsulator(PyThreadState.TO) +def 
ThreadState_dealloc(ts, space): + assert space is not None + Py_DecRef(space, ts.c_dict) +ThreadStateCapsule = encapsulator(PyThreadState.TO, + dealloc=ThreadState_dealloc) from pypy.interpreter.executioncontext import ExecutionContext -ExecutionContext.cpyext_threadstate = ThreadStateCapsule(alloc=False) +ExecutionContext.cpyext_threadstate = ThreadStateCapsule(None) class InterpreterState(object): def __init__(self, space): self.interpreter_state = lltype.malloc( PyInterpreterState.TO, flavor='raw', zero=True, immortal=True) - def new_thread_state(self): - capsule = ThreadStateCapsule() + def new_thread_state(self, space): + capsule = ThreadStateCapsule(space) ts = capsule.memory ts.c_interp = self.interpreter_state + ts.c_dict = make_ref(space, space.newdict()) return capsule def get_thread_state(self, space): ec = space.getexecutioncontext() - return self._get_thread_state(ec).memory + return self._get_thread_state(space, ec).memory - def _get_thread_state(self, ec): + def _get_thread_state(self, space, ec): if ec.cpyext_threadstate.memory == lltype.nullptr(PyThreadState.TO): - ec.cpyext_threadstate = self.new_thread_state() + ec.cpyext_threadstate = self.new_thread_state(space) return ec.cpyext_threadstate @@ -81,6 +96,11 @@ state = space.fromcache(InterpreterState) return state.get_thread_state(space) + at cpython_api([], PyObject, error=CANNOT_FAIL) +def PyThreadState_GetDict(space): + state = space.fromcache(InterpreterState) + return state.get_thread_state(space).c_dict + @cpython_api([PyThreadState], PyThreadState, error=CANNOT_FAIL) def PyThreadState_Swap(space, tstate): """Swap the current thread state with the thread state given by the argument diff --git a/pypy/module/cpyext/pythonrun.py b/pypy/module/cpyext/pythonrun.py --- a/pypy/module/cpyext/pythonrun.py +++ b/pypy/module/cpyext/pythonrun.py @@ -14,6 +14,20 @@ value.""" return space.fromcache(State).get_programname() + at cpython_api([], rffi.CCHARP) +def Py_GetVersion(space): + """Return the 
version of this Python interpreter. This is a + string that looks something like + + "1.5 (\#67, Dec 31 1997, 22:34:28) [GCC 2.7.2.2]" + + The first word (up to the first space character) is the current + Python version; the first three characters are the major and minor + version separated by a period. The returned string points into + static storage; the caller should not modify its value. The value + is available to Python code as sys.version.""" + return space.fromcache(State).get_version() + @cpython_api([lltype.Ptr(lltype.FuncType([], lltype.Void))], rffi.INT_real, error=-1) def Py_AtExit(space, func_ptr): """Register a cleanup function to be called by Py_Finalize(). The cleanup diff --git a/pypy/module/cpyext/setobject.py b/pypy/module/cpyext/setobject.py --- a/pypy/module/cpyext/setobject.py +++ b/pypy/module/cpyext/setobject.py @@ -54,6 +54,20 @@ return 0 + at cpython_api([PyObject], PyObject) +def PySet_Pop(space, w_set): + """Return a new reference to an arbitrary object in the set, and removes the + object from the set. Return NULL on failure. Raise KeyError if the + set is empty. 
Raise a SystemError if set is an not an instance of + set or its subtype.""" + return space.call_method(w_set, "pop") + + at cpython_api([PyObject], rffi.INT_real, error=-1) +def PySet_Clear(space, w_set): + """Empty an existing set of all elements.""" + space.call_method(w_set, 'clear') + return 0 + @cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL) def PySet_GET_SIZE(space, w_s): """Macro form of PySet_Size() without error checking.""" diff --git a/pypy/module/cpyext/state.py b/pypy/module/cpyext/state.py --- a/pypy/module/cpyext/state.py +++ b/pypy/module/cpyext/state.py @@ -10,6 +10,7 @@ self.space = space self.reset() self.programname = lltype.nullptr(rffi.CCHARP.TO) + self.version = lltype.nullptr(rffi.CCHARP.TO) def reset(self): from pypy.module.cpyext.modsupport import PyMethodDef @@ -102,6 +103,15 @@ lltype.render_immortal(self.programname) return self.programname + def get_version(self): + if not self.version: + space = self.space + w_version = space.sys.get('version') + version = space.str_w(w_version) + self.version = rffi.str2charp(version) + lltype.render_immortal(self.version) + return self.version + def find_extension(self, name, path): from pypy.module.cpyext.modsupport import PyImport_AddModule from pypy.interpreter.module import Module diff --git a/pypy/module/cpyext/stringobject.py b/pypy/module/cpyext/stringobject.py --- a/pypy/module/cpyext/stringobject.py +++ b/pypy/module/cpyext/stringobject.py @@ -250,6 +250,26 @@ s = rffi.charp2str(string) return space.new_interned_str(s) + at cpython_api([PyObjectP], lltype.Void) +def PyString_InternInPlace(space, string): + """Intern the argument *string in place. The argument must be the + address of a pointer variable pointing to a Python string object. 
+ If there is an existing interned string that is the same as + *string, it sets *string to it (decrementing the reference count + of the old string object and incrementing the reference count of + the interned string object), otherwise it leaves *string alone and + interns it (incrementing its reference count). (Clarification: + even though there is a lot of talk about reference counts, think + of this function as reference-count-neutral; you own the object + after the call if and only if you owned it before the call.) + + This function is not available in 3.x and does not have a PyBytes + alias.""" + w_str = from_ref(space, string[0]) + w_str = space.new_interned_w_str(w_str) + Py_DecRef(space, string[0]) + string[0] = make_ref(space, w_str) + @cpython_api([PyObject, rffi.CCHARP, rffi.CCHARP], PyObject) def PyString_AsEncodedObject(space, w_str, encoding, errors): """Encode a string object using the codec registered for encoding and return diff --git a/pypy/module/cpyext/structmemberdefs.py b/pypy/module/cpyext/structmemberdefs.py --- a/pypy/module/cpyext/structmemberdefs.py +++ b/pypy/module/cpyext/structmemberdefs.py @@ -1,3 +1,5 @@ +# These constants are also in include/structmember.h + T_SHORT = 0 T_INT = 1 T_LONG = 2 @@ -18,3 +20,6 @@ T_ULONGLONG = 18 READONLY = RO = 1 +READ_RESTRICTED = 2 +WRITE_RESTRICTED = 4 +RESTRICTED = READ_RESTRICTED | WRITE_RESTRICTED diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py --- a/pypy/module/cpyext/stubs.py +++ b/pypy/module/cpyext/stubs.py @@ -1,5 +1,5 @@ from pypy.module.cpyext.api import ( - cpython_api, PyObject, PyObjectP, CANNOT_FAIL, Py_buffer + cpython_api, PyObject, PyObjectP, CANNOT_FAIL ) from pypy.module.cpyext.complexobject import Py_complex_ptr as Py_complex from pypy.rpython.lltypesystem import rffi, lltype @@ -10,6 +10,7 @@ PyMethodDef = rffi.VOIDP PyGetSetDef = rffi.VOIDP PyMemberDef = rffi.VOIDP +Py_buffer = rffi.VOIDP va_list = rffi.VOIDP PyDateTime_Date = rffi.VOIDP 
PyDateTime_DateTime = rffi.VOIDP @@ -32,10 +33,6 @@ def _PyObject_Del(space, op): raise NotImplementedError - at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) -def PyObject_CheckBuffer(space, obj): - raise NotImplementedError - @cpython_api([rffi.CCHARP], Py_ssize_t, error=CANNOT_FAIL) def PyBuffer_SizeFromFormat(space, format): """Return the implied ~Py_buffer.itemsize from the struct-stype @@ -684,28 +681,6 @@ """ raise NotImplementedError - at cpython_api([PyObject, rffi.INT_real], rffi.INT_real, error=CANNOT_FAIL) -def PyFile_SoftSpace(space, p, newflag): - """ - This function exists for internal use by the interpreter. Set the - softspace attribute of p to newflag and return the previous value. - p does not have to be a file object for this function to work properly; any - object is supported (thought its only interesting if the softspace - attribute can be set). This function clears any errors, and will return 0 - as the previous value if the attribute either does not exist or if there were - errors in retrieving it. There is no way to detect errors from this function, - but doing so should not be needed.""" - raise NotImplementedError - - at cpython_api([PyObject, PyObject, rffi.INT_real], rffi.INT_real, error=-1) -def PyFile_WriteObject(space, obj, p, flags): - """ - Write object obj to file object p. The only supported flag for flags is - Py_PRINT_RAW; if given, the str() of the object is written - instead of the repr(). Return 0 on success or -1 on failure; the - appropriate exception will be set.""" - raise NotImplementedError - @cpython_api([], PyObject) def PyFloat_GetInfo(space): """Return a structseq instance which contains information about the @@ -1097,19 +1072,6 @@ raise NotImplementedError @cpython_api([], rffi.CCHARP) -def Py_GetVersion(space): - """Return the version of this Python interpreter. 
This is a string that looks - something like - - "1.5 (\#67, Dec 31 1997, 22:34:28) [GCC 2.7.2.2]" - - The first word (up to the first space character) is the current Python version; - the first three characters are the major and minor version separated by a - period. The returned string points into static storage; the caller should not - modify its value. The value is available to Python code as sys.version.""" - raise NotImplementedError - - at cpython_api([], rffi.CCHARP) def Py_GetPlatform(space): """Return the platform identifier for the current platform. On Unix, this is formed from the"official" name of the operating system, converted to lower @@ -1685,15 +1647,6 @@ """ raise NotImplementedError - at cpython_api([PyObject], PyObject) -def PyObject_Dir(space, o): - """This is equivalent to the Python expression dir(o), returning a (possibly - empty) list of strings appropriate for the object argument, or NULL if there - was an error. If the argument is NULL, this is like the Python dir(), - returning the names of the current locals; in this case, if no execution frame - is active then NULL is returned but PyErr_Occurred() will return false.""" - raise NotImplementedError - @cpython_api([], PyFrameObject) def PyEval_GetFrame(space): """Return the current thread state's frame, which is NULL if no frame is @@ -1802,34 +1755,6 @@ building-up new frozensets with PySet_Add().""" raise NotImplementedError - at cpython_api([PyObject], PyObject) -def PySet_Pop(space, set): - """Return a new reference to an arbitrary object in the set, and removes the - object from the set. Return NULL on failure. Raise KeyError if the - set is empty. 
Raise a SystemError if set is an not an instance of - set or its subtype.""" - raise NotImplementedError - - at cpython_api([PyObject], rffi.INT_real, error=-1) -def PySet_Clear(space, set): - """Empty an existing set of all elements.""" - raise NotImplementedError - - at cpython_api([PyObjectP], lltype.Void) -def PyString_InternInPlace(space, string): - """Intern the argument *string in place. The argument must be the address of a - pointer variable pointing to a Python string object. If there is an existing - interned string that is the same as *string, it sets *string to it - (decrementing the reference count of the old string object and incrementing the - reference count of the interned string object), otherwise it leaves *string - alone and interns it (incrementing its reference count). (Clarification: even - though there is a lot of talk about reference counts, think of this function as - reference-count-neutral; you own the object after the call if and only if you - owned it before the call.) 
- - This function is not available in 3.x and does not have a PyBytes alias.""" - raise NotImplementedError - @cpython_api([rffi.CCHARP, Py_ssize_t, rffi.CCHARP, rffi.CCHARP], PyObject) def PyString_Decode(space, s, size, encoding, errors): """Create an object by decoding size bytes of the encoded buffer s using the diff --git a/pypy/module/cpyext/test/test_classobject.py b/pypy/module/cpyext/test/test_classobject.py --- a/pypy/module/cpyext/test/test_classobject.py +++ b/pypy/module/cpyext/test/test_classobject.py @@ -1,4 +1,5 @@ from pypy.module.cpyext.test.test_api import BaseApiTest +from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from pypy.interpreter.function import Function, Method class TestClassObject(BaseApiTest): @@ -51,3 +52,14 @@ assert api.PyInstance_Check(w_instance) assert space.is_true(space.call_method(space.builtin, "isinstance", w_instance, w_class)) + +class AppTestStringObject(AppTestCpythonExtensionBase): + def test_class_type(self): + module = self.import_extension('foo', [ + ("get_classtype", "METH_NOARGS", + """ + Py_INCREF(&PyClass_Type); + return &PyClass_Type; + """)]) + class C: pass + assert module.get_classtype() is type(C) diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -744,6 +744,22 @@ print p assert 'py' in p + def test_get_version(self): + mod = self.import_extension('foo', [ + ('get_version', 'METH_NOARGS', + ''' + char* name1 = Py_GetVersion(); + char* name2 = Py_GetVersion(); + if (name1 != name2) + Py_RETURN_FALSE; + return PyString_FromString(name1); + ''' + ), + ]) + p = mod.get_version() + print p + assert 'PyPy' in p + def test_no_double_imports(self): import sys, os try: diff --git a/pypy/module/cpyext/test/test_dictobject.py b/pypy/module/cpyext/test/test_dictobject.py --- a/pypy/module/cpyext/test/test_dictobject.py +++ 
b/pypy/module/cpyext/test/test_dictobject.py @@ -2,6 +2,7 @@ from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.api import Py_ssize_tP, PyObjectP from pypy.module.cpyext.pyobject import make_ref, from_ref +from pypy.interpreter.error import OperationError class TestDictObject(BaseApiTest): def test_dict(self, space, api): @@ -110,3 +111,13 @@ assert space.eq_w(space.len(w_copy), space.len(w_dict)) assert space.eq_w(w_copy, w_dict) + + def test_dictproxy(self, space, api): + w_dict = space.sys.get('modules') + w_proxy = api.PyDictProxy_New(w_dict) + assert space.is_true(space.contains(w_proxy, space.wrap('sys'))) + raises(OperationError, space.setitem, + w_proxy, space.wrap('sys'), space.w_None) + raises(OperationError, space.delitem, + w_proxy, space.wrap('sys')) + raises(OperationError, space.call_method, w_proxy, 'clear') diff --git a/pypy/module/cpyext/test/test_methodobject.py b/pypy/module/cpyext/test/test_methodobject.py --- a/pypy/module/cpyext/test/test_methodobject.py +++ b/pypy/module/cpyext/test/test_methodobject.py @@ -9,7 +9,7 @@ class AppTestMethodObject(AppTestCpythonExtensionBase): def test_call_METH(self): - mod = self.import_extension('foo', [ + mod = self.import_extension('MyModule', [ ('getarg_O', 'METH_O', ''' Py_INCREF(args); @@ -51,11 +51,23 @@ } ''' ), + ('getModule', 'METH_O', + ''' + if(PyCFunction_Check(args)) { + PyCFunctionObject* func = (PyCFunctionObject*)args; + Py_INCREF(func->m_module); + return func->m_module; + } + else { + Py_RETURN_FALSE; + } + ''' + ), ('isSameFunction', 'METH_O', ''' PyCFunction ptr = PyCFunction_GetFunction(args); if (!ptr) return NULL; - if (ptr == foo_getarg_O) + if (ptr == MyModule_getarg_O) Py_RETURN_TRUE; else Py_RETURN_FALSE; @@ -76,6 +88,7 @@ assert mod.getarg_OLD(1, 2) == (1, 2) assert mod.isCFunction(mod.getarg_O) == "getarg_O" + assert mod.getModule(mod.getarg_O) == 'MyModule' assert mod.isSameFunction(mod.getarg_O) raises(TypeError, mod.isSameFunction, 1) diff --git 
a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py --- a/pypy/module/cpyext/test/test_object.py +++ b/pypy/module/cpyext/test/test_object.py @@ -191,6 +191,11 @@ assert api.PyObject_Unicode(space.wrap("\xe9")) is None api.PyErr_Clear() + def test_dir(self, space, api): + w_dir = api.PyObject_Dir(space.sys) + assert space.isinstance_w(w_dir, space.w_list) + assert space.is_true(space.contains(w_dir, space.wrap('modules'))) + class AppTestObject(AppTestCpythonExtensionBase): def setup_class(cls): AppTestCpythonExtensionBase.setup_class.im_func(cls) diff --git a/pypy/module/cpyext/test/test_pyfile.py b/pypy/module/cpyext/test/test_pyfile.py --- a/pypy/module/cpyext/test/test_pyfile.py +++ b/pypy/module/cpyext/test/test_pyfile.py @@ -1,5 +1,6 @@ from pypy.module.cpyext.api import fopen, fclose, fwrite from pypy.module.cpyext.test.test_api import BaseApiTest +from pypy.module.cpyext.object import Py_PRINT_RAW from pypy.rpython.lltypesystem import rffi, lltype from pypy.tool.udir import udir import pytest @@ -77,3 +78,28 @@ out = out.replace('\r\n', '\n') assert out == "test\n" + def test_file_writeobject(self, space, api, capfd): + w_obj = space.wrap("test\n") + w_stdout = space.sys.get("stdout") + api.PyFile_WriteObject(w_obj, w_stdout, Py_PRINT_RAW) + api.PyFile_WriteObject(w_obj, w_stdout, 0) + space.call_method(w_stdout, "flush") + out, err = capfd.readouterr() + out = out.replace('\r\n', '\n') + assert out == "test\n'test\\n'" + + def test_file_softspace(self, space, api, capfd): + w_stdout = space.sys.get("stdout") + assert api.PyFile_SoftSpace(w_stdout, 1) == 0 + assert api.PyFile_SoftSpace(w_stdout, 0) == 1 + + api.PyFile_SoftSpace(w_stdout, 1) + w_ns = space.newdict() + space.exec_("print 1,", w_ns, w_ns) + space.exec_("print 2,", w_ns, w_ns) + api.PyFile_SoftSpace(w_stdout, 0) + space.exec_("print 3", w_ns, w_ns) + space.call_method(w_stdout, "flush") + out, err = capfd.readouterr() + out = out.replace('\r\n', '\n') + assert out 
== " 1 23\n" diff --git a/pypy/module/cpyext/test/test_pystate.py b/pypy/module/cpyext/test/test_pystate.py --- a/pypy/module/cpyext/test/test_pystate.py +++ b/pypy/module/cpyext/test/test_pystate.py @@ -2,6 +2,7 @@ from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.rpython.lltypesystem.lltype import nullptr from pypy.module.cpyext.pystate import PyInterpreterState, PyThreadState +from pypy.module.cpyext.pyobject import from_ref class AppTestThreads(AppTestCpythonExtensionBase): def test_allow_threads(self): @@ -49,3 +50,10 @@ api.PyEval_AcquireThread(tstate) api.PyEval_ReleaseThread(tstate) + + def test_threadstate_dict(self, space, api): + ts = api.PyThreadState_Get() + ref = ts.c_dict + assert ref == api.PyThreadState_GetDict() + w_obj = from_ref(space, ref) + assert space.isinstance_w(w_obj, space.w_dict) diff --git a/pypy/module/cpyext/test/test_setobject.py b/pypy/module/cpyext/test/test_setobject.py --- a/pypy/module/cpyext/test/test_setobject.py +++ b/pypy/module/cpyext/test/test_setobject.py @@ -32,3 +32,13 @@ w_set = api.PySet_New(space.wrap([1,2,3,4])) assert api.PySet_Contains(w_set, space.wrap(1)) assert not api.PySet_Contains(w_set, space.wrap(0)) + + def test_set_pop_clear(self, space, api): + w_set = api.PySet_New(space.wrap([1,2,3,4])) + w_obj = api.PySet_Pop(w_set) + assert space.int_w(w_obj) in (1,2,3,4) + assert space.len_w(w_set) == 3 + api.PySet_Clear(w_set) + assert space.len_w(w_set) == 0 + + diff --git a/pypy/module/cpyext/test/test_stringobject.py b/pypy/module/cpyext/test/test_stringobject.py --- a/pypy/module/cpyext/test/test_stringobject.py +++ b/pypy/module/cpyext/test/test_stringobject.py @@ -166,6 +166,20 @@ res = module.test_string_format(1, "xyz") assert res == "bla 1 ble xyz\n" + def test_intern_inplace(self): + module = self.import_extension('foo', [ + ("test_intern_inplace", "METH_O", + ''' + PyObject *s = args; + Py_INCREF(s); + PyString_InternInPlace(&s); + return s; + ''' + ) + ]) + # This does not test much, 
but at least the refcounts are checked. + assert module.test_intern_inplace('s') == 's' + class TestString(BaseApiTest): def test_string_resize(self, space, api): py_str = new_empty_str(space, 10) diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py --- a/pypy/module/cpyext/test/test_unicodeobject.py +++ b/pypy/module/cpyext/test/test_unicodeobject.py @@ -420,3 +420,12 @@ w_seq = space.wrap([u'a', u'b']) w_joined = api.PyUnicode_Join(w_sep, w_seq) assert space.unwrap(w_joined) == u'ab' + + def test_fromordinal(self, space, api): + w_char = api.PyUnicode_FromOrdinal(65) + assert space.unwrap(w_char) == u'A' + w_char = api.PyUnicode_FromOrdinal(0) + assert space.unwrap(w_char) == u'\0' + w_char = api.PyUnicode_FromOrdinal(0xFFFF) + assert space.unwrap(w_char) == u'\uFFFF' + diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -395,6 +395,16 @@ w_str = space.wrap(rffi.charpsize2str(s, size)) return space.call_method(w_str, 'decode', space.wrap("utf-8")) + at cpython_api([rffi.INT_real], PyObject) +def PyUnicode_FromOrdinal(space, ordinal): + """Create a Unicode Object from the given Unicode code point ordinal. + + The ordinal must be in range(0x10000) on narrow Python builds + (UCS2), and range(0x110000) on wide builds (UCS4). 
A ValueError is + raised in case it is not.""" + w_ordinal = space.wrap(rffi.cast(lltype.Signed, ordinal)) + return space.call_function(space.builtin.get('unichr'), w_ordinal) + @cpython_api([PyObjectP, Py_ssize_t], rffi.INT_real, error=-1) def PyUnicode_Resize(space, ref, newsize): # XXX always create a new string so far diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -95,6 +95,7 @@ ("tan", "tan"), ('bitwise_and', 'bitwise_and'), ('bitwise_or', 'bitwise_or'), + ('bitwise_xor', 'bitwise_xor'), ('bitwise_not', 'invert'), ('isnan', 'isnan'), ('isinf', 'isinf'), diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -83,6 +83,8 @@ descr_truediv = _binop_impl("true_divide") descr_mod = _binop_impl("mod") descr_pow = _binop_impl("power") + descr_lshift = _binop_impl("left_shift") + descr_rshift = _binop_impl("right_shift") descr_and = _binop_impl("bitwise_and") descr_or = _binop_impl("bitwise_or") descr_xor = _binop_impl("bitwise_xor") @@ -97,13 +99,31 @@ descr_radd = _binop_right_impl("add") descr_rsub = _binop_right_impl("subtract") descr_rmul = _binop_right_impl("multiply") + descr_rdiv = _binop_right_impl("divide") + descr_rtruediv = _binop_right_impl("true_divide") + descr_rmod = _binop_right_impl("mod") descr_rpow = _binop_right_impl("power") + descr_rlshift = _binop_right_impl("left_shift") + descr_rrshift = _binop_right_impl("right_shift") + descr_rand = _binop_right_impl("bitwise_and") + descr_ror = _binop_right_impl("bitwise_or") + descr_rxor = _binop_right_impl("bitwise_xor") descr_pos = _unaryop_impl("positive") descr_neg = _unaryop_impl("negative") descr_abs = _unaryop_impl("absolute") descr_invert = _unaryop_impl("invert") + def descr_divmod(self, space, w_other): + w_quotient = self.descr_div(space, 
w_other) + w_remainder = self.descr_mod(space, w_other) + return space.newtuple([w_quotient, w_remainder]) + + def descr_rdivmod(self, space, w_other): + w_quotient = self.descr_rdiv(space, w_other) + w_remainder = self.descr_rmod(space, w_other) + return space.newtuple([w_quotient, w_remainder]) + def item(self, space): return self.get_dtype(space).itemtype.to_builtin_type(space, self) @@ -185,7 +205,10 @@ __div__ = interp2app(W_GenericBox.descr_div), __truediv__ = interp2app(W_GenericBox.descr_truediv), __mod__ = interp2app(W_GenericBox.descr_mod), + __divmod__ = interp2app(W_GenericBox.descr_divmod), __pow__ = interp2app(W_GenericBox.descr_pow), + __lshift__ = interp2app(W_GenericBox.descr_lshift), + __rshift__ = interp2app(W_GenericBox.descr_rshift), __and__ = interp2app(W_GenericBox.descr_and), __or__ = interp2app(W_GenericBox.descr_or), __xor__ = interp2app(W_GenericBox.descr_xor), @@ -193,7 +216,16 @@ __radd__ = interp2app(W_GenericBox.descr_radd), __rsub__ = interp2app(W_GenericBox.descr_rsub), __rmul__ = interp2app(W_GenericBox.descr_rmul), + __rdiv__ = interp2app(W_GenericBox.descr_rdiv), + __rtruediv__ = interp2app(W_GenericBox.descr_rtruediv), + __rmod__ = interp2app(W_GenericBox.descr_rmod), + __rdivmod__ = interp2app(W_GenericBox.descr_rdivmod), __rpow__ = interp2app(W_GenericBox.descr_rpow), + __rlshift__ = interp2app(W_GenericBox.descr_rlshift), + __rrshift__ = interp2app(W_GenericBox.descr_rrshift), + __rand__ = interp2app(W_GenericBox.descr_rand), + __ror__ = interp2app(W_GenericBox.descr_ror), + __rxor__ = interp2app(W_GenericBox.descr_rxor), __eq__ = interp2app(W_GenericBox.descr_eq), __ne__ = interp2app(W_GenericBox.descr_ne), diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -4,17 +4,17 @@ from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.module.micronumpy import 
(interp_ufuncs, interp_dtype, interp_boxes, signature, support, loop) +from pypy.module.micronumpy.appbridge import get_appbridge_cache +from pypy.module.micronumpy.dot import multidim_dot, match_dot_shapes +from pypy.module.micronumpy.interp_iter import (ArrayIterator, + SkipLastAxisIterator, Chunk, ViewIterator) from pypy.module.micronumpy.strides import (calculate_slice_strides, shape_agreement, find_shape_and_elems, get_shape_from_iterable, calc_new_strides, to_coords) -from dot import multidim_dot, match_dot_shapes from pypy.rlib import jit +from pypy.rlib.rstring import StringBuilder from pypy.rpython.lltypesystem import lltype, rffi from pypy.tool.sourcetools import func_with_new_name -from pypy.rlib.rstring import StringBuilder -from pypy.module.micronumpy.interp_iter import (ArrayIterator, - SkipLastAxisIterator, Chunk, ViewIterator) -from pypy.module.micronumpy.appbridge import get_appbridge_cache count_driver = jit.JitDriver( @@ -101,8 +101,14 @@ descr_sub = _binop_impl("subtract") descr_mul = _binop_impl("multiply") descr_div = _binop_impl("divide") + descr_truediv = _binop_impl("true_divide") + descr_mod = _binop_impl("mod") descr_pow = _binop_impl("power") - descr_mod = _binop_impl("mod") + descr_lshift = _binop_impl("left_shift") + descr_rshift = _binop_impl("right_shift") + descr_and = _binop_impl("bitwise_and") + descr_or = _binop_impl("bitwise_or") + descr_xor = _binop_impl("bitwise_xor") descr_eq = _binop_impl("equal") descr_ne = _binop_impl("not_equal") @@ -111,8 +117,10 @@ descr_gt = _binop_impl("greater") descr_ge = _binop_impl("greater_equal") - descr_and = _binop_impl("bitwise_and") - descr_or = _binop_impl("bitwise_or") + def descr_divmod(self, space, w_other): + w_quotient = self.descr_div(space, w_other) + w_remainder = self.descr_mod(space, w_other) + return space.newtuple([w_quotient, w_remainder]) def _binop_right_impl(ufunc_name): def impl(self, space, w_other): @@ -127,8 +135,19 @@ descr_rsub = _binop_right_impl("subtract") 
descr_rmul = _binop_right_impl("multiply") descr_rdiv = _binop_right_impl("divide") + descr_rtruediv = _binop_right_impl("true_divide") + descr_rmod = _binop_right_impl("mod") descr_rpow = _binop_right_impl("power") - descr_rmod = _binop_right_impl("mod") + descr_rlshift = _binop_right_impl("left_shift") + descr_rrshift = _binop_right_impl("right_shift") + descr_rand = _binop_right_impl("bitwise_and") + descr_ror = _binop_right_impl("bitwise_or") + descr_rxor = _binop_right_impl("bitwise_xor") + + def descr_rdivmod(self, space, w_other): + w_quotient = self.descr_rdiv(space, w_other) + w_remainder = self.descr_rmod(space, w_other) + return space.newtuple([w_quotient, w_remainder]) def _reduce_ufunc_impl(ufunc_name, promote_to_largest=False): def impl(self, space, w_axis=None): @@ -1227,21 +1246,36 @@ __pos__ = interp2app(BaseArray.descr_pos), __neg__ = interp2app(BaseArray.descr_neg), __abs__ = interp2app(BaseArray.descr_abs), + __invert__ = interp2app(BaseArray.descr_invert), __nonzero__ = interp2app(BaseArray.descr_nonzero), __add__ = interp2app(BaseArray.descr_add), __sub__ = interp2app(BaseArray.descr_sub), __mul__ = interp2app(BaseArray.descr_mul), __div__ = interp2app(BaseArray.descr_div), + __truediv__ = interp2app(BaseArray.descr_truediv), + __mod__ = interp2app(BaseArray.descr_mod), + __divmod__ = interp2app(BaseArray.descr_divmod), __pow__ = interp2app(BaseArray.descr_pow), - __mod__ = interp2app(BaseArray.descr_mod), + __lshift__ = interp2app(BaseArray.descr_lshift), + __rshift__ = interp2app(BaseArray.descr_rshift), + __and__ = interp2app(BaseArray.descr_and), + __or__ = interp2app(BaseArray.descr_or), + __xor__ = interp2app(BaseArray.descr_xor), __radd__ = interp2app(BaseArray.descr_radd), __rsub__ = interp2app(BaseArray.descr_rsub), __rmul__ = interp2app(BaseArray.descr_rmul), __rdiv__ = interp2app(BaseArray.descr_rdiv), + __rtruediv__ = interp2app(BaseArray.descr_rtruediv), + __rmod__ = interp2app(BaseArray.descr_rmod), + __rdivmod__ = 
interp2app(BaseArray.descr_rdivmod), __rpow__ = interp2app(BaseArray.descr_rpow), - __rmod__ = interp2app(BaseArray.descr_rmod), + __rlshift__ = interp2app(BaseArray.descr_rlshift), + __rrshift__ = interp2app(BaseArray.descr_rrshift), + __rand__ = interp2app(BaseArray.descr_rand), + __ror__ = interp2app(BaseArray.descr_ror), + __rxor__ = interp2app(BaseArray.descr_rxor), __eq__ = interp2app(BaseArray.descr_eq), __ne__ = interp2app(BaseArray.descr_ne), @@ -1250,10 +1284,6 @@ __gt__ = interp2app(BaseArray.descr_gt), __ge__ = interp2app(BaseArray.descr_ge), - __and__ = interp2app(BaseArray.descr_and), - __or__ = interp2app(BaseArray.descr_or), - __invert__ = interp2app(BaseArray.descr_invert), - __repr__ = interp2app(BaseArray.descr_repr), __str__ = interp2app(BaseArray.descr_str), __array_interface__ = GetSetProperty(BaseArray.descr_array_iface), @@ -1267,6 +1297,7 @@ nbytes = GetSetProperty(BaseArray.descr_get_nbytes), T = GetSetProperty(BaseArray.descr_get_transpose), + transpose = interp2app(BaseArray.descr_get_transpose), flat = GetSetProperty(BaseArray.descr_get_flatiter), ravel = interp2app(BaseArray.descr_ravel), item = interp2app(BaseArray.descr_item), diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -392,6 +392,8 @@ ("true_divide", "div", 2, {"promote_to_float": True}), ("mod", "mod", 2, {"promote_bools": True}), ("power", "pow", 2, {"promote_bools": True}), + ("left_shift", "lshift", 2, {"int_only": True}), + ("right_shift", "rshift", 2, {"int_only": True}), ("equal", "eq", 2, {"comparison_func": True}), ("not_equal", "ne", 2, {"comparison_func": True}), diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -406,15 +406,28 @@ from operator import truediv from _numpypy 
import float64, int_, True_, False_ + assert 5 / int_(2) == int_(2) assert truediv(int_(3), int_(2)) == float64(1.5) + assert truediv(3, int_(2)) == float64(1.5) + assert int_(8) % int_(3) == int_(2) + assert 8 % int_(3) == int_(2) + assert divmod(int_(8), int_(3)) == (int_(2), int_(2)) + assert divmod(8, int_(3)) == (int_(2), int_(2)) assert 2 ** int_(3) == int_(8) + assert int_(3) << int_(2) == int_(12) + assert 3 << int_(2) == int_(12) + assert int_(8) >> int_(2) == int_(2) + assert 8 >> int_(2) == int_(2) assert int_(3) & int_(1) == int_(1) - raises(TypeError, lambda: float64(3) & 1) - assert int_(8) % int_(3) == int_(2) + assert 2 & int_(3) == int_(2) assert int_(2) | int_(1) == int_(3) + assert 2 | int_(1) == int_(3) assert int_(3) ^ int_(5) == int_(6) assert True_ ^ False_ is True_ + assert 5 ^ int_(3) == int_(6) assert +int_(3) == int_(3) assert ~int_(3) == int_(-4) + raises(TypeError, lambda: float64(3) & 1) + diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -625,6 +625,59 @@ for i in range(5): assert b[i] == i / 5.0 + def test_truediv(self): + from operator import truediv + from _numpypy import arange + + assert (truediv(arange(5), 2) == [0., .5, 1., 1.5, 2.]).all() + assert (truediv(2, arange(3)) == [float("inf"), 2., 1.]).all() + + def test_divmod(self): + from _numpypy import arange + + a, b = divmod(arange(10), 3) + assert (a == [0, 0, 0, 1, 1, 1, 2, 2, 2, 3]).all() + assert (b == [0, 1, 2, 0, 1, 2, 0, 1, 2, 0]).all() + + def test_rdivmod(self): + from _numpypy import arange + + a, b = divmod(3, arange(1, 5)) + assert (a == [3, 1, 1, 0]).all() + assert (b == [0, 1, 0, 3]).all() + + def test_lshift(self): + from _numpypy import array + + a = array([0, 1, 2, 3]) + assert (a << 2 == [0, 4, 8, 12]).all() + a = array([True, False]) + assert (a << 2 == [4, 0]).all() + a = array([1.0]) + 
raises(TypeError, lambda: a << 2) + + def test_rlshift(self): + from _numpypy import arange + + a = arange(3) + assert (2 << a == [2, 4, 8]).all() + + def test_rshift(self): + from _numpypy import arange, array + + a = arange(10) + assert (a >> 2 == [0, 0, 0, 0, 1, 1, 1, 1, 2, 2]).all() + a = array([True, False]) + assert (a >> 1 == [0, 0]).all() + a = arange(3, dtype=float) + raises(TypeError, lambda: a >> 1) + + def test_rrshift(self): + from _numpypy import arange + + a = arange(5) + assert (2 >> a == [2, 1, 0, 0, 0]).all() + def test_pow(self): from _numpypy import array a = array(range(5), float) @@ -678,6 +731,30 @@ for i in range(5): assert b[i] == i % 2 + def test_rand(self): + from _numpypy import arange + + a = arange(5) + assert (3 & a == [0, 1, 2, 3, 0]).all() + + def test_ror(self): + from _numpypy import arange + + a = arange(5) + assert (3 | a == [3, 3, 3, 3, 7]).all() + + def test_xor(self): + from _numpypy import arange + + a = arange(5) + assert (a ^ 3 == [3, 2, 1, 0, 7]).all() + + def test_rxor(self): + from _numpypy import arange + + a = arange(5) + assert (3 ^ a == [3, 2, 1, 0, 7]).all() + def test_pos(self): from _numpypy import array a = array([1., -2., 3., -4., -5.]) @@ -1410,6 +1487,7 @@ a = array((range(10), range(20, 30))) b = a.T assert(b[:, 0] == a[0, :]).all() + assert (a.transpose() == b).all() def test_flatiter(self): from _numpypy import array, flatiter, arange diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -368,14 +368,14 @@ assert b.shape == (1, 4) assert (add.reduce(a, 0, keepdims=True) == [12, 15, 18, 21]).all() - def test_bitwise(self): - from _numpypy import bitwise_and, bitwise_or, arange, array + from _numpypy import bitwise_and, bitwise_or, bitwise_xor, arange, array a = arange(6).reshape(2, 3) assert (a & 1 == [[0, 1, 0], [1, 0, 1]]).all() assert (a & 1 == bitwise_and(a, 
1)).all() assert (a | 1 == [[1, 1, 3], [3, 5, 5]]).all() assert (a | 1 == bitwise_or(a, 1)).all() + assert (a ^ 3 == bitwise_xor(a, 3)).all() raises(TypeError, 'array([1.0]) & 1') def test_unary_bitops(self): diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -295,6 +295,14 @@ v1 *= v1 return res + @simple_binary_op + def lshift(self, v1, v2): + return v1 << v2 + + @simple_binary_op + def rshift(self, v1, v2): + return v1 >> v2 + @simple_unary_op def sign(self, v): if v > 0: diff --git a/pypy/module/oracle/interp_error.py b/pypy/module/oracle/interp_error.py --- a/pypy/module/oracle/interp_error.py +++ b/pypy/module/oracle/interp_error.py @@ -72,7 +72,7 @@ get(space).w_InternalError, space.wrap("No Oracle error?")) - self.code = codeptr[0] + self.code = rffi.cast(lltype.Signed, codeptr[0]) self.w_message = config.w_string(space, textbuf) finally: lltype.free(codeptr, flavor='raw') diff --git a/pypy/module/oracle/interp_variable.py b/pypy/module/oracle/interp_variable.py --- a/pypy/module/oracle/interp_variable.py +++ b/pypy/module/oracle/interp_variable.py @@ -359,14 +359,14 @@ # Verifies that truncation or other problems did not take place on # retrieve. 
if self.isVariableLength: - if rffi.cast(lltype.Signed, self.returnCode[pos]) != 0: + error_code = rffi.cast(lltype.Signed, self.returnCode[pos]) + if error_code != 0: error = W_Error(space, self.environment, "Variable_VerifyFetch()", 0) - error.code = self.returnCode[pos] + error.code = error_code error.message = space.wrap( "column at array pos %d fetched with error: %d" % - (pos, - rffi.cast(lltype.Signed, self.returnCode[pos]))) + (pos, error_code)) w_error = get(space).w_DatabaseError raise OperationError(get(space).w_DatabaseError, diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -1049,6 +1049,7 @@ def _spawn(self, *args, **kwds): import pexpect + kwds.setdefault('timeout', 600) print 'SPAWN:', args, kwds child = pexpect.spawn(*args, **kwds) child.logfile = sys.stdout diff --git a/pypy/module/pypyjit/__init__.py b/pypy/module/pypyjit/__init__.py --- a/pypy/module/pypyjit/__init__.py +++ b/pypy/module/pypyjit/__init__.py @@ -13,6 +13,7 @@ 'ResOperation': 'interp_resop.WrappedOp', 'DebugMergePoint': 'interp_resop.DebugMergePoint', 'Box': 'interp_resop.WrappedBox', + 'PARAMETER_DOCS': 'space.wrap(pypy.rlib.jit.PARAMETER_DOCS)', } def setup_after_space_initialization(self): diff --git a/pypy/module/pypyjit/test/test_jit_setup.py b/pypy/module/pypyjit/test/test_jit_setup.py --- a/pypy/module/pypyjit/test/test_jit_setup.py +++ b/pypy/module/pypyjit/test/test_jit_setup.py @@ -45,6 +45,12 @@ pypyjit.set_compile_hook(None) pypyjit.set_param('default') + def test_doc(self): + import pypyjit + d = pypyjit.PARAMETER_DOCS + assert type(d) is dict + assert 'threshold' in d + def test_interface_residual_call(): space = gettestobjspace(usemodules=['pypyjit']) diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -7,7 +7,7 @@ from pypy.interpreter import gateway 
#XXX # the release serial 42 is not in range(16) -CPYTHON_VERSION = (2, 7, 1, "final", 42) #XXX # sync patchlevel.h +CPYTHON_VERSION = (2, 7, 2, "final", 42) #XXX # sync patchlevel.h CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h PYPY_VERSION = (1, 8, 1, "dev", 0) #XXX # sync patchlevel.h diff --git a/pypy/module/test_lib_pypy/test_datetime.py b/pypy/module/test_lib_pypy/test_datetime.py --- a/pypy/module/test_lib_pypy/test_datetime.py +++ b/pypy/module/test_lib_pypy/test_datetime.py @@ -1,7 +1,10 @@ """Additional tests for datetime.""" +import py + import time import datetime +import copy import os def test_utcfromtimestamp(): @@ -22,3 +25,22 @@ del os.environ["TZ"] else: os.environ["TZ"] = prev_tz + +def test_utcfromtimestamp_microsecond(): + dt = datetime.datetime.utcfromtimestamp(0) + assert isinstance(dt.microsecond, int) + + +def test_integer_args(): + with py.test.raises(TypeError): + datetime.datetime(10, 10, 10.) + with py.test.raises(TypeError): + datetime.datetime(10, 10, 10, 10, 10.) + with py.test.raises(TypeError): + datetime.datetime(10, 10, 10, 10, 10, 10.) 
+ +def test_utcnow_microsecond(): + dt = datetime.datetime.utcnow() + assert type(dt.microsecond) is int + + copy.copy(dt) \ No newline at end of file diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -326,4 +326,5 @@ return w_some_obj() FakeObjSpace.sys = FakeModule() FakeObjSpace.sys.filesystemencoding = 'foobar' +FakeObjSpace.sys.defaultencoding = 'ascii' FakeObjSpace.builtin = FakeModule() diff --git a/pypy/objspace/flow/flowcontext.py b/pypy/objspace/flow/flowcontext.py --- a/pypy/objspace/flow/flowcontext.py +++ b/pypy/objspace/flow/flowcontext.py @@ -410,7 +410,7 @@ w_new = Constant(newvalue) f = self.crnt_frame stack_items_w = f.locals_stack_w - for i in range(f.valuestackdepth-1, f.nlocals-1, -1): + for i in range(f.valuestackdepth-1, f.pycode.co_nlocals-1, -1): w_v = stack_items_w[i] if isinstance(w_v, Constant): if w_v.value is oldvalue: diff --git a/pypy/objspace/flow/test/test_framestate.py b/pypy/objspace/flow/test/test_framestate.py --- a/pypy/objspace/flow/test/test_framestate.py +++ b/pypy/objspace/flow/test/test_framestate.py @@ -25,7 +25,7 @@ dummy = Constant(None) #dummy.dummy = True arg_list = ([Variable() for i in range(formalargcount)] + - [dummy] * (frame.nlocals - formalargcount)) + [dummy] * (frame.pycode.co_nlocals - formalargcount)) frame.setfastscope(arg_list) return frame @@ -42,7 +42,7 @@ def test_neq_hacked_framestate(self): frame = self.getframe(self.func_simple) fs1 = FrameState(frame) - frame.locals_stack_w[frame.nlocals-1] = Variable() + frame.locals_stack_w[frame.pycode.co_nlocals-1] = Variable() fs2 = FrameState(frame) assert fs1 != fs2 @@ -55,7 +55,7 @@ def test_union_on_hacked_framestates(self): frame = self.getframe(self.func_simple) fs1 = FrameState(frame) - frame.locals_stack_w[frame.nlocals-1] = Variable() + frame.locals_stack_w[frame.pycode.co_nlocals-1] = Variable() fs2 = FrameState(frame) assert fs1.union(fs2) == fs2 
# fs2 is more general assert fs2.union(fs1) == fs2 # fs2 is more general @@ -63,7 +63,7 @@ def test_restore_frame(self): frame = self.getframe(self.func_simple) fs1 = FrameState(frame) - frame.locals_stack_w[frame.nlocals-1] = Variable() + frame.locals_stack_w[frame.pycode.co_nlocals-1] = Variable() fs1.restoreframe(frame) assert fs1 == FrameState(frame) @@ -82,7 +82,7 @@ def test_getoutputargs(self): frame = self.getframe(self.func_simple) fs1 = FrameState(frame) - frame.locals_stack_w[frame.nlocals-1] = Variable() + frame.locals_stack_w[frame.pycode.co_nlocals-1] = Variable() fs2 = FrameState(frame) outputargs = fs1.getoutputargs(fs2) # 'x' -> 'x' is a Variable @@ -92,16 +92,16 @@ def test_union_different_constants(self): frame = self.getframe(self.func_simple) fs1 = FrameState(frame) - frame.locals_stack_w[frame.nlocals-1] = Constant(42) + frame.locals_stack_w[frame.pycode.co_nlocals-1] = Constant(42) fs2 = FrameState(frame) fs3 = fs1.union(fs2) fs3.restoreframe(frame) - assert isinstance(frame.locals_stack_w[frame.nlocals-1], Variable) - # ^^^ generalized + assert isinstance(frame.locals_stack_w[frame.pycode.co_nlocals-1], + Variable) # generalized def test_union_spectag(self): frame = self.getframe(self.func_simple) fs1 = FrameState(frame) - frame.locals_stack_w[frame.nlocals-1] = Constant(SpecTag()) + frame.locals_stack_w[frame.pycode.co_nlocals-1] = Constant(SpecTag()) fs2 = FrameState(frame) assert fs1.union(fs2) is None # UnionError diff --git a/pypy/rlib/libffi.py b/pypy/rlib/libffi.py --- a/pypy/rlib/libffi.py +++ b/pypy/rlib/libffi.py @@ -238,7 +238,7 @@ self = jit.promote(self) if argchain.numargs != len(self.argtypes): raise TypeError, 'Wrong number of arguments: %d expected, got %d' %\ - (argchain.numargs, len(self.argtypes)) + (len(self.argtypes), argchain.numargs) ll_args = self._prepare() i = 0 arg = argchain.first diff --git a/pypy/rpython/module/ll_os.py b/pypy/rpython/module/ll_os.py --- a/pypy/rpython/module/ll_os.py +++ 
b/pypy/rpython/module/ll_os.py @@ -180,10 +180,15 @@ ('tms_cutime', rffi.INT), ('tms_cstime', rffi.INT)]) - GID_T = platform.SimpleType('gid_t',rffi.INT) + GID_T = platform.SimpleType('gid_t', rffi.INT) #TODO right now is used only in getgroups, may need to update other #functions like setgid + # For now we require off_t to be the same size as LONGLONG, which is the + # interface required by callers of functions that thake an argument of type + # off_t + OFF_T_SIZE = platform.SizeOf('off_t') + SEEK_SET = platform.DefinedConstantInteger('SEEK_SET') SEEK_CUR = platform.DefinedConstantInteger('SEEK_CUR') SEEK_END = platform.DefinedConstantInteger('SEEK_END') @@ -197,6 +202,7 @@ def __init__(self): self.configure(CConfig) + assert self.OFF_T_SIZE == rffi.sizeof(rffi.LONGLONG) if hasattr(os, 'getpgrp'): self.GETPGRP_HAVE_ARG = platform.checkcompiles( @@ -963,7 +969,7 @@ os_lseek = self.llexternal(funcname, [rffi.INT, rffi.LONGLONG, rffi.INT], - rffi.LONGLONG) + rffi.LONGLONG, macro=True) def lseek_llimpl(fd, pos, how): how = fix_seek_arg(how) @@ -988,7 +994,7 @@ @registering_if(os, 'ftruncate') def register_os_ftruncate(self): os_ftruncate = self.llexternal('ftruncate', - [rffi.INT, rffi.LONGLONG], rffi.INT) + [rffi.INT, rffi.LONGLONG], rffi.INT, macro=True) def ftruncate_llimpl(fd, length): res = rffi.cast(rffi.LONG, diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -60,7 +60,8 @@ if sys.platform == 'win32': # Can't rename a DLL: it is always called 'libpypy-c.dll' for extra in ['libpypy-c.dll', - 'libexpat.dll', 'sqlite3.dll', 'msvcr90.dll']: + 'libexpat.dll', 'sqlite3.dll', 'msvcr90.dll', + 'libeay32.dll', 'ssleay32.dll']: p = pypy_c.dirpath().join(extra) if not p.check(): p = py.path.local.sysfind(extra) @@ -125,7 +126,7 @@ zf.close() else: archive = str(builddir.join(name + '.tar.bz2')) - if sys.platform == 'darwin': + if sys.platform == 'darwin' or 
sys.platform.startswith('freebsd'): e = os.system('tar --numeric-owner -cvjf ' + archive + " " + name) else: e = os.system('tar --owner=root --group=root --numeric-owner -cvjf ' + archive + " " + name) diff --git a/pypy/translator/c/database.py b/pypy/translator/c/database.py --- a/pypy/translator/c/database.py +++ b/pypy/translator/c/database.py @@ -28,11 +28,13 @@ gctransformer = None def __init__(self, translator=None, standalone=False, + cpython_extension=False, gcpolicyclass=None, thread_enabled=False, sandbox=False): self.translator = translator self.standalone = standalone + self.cpython_extension = cpython_extension self.sandbox = sandbox if gcpolicyclass is None: gcpolicyclass = gc.RefcountingGcPolicy diff --git a/pypy/translator/c/dlltool.py b/pypy/translator/c/dlltool.py --- a/pypy/translator/c/dlltool.py +++ b/pypy/translator/c/dlltool.py @@ -14,11 +14,14 @@ CBuilder.__init__(self, *args, **kwds) def getentrypointptr(self): + entrypoints = [] bk = self.translator.annotator.bookkeeper - graphs = [bk.getdesc(f).cachedgraph(None) for f, _ in self.functions] - return [getfunctionptr(graph) for graph in graphs] + for f, _ in self.functions: + graph = bk.getdesc(f).getuniquegraph() + entrypoints.append(getfunctionptr(graph)) + return entrypoints - def gen_makefile(self, targetdir): + def gen_makefile(self, targetdir, exe_name=None): pass # XXX finish def compile(self): diff --git a/pypy/translator/c/extfunc.py b/pypy/translator/c/extfunc.py --- a/pypy/translator/c/extfunc.py +++ b/pypy/translator/c/extfunc.py @@ -5,7 +5,6 @@ from pypy.rpython.lltypesystem.rstr import STR, mallocstr from pypy.rpython.lltypesystem import rstr from pypy.rpython.lltypesystem import rlist -from pypy.rpython.module import ll_time, ll_os # table of functions hand-written in src/ll_*.h # Note about *.im_func: The annotator and the rtyper expect direct @@ -106,7 +105,7 @@ yield ('RPYTHON_EXCEPTION_MATCH', exceptiondata.fn_exception_match) yield ('RPYTHON_TYPE_OF_EXC_INST', 
exceptiondata.fn_type_of_exc_inst) yield ('RPYTHON_RAISE_OSERROR', exceptiondata.fn_raise_OSError) - if not db.standalone: + if db.cpython_extension: yield ('RPYTHON_PYEXCCLASS2EXC', exceptiondata.fn_pyexcclass2exc) yield ('RPyExceptionOccurred1', exctransformer.rpyexc_occured_ptr.value) diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -471,19 +471,22 @@ return [] IGNORE_OPS_WITH_PREFIXES = dict.fromkeys([ - 'cmp', 'test', 'set', 'sahf', 'lahf', 'cltd', 'cld', 'std', - 'rep', 'movs', 'lods', 'stos', 'scas', 'cwtl', 'cwde', 'prefetch', + 'cmp', 'test', 'set', 'sahf', 'lahf', 'cld', 'std', + 'rep', 'movs', 'lods', 'stos', 'scas', 'cwde', 'prefetch', # floating-point operations cannot produce GC pointers 'f', 'cvt', 'ucomi', 'comi', 'subs', 'subp' , 'adds', 'addp', 'xorp', 'movap', 'movd', 'movlp', 'sqrtsd', 'movhpd', 'mins', 'minp', 'maxs', 'maxp', 'unpck', 'pxor', 'por', # sse2 + 'shufps', 'shufpd', # arithmetic operations should not produce GC pointers 'inc', 'dec', 'not', 'neg', 'or', 'and', 'sbb', 'adc', 'shl', 'shr', 'sal', 'sar', 'rol', 'ror', 'mul', 'imul', 'div', 'idiv', 'bswap', 'bt', 'rdtsc', 'punpck', 'pshufd', 'pcmp', 'pand', 'psllw', 'pslld', 'psllq', 'paddq', 'pinsr', + # sign-extending moves should not produce GC pointers + 'cbtw', 'cwtl', 'cwtd', 'cltd', 'cltq', 'cqto', # zero-extending moves should not produce GC pointers 'movz', # locked operations should not move GC pointers, at least so far @@ -1694,6 +1697,8 @@ } """ elif self.format in ('elf64', 'darwin64'): + if self.format == 'elf64': # gentoo patch: hardened systems + print >> output, "\t.section .note.GNU-stack,\"\",%progbits" print >> output, "\t.text" print >> output, "\t.globl %s" % _globalname('pypy_asm_stackwalk') _variant(elf64='.type pypy_asm_stackwalk, @function', diff --git a/pypy/translator/c/genc.py b/pypy/translator/c/genc.py --- 
a/pypy/translator/c/genc.py +++ b/pypy/translator/c/genc.py @@ -111,6 +111,7 @@ _compiled = False modulename = None split = False + cpython_extension = False def __init__(self, translator, entrypoint, config, gcpolicy=None, secondary_entrypoints=()): @@ -138,6 +139,7 @@ raise NotImplementedError("--gcrootfinder=asmgcc requires standalone") db = LowLevelDatabase(translator, standalone=self.standalone, + cpython_extension=self.cpython_extension, gcpolicyclass=gcpolicyclass, thread_enabled=self.config.translation.thread, sandbox=self.config.translation.sandbox) @@ -236,6 +238,8 @@ CBuilder.have___thread = self.translator.platform.check___thread() if not self.standalone: assert not self.config.translation.instrument + if self.cpython_extension: + defines['PYPY_CPYTHON_EXTENSION'] = 1 else: defines['PYPY_STANDALONE'] = db.get(pf) if self.config.translation.instrument: @@ -307,13 +311,18 @@ class CExtModuleBuilder(CBuilder): standalone = False + cpython_extension = True _module = None _wrapper = None def get_eci(self): from distutils import sysconfig python_inc = sysconfig.get_python_inc() - eci = ExternalCompilationInfo(include_dirs=[python_inc]) + eci = ExternalCompilationInfo( + include_dirs=[python_inc], + includes=["Python.h", + ], + ) return eci.merge(CBuilder.get_eci(self)) def getentrypointptr(self): # xxx diff --git a/pypy/translator/c/src/dtoa.c b/pypy/translator/c/src/dtoa.c --- a/pypy/translator/c/src/dtoa.c +++ b/pypy/translator/c/src/dtoa.c @@ -46,13 +46,13 @@ * of return type *Bigint all return NULL to indicate a malloc failure. * Similarly, rv_alloc and nrv_alloc (return type char *) return NULL on * failure. bigcomp now has return type int (it used to be void) and - * returns -1 on failure and 0 otherwise. _Py_dg_dtoa returns NULL - * on failure. _Py_dg_strtod indicates failure due to malloc failure + * returns -1 on failure and 0 otherwise. __Py_dg_dtoa returns NULL + * on failure. 
__Py_dg_strtod indicates failure due to malloc failure * by returning -1.0, setting errno=ENOMEM and *se to s00. * * 4. The static variable dtoa_result has been removed. Callers of - * _Py_dg_dtoa are expected to call _Py_dg_freedtoa to free - * the memory allocated by _Py_dg_dtoa. + * __Py_dg_dtoa are expected to call __Py_dg_freedtoa to free + * the memory allocated by __Py_dg_dtoa. * * 5. The code has been reformatted to better fit with Python's * C style guide (PEP 7). @@ -61,7 +61,7 @@ * that hasn't been MALLOC'ed, private_mem should only be used when k <= * Kmax. * - * 7. _Py_dg_strtod has been modified so that it doesn't accept strings with + * 7. __Py_dg_strtod has been modified so that it doesn't accept strings with * leading whitespace. * ***************************************************************/ @@ -283,7 +283,7 @@ #define Big0 (Frac_mask1 | Exp_msk1*(DBL_MAX_EXP+Bias-1)) #define Big1 0xffffffff -/* struct BCinfo is used to pass information from _Py_dg_strtod to bigcomp */ +/* struct BCinfo is used to pass information from __Py_dg_strtod to bigcomp */ typedef struct BCinfo BCinfo; struct @@ -494,7 +494,7 @@ /* convert a string s containing nd decimal digits (possibly containing a decimal separator at position nd0, which is ignored) to a Bigint. This - function carries on where the parsing code in _Py_dg_strtod leaves off: on + function carries on where the parsing code in __Py_dg_strtod leaves off: on entry, y9 contains the result of converting the first 9 digits. Returns NULL on failure. */ @@ -1050,7 +1050,7 @@ } /* Convert a scaled double to a Bigint plus an exponent. Similar to d2b, - except that it accepts the scale parameter used in _Py_dg_strtod (which + except that it accepts the scale parameter used in __Py_dg_strtod (which should be either 0 or 2*P), and the normalization for the return value is different (see below). On input, d should be finite and nonnegative, and d / 2**scale should be exactly representable as an IEEE 754 double. 
@@ -1351,9 +1351,9 @@ /* The bigcomp function handles some hard cases for strtod, for inputs with more than STRTOD_DIGLIM digits. It's called once an initial estimate for the double corresponding to the input string has - already been obtained by the code in _Py_dg_strtod. + already been obtained by the code in __Py_dg_strtod. - The bigcomp function is only called after _Py_dg_strtod has found a + The bigcomp function is only called after __Py_dg_strtod has found a double value rv such that either rv or rv + 1ulp represents the correctly rounded value corresponding to the original string. It determines which of these two values is the correct one by @@ -1368,12 +1368,12 @@ s0 points to the first significant digit of the input string. rv is a (possibly scaled) estimate for the closest double value to the - value represented by the original input to _Py_dg_strtod. If + value represented by the original input to __Py_dg_strtod. If bc->scale is nonzero, then rv/2^(bc->scale) is the approximation to the input value. bc is a struct containing information gathered during the parsing and - estimation steps of _Py_dg_strtod. Description of fields follows: + estimation steps of __Py_dg_strtod. Description of fields follows: bc->e0 gives the exponent of the input value, such that dv = (integer given by the bd->nd digits of s0) * 10**e0 @@ -1505,7 +1505,7 @@ } static double -_Py_dg_strtod(const char *s00, char **se) +__Py_dg_strtod(const char *s00, char **se) { int bb2, bb5, bbe, bd2, bd5, bs2, c, dsign, e, e1, error; int esign, i, j, k, lz, nd, nd0, odd, sign; @@ -1849,7 +1849,7 @@ for(;;) { - /* This is the main correction loop for _Py_dg_strtod. + /* This is the main correction loop for __Py_dg_strtod. We've got a decimal value tdv, and a floating-point approximation srv=rv/2^bc.scale to tdv. 
The aim is to determine whether srv is @@ -2283,7 +2283,7 @@ */ static void -_Py_dg_freedtoa(char *s) +__Py_dg_freedtoa(char *s) { Bigint *b = (Bigint *)((int *)s - 1); b->maxwds = 1 << (b->k = *(int*)b); @@ -2325,11 +2325,11 @@ */ /* Additional notes (METD): (1) returns NULL on failure. (2) to avoid memory - leakage, a successful call to _Py_dg_dtoa should always be matched by a - call to _Py_dg_freedtoa. */ + leakage, a successful call to __Py_dg_dtoa should always be matched by a + call to __Py_dg_freedtoa. */ static char * -_Py_dg_dtoa(double dd, int mode, int ndigits, +__Py_dg_dtoa(double dd, int mode, int ndigits, int *decpt, int *sign, char **rve) { /* Arguments ndigits, decpt, sign are similar to those @@ -2926,7 +2926,7 @@ if (b) Bfree(b); if (s0) - _Py_dg_freedtoa(s0); + __Py_dg_freedtoa(s0); return NULL; } @@ -2947,7 +2947,7 @@ _PyPy_SET_53BIT_PRECISION_HEADER; _PyPy_SET_53BIT_PRECISION_START; - result = _Py_dg_strtod(s00, se); + result = __Py_dg_strtod(s00, se); _PyPy_SET_53BIT_PRECISION_END; return result; } @@ -2959,14 +2959,14 @@ _PyPy_SET_53BIT_PRECISION_HEADER; _PyPy_SET_53BIT_PRECISION_START; - result = _Py_dg_dtoa(dd, mode, ndigits, decpt, sign, rve); + result = __Py_dg_dtoa(dd, mode, ndigits, decpt, sign, rve); _PyPy_SET_53BIT_PRECISION_END; return result; } void _PyPy_dg_freedtoa(char *s) { - _Py_dg_freedtoa(s); + __Py_dg_freedtoa(s); } /* End PYPY hacks */ diff --git a/pypy/translator/c/src/exception.h b/pypy/translator/c/src/exception.h --- a/pypy/translator/c/src/exception.h +++ b/pypy/translator/c/src/exception.h @@ -2,7 +2,7 @@ /************************************************************/ /*** C header subsection: exceptions ***/ -#if !defined(PYPY_STANDALONE) && !defined(PYPY_NOT_MAIN_FILE) +#if defined(PYPY_CPYTHON_EXTENSION) && !defined(PYPY_NOT_MAIN_FILE) PyObject *RPythonError; #endif @@ -74,7 +74,7 @@ RPyRaiseException(RPYTHON_TYPE_OF_EXC_INST(rexc), rexc); } -#ifndef PYPY_STANDALONE +#ifdef PYPY_CPYTHON_EXTENSION void 
RPyConvertExceptionFromCPython(void) { /* convert the CPython exception to an RPython one */ diff --git a/pypy/translator/c/src/g_include.h b/pypy/translator/c/src/g_include.h --- a/pypy/translator/c/src/g_include.h +++ b/pypy/translator/c/src/g_include.h @@ -2,7 +2,7 @@ /************************************************************/ /*** C header file for code produced by genc.py ***/ -#ifndef PYPY_STANDALONE +#ifdef PYPY_CPYTHON_EXTENSION # include "Python.h" # include "compile.h" # include "frameobject.h" diff --git a/pypy/translator/c/src/g_prerequisite.h b/pypy/translator/c/src/g_prerequisite.h --- a/pypy/translator/c/src/g_prerequisite.h +++ b/pypy/translator/c/src/g_prerequisite.h @@ -5,8 +5,6 @@ #ifdef PYPY_STANDALONE # include "src/commondefs.h" -#else -# include "Python.h" #endif #ifdef _WIN32 diff --git a/pypy/translator/c/src/pyobj.h b/pypy/translator/c/src/pyobj.h --- a/pypy/translator/c/src/pyobj.h +++ b/pypy/translator/c/src/pyobj.h @@ -2,7 +2,7 @@ /************************************************************/ /*** C header subsection: untyped operations ***/ /*** as OP_XXX() macros calling the CPython API ***/ - +#ifdef PYPY_CPYTHON_EXTENSION #define op_bool(r,what) { \ int _retval = what; \ @@ -261,3 +261,5 @@ } #endif + +#endif /* PYPY_CPYTHON_EXTENSION */ diff --git a/pypy/translator/c/src/support.h b/pypy/translator/c/src/support.h --- a/pypy/translator/c/src/support.h +++ b/pypy/translator/c/src/support.h @@ -104,7 +104,7 @@ # define RPyBareItem(array, index) ((array)[index]) #endif -#ifndef PYPY_STANDALONE +#ifdef PYPY_CPYTHON_EXTENSION /* prototypes */ diff --git a/pypy/translator/c/test/test_dlltool.py b/pypy/translator/c/test/test_dlltool.py --- a/pypy/translator/c/test/test_dlltool.py +++ b/pypy/translator/c/test/test_dlltool.py @@ -2,7 +2,6 @@ from pypy.translator.c.dlltool import DLLDef from ctypes import CDLL import py -py.test.skip("fix this if needed") class TestDLLTool(object): def test_basic(self): @@ -16,8 +15,8 @@ d = DLLDef('lib', 
[(f, [int]), (b, [int])]) so = d.compile() dll = CDLL(str(so)) - assert dll.f(3) == 3 - assert dll.b(10) == 12 + assert dll.pypy_g_f(3) == 3 + assert dll.pypy_g_b(10) == 12 def test_split_criteria(self): def f(x): @@ -28,4 +27,5 @@ d = DLLDef('lib', [(f, [int]), (b, [int])]) so = d.compile() - assert py.path.local(so).dirpath().join('implement.c').check() + dirpath = py.path.local(so).dirpath() + assert dirpath.join('translator_c_test_test_dlltool.c').check() diff --git a/pypy/translator/driver.py b/pypy/translator/driver.py --- a/pypy/translator/driver.py +++ b/pypy/translator/driver.py @@ -331,6 +331,7 @@ raise Exception("stand-alone program entry point must return an " "int (and not, e.g., None or always raise an " "exception).") + annotator.complete() annotator.simplify() return s diff --git a/pypy/translator/goal/app_main.py b/pypy/translator/goal/app_main.py --- a/pypy/translator/goal/app_main.py +++ b/pypy/translator/goal/app_main.py @@ -139,8 +139,14 @@ items = pypyjit.defaults.items() items.sort() for key, value in items: - print ' --jit %s=N %s%s (default %s)' % ( - key, ' '*(18-len(key)), pypyjit.PARAMETER_DOCS[key], value) + prefix = ' --jit %s=N %s' % (key, ' '*(18-len(key))) + doc = '%s (default %s)' % (pypyjit.PARAMETER_DOCS[key], value) + while len(doc) > 51: + i = doc[:51].rfind(' ') + print prefix + doc[:i] + doc = doc[i+1:] + prefix = ' '*len(prefix) + print prefix + doc print ' --jit off turn off the JIT' def print_version(*args): From noreply at buildbot.pypy.org Thu Mar 1 13:56:30 2012 From: noreply at buildbot.pypy.org (hager) Date: Thu, 1 Mar 2012 13:56:30 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: (bivab, hager): merge runner_test.py with ARM backend Message-ID: <20120301125630.E874D8204C@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r53051:84f475f814f2 Date: 2012-03-01 04:55 -0800 http://bitbucket.org/pypy/pypy/changeset/84f475f814f2/ Log: (bivab, hager): merge runner_test.py with ARM backend 
diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -630,27 +630,48 @@ 'float', descr=calldescr) assert abs(res.getfloat() - 4.6) < 0.0001 - def test_call_many_arguments(self): # Test calling a function with a large number of arguments (more than # 6, which will force passing some arguments on the stack on 64-bit) - num_args = 16 + def func(*args): - assert len(args) == num_args + assert len(args) == 16 # Try to sum up args in a way that would probably detect a # transposed argument return sum(arg * (2**i) for i, arg in enumerate(args)) - FUNC = self.FuncType([lltype.Signed]*num_args, lltype.Signed) + FUNC = self.FuncType([lltype.Signed]*16, lltype.Signed) FPTR = self.Ptr(FUNC) calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, EffectInfo.MOST_GENERAL) func_ptr = llhelper(FPTR, func) - args = range(num_args) + args = range(16) funcbox = self.get_funcbox(self.cpu, func_ptr) res = self.execute_operation(rop.CALL, [funcbox] + map(BoxInt, args), 'int', descr=calldescr) assert res.value == func(*args) + def test_call_box_func(self): + def a(a1, a2): + return a1 + a2 + def b(b1, b2): + return b1 * b2 + + arg1 = 40 + arg2 = 2 + for f in [a, b]: + TP = lltype.Signed + FPTR = self.Ptr(self.FuncType([TP, TP], TP)) + func_ptr = llhelper(FPTR, f) + FUNC = deref(FPTR) + funcconst = self.get_funcbox(self.cpu, func_ptr) + funcbox = funcconst.clonebox() + calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo.MOST_GENERAL) + res = self.execute_operation(rop.CALL, + [funcbox, BoxInt(arg1), BoxInt(arg2)], + 'int', descr=calldescr) + assert res.getint() == f(arg1, arg2) + def test_call_stack_alignment(self): # test stack alignment issues, notably for Mac OS/X. # also test the ordering of the arguments. 
@@ -776,6 +797,7 @@ assert self.execute_operation(opname, args, 'void') == None assert not self.guard_failed + def test_passing_guard_class(self): t_box, T_box = self.alloc_instance(self.T) #null_box = ConstPtr(lltype.cast_opaque_ptr(llmemory.GCREF, lltype.nullptr(T))) @@ -1255,7 +1277,7 @@ else: assert 0 assert type(got) == type(val) - #assert got == val + assert got == val def test_compile_bridge_float(self): if not self.cpu.supports_floats: @@ -2120,6 +2142,7 @@ values.append(descr) values.append(self.cpu.get_latest_value_int(0)) values.append(self.cpu.get_latest_value_int(1)) + values.append(token) FUNC = self.FuncType([lltype.Signed, lltype.Signed], lltype.Void) func_ptr = llhelper(lltype.Ptr(FUNC), maybe_force) @@ -2150,7 +2173,8 @@ assert fail.identifier == 1 assert self.cpu.get_latest_value_int(0) == 1 assert self.cpu.get_latest_value_int(1) == 10 - assert values == [faildescr, 1, 10] + token = self.cpu.get_latest_force_token() + assert values == [faildescr, 1, 10, token] def test_force_operations_returning_int(self): values = [] @@ -2159,6 +2183,7 @@ self.cpu.force(token) values.append(self.cpu.get_latest_value_int(0)) values.append(self.cpu.get_latest_value_int(2)) + values.append(token) return 42 FUNC = self.FuncType([lltype.Signed, lltype.Signed], lltype.Signed) @@ -2192,7 +2217,8 @@ assert self.cpu.get_latest_value_int(0) == 1 assert self.cpu.get_latest_value_int(1) == 42 assert self.cpu.get_latest_value_int(2) == 10 - assert values == [1, 10] + token = self.cpu.get_latest_force_token() + assert values == [1, 10, token] def test_force_operations_returning_float(self): if not self.cpu.supports_floats: @@ -2203,6 +2229,7 @@ self.cpu.force(token) values.append(self.cpu.get_latest_value_int(0)) values.append(self.cpu.get_latest_value_int(2)) + values.append(token) return 42.5 FUNC = self.FuncType([lltype.Signed, lltype.Signed], lltype.Float) @@ -2238,7 +2265,8 @@ x = self.cpu.get_latest_value_float(1) assert longlong.getrealfloat(x) == 42.5 assert 
self.cpu.get_latest_value_int(2) == 10 - assert values == [1, 10] + token = self.cpu.get_latest_force_token() + assert values == [1, 10, token] def test_call_to_c_function(self): from pypy.rlib.libffi import CDLL, types, ArgChain, FUNCFLAG_CDECL @@ -2472,6 +2500,35 @@ assert fail.identifier == 3 assert self.cpu.get_latest_value_int(0) == 333 + def test_guard_not_invalidated_and_label(self): + # test that the guard_not_invalidated reserves enough room before + # the label. If it doesn't, then in this example after we invalidate + # the guard, jumping to the label will hit the invalidation code too + cpu = self.cpu + i0 = BoxInt() + faildescr = BasicFailDescr(1) + labeldescr = TargetToken() + ops = [ + ResOperation(rop.GUARD_NOT_INVALIDATED, [], None, descr=faildescr), + ResOperation(rop.LABEL, [i0], None, descr=labeldescr), + ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(3)), + ] + ops[0].setfailargs([]) + looptoken = JitCellToken() + self.cpu.compile_loop([i0], ops, looptoken) + # mark as failing + self.cpu.invalidate_loop(looptoken) + # attach a bridge + i2 = BoxInt() + ops = [ + ResOperation(rop.JUMP, [ConstInt(333)], None, descr=labeldescr), + ] + self.cpu.compile_bridge(faildescr, [], ops, looptoken) + # run: must not be caught in an infinite loop + fail = self.cpu.execute_token(looptoken, 16) + assert fail.identifier == 3 + assert self.cpu.get_latest_value_int(0) == 333 + # pure do_ / descr features def test_do_operations(self): @@ -3323,6 +3380,55 @@ res = self.cpu.get_latest_value_int(0) assert res == -10 + def test_compile_asmlen(self): + from pypy.jit.backend.llsupport.llmodel import AbstractLLCPU + if not isinstance(self.cpu, AbstractLLCPU): + py.test.skip("pointless test on non-asm") + from pypy.jit.backend.tool.viewcode import machine_code_dump + import ctypes + ops = """ + [i3, i2] + i0 = same_as(i2) # but forced to be in a register + label(i0, descr=1) + i1 = int_add(i0, i0) + guard_true(i1, descr=faildesr) [i1] + jump(i1, descr=1) + """ + 
faildescr = BasicFailDescr(2) + loop = parse(ops, self.cpu, namespace=locals()) + faildescr = loop.operations[-2].getdescr() + jumpdescr = loop.operations[-1].getdescr() + bridge_ops = """ + [i0] + jump(i0, descr=jumpdescr) + """ + bridge = parse(bridge_ops, self.cpu, namespace=locals()) + looptoken = JitCellToken() + self.cpu.assembler.set_debug(False) + info = self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + bridge_info = self.cpu.compile_bridge(faildescr, bridge.inputargs, + bridge.operations, + looptoken) + self.cpu.assembler.set_debug(True) # always on untranslated + assert info.asmlen != 0 + cpuname = autodetect_main_model_and_size() + # XXX we have to check the precise assembler, otherwise + # we don't quite know if borders are correct + + def checkops(mc, ops): + assert len(mc) == len(ops) + for i in range(len(mc)): + assert mc[i].split("\t")[2].startswith(ops[i]) + + data = ctypes.string_at(info.asmaddr, info.asmlen) + mc = list(machine_code_dump(data, info.asmaddr, cpuname)) + lines = [line for line in mc if line.count('\t') >= 2] + checkops(lines, self.add_loop_instructions) + data = ctypes.string_at(bridge_info.asmaddr, bridge_info.asmlen) + mc = list(machine_code_dump(data, bridge_info.asmaddr, cpuname)) + lines = [line for line in mc if line.count('\t') >= 2] + checkops(lines, self.bridge_loop_instructions) + def test_compile_bridge_with_target(self): # This test creates a loopy piece of code in a bridge, and builds another # unrelated loop that ends in a jump directly to this loopy bit of code. 
@@ -3407,6 +3513,43 @@ fail = self.cpu.execute_token(looptoken2, -9) assert fail.identifier == 42 + def test_forcing_op_with_fail_arg_in_reg(self): + values = [] + def maybe_force(token, flag): + self.cpu.force(token) + values.append(self.cpu.get_latest_value_int(0)) + values.append(token) + return 42 + + FUNC = self.FuncType([lltype.Signed, lltype.Signed], lltype.Signed) + func_ptr = llhelper(lltype.Ptr(FUNC), maybe_force) + funcbox = self.get_funcbox(self.cpu, func_ptr).constbox() + calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo.MOST_GENERAL) + i0 = BoxInt() + i1 = BoxInt() + i2 = BoxInt() + tok = BoxInt() + faildescr = BasicFailDescr(23) + ops = [ + ResOperation(rop.FORCE_TOKEN, [], tok), + ResOperation(rop.CALL_MAY_FORCE, [funcbox, tok, i1], i2, + descr=calldescr), + ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), + ResOperation(rop.FINISH, [i2], None, descr=BasicFailDescr(0)) + ] + ops[2].setfailargs([i2]) + looptoken = JitCellToken() + self.cpu.compile_loop([i0, i1], ops, looptoken) + fail = self.cpu.execute_token(looptoken, 20, 0) + assert fail.identifier == 23 + assert self.cpu.get_latest_value_int(0) == 42 + # make sure that force reads the registers from a zeroed piece of + # memory + assert values[0] == 0 + token = self.cpu.get_latest_force_token() + assert values[1] == token + def test_finish_with_long_arglist(self): boxes = [BoxInt(i) for i in range(30)] ops = [ResOperation(rop.FINISH, boxes, None, descr=BasicFailDescr(1))] @@ -3421,7 +3564,7 @@ boxes = [BoxInt(i) for i in range(30)] ops = [ResOperation(rop.GUARD_FALSE, [boxes[1]], None, descr=BasicFailDescr(1)), ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(2))] - ops[0].setfailargs(boxes) + ops[0].setfailargs(boxes) looptoken = JitCellToken() self.cpu.compile_loop(boxes, ops, looptoken) fail = self.cpu.execute_token(looptoken, *range(30)) From noreply at buildbot.pypy.org Thu Mar 1 14:30:41 2012 From: noreply at buildbot.pypy.org (antocuni) Date: 
Thu, 1 Mar 2012 14:30:41 +0100 (CET) Subject: [pypy-commit] pypy py3k: bah, I broke == for != for all objects which are not identical :-/. Fix it Message-ID: <20120301133041.E837A8204C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53052:5522d023cedb Date: 2012-03-01 14:30 +0100 http://bitbucket.org/pypy/pypy/changeset/5522d023cedb/ Log: bah, I broke == for != for all objects which are not identical :-/. Fix it diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -558,6 +558,15 @@ if w_res is not None: return w_res # + # we did not find any special method, let's do the default logic for + # == and != + if left == '__eq__' or left == '__ne__': + # they are not identical, else it would have been caught by the if + # at the top of the function + assert not space.is_w(w_obj1, w_obj2) + return space.wrap(left != '__eq__') + # + # if we arrived here, they are unorderable typename1 = space.type(w_obj1).getname(space) typename2 = space.type(w_obj2).getname(space) raise operationerrfmt(space.w_TypeError, diff --git a/pypy/objspace/test/test_descroperation.py b/pypy/objspace/test/test_descroperation.py --- a/pypy/objspace/test/test_descroperation.py +++ b/pypy/objspace/test/test_descroperation.py @@ -321,6 +321,30 @@ raises(TypeError, "0.0 < zz()") raises(TypeError, "0j < zz()") + def test_equality_among_different_types(self): + class A(object): pass + class zz(object): pass + a = A() + assert a == a + for x, y in [(A(), A()), + (A(), zz()), + (A(), A()), + (A(), None), + (None, A()), + (0, ()), + (0.0, ()), + (0j, ()), + (0, []), + (0.0, []), + (0j, []), + (0, A()), + (0.0, A()), + (0j, A()), + ]: + assert not x == y + assert x != y + + def test_setattrweakref(self): skip("fails, works in cpython") # The issue is that in CPython, none of the built-in types have From noreply at buildbot.pypy.org Thu Mar 1 16:41:40 2012 From: noreply at 
buildbot.pypy.org (antocuni) Date: Thu, 1 Mar 2012 16:41:40 +0100 (CET) Subject: [pypy-commit] pypy default: add a way to automatically define __gt__, __ge__, __le__ and __ne__ on top of the given __lt__ and __eq__ Message-ID: <20120301154140.BDB0D8204C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r53053:bdff1d20bb98 Date: 2012-03-01 16:41 +0100 http://bitbucket.org/pypy/pypy/changeset/bdff1d20bb98/ Log: add a way to automatically define __gt__, __ge__, __le__ and __ne__ on top of the given __lt__ and __eq__ diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py --- a/pypy/interpreter/test/test_typedef.py +++ b/pypy/interpreter/test/test_typedef.py @@ -304,6 +304,42 @@ assert_method(w_o1, "c", True) assert_method(w_o2, "c", False) + def test_total_ordering(self): + class W_SomeType(Wrappable): + def __init__(self, space, x): + self.space = space + self.x = x + + def descr__lt(self, w_other): + assert isinstance(w_other, W_SomeType) + return self.space.wrap(self.x < w_other.x) + + def descr__eq(self, w_other): + assert isinstance(w_other, W_SomeType) + return self.space.wrap(self.x == w_other.x) + + W_SomeType.typedef = typedef.TypeDef( + 'some_type', + __total_ordering__ = 'auto', + __lt__ = interp2app(W_SomeType.descr__lt), + __eq__ = interp2app(W_SomeType.descr__eq), + ) + space = self.space + w_b = space.wrap(W_SomeType(space, 2)) + w_c = space.wrap(W_SomeType(space, 2)) + w_a = space.wrap(W_SomeType(space, 1)) + # explicitly defined + assert space.is_true(space.lt(w_a, w_b)) + assert not space.is_true(space.eq(w_a, w_b)) + assert space.is_true(space.eq(w_b, w_c)) + # automatically defined + assert space.is_true(space.le(w_a, w_b)) + assert space.is_true(space.le(w_b, w_c)) + assert space.is_true(space.gt(w_b, w_a)) + assert space.is_true(space.ge(w_b, w_a)) + assert space.is_true(space.ge(w_b, w_c)) + assert space.is_true(space.ne(w_a, w_b)) + assert not space.is_true(space.ne(w_b, w_c)) class 
AppTestTypeDef: diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -12,7 +12,7 @@ from pypy.rlib.jit import promote class TypeDef: - def __init__(self, __name, __base=None, **rawdict): + def __init__(self, __name, __base=None, __total_ordering__=None, **rawdict): "NOT_RPYTHON: initialization-time only" self.name = __name if __base is None: @@ -34,6 +34,9 @@ # xxx used by faking self.fakedcpytype = None self.add_entries(**rawdict) + assert __total_ordering__ in (None, 'auto'), "Unknown value for __total_ordering" + if __total_ordering__ == 'auto': + self.auto_total_ordering() def add_entries(self, **rawdict): # xxx fix the names of the methods to match what app-level expects @@ -41,7 +44,15 @@ if isinstance(value, (interp2app, GetSetProperty)): value.name = key self.rawdict.update(rawdict) - + + def auto_total_ordering(self): + assert '__lt__' in self.rawdict, "__total_ordering='auto' requires __lt__" + assert '__eq__' in self.rawdict, "__total_ordering='auto' requires __eq__" + self.add_entries(__le__ = auto__le__, + __gt__ = auto__gt__, + __ge__ = auto__ge__, + __ne__ = auto__ne__) + def _freeze_(self): # hint for the annotator: track individual constant instances of TypeDef return True @@ -50,6 +61,27 @@ return "<%s name=%r>" % (self.__class__.__name__, self.name) +# generic special cmp methods defined on top of __lt__ and __eq__, used by +# automatic total ordering + + at interp2app +def auto__le__(space, w_self, w_other): + return space.or_(space.lt(w_self, w_other), + space.eq(w_self, w_other)) + + at interp2app +def auto__gt__(space, w_self, w_other): + return space.not_(space.le(w_self, w_other)) + + at interp2app +def auto__ge__(space, w_self, w_other): + return space.not_(space.lt(w_self, w_other)) + + at interp2app +def auto__ne__(space, w_self, w_other): + return space.not_(space.eq(w_self, w_other)) + + # 
____________________________________________________________ # Hash support From noreply at buildbot.pypy.org Thu Mar 1 17:11:45 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Mar 2012 17:11:45 +0100 (CET) Subject: [pypy-commit] pypy continulet-jit-2: Starting to hack at the x86 backend. Right now I'm just hacking and Message-ID: <20120301161145.326E28204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-jit-2 Changeset: r53054:b2c0c89fe245 Date: 2012-03-01 16:01 +0100 http://bitbucket.org/pypy/pypy/changeset/b2c0c89fe245/ Log: Starting to hack at the x86 backend. Right now I'm just hacking and will rely on "hg diff" to merge it more cleanly keeping both versions. diff --git a/pypy/jit/backend/x86/arch.py b/pypy/jit/backend/x86/arch.py --- a/pypy/jit/backend/x86/arch.py +++ b/pypy/jit/backend/x86/arch.py @@ -30,3 +30,16 @@ # # Note that with asmgcc, the locations corresponding to callee-save registers # are never used. + +# In the offstack version (i.e. when using stacklets): the off-stack allocated +# area starts with the FRAME_FIXED_SIZE words in the same order as they would +# be on the real stack (which is top-to-bottom, so it's actually the opposite +# order as the one in the comments above); but whereas the real stack would +# have the spilled values stored in (ebp-20), (ebp-24), etc., the off-stack +# has them stored in (ebp+8), (ebp+12), etc. +# +# In stacklet mode, the real frame contains always just OFFSTACK_REAL_FRAME +# words reserved for temporary usage like call arguments. To maintain +# alignment on 32-bit, OFFSTACK_REAL_FRAME % 4 == 3, and it is at least 17 +# to handle all other cases. 
+OFFSTACK_REAL_FRAME = 19 diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -13,7 +13,8 @@ gpr_reg_mgr_cls, _valid_addressing_size) from pypy.jit.backend.x86.arch import (FRAME_FIXED_SIZE, FORCE_INDEX_OFS, WORD, - IS_X86_32, IS_X86_64) + IS_X86_32, IS_X86_64, + OFFSTACK_REAL_FRAME) from pypy.jit.backend.x86.regloc import (eax, ecx, edx, ebx, esp, ebp, esi, edi, @@ -84,6 +85,9 @@ self.malloc_slowpath1 = 0 self.malloc_slowpath2 = 0 self.memcpy_addr = 0 + self.offstack_malloc = 0 + self.offstack_realloc = 0 + self.offstack_free = 0 self.setup_failure_recovery() self._debug = False self.debug_counter_descr = cpu.fielddescrof(DEBUG_COUNTER, 'i') @@ -107,7 +111,11 @@ # the address of the function called by 'new' gc_ll_descr = self.cpu.gc_ll_descr gc_ll_descr.initialize() - self.memcpy_addr = self.cpu.cast_ptr_to_int(support.memcpy_fn) + cpi = self.cpu.cast_ptr_to_int + self.memcpy_addr = cpi(support.memcpy_fn) + self.offstack_malloc_addr = cpi(support.offstack_malloc_fn) + self.offstack_realloc_addr = cpi(support.offstack_realloc_fn) + self.offstack_free_addr = cpi(support.offstack_free_fn) self._build_failure_recovery(False) self._build_failure_recovery(True) if self.cpu.supports_floats: @@ -435,17 +443,17 @@ regalloc = RegAlloc(self, self.cpu.translate_support_code) # self._call_header_with_stack_check() - stackadjustpos = self._patchable_stackadjust() clt._debug_nbargs = len(inputargs) operations = regalloc.prepare_loop(inputargs, operations, looptoken, clt.allgcrefs) looppos = self.mc.get_relative_pos() looptoken._x86_loop_code = looppos clt.frame_depth = -1 # temporarily - clt.param_depth = -1 # temporarily - frame_depth, param_depth = self._assemble(regalloc, operations) + #clt.param_depth = -1 # temporarily + (frame_depth#, param_depth + ) = self._assemble(regalloc, operations) clt.frame_depth = frame_depth - clt.param_depth = param_depth + 
#clt.param_depth = param_depth # size_excluding_failure_stuff = self.mc.get_relative_pos() self.write_pending_failure_recoveries() @@ -459,8 +467,8 @@ rawstart + size_excluding_failure_stuff, rawstart)) debug_stop("jit-backend-addr") - self._patch_stackadjust(rawstart + stackadjustpos, - frame_depth + param_depth) + #self._patch_stackadjust(rawstart + stackadjustpos, + # frame_depth )#+ param_depth) self.patch_pending_failure_recoveries(rawstart) # ops_offset = self.mc.ops_offset @@ -529,7 +537,7 @@ ops_offset = self.mc.ops_offset self.fixup_target_tokens(rawstart) self.current_clt.frame_depth = max(self.current_clt.frame_depth, frame_depth) - self.current_clt.param_depth = max(self.current_clt.param_depth, param_depth) + #self.current_clt.param_depth = max(self.current_clt.param_depth, param_depth) self.teardown() # oprofile support if self.cpu.profile_agent is not None: @@ -701,14 +709,14 @@ if we_are_translated() or self.cpu.dont_keepalive_stuff: self._regalloc = None # else keep it around for debugging frame_depth = regalloc.fm.get_frame_depth() - param_depth = regalloc.param_depth + #param_depth = regalloc.param_depth jump_target_descr = regalloc.jump_target_descr if jump_target_descr is not None: target_frame_depth = jump_target_descr._x86_clt.frame_depth - target_param_depth = jump_target_descr._x86_clt.param_depth + #target_param_depth = jump_target_descr._x86_clt.param_depth frame_depth = max(frame_depth, target_frame_depth) - param_depth = max(param_depth, target_param_depth) - return frame_depth, param_depth + #param_depth = max(param_depth, target_param_depth) + return frame_depth#, param_depth def _patchable_stackadjust(self): # stack adjustment LEA @@ -733,10 +741,28 @@ def _call_header(self): # NB. the shape of the frame is hard-coded in get_basic_shape() too. # Also, make sure this is consistent with FRAME_FIXED_SIZE. 
- self.mc.PUSH_r(ebp.value) - self.mc.MOV_rr(ebp.value, esp.value) - for loc in self.cpu.CALLEE_SAVE_REGISTERS: - self.mc.PUSH_r(loc.value) + if IS_X86_32: + self.mc.SUB_ri(esp.value, WORD * (OFFSTACK_REAL_FRAME-1)) + self.mc.PUSH_i32(4096) # XXX XXX! + elif IS_X86_64: + save_regs = [r9, r8, ecx, edx, esi, edi] + assert OFFSTACK_REAL_FRAME >= len(save_regs) + self.mc.SUB_ri(esp.value, WORD * (OFFSTACK_REAL_FRAME + - len(save_regs))) + for reg in save_regs: + self.mc.PUSH_r(reg.value) + self.mc.MOV_ri(edi.value, 4096) # XXX XXX! + self.mc.CALL(imm(self.offstack_malloc_addr)) + if IS_X86_64: + for i in range(len(save_regs)): # XXX looks heavy + reg = save_regs[len(save_regs) - 1 - i] + self.mc.MOV_rs(reg.value, WORD * i) + self.mc.MOV_mr((eax.value, WORD * (FRAME_FIXED_SIZE-1)), + ebp.value) # (new ebp) <- ebp + self.mc.LEA_rm(ebp.value, (eax.value, WORD * (FRAME_FIXED_SIZE-1))) + for i in range(len(self.cpu.CALLEE_SAVE_REGISTERS)): + loc = self.cpu.CALLEE_SAVE_REGISTERS[i] + self.mc.MOV_br(WORD*(-1-i), loc.value) # (ebp-4-4*i) <- reg gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: @@ -761,16 +787,17 @@ self._call_header() def _call_footer(self): - self.mc.LEA_rb(esp.value, -len(self.cpu.CALLEE_SAVE_REGISTERS) * WORD) - gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: self._call_footer_shadowstack(gcrootmap) + self.mc.ADD_ri(esp.value, WORD * OFFSTACK_REAL_FRAME) for i in range(len(self.cpu.CALLEE_SAVE_REGISTERS)-1, -1, -1): - self.mc.POP_r(self.cpu.CALLEE_SAVE_REGISTERS[i].value) + loc = self.cpu.CALLEE_SAVE_REGISTERS[i] + self.mc.MOV_rb(loc.value, WORD*(-1-i)) # (ebp-4-4*i) -> reg + self.mc.MOV_rb(ebp.value, 0) # (ebp) -> ebp + # XXX free! 
- self.mc.POP_r(ebp.value) self.mc.RET() def _call_header_shadowstack(self, gcrootmap): diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -23,6 +23,7 @@ TempBox from pypy.jit.backend.x86.arch import WORD, FRAME_FIXED_SIZE from pypy.jit.backend.x86.arch import IS_X86_32, IS_X86_64, MY_COPY_OF_REGS +from pypy.jit.backend.x86.arch import OFFSTACK_REAL_FRAME from pypy.rlib.rarithmetic import r_longlong class X86RegisterManager(RegisterManager): @@ -129,9 +130,9 @@ class X86FrameManager(FrameManager): @staticmethod def frame_pos(i, box_type): - if IS_X86_32 and box_type == FLOAT: - return StackLoc(i, get_ebp_ofs(i+1), box_type) - else: + #if IS_X86_32 and box_type == FLOAT: + # return StackLoc(i, get_ebp_ofs(i+1), box_type) + #else: return StackLoc(i, get_ebp_ofs(i), box_type) @staticmethod def frame_size(box_type): @@ -168,7 +169,7 @@ def _prepare(self, inputargs, operations, allgcrefs): self.fm = X86FrameManager() - self.param_depth = 0 + #self.param_depth = 0 cpu = self.assembler.cpu operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations, allgcrefs) @@ -197,7 +198,7 @@ allgcrefs): operations = self._prepare(inputargs, operations, allgcrefs) self._update_bindings(arglocs, inputargs) - self.param_depth = prev_depths[1] + #self.param_depth = prev_depths[1] self.min_bytes_before_label = 0 return operations @@ -206,11 +207,24 @@ at_least_position) def reserve_param(self, n): + xxx self.param_depth = max(self.param_depth, n) def _set_initial_bindings(self, inputargs): if IS_X86_64: inputargs = self._set_initial_bindings_regs_64(inputargs) + + cur_frame_ofs = WORD * (OFFSTACK_REAL_FRAME + 1) + mc = self.assembler.mc + for box in inputargs: + assert isinstance(box, Box) + if IS_X86_32 and box.type == FLOAT: + xxx + loc = self.fm.loc(box) + mc.MOV_rs(eax.value, cur_frame_ofs) + mc.MOV_br(loc.value, eax.value) + return + # ... 
# stack layout: arg2 # arg1 @@ -1518,11 +1532,15 @@ else: oplist[num] = value +##def get_ebp_ofs(position): +## # Argument is a frame position (0, 1, 2...). +## # Returns (ebp-20), (ebp-24), (ebp-28)... +## # i.e. the n'th word beyond the fixed frame size. +## return -WORD * (FRAME_FIXED_SIZE + position) def get_ebp_ofs(position): # Argument is a frame position (0, 1, 2...). - # Returns (ebp-20), (ebp-24), (ebp-28)... - # i.e. the n'th word beyond the fixed frame size. - return -WORD * (FRAME_FIXED_SIZE + position) + # Returns (ebp+8), (ebp+12), (ebp+16)... + return WORD * (2 + position) def _valid_addressing_size(size): return size == 1 or size == 2 or size == 4 or size == 8 diff --git a/pypy/jit/backend/x86/support.py b/pypy/jit/backend/x86/support.py --- a/pypy/jit/backend/x86/support.py +++ b/pypy/jit/backend/x86/support.py @@ -33,6 +33,14 @@ memcpy_fn = rffi.llexternal('memcpy', [llmemory.Address, llmemory.Address, rffi.SIZE_T], lltype.Void, sandboxsafe=True, _nowrapper=True) +offstack_malloc_fn = rffi.llexternal('malloc', [rffi.SIZE_T], + llmemory.Address, + sandboxsafe=True, _nowrapper=True) +offstack_realloc_fn = rffi.llexternal('realloc', [llmemory.Address, + rffi.SIZE_T], llmemory.Address, + sandboxsafe=True, _nowrapper=True) +offstack_free_fn = rffi.llexternal('free', [llmemory.Address], lltype.Void, + sandboxsafe=True, _nowrapper=True) # ____________________________________________________________ From noreply at buildbot.pypy.org Thu Mar 1 17:11:46 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Mar 2012 17:11:46 +0100 (CET) Subject: [pypy-commit] pypy continulet-jit-2: Fixes. Message-ID: <20120301161146.6BC4C8204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-jit-2 Changeset: r53055:caa28d6b97f5 Date: 2012-03-01 16:16 +0100 http://bitbucket.org/pypy/pypy/changeset/caa28d6b97f5/ Log: Fixes. 
diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -786,12 +786,12 @@ # self._call_header() - def _call_footer(self): + def _call_footer(self, extra_esp=0): gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: self._call_footer_shadowstack(gcrootmap) - self.mc.ADD_ri(esp.value, WORD * OFFSTACK_REAL_FRAME) + self.mc.ADD_ri(esp.value, WORD * OFFSTACK_REAL_FRAME + extra_esp) for i in range(len(self.cpu.CALLEE_SAVE_REGISTERS)-1, -1, -1): loc = self.cpu.CALLEE_SAVE_REGISTERS[i] self.mc.MOV_rb(loc.value, WORD*(-1-i)) # (ebp-4-4*i) -> reg @@ -1958,9 +1958,12 @@ mc = codebuf.MachineCodeBlockWrapper() self.mc = mc + extra_esp = WORD # we reach this code with an extra CALL + # Push all general purpose registers for gpr in range(self.cpu.NUM_REGS-1, -1, -1): mc.PUSH_r(gpr) + extra_esp += self.cpu.NUM_REGS * WORD # ebx/rbx is callee-save in both i386 and x86-64 mc.MOV_rr(ebx.value, esp.value) @@ -1970,6 +1973,7 @@ mc.SUB_ri(esp.value, self.cpu.NUM_REGS*8) for i in range(self.cpu.NUM_REGS): mc.MOVSD_sx(8*i, i) + extra_esp += self.cpu.NUM_REGS*8 # we call a provided function that will # - call our on_leave_jitted_hook which will mark @@ -1991,10 +1995,9 @@ # XXX if IS_X86_32: mc.PUSH_r(ebx.value) + extra_esp += 1 elif IS_X86_64: mc.MOV_rr(edi.value, ebx.value) - # XXX: Correct to only align the stack on 64-bit? - mc.AND_ri(esp.value, -16) else: raise AssertionError("Shouldn't happen") @@ -2002,11 +2005,12 @@ # returns in eax the fail_index # now we return from the complete frame, which starts from - # _call_header_with_stack_check(). The LEA in _call_footer below + # _call_header_with_stack_check(). We have to compute how many + # extra PUSHes we just did. # throws away most of the frame, including all the PUSHes that we # did just above. 
- self._call_footer() + self._call_footer(extra_esp) rawstart = mc.materialize(self.cpu.asmmemmgr, []) self.failure_recovery_code[exc + 2 * withfloats] = rawstart self.mc = None diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -394,6 +394,8 @@ # return (self.fm.frame_depth, self.param_depth), but trying to share # the resulting tuple among several calls arg0 = self.fm.get_frame_depth() + return arg0 + # arg1 = self.param_depth result = self.assembler._current_depths_cache if result[0] != arg0 or result[1] != arg1: From noreply at buildbot.pypy.org Thu Mar 1 17:11:47 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Mar 2012 17:11:47 +0100 (CET) Subject: [pypy-commit] pypy continulet-jit-2: Next test passes. Message-ID: <20120301161147.9C2488204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-jit-2 Changeset: r53056:388d3780ddea Date: 2012-03-01 16:17 +0100 http://bitbucket.org/pypy/pypy/changeset/388d3780ddea/ Log: Next test passes. 
diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -514,8 +514,9 @@ operations, self.current_clt.allgcrefs) - stackadjustpos = self._patchable_stackadjust() - frame_depth, param_depth = self._assemble(regalloc, operations) + #stackadjustpos = self._patchable_stackadjust() + (frame_depth #, param_depth + ) = self._assemble(regalloc, operations) codeendpos = self.mc.get_relative_pos() self.write_pending_failure_recoveries() fullsize = self.mc.get_relative_pos() @@ -525,13 +526,13 @@ debug_print("bridge out of Guard %d has address %x to %x" % (descr_number, rawstart, rawstart + codeendpos)) debug_stop("jit-backend-addr") - self._patch_stackadjust(rawstart + stackadjustpos, - frame_depth + param_depth) + #self._patch_stackadjust(rawstart + stackadjustpos, + # frame_depth + param_depth) self.patch_pending_failure_recoveries(rawstart) if not we_are_translated(): # for the benefit of tests faildescr._x86_bridge_frame_depth = frame_depth - faildescr._x86_bridge_param_depth = param_depth + #faildescr._x86_bridge_param_depth = param_depth # patch the jump from original guard self.patch_jump_for_descr(faildescr, rawstart) ops_offset = self.mc.ops_offset From noreply at buildbot.pypy.org Thu Mar 1 17:11:48 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Mar 2012 17:11:48 +0100 (CET) Subject: [pypy-commit] pypy continulet-jit-2: Fixes Message-ID: <20120301161148.CF3A08204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-jit-2 Changeset: r53057:ff37a8ff08c2 Date: 2012-03-01 16:28 +0100 http://bitbucket.org/pypy/pypy/changeset/ff37a8ff08c2/ Log: Fixes diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -85,9 +85,9 @@ self.malloc_slowpath1 = 0 self.malloc_slowpath2 = 0 self.memcpy_addr = 0 - self.offstack_malloc = 0 - 
self.offstack_realloc = 0 - self.offstack_free = 0 + self.offstack_malloc_addr = 0 + self.offstack_realloc_addr = 0 + self.offstack_free_addr = 0 self.setup_failure_recovery() self._debug = False self.debug_counter_descr = cpu.fielddescrof(DEBUG_COUNTER, 'i') @@ -746,18 +746,28 @@ self.mc.SUB_ri(esp.value, WORD * (OFFSTACK_REAL_FRAME-1)) self.mc.PUSH_i32(4096) # XXX XXX! elif IS_X86_64: + # XXX very heavily save and restore all possible argument registers save_regs = [r9, r8, ecx, edx, esi, edi] - assert OFFSTACK_REAL_FRAME >= len(save_regs) - self.mc.SUB_ri(esp.value, WORD * (OFFSTACK_REAL_FRAME - - len(save_regs))) - for reg in save_regs: - self.mc.PUSH_r(reg.value) + save_xmm_regs = [xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0] + assert OFFSTACK_REAL_FRAME >= len(save_regs) + len(save_xmm_regs) + self.mc.SUB_ri(esp.value, WORD * OFFSTACK_REAL_FRAME) + for i in range(len(save_regs)): + self.mc.MOV_sr(WORD * i, save_regs[i].value) + base = len(save_regs) + for i in range(len(save_xmm_regs)): + self.mc.MOVSD_sx(WORD * (base + i), save_xmm_regs[i].value) + # self.mc.MOV_ri(edi.value, 4096) # XXX XXX! + # self.mc.CALL(imm(self.offstack_malloc_addr)) + # if IS_X86_64: - for i in range(len(save_regs)): # XXX looks heavy - reg = save_regs[len(save_regs) - 1 - i] - self.mc.MOV_rs(reg.value, WORD * i) + for i in range(len(save_regs)): + self.mc.MOV_rs(save_regs[i].value, WORD * i) + base = len(save_regs) + for i in range(len(save_xmm_regs)): + self.mc.MOVSD_xs(save_xmm_regs[i].value, WORD * (base + i)) + # self.mc.MOV_mr((eax.value, WORD * (FRAME_FIXED_SIZE-1)), ebp.value) # (new ebp) <- ebp self.mc.LEA_rm(ebp.value, (eax.value, WORD * (FRAME_FIXED_SIZE-1))) diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -223,6 +223,7 @@ loc = self.fm.loc(box) mc.MOV_rs(eax.value, cur_frame_ofs) mc.MOV_br(loc.value, eax.value) + cur_frame_ofs += 1 return # ... 
From noreply at buildbot.pypy.org Thu Mar 1 17:11:50 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Mar 2012 17:11:50 +0100 (CET) Subject: [pypy-commit] pypy continulet-jit-2: Fixes Message-ID: <20120301161150.0CD698204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-jit-2 Changeset: r53058:cbaa8453d70b Date: 2012-03-01 16:41 +0100 http://bitbucket.org/pypy/pypy/changeset/cbaa8453d70b/ Log: Fixes diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -207,8 +207,8 @@ at_least_position) def reserve_param(self, n): - xxx - self.param_depth = max(self.param_depth, n) + assert n <= OFFSTACK_REAL_FRAME + #self.param_depth = max(self.param_depth, n) def _set_initial_bindings(self, inputargs): if IS_X86_64: @@ -216,14 +216,21 @@ cur_frame_ofs = WORD * (OFFSTACK_REAL_FRAME + 1) mc = self.assembler.mc + if IS_X86_32: + xmmtmp = xmm0 + elif IS_X86_64: + xmmtmp = X86_64_XMM_SCRATCH_REG for box in inputargs: assert isinstance(box, Box) - if IS_X86_32 and box.type == FLOAT: - xxx loc = self.fm.loc(box) - mc.MOV_rs(eax.value, cur_frame_ofs) - mc.MOV_br(loc.value, eax.value) - cur_frame_ofs += 1 + if box.type == FLOAT: + mc.MOVSD_xs(xmmtmp.value, cur_frame_ofs) + mc.MOVSD_bx(loc.value, xmmtmp.value) + cur_frame_ofs += 8 + else: + mc.MOV_rs(eax.value, cur_frame_ofs) + mc.MOV_br(loc.value, eax.value) + cur_frame_ofs += WORD return # ... From noreply at buildbot.pypy.org Thu Mar 1 17:11:51 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Mar 2012 17:11:51 +0100 (CET) Subject: [pypy-commit] pypy continulet-jit-2: CALLs with a large number of arguments. Message-ID: <20120301161151.3D5EF8204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-jit-2 Changeset: r53059:17f7f5edd8a7 Date: 2012-03-01 16:46 +0100 http://bitbucket.org/pypy/pypy/changeset/17f7f5edd8a7/ Log: CALLs with a large number of arguments. 
diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -1129,6 +1129,12 @@ else: pass_on_stack.append(loc) + extra_esp = 0 + if len(pass_on_stack) > OFFSTACK_REAL_FRAME: + extra_esp = WORD * align_stack_words(len(pass_on_stack) - + OFFSTACK_REAL_FRAME) + self.mc.SUB_ri(esp.value, extra_esp) + # Emit instructions to pass the stack arguments # XXX: Would be nice to let remap_frame_layout take care of this, but # we'd need to create something like StackLoc, but relative to esp, @@ -1165,10 +1171,13 @@ x = r10 remap_frame_layout(self, src_locs, dst_locs, X86_64_SCRATCH_REG) - self._regalloc.reserve_param(len(pass_on_stack)) + #self._regalloc.reserve_param(len(pass_on_stack)) self.mc.CALL(x) self.mark_gc_roots(force_index) + if extra_esp > 0: + self.mc.ADD_ri(esp.value, extra_esp) + def call(self, addr, args, res): force_index = self.write_new_force_index() self._emit_call(force_index, imm(addr), args) From noreply at buildbot.pypy.org Thu Mar 1 17:11:52 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Mar 2012 17:11:52 +0100 (CET) Subject: [pypy-commit] pypy continulet-jit-2: Fix on 32-bit. Message-ID: <20120301161152.6D4AA8204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-jit-2 Changeset: r53060:75ac3cc2736a Date: 2012-03-01 16:49 +0100 http://bitbucket.org/pypy/pypy/changeset/75ac3cc2736a/ Log: Fix on 32-bit. 
diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -2015,7 +2015,7 @@ # XXX if IS_X86_32: mc.PUSH_r(ebx.value) - extra_esp += 1 + extra_esp += WORD elif IS_X86_64: mc.MOV_rr(edi.value, ebx.value) else: From noreply at buildbot.pypy.org Thu Mar 1 17:11:53 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Mar 2012 17:11:53 +0100 (CET) Subject: [pypy-commit] pypy continulet-jit-2: Large calls on 32-bit. Message-ID: <20120301161153.9F05E8204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-jit-2 Changeset: r53061:d6e3bdcf3b85 Date: 2012-03-01 16:53 +0100 http://bitbucket.org/pypy/pypy/changeset/d6e3bdcf3b85/ Log: Large calls on 32-bit. diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -1059,6 +1059,16 @@ n = len(arglocs) for i in range(start, n): loc = arglocs[i] + p += loc.get_width() + extra_esp = p//WORD - OFFSTACK_REAL_FRAME + if extra_esp > 0: + extra_esp = align_stack_words(extra_esp) * WORD + self.mc.SUB_ri(esp.value, extra_esp) + + p = 0 + n = len(arglocs) + for i in range(start, n): + loc = arglocs[i] if isinstance(loc, RegLoc): if loc.is_xmm: self.mc.MOVSD_sx(p, loc.value) @@ -1076,13 +1086,16 @@ self.mc.MOV(tmp, loc) self.mc.MOV_sr(p, tmp.value) p += loc.get_width() - self._regalloc.reserve_param(p//WORD) + #self._regalloc.reserve_param(p//WORD) # x is a location self.mc.CALL(x) self.mark_gc_roots(force_index) # if callconv != FFI_DEFAULT_ABI: self._fix_stdcall(callconv, p) + # + if extra_esp > 0: + self.mc.ADD_ri(esp.value, extra_esp) def _fix_stdcall(self, callconv, p): from pypy.rlib.clibffi import FFI_STDCALL From noreply at buildbot.pypy.org Thu Mar 1 17:11:54 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Mar 2012 17:11:54 +0100 (CET) Subject: [pypy-commit] pypy continulet-jit-2: 
Fixes. Message-ID: <20120301161154.CFC5E8204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-jit-2 Changeset: r53062:61962326e5b4 Date: 2012-03-01 17:11 +0100 http://bitbucket.org/pypy/pypy/changeset/61962326e5b4/ Log: Fixes. diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -885,8 +885,8 @@ self.mc.MOVSD_sx(0, loc.value) elif WORD == 4 and isinstance(loc, StackLoc) and loc.get_width() == 8: # XXX evil trick - self.mc.PUSH_b(get_ebp_ofs(loc.position)) - self.mc.PUSH_b(get_ebp_ofs(loc.position + 1)) + self.mc.PUSH_b(loc.value + 4) + self.mc.PUSH_b(loc.value) else: self.mc.PUSH(loc) @@ -896,8 +896,8 @@ self.mc.ADD_ri(esp.value, 8) # = size of doubles elif WORD == 4 and isinstance(loc, StackLoc) and loc.get_width() == 8: # XXX evil trick - self.mc.POP_b(get_ebp_ofs(loc.position + 1)) - self.mc.POP_b(get_ebp_ofs(loc.position)) + self.mc.POP_b(loc.value) + self.mc.POP_b(loc.value + 4) else: self.mc.POP(loc) @@ -1919,8 +1919,9 @@ stackloc = frame_addr + get_ebp_ofs(code) value = rffi.cast(rffi.LONGP, stackloc)[0] if kind == self.DESCR_FLOAT and WORD == 4: - value_hi = value - value = rffi.cast(rffi.LONGP, stackloc - 4)[0] + #value_hi = value + #value = rffi.cast(rffi.LONGP, stackloc - 4)[0] + value_hi = rffi.cast(rffi.LONGP, stackloc + 4)[0] else: # 'code' identifies a register: load its value kind = code & 3 @@ -2218,6 +2219,7 @@ asmgcroot.FRAME_PTR) + 1) pos = self._regalloc.fm.reserve_location_in_frame(use_words) css = get_ebp_ofs(pos + use_words - 1) + xxxxxxxx # ^^^^ self._regalloc.close_stack_struct = css # The location where the future CALL will put its return address # will be [ESP-WORD]. 
But we can't use that as the next frame's From noreply at buildbot.pypy.org Thu Mar 1 17:21:06 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 1 Mar 2012 17:21:06 +0100 (CET) Subject: [pypy-commit] pypy py3k: hg merge default Message-ID: <20120301162106.3A26A8204C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53063:f9a6a7baf321 Date: 2012-03-01 16:42 +0100 http://bitbucket.org/pypy/pypy/changeset/f9a6a7baf321/ Log: hg merge default diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py --- a/pypy/interpreter/test/test_typedef.py +++ b/pypy/interpreter/test/test_typedef.py @@ -304,6 +304,42 @@ assert_method(w_o1, "c", True) assert_method(w_o2, "c", False) + def test_total_ordering(self): + class W_SomeType(Wrappable): + def __init__(self, space, x): + self.space = space + self.x = x + + def descr__lt(self, w_other): + assert isinstance(w_other, W_SomeType) + return self.space.wrap(self.x < w_other.x) + + def descr__eq(self, w_other): + assert isinstance(w_other, W_SomeType) + return self.space.wrap(self.x == w_other.x) + + W_SomeType.typedef = typedef.TypeDef( + 'some_type', + __total_ordering__ = 'auto', + __lt__ = interp2app(W_SomeType.descr__lt), + __eq__ = interp2app(W_SomeType.descr__eq), + ) + space = self.space + w_b = space.wrap(W_SomeType(space, 2)) + w_c = space.wrap(W_SomeType(space, 2)) + w_a = space.wrap(W_SomeType(space, 1)) + # explicitly defined + assert space.is_true(space.lt(w_a, w_b)) + assert not space.is_true(space.eq(w_a, w_b)) + assert space.is_true(space.eq(w_b, w_c)) + # automatically defined + assert space.is_true(space.le(w_a, w_b)) + assert space.is_true(space.le(w_b, w_c)) + assert space.is_true(space.gt(w_b, w_a)) + assert space.is_true(space.ge(w_b, w_a)) + assert space.is_true(space.ge(w_b, w_c)) + assert space.is_true(space.ne(w_a, w_b)) + assert not space.is_true(space.ne(w_b, w_c)) class AppTestTypeDef: diff --git a/pypy/interpreter/typedef.py 
b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -12,7 +12,7 @@ from pypy.rlib.jit import promote class TypeDef: - def __init__(self, __name, __base=None, **rawdict): + def __init__(self, __name, __base=None, __total_ordering__=None, **rawdict): "NOT_RPYTHON: initialization-time only" self.name = __name if __base is None: @@ -34,6 +34,9 @@ # xxx used by faking self.fakedcpytype = None self.add_entries(**rawdict) + assert __total_ordering__ in (None, 'auto'), "Unknown value for __total_ordering" + if __total_ordering__ == 'auto': + self.auto_total_ordering() def add_entries(self, **rawdict): # xxx fix the names of the methods to match what app-level expects @@ -41,7 +44,15 @@ if isinstance(value, (interp2app, GetSetProperty)): value.name = key self.rawdict.update(rawdict) - + + def auto_total_ordering(self): + assert '__lt__' in self.rawdict, "__total_ordering='auto' requires __lt__" + assert '__eq__' in self.rawdict, "__total_ordering='auto' requires __eq__" + self.add_entries(__le__ = auto__le__, + __gt__ = auto__gt__, + __ge__ = auto__ge__, + __ne__ = auto__ne__) + def _freeze_(self): # hint for the annotator: track individual constant instances of TypeDef return True @@ -50,6 +61,27 @@ return "<%s name=%r>" % (self.__class__.__name__, self.name) +# generic special cmp methods defined on top of __lt__ and __eq__, used by +# automatic total ordering + + at interp2app +def auto__le__(space, w_self, w_other): + return space.or_(space.lt(w_self, w_other), + space.eq(w_self, w_other)) + + at interp2app +def auto__gt__(space, w_self, w_other): + return space.not_(space.le(w_self, w_other)) + + at interp2app +def auto__ge__(space, w_self, w_other): + return space.not_(space.lt(w_self, w_other)) + + at interp2app +def auto__ne__(space, w_self, w_other): + return space.not_(space.eq(w_self, w_other)) + + # ____________________________________________________________ # Hash support diff --git 
a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -184,6 +184,8 @@ self.addrs[1] = self.addrs[0] + 64 self.calls = [] def malloc_slowpath(size): + if self.gcrootmap is not None: # hook + self.gcrootmap.hook_malloc_slowpath() self.calls.append(size) # reset the nursery nadr = rffi.cast(lltype.Signed, self.nursery) @@ -257,3 +259,218 @@ assert gc_ll_descr.addrs[0] == nurs_adr + 24 # this should call slow path once assert gc_ll_descr.calls == [24] + + def test_save_regs_around_malloc(self): + S1 = lltype.GcStruct('S1') + S2 = lltype.GcStruct('S2', ('s0', lltype.Ptr(S1)), + ('s1', lltype.Ptr(S1)), + ('s2', lltype.Ptr(S1)), + ('s3', lltype.Ptr(S1)), + ('s4', lltype.Ptr(S1)), + ('s5', lltype.Ptr(S1)), + ('s6', lltype.Ptr(S1)), + ('s7', lltype.Ptr(S1)), + ('s8', lltype.Ptr(S1)), + ('s9', lltype.Ptr(S1)), + ('s10', lltype.Ptr(S1)), + ('s11', lltype.Ptr(S1)), + ('s12', lltype.Ptr(S1)), + ('s13', lltype.Ptr(S1)), + ('s14', lltype.Ptr(S1)), + ('s15', lltype.Ptr(S1))) + cpu = self.cpu + self.namespace = self.namespace.copy() + for i in range(16): + self.namespace['ds%i' % i] = cpu.fielddescrof(S2, 's%d' % i) + ops = ''' + [p0] + p1 = getfield_gc(p0, descr=ds0) + p2 = getfield_gc(p0, descr=ds1) + p3 = getfield_gc(p0, descr=ds2) + p4 = getfield_gc(p0, descr=ds3) + p5 = getfield_gc(p0, descr=ds4) + p6 = getfield_gc(p0, descr=ds5) + p7 = getfield_gc(p0, descr=ds6) + p8 = getfield_gc(p0, descr=ds7) + p9 = getfield_gc(p0, descr=ds8) + p10 = getfield_gc(p0, descr=ds9) + p11 = getfield_gc(p0, descr=ds10) + p12 = getfield_gc(p0, descr=ds11) + p13 = getfield_gc(p0, descr=ds12) + p14 = getfield_gc(p0, descr=ds13) + p15 = getfield_gc(p0, descr=ds14) + p16 = getfield_gc(p0, descr=ds15) + # + # now all registers are in use + p17 = call_malloc_nursery(40) + p18 = call_malloc_nursery(40) # overflow + # + finish(p1, p2, p3, p4, p5, 
p6, p7, p8, \ + p9, p10, p11, p12, p13, p14, p15, p16) + ''' + s2 = lltype.malloc(S2) + for i in range(16): + setattr(s2, 's%d' % i, lltype.malloc(S1)) + s2ref = lltype.cast_opaque_ptr(llmemory.GCREF, s2) + # + self.interpret(ops, [s2ref]) + gc_ll_descr = cpu.gc_ll_descr + gc_ll_descr.check_nothing_in_nursery() + assert gc_ll_descr.calls == [40] + # check the returned pointers + for i in range(16): + s1ref = self.cpu.get_latest_value_ref(i) + s1 = lltype.cast_opaque_ptr(lltype.Ptr(S1), s1ref) + assert s1 == getattr(s2, 's%d' % i) + + +class MockShadowStackRootMap(MockGcRootMap): + is_shadow_stack = True + MARKER_FRAME = 88 # this marker follows the frame addr + S1 = lltype.GcStruct('S1') + + def __init__(self): + self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 20, + flavor='raw') + # root_stack_top + self.addrs[0] = rffi.cast(lltype.Signed, self.addrs) + 3*WORD + # random stuff + self.addrs[1] = 123456 + self.addrs[2] = 654321 + self.check_initial_and_final_state() + self.callshapes = {} + self.should_see = [] + + def check_initial_and_final_state(self): + assert self.addrs[0] == rffi.cast(lltype.Signed, self.addrs) + 3*WORD + assert self.addrs[1] == 123456 + assert self.addrs[2] == 654321 + + def get_root_stack_top_addr(self): + return rffi.cast(lltype.Signed, self.addrs) + + def compress_callshape(self, shape, datablockwrapper): + assert shape[0] == 'shape' + return ['compressed'] + shape[1:] + + def write_callshape(self, mark, force_index): + assert mark[0] == 'compressed' + assert force_index not in self.callshapes + assert force_index == 42 + len(self.callshapes) + self.callshapes[force_index] = mark + + def hook_malloc_slowpath(self): + num_entries = self.addrs[0] - rffi.cast(lltype.Signed, self.addrs) + assert num_entries == 5*WORD # 3 initially, plus 2 by the asm frame + assert self.addrs[1] == 123456 # unchanged + assert self.addrs[2] == 654321 # unchanged + frame_addr = self.addrs[3] # pushed by the asm frame + assert self.addrs[4] == 
self.MARKER_FRAME # pushed by the asm frame + # + from pypy.jit.backend.x86.arch import FORCE_INDEX_OFS + addr = rffi.cast(rffi.CArrayPtr(lltype.Signed), + frame_addr + FORCE_INDEX_OFS) + force_index = addr[0] + assert force_index == 43 # in this test: the 2nd call_malloc_nursery + # + # The callshapes[43] saved above should list addresses both in the + # COPY_AREA and in the "normal" stack, where all the 16 values p1-p16 + # of test_save_regs_at_correct_place should have been stored. Here + # we replace them with new addresses, to emulate a moving GC. + shape = self.callshapes[force_index] + assert len(shape[1:]) == len(self.should_see) + new_objects = [None] * len(self.should_see) + for ofs in shape[1:]: + assert isinstance(ofs, int) # not a register at all here + addr = rffi.cast(rffi.CArrayPtr(lltype.Signed), frame_addr + ofs) + contains = addr[0] + for j in range(len(self.should_see)): + obj = self.should_see[j] + if contains == rffi.cast(lltype.Signed, obj): + assert new_objects[j] is None # duplicate? + break + else: + assert 0 # the value read from the stack looks random? 
+ new_objects[j] = lltype.malloc(self.S1) + addr[0] = rffi.cast(lltype.Signed, new_objects[j]) + self.should_see[:] = new_objects + + +class TestMallocShadowStack(BaseTestRegalloc): + + def setup_method(self, method): + cpu = CPU(None, None) + cpu.gc_ll_descr = GCDescrFastpathMalloc() + cpu.gc_ll_descr.gcrootmap = MockShadowStackRootMap() + cpu.setup_once() + for i in range(42): + cpu.reserve_some_free_fail_descr_number() + self.cpu = cpu + + def test_save_regs_at_correct_place(self): + cpu = self.cpu + gc_ll_descr = cpu.gc_ll_descr + S1 = gc_ll_descr.gcrootmap.S1 + S2 = lltype.GcStruct('S2', ('s0', lltype.Ptr(S1)), + ('s1', lltype.Ptr(S1)), + ('s2', lltype.Ptr(S1)), + ('s3', lltype.Ptr(S1)), + ('s4', lltype.Ptr(S1)), + ('s5', lltype.Ptr(S1)), + ('s6', lltype.Ptr(S1)), + ('s7', lltype.Ptr(S1)), + ('s8', lltype.Ptr(S1)), + ('s9', lltype.Ptr(S1)), + ('s10', lltype.Ptr(S1)), + ('s11', lltype.Ptr(S1)), + ('s12', lltype.Ptr(S1)), + ('s13', lltype.Ptr(S1)), + ('s14', lltype.Ptr(S1)), + ('s15', lltype.Ptr(S1))) + self.namespace = self.namespace.copy() + for i in range(16): + self.namespace['ds%i' % i] = cpu.fielddescrof(S2, 's%d' % i) + ops = ''' + [p0] + p1 = getfield_gc(p0, descr=ds0) + p2 = getfield_gc(p0, descr=ds1) + p3 = getfield_gc(p0, descr=ds2) + p4 = getfield_gc(p0, descr=ds3) + p5 = getfield_gc(p0, descr=ds4) + p6 = getfield_gc(p0, descr=ds5) + p7 = getfield_gc(p0, descr=ds6) + p8 = getfield_gc(p0, descr=ds7) + p9 = getfield_gc(p0, descr=ds8) + p10 = getfield_gc(p0, descr=ds9) + p11 = getfield_gc(p0, descr=ds10) + p12 = getfield_gc(p0, descr=ds11) + p13 = getfield_gc(p0, descr=ds12) + p14 = getfield_gc(p0, descr=ds13) + p15 = getfield_gc(p0, descr=ds14) + p16 = getfield_gc(p0, descr=ds15) + # + # now all registers are in use + p17 = call_malloc_nursery(40) + p18 = call_malloc_nursery(40) # overflow + # + finish(p1, p2, p3, p4, p5, p6, p7, p8, \ + p9, p10, p11, p12, p13, p14, p15, p16) + ''' + s2 = lltype.malloc(S2) + for i in range(16): + s1 = lltype.malloc(S1) 
+ setattr(s2, 's%d' % i, s1) + gc_ll_descr.gcrootmap.should_see.append(s1) + s2ref = lltype.cast_opaque_ptr(llmemory.GCREF, s2) + # + self.interpret(ops, [s2ref]) + gc_ll_descr.check_nothing_in_nursery() + assert gc_ll_descr.calls == [40] + gc_ll_descr.gcrootmap.check_initial_and_final_state() + # check the returned pointers + for i in range(16): + s1ref = self.cpu.get_latest_value_ref(i) + s1 = lltype.cast_opaque_ptr(lltype.Ptr(S1), s1ref) + for j in range(16): + assert s1 != getattr(s2, 's%d' % j) + assert s1 == gc_ll_descr.gcrootmap.should_see[i] diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py --- a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -398,48 +398,38 @@ with raises(InvalidLoop): self.optimize_loop(ops, ops) - def test_maybe_issue1045_related(self): + def test_issue1045(self): ops = """ - [p8] - p54 = getfield_gc(p8, descr=valuedescr) - mark_opaque_ptr(p54) - i55 = getfield_gc(p54, descr=nextdescr) - p57 = new_with_vtable(ConstClass(node_vtable)) - setfield_gc(p57, i55, descr=otherdescr) - p69 = new_with_vtable(ConstClass(node_vtable)) - setfield_gc(p69, i55, descr=otherdescr) - i71 = int_eq(i55, -9223372036854775808) - guard_false(i71) [] - i73 = int_mod(i55, 2) - i75 = int_rshift(i73, 63) - i76 = int_and(2, i75) - i77 = int_add(i73, i76) - p79 = new_with_vtable(ConstClass(node_vtable)) - setfield_gc(p79, i77, descr=otherdescr) - i81 = int_eq(i77, 1) - guard_false(i81) [] - i0 = int_ge(i55, 1) - guard_true(i0) [] - label(p57) - jump(p57) - """ - expected = """ - [p8] - p54 = getfield_gc(p8, descr=valuedescr) - i55 = getfield_gc(p54, descr=nextdescr) - i71 = int_eq(i55, -9223372036854775808) - guard_false(i71) [] + [i55] i73 = int_mod(i55, 2) i75 = int_rshift(i73, 63) i76 = int_and(2, i75) i77 = int_add(i73, i76) i81 = int_eq(i77, 1) - guard_false(i81) [] i0 = int_ge(i55, 1) guard_true(i0) [] 
label(i55) + i3 = int_mod(i55, 2) + i5 = int_rshift(i3, 63) + i6 = int_and(2, i5) + i7 = int_add(i3, i6) + i8 = int_eq(i7, 1) + escape(i8) jump(i55) """ + expected = """ + [i55] + i73 = int_mod(i55, 2) + i75 = int_rshift(i73, 63) + i76 = int_and(2, i75) + i77 = int_add(i73, i76) + i81 = int_eq(i77, 1) + i0 = int_ge(i55, 1) + guard_true(i0) [] + label(i55, i81) + escape(i81) + jump(i55, i81) + """ self.optimize_loop(ops, expected) class OptRenameStrlen(Optimization): @@ -467,7 +457,7 @@ metainterp_sd = FakeMetaInterpStaticData(self.cpu) optimize_unroll(metainterp_sd, loop, [OptRenameStrlen(), OptPure()], True) - def test_optimizer_renaming_boxes(self): + def test_optimizer_renaming_boxes1(self): ops = """ [p1] i1 = strlen(p1) diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -260,7 +260,7 @@ if op and op.result: preamble_value = exported_state.exported_values[op.result] value = self.optimizer.getvalue(op.result) - if not value.is_virtual(): + if not value.is_virtual() and not value.is_constant(): imp = ValueImporter(self, preamble_value, op) self.optimizer.importable_values[value] = imp newvalue = self.optimizer.getvalue(op.result) @@ -268,7 +268,9 @@ # note that emitting here SAME_AS should not happen, but # in case it does, we would prefer to be suboptimal in asm # to a fatal RPython exception. - if newresult is not op.result and not newvalue.is_constant(): + if newresult is not op.result and \ + not self.short_boxes.has_producer(newresult) and \ + not newvalue.is_constant(): op = ResOperation(rop.SAME_AS, [op.result], newresult) self.optimizer._newoperations.append(op) if self.optimizer.loop.logops: diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -2349,7 +2349,7 @@ # warmstate.py. 
virtualizable_box = self.virtualizable_boxes[-1] virtualizable = vinfo.unwrap_virtualizable_box(virtualizable_box) - assert not vinfo.gettoken(virtualizable) + assert not vinfo.is_token_nonnull_gcref(virtualizable) # fill the virtualizable with the local boxes self.synchronize_virtualizable() # diff --git a/pypy/jit/metainterp/resume.py b/pypy/jit/metainterp/resume.py --- a/pypy/jit/metainterp/resume.py +++ b/pypy/jit/metainterp/resume.py @@ -1101,14 +1101,14 @@ virtualizable = self.decode_ref(numb.nums[index]) if self.resume_after_guard_not_forced == 1: # in the middle of handle_async_forcing() - assert vinfo.gettoken(virtualizable) - vinfo.settoken(virtualizable, vinfo.TOKEN_NONE) + assert vinfo.is_token_nonnull_gcref(virtualizable) + vinfo.reset_token_gcref(virtualizable) else: # just jumped away from assembler (case 4 in the comment in # virtualizable.py) into tracing (case 2); check that vable_token # is and stays 0. Note the call to reset_vable_token() in # warmstate.py. - assert not vinfo.gettoken(virtualizable) + assert not vinfo.is_token_nonnull_gcref(virtualizable) return vinfo.write_from_resume_data_partial(virtualizable, self, numb) def load_value_of_type(self, TYPE, tagged): diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -144,7 +144,7 @@ 'int_mul': 1, 'guard_true': 2, 'int_sub': 2}) - def test_loop_invariant_mul_ovf(self): + def test_loop_invariant_mul_ovf1(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) def f(x, y): res = 0 @@ -235,6 +235,65 @@ 'guard_true': 4, 'int_sub': 4, 'jump': 3, 'int_mul': 3, 'int_add': 4}) + def test_loop_invariant_mul_ovf2(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) + def f(x, y): + res = 0 + while y > 0: + myjitdriver.can_enter_jit(x=x, y=y, res=res) + myjitdriver.jit_merge_point(x=x, y=y, res=res) + b = y * 2 + try: + res += ovfcheck(x * x) + 
b + except OverflowError: + res += 1 + y -= 1 + return res + res = self.meta_interp(f, [sys.maxint, 7]) + assert res == f(sys.maxint, 7) + self.check_trace_count(1) + res = self.meta_interp(f, [6, 7]) + assert res == 308 + + def test_loop_invariant_mul_bridge_ovf1(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x1', 'x2']) + def f(x1, x2, y): + res = 0 + while y > 0: + myjitdriver.can_enter_jit(x1=x1, x2=x2, y=y, res=res) + myjitdriver.jit_merge_point(x1=x1, x2=x2, y=y, res=res) + try: + res += ovfcheck(x1 * x1) + except OverflowError: + res += 1 + if y<32 and (y>>2)&1==0: + x1, x2 = x2, x1 + y -= 1 + return res + res = self.meta_interp(f, [6, sys.maxint, 48]) + assert res == f(6, sys.maxint, 48) + + def test_loop_invariant_mul_bridge_ovf2(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x1', 'x2', 'n']) + def f(x1, x2, n, y): + res = 0 + while y > 0: + myjitdriver.can_enter_jit(x1=x1, x2=x2, y=y, res=res, n=n) + myjitdriver.jit_merge_point(x1=x1, x2=x2, y=y, res=res, n=n) + try: + res += ovfcheck(x1 * x1) + except OverflowError: + res += 1 + y -= 1 + if y&4 == 0: + x1, x2 = x2, x1 + return res + res = self.meta_interp(f, [sys.maxint, 6, 32, 48]) + assert res == f(sys.maxint, 6, 32, 48) + res = self.meta_interp(f, [6, sys.maxint, 32, 48]) + assert res == f(6, sys.maxint, 32, 48) + + def test_loop_invariant_intbox(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) class I: diff --git a/pypy/jit/metainterp/virtualizable.py b/pypy/jit/metainterp/virtualizable.py --- a/pypy/jit/metainterp/virtualizable.py +++ b/pypy/jit/metainterp/virtualizable.py @@ -262,15 +262,15 @@ force_now._dont_inline_ = True self.force_now = force_now - def gettoken(virtualizable): + def is_token_nonnull_gcref(virtualizable): virtualizable = cast_gcref_to_vtype(virtualizable) - return virtualizable.vable_token - self.gettoken = gettoken + return bool(virtualizable.vable_token) + self.is_token_nonnull_gcref = is_token_nonnull_gcref - def 
settoken(virtualizable, token): + def reset_token_gcref(virtualizable): virtualizable = cast_gcref_to_vtype(virtualizable) - virtualizable.vable_token = token - self.settoken = settoken + virtualizable.vable_token = VirtualizableInfo.TOKEN_NONE + self.reset_token_gcref = reset_token_gcref def _freeze_(self): return True diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py --- a/pypy/module/_io/interp_iobase.py +++ b/pypy/module/_io/interp_iobase.py @@ -330,8 +330,11 @@ try: space.call_method(w_iobase, 'flush') except OperationError, e: - # if it's an IOError, ignore it - if not e.match(space, space.w_IOError): + # if it's an IOError or ValueError, ignore it (ValueError is + # raised if by chance we are trying to flush a file which has + # already been closed) + if not (e.match(space, space.w_IOError) or + e.match(space, space.w_ValueError)): raise diff --git a/pypy/module/_io/test/test_fileio.py b/pypy/module/_io/test/test_fileio.py --- a/pypy/module/_io/test/test_fileio.py +++ b/pypy/module/_io/test/test_fileio.py @@ -178,7 +178,7 @@ space.finish() assert tmpfile.read() == '42' -def test_flush_at_exit_IOError(): +def test_flush_at_exit_IOError_and_ValueError(): from pypy import conftest from pypy.tool.option import make_config, make_objspace @@ -190,7 +190,12 @@ def flush(self): raise IOError + class MyStream2(io.IOBase): + def flush(self): + raise ValueError + s = MyStream() + s2 = MyStream2() import sys; sys._keepalivesomewhereobscure = s """) space.finish() # the IOError has been ignored diff --git a/pypy/module/test_lib_pypy/test_collections.py b/pypy/module/test_lib_pypy/test_collections.py --- a/pypy/module/test_lib_pypy/test_collections.py +++ b/pypy/module/test_lib_pypy/test_collections.py @@ -6,7 +6,7 @@ from pypy.conftest import gettestobjspace -class AppTestcStringIO: +class AppTestCollections: def test_copy(self): import _collections def f(): From noreply at buildbot.pypy.org Thu Mar 1 17:21:07 2012 From: noreply at 
buildbot.pypy.org (antocuni) Date: Thu, 1 Mar 2012 17:21:07 +0100 (CET) Subject: [pypy-commit] pypy py3k: kill the old __cmp__ for Cells, and implement rich comparison instead Message-ID: <20120301162107.7E1768204C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53064:d395491bc35d Date: 2012-03-01 17:06 +0100 http://bitbucket.org/pypy/pypy/changeset/d395491bc35d/ Log: kill the old __cmp__ for Cells, and implement rich comparison instead diff --git a/pypy/interpreter/nestedscope.py b/pypy/interpreter/nestedscope.py --- a/pypy/interpreter/nestedscope.py +++ b/pypy/interpreter/nestedscope.py @@ -30,21 +30,27 @@ if self.w_value is None: raise ValueError, "delete() on an empty cell" self.w_value = None - - def descr__cmp__(self, space, w_other): - # XXX fix me, cmp is gone + + def descr__lt__(self, space, w_other): other = space.interpclass_w(w_other) if not isinstance(other, Cell): return space.w_NotImplemented + if self.w_value is None: + # an empty cell is alway less than a non-empty one + if other.w_value is None: + return space.w_False + return space.w_True + elif other.w_value is None: + return space.w_False + return space.lt(self.w_value, other.w_value) - if self.w_value is None: - if other.w_value is None: - return space.newint(0) - return space.newint(-1) - elif other.w_value is None: - return space.newint(1) - - return space.cmp(self.w_value, other.w_value) + def descr__eq__(self, space, w_other): + other = space.interpclass_w(w_other) + if not isinstance(other, Cell): + return space.w_NotImplemented + if self.w_value is None or other.w_value is None: + return space.wrap(self.w_value == other.w_value) + return space.eq(self.w_value, other.w_value) def descr__reduce__(self, space): w_mod = space.getbuiltinmodule('_pickle_support') diff --git a/pypy/interpreter/test/test_nestedscope.py b/pypy/interpreter/test/test_nestedscope.py --- a/pypy/interpreter/test/test_nestedscope.py +++ b/pypy/interpreter/test/test_nestedscope.py @@ -82,14 
+82,25 @@ def test_compare_cells(self): def f(n): if n: - x = 42 + x = n def f(y): return x + y return f - g0 = f(0).__closure__[0] + empty_cell_1 = f(0).__closure__[0] + empty_cell_2 = f(0).__closure__[0] g1 = f(1).__closure__[0] - assert cmp(g0, g1) == -1 + g2 = f(2).__closure__[0] + assert g1 < g2 + assert g1 <= g2 + assert g2 > g1 + assert g2 >= g1 + assert not g1 == g2 + assert g1 != g2 + # + assert empty_cell_1 == empty_cell_2 + assert not empty_cell_1 != empty_cell_2 + assert empty_cell_1 < g1 def test_leaking_class_locals(self): def f(x): diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -919,6 +919,9 @@ GeneratorIterator.typedef.acceptable_as_base_class = False Cell.typedef = TypeDef("cell", + __total_ordering__ = 'auto', + __lt__ = interp2app(Cell.descr__lt__), + __eq__ = interp2app(Cell.descr__eq__), __hash__ = None, __reduce__ = interp2app(Cell.descr__reduce__), __setstate__ = interp2app(Cell.descr__setstate__), From noreply at buildbot.pypy.org Thu Mar 1 17:21:08 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 1 Mar 2012 17:21:08 +0100 (CET) Subject: [pypy-commit] pypy default: (antocuni, arigo) simplify the definition of le and gt, to avoid using space.or_ Message-ID: <20120301162108.B876A8204C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r53065:179988ad9d26 Date: 2012-03-01 17:20 +0100 http://bitbucket.org/pypy/pypy/changeset/179988ad9d26/ Log: (antocuni, arigo) simplify the definition of le and gt, to avoid using space.or_ diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -66,12 +66,11 @@ @interp2app def auto__le__(space, w_self, w_other): - return space.or_(space.lt(w_self, w_other), - space.eq(w_self, w_other)) + return space.not_(space.gt(w_self, w_other)) @interp2app def auto__gt__(space, w_self, w_other): - return 
space.not_(space.le(w_self, w_other)) + return space.lt(w_other, w_self) @interp2app def auto__ge__(space, w_self, w_other): From noreply at buildbot.pypy.org Thu Mar 1 17:21:09 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 1 Mar 2012 17:21:09 +0100 (CET) Subject: [pypy-commit] pypy py3k: hg merge default Message-ID: <20120301162109.EF52B8204C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53066:c695ec2a520d Date: 2012-03-01 17:20 +0100 http://bitbucket.org/pypy/pypy/changeset/c695ec2a520d/ Log: hg merge default diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -66,12 +66,11 @@ @interp2app def auto__le__(space, w_self, w_other): - return space.or_(space.lt(w_self, w_other), - space.eq(w_self, w_other)) + return space.not_(space.gt(w_self, w_other)) @interp2app def auto__gt__(space, w_self, w_other): - return space.not_(space.le(w_self, w_other)) + return space.lt(w_other, w_self) @interp2app def auto__ge__(space, w_self, w_other): From noreply at buildbot.pypy.org Thu Mar 1 17:26:52 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 1 Mar 2012 17:26:52 +0100 (CET) Subject: [pypy-commit] pypy default: (arigo) use directly space.lt instead of going through space.gt Message-ID: <20120301162652.7ED448204C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r53067:d0b0ec960281 Date: 2012-03-01 17:26 +0100 http://bitbucket.org/pypy/pypy/changeset/d0b0ec960281/ Log: (arigo) use directly space.lt instead of going through space.gt diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -66,7 +66,7 @@ @interp2app def auto__le__(space, w_self, w_other): - return space.not_(space.gt(w_self, w_other)) + return space.not_(space.lt(w_other, w_self)) @interp2app def auto__gt__(space, w_self, w_other): From noreply at 
buildbot.pypy.org Thu Mar 1 17:49:58 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Mar 2012 17:49:58 +0100 (CET) Subject: [pypy-commit] pypy continulet-jit-2: Free. Message-ID: <20120301164958.0E7818204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-jit-2 Changeset: r53068:63da0976540f Date: 2012-03-01 17:49 +0100 http://bitbucket.org/pypy/pypy/changeset/63da0976540f/ Log: Free. diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -801,14 +801,20 @@ gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: self._call_footer_shadowstack(gcrootmap) - + # + # XXX temporary, possibly move somewhere else + self.mc.MOV_rr(ebx.value, eax.value) + self.mc.LEA_rb(edi.value, -WORD * (FRAME_FIXED_SIZE-1)) + if IS_X86_32: + self.mc.MOV_sr(0, edi.value) + self.mc.CALL(imm(self.offstack_free_addr)) + self.mc.MOV_rr(eax.value, ebx.value) + # self.mc.ADD_ri(esp.value, WORD * OFFSTACK_REAL_FRAME + extra_esp) for i in range(len(self.cpu.CALLEE_SAVE_REGISTERS)-1, -1, -1): loc = self.cpu.CALLEE_SAVE_REGISTERS[i] self.mc.MOV_rb(loc.value, WORD*(-1-i)) # (ebp-4-4*i) -> reg self.mc.MOV_rb(ebp.value, 0) # (ebp) -> ebp - # XXX free! 
- self.mc.RET() def _call_header_shadowstack(self, gcrootmap): From noreply at buildbot.pypy.org Thu Mar 1 18:14:28 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 1 Mar 2012 18:14:28 +0100 (CET) Subject: [pypy-commit] pypy default: in cpython array.array define tp_richcompare, not tp_compare; do the equivalent for pypy Message-ID: <20120301171428.B4B248204C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r53069:719543208434 Date: 2012-03-01 17:52 +0100 http://bitbucket.org/pypy/pypy/changeset/719543208434/ Log: in cpython array.array define tp_richcompare, not tp_compare; do the equivalent for pypy diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -583,14 +583,32 @@ raise OperationError(space.w_ValueError, space.wrap(msg)) # Compare methods - def cmp__Array_ANY(space, self, other): + def _cmp_impl(space, self, other, space_fn): if isinstance(other, W_ArrayBase): w_lst1 = array_tolist__Array(space, self) w_lst2 = space.call_method(other, 'tolist') - return space.cmp(w_lst1, w_lst2) + return space_fn(w_lst1, w_lst2) else: return space.w_NotImplemented + def eq__Array_ANY(space, self, other): + return _cmp_impl(space, self, other, space.eq) + + def ne__Array_ANY(space, self, other): + return _cmp_impl(space, self, other, space.ne) + + def lt__Array_ANY(space, self, other): + return _cmp_impl(space, self, other, space.lt) + + def le__Array_ANY(space, self, other): + return _cmp_impl(space, self, other, space.le) + + def gt__Array_ANY(space, self, other): + return _cmp_impl(space, self, other, space.gt) + + def ge__Array_ANY(space, self, other): + return _cmp_impl(space, self, other, space.ge) + # Misc methods def buffer__Array(space, self): diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ 
-536,11 +536,11 @@ assert (a >= c) is False assert (c >= a) is True - assert cmp(a, a) == 0 - assert cmp(a, b) == 0 - assert cmp(a, c) < 0 - assert cmp(b, a) == 0 - assert cmp(c, a) > 0 + assert a == a + assert a == b + assert a < c + assert b == a + assert c > a def test_reduce(self): import pickle From noreply at buildbot.pypy.org Thu Mar 1 18:14:29 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 1 Mar 2012 18:14:29 +0100 (CET) Subject: [pypy-commit] pypy py3k: hg merge default Message-ID: <20120301171429.F2B0F8204C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53070:220421a31811 Date: 2012-03-01 17:52 +0100 http://bitbucket.org/pypy/pypy/changeset/220421a31811/ Log: hg merge default diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -66,7 +66,7 @@ @interp2app def auto__le__(space, w_self, w_other): - return space.not_(space.gt(w_self, w_other)) + return space.not_(space.lt(w_other, w_self)) @interp2app def auto__gt__(space, w_self, w_other): diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -572,14 +572,32 @@ raise OperationError(space.w_ValueError, space.wrap(msg)) # Compare methods - def cmp__Array_ANY(space, self, other): + def _cmp_impl(space, self, other, space_fn): if isinstance(other, W_ArrayBase): w_lst1 = array_tolist__Array(space, self) w_lst2 = space.call_method(other, 'tolist') - return space.cmp(w_lst1, w_lst2) + return space_fn(w_lst1, w_lst2) else: return space.w_NotImplemented + def eq__Array_ANY(space, self, other): + return _cmp_impl(space, self, other, space.eq) + + def ne__Array_ANY(space, self, other): + return _cmp_impl(space, self, other, space.ne) + + def lt__Array_ANY(space, self, other): + return _cmp_impl(space, self, other, space.lt) + + def le__Array_ANY(space, self, other): + return 
_cmp_impl(space, self, other, space.le) + + def gt__Array_ANY(space, self, other): + return _cmp_impl(space, self, other, space.gt) + + def ge__Array_ANY(space, self, other): + return _cmp_impl(space, self, other, space.ge) + # Misc methods def buffer__Array(space, self): diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -504,11 +504,11 @@ assert (a >= c) is False assert (c >= a) is True - assert cmp(a, a) == 0 - assert cmp(a, b) == 0 - assert cmp(a, c) < 0 - assert cmp(b, a) == 0 - assert cmp(c, a) > 0 + assert a == a + assert a == b + assert a < c + assert b == a + assert c > a def test_reduce(self): import pickle From noreply at buildbot.pypy.org Thu Mar 1 18:14:31 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 1 Mar 2012 18:14:31 +0100 (CET) Subject: [pypy-commit] pypy py3k: __cmp__ is gone, use __eq__ instead for this test Message-ID: <20120301171431.34E9E8204C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53071:411bb6d819b1 Date: 2012-03-01 18:14 +0100 http://bitbucket.org/pypy/pypy/changeset/411bb6d819b1/ Log: __cmp__ is gone, use __eq__ instead for this test diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -450,8 +450,8 @@ def test_compare(self): class comparable(object): - def __cmp__(self, other): - return 0 + def __eq__(self, other): + return True class incomparable(object): pass From noreply at buildbot.pypy.org Thu Mar 1 18:20:21 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 1 Mar 2012 18:20:21 +0100 (CET) Subject: [pypy-commit] pypy default: (arigo) these asserts are pointless, they are done just above Message-ID: <20120301172021.ECF278204C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r53072:5cf1b98972c3 Date: 
2012-03-01 18:20 +0100 http://bitbucket.org/pypy/pypy/changeset/5cf1b98972c3/ Log: (arigo) these asserts are pointless, they are done just above diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -536,12 +536,6 @@ assert (a >= c) is False assert (c >= a) is True - assert a == a - assert a == b - assert a < c - assert b == a - assert c > a - def test_reduce(self): import pickle a = self.array('i', [1, 2, 3]) From noreply at buildbot.pypy.org Thu Mar 1 18:25:11 2012 From: noreply at buildbot.pypy.org (hager) Date: Thu, 1 Mar 2012 18:25:11 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: SCRATCH reg is used here, too Message-ID: <20120301172511.AEEE88204C@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r53073:23d74e8fff8b Date: 2012-03-01 06:59 -0800 http://bitbucket.org/pypy/pypy/changeset/23d74e8fff8b/ Log: SCRATCH reg is used here, too diff --git a/pypy/jit/backend/ppc/ppc_assembler.py b/pypy/jit/backend/ppc/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppc_assembler.py @@ -1036,8 +1036,8 @@ with scratch_reg(self.mc): self.mc.load_imm(r.SCRATCH, nursery_top_adr) self.mc.loadx(r.SCRATCH.value, 0, r.SCRATCH.value) + self.mc.cmp_op(0, r.r4.value, r.SCRATCH.value, signed=False) - self.mc.cmp_op(0, r.r4.value, r.SCRATCH.value, signed=False) fast_jmp_pos = self.mc.currpos() self.mc.nop() From noreply at buildbot.pypy.org Thu Mar 1 18:25:12 2012 From: noreply at buildbot.pypy.org (hager) Date: Thu, 1 Mar 2012 18:25:12 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: kill unused imports Message-ID: <20120301172512.E64768204C@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r53074:985c28dad92b Date: 2012-03-01 07:30 -0800 http://bitbucket.org/pypy/pypy/changeset/985c28dad92b/ Log: kill unused imports diff --git 
a/pypy/jit/backend/ppc/ppc_assembler.py b/pypy/jit/backend/ppc/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppc_assembler.py @@ -1,12 +1,6 @@ -import os -import struct -from pypy.jit.backend.ppc.ppc_form import PPCForm as Form -from pypy.jit.backend.ppc.ppc_field import ppc_fields -from pypy.jit.backend.ppc.regalloc import (TempInt, PPCFrameManager, +from pypy.jit.backend.ppc.regalloc import (PPCFrameManager, Regalloc, PPCRegisterManager) -from pypy.jit.backend.ppc.assembler import Assembler from pypy.jit.backend.ppc.opassembler import OpAssembler -from pypy.jit.backend.ppc.symbol_lookup import lookup from pypy.jit.backend.ppc.codebuilder import (PPCBuilder, OverwritingBuilder, scratch_reg) from pypy.jit.backend.ppc.arch import (IS_PPC_32, IS_PPC_64, WORD, @@ -16,26 +10,17 @@ FLOAT_INT_CONVERSION, FORCE_INDEX, SIZE_LOAD_IMM_PATCH_SP) from pypy.jit.backend.ppc.helper.assembler import (gen_emit_cmp_op, - encode32, encode64, - decode32, decode64, - count_reg_args, - Saved_Volatiles) + decode64, Saved_Volatiles) from pypy.jit.backend.ppc.helper.regalloc import _check_imm_arg import pypy.jit.backend.ppc.register as r import pypy.jit.backend.ppc.condition as c -from pypy.jit.metainterp.history import (Const, ConstPtr, JitCellToken, - TargetToken, AbstractFailDescr) -from pypy.jit.backend.llsupport.asmmemmgr import (BlockBuilderMixin, - AsmMemoryManager, - MachineDataBlockWrapper) -from pypy.jit.backend.llsupport.regalloc import (RegisterManager, - compute_vars_longevity) -from pypy.jit.backend.llsupport import symbolic +from pypy.jit.metainterp.history import AbstractFailDescr +from pypy.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper +from pypy.jit.backend.llsupport.regalloc import compute_vars_longevity from pypy.jit.backend.model import CompiledLoopToken -from pypy.rpython.lltypesystem import lltype, rffi, rstr, llmemory +from pypy.rpython.lltypesystem import lltype, rffi, llmemory from 
pypy.jit.metainterp.resoperation import rop -from pypy.jit.metainterp.history import (BoxInt, ConstInt, ConstPtr, - ConstFloat, Box, INT, REF, FLOAT) +from pypy.jit.metainterp.history import (INT, REF, FLOAT) from pypy.jit.backend.x86.support import values_array from pypy.rlib.debug import (debug_print, debug_start, debug_stop, have_debug_prints) From noreply at buildbot.pypy.org Thu Mar 1 18:25:14 2012 From: noreply at buildbot.pypy.org (hager) Date: Thu, 1 Mar 2012 18:25:14 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: code cleanup Message-ID: <20120301172514.22AB58204C@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r53075:6bc2babe046f Date: 2012-03-01 07:40 -0800 http://bitbucket.org/pypy/pypy/changeset/6bc2babe046f/ Log: code cleanup diff --git a/pypy/jit/backend/ppc/opassembler.py b/pypy/jit/backend/ppc/opassembler.py --- a/pypy/jit/backend/ppc/opassembler.py +++ b/pypy/jit/backend/ppc/opassembler.py @@ -2,9 +2,8 @@ gen_emit_unary_cmp_op) import pypy.jit.backend.ppc.condition as c import pypy.jit.backend.ppc.register as r -from pypy.jit.backend.ppc.arch import (IS_PPC_32, WORD, - GPR_SAVE_AREA, BACKCHAIN_SIZE, - MAX_REG_PARAMS) +from pypy.jit.backend.ppc.arch import (IS_PPC_32, WORD, BACKCHAIN_SIZE, + MAX_REG_PARAMS) from pypy.jit.metainterp.history import (JitCellToken, TargetToken, Box, AbstractFailDescr, FLOAT, INT, REF) @@ -465,7 +464,7 @@ # restore the arguments stored on the stack if result is not None: - resloc = regalloc.after_call(result) + regalloc.after_call(result) class FieldOpAssembler(object): @@ -763,7 +762,6 @@ regalloc.possibly_free_var(srcaddr_box) def _gen_address_inside_string(self, baseloc, ofsloc, resloc, is_unicode): - cpu = self.cpu if is_unicode: ofs_items, _, _ = symbolic.get_array_token(rstr.UNICODE, self.cpu.translate_support_code) @@ -1107,7 +1105,6 @@ from pypy.jit.backend.llsupport.descr import FieldDescr fielddescr = jd.vable_token_descr assert isinstance(fielddescr, FieldDescr) - ofs 
= fielddescr.offset resloc = regalloc.force_allocate_reg(resbox) with scratch_reg(self.mc): self.mov_loc_loc(arglocs[1], r.SCRATCH) From noreply at buildbot.pypy.org Thu Mar 1 18:25:15 2012 From: noreply at buildbot.pypy.org (hager) Date: Thu, 1 Mar 2012 18:25:15 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: more code cleanups Message-ID: <20120301172515.55DE48204C@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r53076:ce1f0e276c60 Date: 2012-03-01 09:24 -0800 http://bitbucket.org/pypy/pypy/changeset/ce1f0e276c60/ Log: more code cleanups diff --git a/pypy/jit/backend/ppc/codebuilder.py b/pypy/jit/backend/ppc/codebuilder.py --- a/pypy/jit/backend/ppc/codebuilder.py +++ b/pypy/jit/backend/ppc/codebuilder.py @@ -1,27 +1,13 @@ import os -import struct from pypy.jit.backend.ppc.ppc_form import PPCForm as Form from pypy.jit.backend.ppc.ppc_field import ppc_fields -from pypy.jit.backend.ppc.regalloc import (TempInt, PPCFrameManager, - Regalloc) from pypy.jit.backend.ppc.assembler import Assembler -from pypy.jit.backend.ppc.symbol_lookup import lookup -from pypy.jit.backend.ppc.arch import (IS_PPC_32, WORD, NONVOLATILES, - GPR_SAVE_AREA, IS_PPC_64) -from pypy.jit.backend.ppc.helper.assembler import gen_emit_cmp_op +from pypy.jit.backend.ppc.arch import (IS_PPC_32, WORD, IS_PPC_64) import pypy.jit.backend.ppc.register as r -import pypy.jit.backend.ppc.condition as c -from pypy.jit.metainterp.history import (Const, ConstPtr, JitCellToken, - TargetToken, AbstractFailDescr) -from pypy.jit.backend.llsupport.asmmemmgr import (BlockBuilderMixin, AsmMemoryManager, MachineDataBlockWrapper) -from pypy.jit.backend.llsupport.regalloc import (RegisterManager, - compute_vars_longevity) -from pypy.jit.backend.llsupport import symbolic -from pypy.jit.backend.model import CompiledLoopToken -from pypy.rpython.lltypesystem import lltype, rffi, rstr, llmemory +from pypy.jit.backend.llsupport.asmmemmgr import BlockBuilderMixin +from 
pypy.jit.backend.llsupport.regalloc import RegisterManager +from pypy.rpython.lltypesystem import lltype, rffi from pypy.jit.metainterp.resoperation import rop -from pypy.jit.metainterp.history import (BoxInt, ConstInt, ConstPtr, - ConstFloat, Box, INT, REF, FLOAT) from pypy.tool.udir import udir from pypy.rlib.objectmodel import we_are_translated @@ -1181,17 +1167,6 @@ assert self.r0_in_use self.r0_in_use = False - def _dump_trace(self, addr, name, formatter=-1): - if not we_are_translated(): - if formatter != -1: - name = name % formatter - dir = udir.ensure('asm', dir=True) - f = dir.join(name).open('wb') - data = rffi.cast(rffi.CCHARP, addr) - for i in range(self.currpos()): - f.write(data[i]) - f.close() - class scratch_reg(object): def __init__(self, mc): self.mc = mc diff --git a/pypy/jit/backend/ppc/helper/assembler.py b/pypy/jit/backend/ppc/helper/assembler.py --- a/pypy/jit/backend/ppc/helper/assembler.py +++ b/pypy/jit/backend/ppc/helper/assembler.py @@ -1,9 +1,8 @@ import pypy.jit.backend.ppc.condition as c -from pypy.rlib.rarithmetic import r_uint, r_longlong, intmask +from pypy.rlib.rarithmetic import intmask from pypy.jit.backend.ppc.arch import (MAX_REG_PARAMS, IS_PPC_32, WORD, BACKCHAIN_SIZE) from pypy.jit.metainterp.history import FLOAT -from pypy.rlib.unroll import unrolling_iterable import pypy.jit.backend.ppc.register as r from pypy.rpython.lltypesystem import rffi, lltype diff --git a/pypy/jit/backend/ppc/helper/regalloc.py b/pypy/jit/backend/ppc/helper/regalloc.py --- a/pypy/jit/backend/ppc/helper/regalloc.py +++ b/pypy/jit/backend/ppc/helper/regalloc.py @@ -1,5 +1,4 @@ from pypy.jit.metainterp.history import ConstInt -from pypy.rlib.objectmodel import we_are_translated from pypy.jit.metainterp.history import Box IMM_SIZE = 2 ** 15 - 1 @@ -63,8 +62,6 @@ def f(self, op): boxes = op.getarglist() b0, b1 = boxes - imm_b0 = check_imm_box(b0) - imm_b1 = check_imm_box(b1) l0 = self._ensure_value_is_boxed(b0, boxes) l1 = 
self._ensure_value_is_boxed(b1, boxes) locs = [l0, l1] diff --git a/pypy/jit/backend/ppc/locations.py b/pypy/jit/backend/ppc/locations.py --- a/pypy/jit/backend/ppc/locations.py +++ b/pypy/jit/backend/ppc/locations.py @@ -1,4 +1,4 @@ -from pypy.jit.metainterp.history import INT, FLOAT, REF +from pypy.jit.metainterp.history import INT, FLOAT import sys # XXX import from arch.py, currently we have a circular import diff --git a/pypy/jit/backend/ppc/regalloc.py b/pypy/jit/backend/ppc/regalloc.py --- a/pypy/jit/backend/ppc/regalloc.py +++ b/pypy/jit/backend/ppc/regalloc.py @@ -1,17 +1,15 @@ from pypy.jit.backend.llsupport.regalloc import (RegisterManager, FrameManager, TempBox, compute_vars_longevity) from pypy.jit.backend.ppc.arch import (WORD, MY_COPY_OF_REGS) -from pypy.jit.backend.ppc.jump import (remap_frame_layout_mixed, - remap_frame_layout) +from pypy.jit.backend.ppc.jump import remap_frame_layout from pypy.jit.backend.ppc.locations import imm from pypy.jit.backend.ppc.helper.regalloc import (_check_imm_arg, - check_imm_box, - prepare_cmp_op, - prepare_unary_int_op, - prepare_binary_int_op, - prepare_binary_int_op_with_imm, - prepare_unary_cmp) -from pypy.jit.metainterp.history import (Const, ConstInt, ConstFloat, ConstPtr, + prepare_cmp_op, + prepare_unary_int_op, + prepare_binary_int_op, + prepare_binary_int_op_with_imm, + prepare_unary_cmp) +from pypy.jit.metainterp.history import (Const, ConstInt, ConstPtr, Box, BoxPtr, INT, REF, FLOAT) from pypy.jit.metainterp.history import JitCellToken, TargetToken @@ -20,9 +18,7 @@ from pypy.rpython.lltypesystem import rffi, lltype, rstr from pypy.jit.backend.llsupport import symbolic from pypy.jit.backend.llsupport.descr import ArrayDescr -from pypy.jit.codewriter.effectinfo import EffectInfo import pypy.jit.backend.ppc.register as r -from pypy.jit.codewriter import heaptracker from pypy.jit.backend.llsupport.descr import unpack_arraydescr from pypy.jit.backend.llsupport.descr import unpack_fielddescr from 
pypy.jit.backend.llsupport.descr import unpack_interiorfielddescr diff --git a/pypy/jit/backend/ppc/runner.py b/pypy/jit/backend/ppc/runner.py --- a/pypy/jit/backend/ppc/runner.py +++ b/pypy/jit/backend/ppc/runner.py @@ -1,22 +1,12 @@ import py from pypy.rpython.lltypesystem import lltype, llmemory, rffi -from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.llinterp import LLInterpreter -from pypy.rlib.objectmodel import we_are_translated -from pypy.jit.metainterp import history, compile -from pypy.jit.metainterp.history import BoxPtr -from pypy.jit.backend.x86.assembler import Assembler386 from pypy.jit.backend.ppc.arch import FORCE_INDEX_OFS -from pypy.jit.backend.x86.profagent import ProfileAgent from pypy.jit.backend.llsupport.llmodel import AbstractLLCPU -from pypy.jit.backend.x86 import regloc -from pypy.jit.backend.x86.support import values_array from pypy.jit.backend.ppc.ppc_assembler import AssemblerPPC -from pypy.jit.backend.ppc.arch import NONVOLATILES, GPR_SAVE_AREA, WORD -from pypy.jit.backend.ppc.regalloc import PPCRegisterManager, PPCFrameManager +from pypy.jit.backend.ppc.arch import WORD from pypy.jit.backend.ppc.codebuilder import PPCBuilder from pypy.jit.backend.ppc import register as r -import sys from pypy.tool.ansi_print import ansi_log log = py.log.Producer('jitbackend') From noreply at buildbot.pypy.org Thu Mar 1 20:32:48 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 1 Mar 2012 20:32:48 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: draft for a new blog post Message-ID: <20120301193248.998A08204C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: extradoc Changeset: r4111:53e7a4950bba Date: 2012-03-01 20:32 +0100 http://bitbucket.org/pypy/extradoc/changeset/53e7a4950bba/ Log: draft for a new blog post diff --git a/blog/draft/py3k-status-update-1.rst b/blog/draft/py3k-status-update-1.rst --- a/blog/draft/py3k-status-update-1.rst +++ b/blog/draft/py3k-status-update-1.rst @@ -1,4 +1,5 @@ 
-Hello, +Py3k status update +------------------ Thank to all the people who donated_ to the `py3k proposal`_, we managed to collect enough money to start to work on the first step. This is a quick @@ -45,6 +46,7 @@ .. _donated: http://morepypy.blogspot.com/2012/01/py3k-and-numpy-first-stage-thanks-to.html .. _`py3k proposal`: http://pypy.org/py3donate.html +.. _`py3k branch`: https://bitbucket.org/pypy/pypy/src/py3k .. _`Leysin sprint`: http://morepypy.blogspot.com/2011/12/leysin-winter-sprint.html .. _`py3k tests nightly`: http://buildbot.pypy.org/summary?branch=py3k .. _`lexical exception handlers`: http://bugs.python.org/issue3021 diff --git a/blog/draft/py3k-status-update-2.rst b/blog/draft/py3k-status-update-2.rst new file mode 100644 --- /dev/null +++ b/blog/draft/py3k-status-update-2.rst @@ -0,0 +1,57 @@ +Py3k status update #2 +--------------------- + +This is the second status update about my work on the `py3k branch`_, which I +can do thanks to all the people who donated_ to the `py3k proposal`_. + +Since my previous `status update`_, things improved a lot: first of all, I +fixed the syntax of many more tests, which were failing on the branch because +they used constructs which are no longer valid in Python 3, such as ``u''`` +strings, the ``print`` statement or the old ``except Exception, e`` syntax. I +have to say that this work is tedious and not very rewarding, but it has to be +done anyway, so that the real failures can stand up. + +Then, I spent most of the rest of the time by killing features which are +present in Python 2 and are gone in Python 3. + +Some of them were easy and mechanical: for example, I removed all the function +attributes such as ``func_code`` and ``func_closure``, which have been renamed +to ``__code__`` and ``__closure__``, and then I had to find and fix all the +places which still expected the old ones. 
+ +Some were trickier: I removed support for the ``cmp`` function and the +``__cmp__`` special method, but this also meant that I had to fix a few types +which relied on it to be comparable (for example, did you know that the cells +contained in ``__closure__`` are comparable?). At the same time, I also +removed the old behavior which in Python 2 allows us to compare arbitrary +objects with ``<``, ``>`` & co.: in Python 3 the only comparisons allowed +between incompatible types are ``==`` and ``!=``. + +Speaking of old special methods, ``__hex__`` and ``__oct__`` are gone as well +(and I didn't even know about their existence before removing them :-)) + +But the most important breakthrough was the removal of the ``_file`` module, +containing the implementation of the ``file`` type in Python 2, which is now +gone since in Python 3 files are handled by the ``_io`` module. Killing the +module was not straightforward, because some of the importing logic was tightly +tied to the internal implementation of files, so it needed some refactoring. +Finally, I had to fix the ``marshal`` module to correctly detect text files +vs. byte files. + +Among these things, I fixed tons of smaller issues here and there. As a +result, there are many fewer failing tests than a few weeks ago. Obviously the +number itself does not mean much, because sometimes fixing a single test takes +hours, and some other times by changing one line you fix tens of tests. But at +the end, seeing it dropping from 999 to 650_ is always nice :-). + +The road for having a pypy3k is still long, but everything is going fine so +far. Stay tuned for more updates! + +cheers, +Antonio + +.. _donated: http://morepypy.blogspot.com/2012/01/py3k-and-numpy-first-stage-thanks-to.html +.. _`py3k proposal`: http://pypy.org/py3donate.html +.. _`py3k branch`: https://bitbucket.org/pypy/pypy/src/py3k +.. _`status update`: http://morepypy.blogspot.com/2012/02/py3k-status-update.html +.. 
_650: http://buildbot.pypy.org/summary?category=linux32&branch=py3k&recentrev=53071:411bb6d819b1 From noreply at buildbot.pypy.org Thu Mar 1 20:40:46 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Mar 2012 20:40:46 +0100 (CET) Subject: [pypy-commit] pypy default: A test specifically for the presence of the LEA at the start Message-ID: <20120301194046.770F98204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r53077:da86f3876dcb Date: 2012-03-01 20:40 +0100 http://bitbucket.org/pypy/pypy/changeset/da86f3876dcb/ Log: A test specifically for the presence of the LEA at the start of the bridge. diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -266,6 +266,38 @@ res = self.cpu.get_latest_value_int(0) assert res == 20 + def test_compile_big_bridge_out_of_small_loop(self): + i0 = BoxInt() + faildescr1 = BasicFailDescr(1) + looptoken = JitCellToken() + operations = [ + ResOperation(rop.GUARD_FALSE, [i0], None, descr=faildescr1), + ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(2)), + ] + inputargs = [i0] + operations[0].setfailargs([i0]) + self.cpu.compile_loop(inputargs, operations, looptoken) + + i1list = [BoxInt() for i in range(1000)] + bridge = [] + iprev = i0 + for i1 in i1list: + bridge.append(ResOperation(rop.INT_ADD, [iprev, ConstInt(1)], i1)) + iprev = i1 + bridge.append(ResOperation(rop.GUARD_FALSE, [i0], None, + descr=BasicFailDescr(3))) + bridge.append(ResOperation(rop.FINISH, [], None, + descr=BasicFailDescr(4))) + bridge[-2].setfailargs(i1list) + + self.cpu.compile_bridge(faildescr1, [i0], bridge, looptoken) + + fail = self.cpu.execute_token(looptoken, 1) + assert fail.identifier == 3 + for i in range(1000): + res = self.cpu.get_latest_value_int(i) + assert res == 2 + i + def test_get_latest_value_count(self): i0 = BoxInt() i1 = BoxInt() From noreply at buildbot.pypy.org Thu Mar 1 20:41:39 
2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Mar 2012 20:41:39 +0100 (CET) Subject: [pypy-commit] pypy continulet-jit-2: In-progress. Message-ID: <20120301194139.EB22A8204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-jit-2 Changeset: r53078:430939a003f9 Date: 2012-03-01 20:41 +0100 http://bitbucket.org/pypy/pypy/changeset/430939a003f9/ Log: In-progress. diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -442,7 +442,7 @@ regalloc = RegAlloc(self, self.cpu.translate_support_code) # - self._call_header_with_stack_check() + frame_size_pos = self._call_header_with_stack_check() clt._debug_nbargs = len(inputargs) operations = regalloc.prepare_loop(inputargs, operations, looptoken, clt.allgcrefs) @@ -467,8 +467,8 @@ rawstart + size_excluding_failure_stuff, rawstart)) debug_stop("jit-backend-addr") - #self._patch_stackadjust(rawstart + stackadjustpos, - # frame_depth )#+ param_depth) + self._patch_stackadjust(rawstart + frame_size_pos, + frame_depth )#+ param_depth) self.patch_pending_failure_recoveries(rawstart) # ops_offset = self.mc.ops_offset @@ -720,11 +720,19 @@ return frame_depth#, param_depth def _patchable_stackadjust(self): + xxx # stack adjustment LEA self.mc.LEA32_rb(esp.value, 0) return self.mc.get_relative_pos() - 4 - def _patch_stackadjust(self, adr_lea, allocated_depth): + def _patch_stackadjust(self, adr_to_fix, allocated_depth): + # patch the requested size in the call to malloc/realloc + mc = codebuf.MachineCodeBlockWrapper() + words = FRAME_FIXED_SIZE + 1 + allocated_depth + mc.writeimm32(words * WORD) + mc.copy_to_raw_memory(adr_to_fix) + return + # patch stack adjustment LEA mc = codebuf.MachineCodeBlockWrapper() # Compute the correct offset for the instruction LEA ESP, [EBP-4*words] @@ -744,7 +752,7 @@ # Also, make sure this is consistent with FRAME_FIXED_SIZE. 
if IS_X86_32: self.mc.SUB_ri(esp.value, WORD * (OFFSTACK_REAL_FRAME-1)) - self.mc.PUSH_i32(4096) # XXX XXX! + self.mc.PUSH_i32(0x77777777) # temporary elif IS_X86_64: # XXX very heavily save and restore all possible argument registers save_regs = [r9, r8, ecx, edx, esi, edi] @@ -757,7 +765,8 @@ for i in range(len(save_xmm_regs)): self.mc.MOVSD_sx(WORD * (base + i), save_xmm_regs[i].value) # - self.mc.MOV_ri(edi.value, 4096) # XXX XXX! + self.mc.MOV_riu32(edi.value, 0x77777777) # temporary + frame_size_pos = self.mc.get_relative_pos() - 4 # self.mc.CALL(imm(self.offstack_malloc_addr)) # @@ -779,6 +788,8 @@ if gcrootmap and gcrootmap.is_shadow_stack: self._call_header_shadowstack(gcrootmap) + return frame_size_pos + def _call_header_with_stack_check(self): if self.stack_check_slowpath == 0: pass # no stack check (e.g. not translated) @@ -795,7 +806,7 @@ assert 0 < offset <= 127 self.mc.overwrite(jb_location-1, chr(offset)) # - self._call_header() + return self._call_header() def _call_footer(self, extra_esp=0): gcrootmap = self.cpu.gc_ll_descr.gcrootmap @@ -2046,9 +2057,7 @@ # now we return from the complete frame, which starts from # _call_header_with_stack_check(). We have to compute how many - # extra PUSHes we just did. - # throws away most of the frame, including all the PUSHes that we - # did just above. + # extra PUSHes we just did, to throw them away in one go. 
self._call_footer(extra_esp) rawstart = mc.materialize(self.cpu.asmmemmgr, []) From noreply at buildbot.pypy.org Thu Mar 1 20:41:41 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Mar 2012 20:41:41 +0100 (CET) Subject: [pypy-commit] pypy continulet-jit-2: hg merge default Message-ID: <20120301194141.38B478204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-jit-2 Changeset: r53079:c7a26cb92ae5 Date: 2012-03-01 20:41 +0100 http://bitbucket.org/pypy/pypy/changeset/c7a26cb92ae5/ Log: hg merge default diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py --- a/pypy/interpreter/test/test_typedef.py +++ b/pypy/interpreter/test/test_typedef.py @@ -304,6 +304,42 @@ assert_method(w_o1, "c", True) assert_method(w_o2, "c", False) + def test_total_ordering(self): + class W_SomeType(Wrappable): + def __init__(self, space, x): + self.space = space + self.x = x + + def descr__lt(self, w_other): + assert isinstance(w_other, W_SomeType) + return self.space.wrap(self.x < w_other.x) + + def descr__eq(self, w_other): + assert isinstance(w_other, W_SomeType) + return self.space.wrap(self.x == w_other.x) + + W_SomeType.typedef = typedef.TypeDef( + 'some_type', + __total_ordering__ = 'auto', + __lt__ = interp2app(W_SomeType.descr__lt), + __eq__ = interp2app(W_SomeType.descr__eq), + ) + space = self.space + w_b = space.wrap(W_SomeType(space, 2)) + w_c = space.wrap(W_SomeType(space, 2)) + w_a = space.wrap(W_SomeType(space, 1)) + # explicitly defined + assert space.is_true(space.lt(w_a, w_b)) + assert not space.is_true(space.eq(w_a, w_b)) + assert space.is_true(space.eq(w_b, w_c)) + # automatically defined + assert space.is_true(space.le(w_a, w_b)) + assert space.is_true(space.le(w_b, w_c)) + assert space.is_true(space.gt(w_b, w_a)) + assert space.is_true(space.ge(w_b, w_a)) + assert space.is_true(space.ge(w_b, w_c)) + assert space.is_true(space.ne(w_a, w_b)) + assert not space.is_true(space.ne(w_b, w_c)) class 
AppTestTypeDef: diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -12,7 +12,7 @@ from pypy.rlib.jit import promote class TypeDef: - def __init__(self, __name, __base=None, **rawdict): + def __init__(self, __name, __base=None, __total_ordering__=None, **rawdict): "NOT_RPYTHON: initialization-time only" self.name = __name if __base is None: @@ -34,6 +34,9 @@ # xxx used by faking self.fakedcpytype = None self.add_entries(**rawdict) + assert __total_ordering__ in (None, 'auto'), "Unknown value for __total_ordering" + if __total_ordering__ == 'auto': + self.auto_total_ordering() def add_entries(self, **rawdict): # xxx fix the names of the methods to match what app-level expects @@ -41,7 +44,15 @@ if isinstance(value, (interp2app, GetSetProperty)): value.name = key self.rawdict.update(rawdict) - + + def auto_total_ordering(self): + assert '__lt__' in self.rawdict, "__total_ordering='auto' requires __lt__" + assert '__eq__' in self.rawdict, "__total_ordering='auto' requires __eq__" + self.add_entries(__le__ = auto__le__, + __gt__ = auto__gt__, + __ge__ = auto__ge__, + __ne__ = auto__ne__) + def _freeze_(self): # hint for the annotator: track individual constant instances of TypeDef return True @@ -50,6 +61,26 @@ return "<%s name=%r>" % (self.__class__.__name__, self.name) +# generic special cmp methods defined on top of __lt__ and __eq__, used by +# automatic total ordering + + at interp2app +def auto__le__(space, w_self, w_other): + return space.not_(space.lt(w_other, w_self)) + + at interp2app +def auto__gt__(space, w_self, w_other): + return space.lt(w_other, w_self) + + at interp2app +def auto__ge__(space, w_self, w_other): + return space.not_(space.lt(w_self, w_other)) + + at interp2app +def auto__ne__(space, w_self, w_other): + return space.not_(space.eq(w_self, w_other)) + + # ____________________________________________________________ # Hash support diff --git 
a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -266,6 +266,38 @@ res = self.cpu.get_latest_value_int(0) assert res == 20 + def test_compile_big_bridge_out_of_small_loop(self): + i0 = BoxInt() + faildescr1 = BasicFailDescr(1) + looptoken = JitCellToken() + operations = [ + ResOperation(rop.GUARD_FALSE, [i0], None, descr=faildescr1), + ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(2)), + ] + inputargs = [i0] + operations[0].setfailargs([i0]) + self.cpu.compile_loop(inputargs, operations, looptoken) + + i1list = [BoxInt() for i in range(1000)] + bridge = [] + iprev = i0 + for i1 in i1list: + bridge.append(ResOperation(rop.INT_ADD, [iprev, ConstInt(1)], i1)) + iprev = i1 + bridge.append(ResOperation(rop.GUARD_FALSE, [i0], None, + descr=BasicFailDescr(3))) + bridge.append(ResOperation(rop.FINISH, [], None, + descr=BasicFailDescr(4))) + bridge[-2].setfailargs(i1list) + + self.cpu.compile_bridge(faildescr1, [i0], bridge, looptoken) + + fail = self.cpu.execute_token(looptoken, 1) + assert fail.identifier == 3 + for i in range(1000): + res = self.cpu.get_latest_value_int(i) + assert res == 2 + i + def test_get_latest_value_count(self): i0 = BoxInt() i1 = BoxInt() diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -144,7 +144,7 @@ 'int_mul': 1, 'guard_true': 2, 'int_sub': 2}) - def test_loop_invariant_mul_ovf(self): + def test_loop_invariant_mul_ovf1(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) def f(x, y): res = 0 @@ -235,6 +235,65 @@ 'guard_true': 4, 'int_sub': 4, 'jump': 3, 'int_mul': 3, 'int_add': 4}) + def test_loop_invariant_mul_ovf2(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) + def f(x, y): + res = 0 + while y > 0: + myjitdriver.can_enter_jit(x=x, y=y, 
res=res) + myjitdriver.jit_merge_point(x=x, y=y, res=res) + b = y * 2 + try: + res += ovfcheck(x * x) + b + except OverflowError: + res += 1 + y -= 1 + return res + res = self.meta_interp(f, [sys.maxint, 7]) + assert res == f(sys.maxint, 7) + self.check_trace_count(1) + res = self.meta_interp(f, [6, 7]) + assert res == 308 + + def test_loop_invariant_mul_bridge_ovf1(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x1', 'x2']) + def f(x1, x2, y): + res = 0 + while y > 0: + myjitdriver.can_enter_jit(x1=x1, x2=x2, y=y, res=res) + myjitdriver.jit_merge_point(x1=x1, x2=x2, y=y, res=res) + try: + res += ovfcheck(x1 * x1) + except OverflowError: + res += 1 + if y<32 and (y>>2)&1==0: + x1, x2 = x2, x1 + y -= 1 + return res + res = self.meta_interp(f, [6, sys.maxint, 48]) + assert res == f(6, sys.maxint, 48) + + def test_loop_invariant_mul_bridge_ovf2(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x1', 'x2', 'n']) + def f(x1, x2, n, y): + res = 0 + while y > 0: + myjitdriver.can_enter_jit(x1=x1, x2=x2, y=y, res=res, n=n) + myjitdriver.jit_merge_point(x1=x1, x2=x2, y=y, res=res, n=n) + try: + res += ovfcheck(x1 * x1) + except OverflowError: + res += 1 + y -= 1 + if y&4 == 0: + x1, x2 = x2, x1 + return res + res = self.meta_interp(f, [sys.maxint, 6, 32, 48]) + assert res == f(sys.maxint, 6, 32, 48) + res = self.meta_interp(f, [6, sys.maxint, 32, 48]) + assert res == f(6, sys.maxint, 32, 48) + + def test_loop_invariant_intbox(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) class I: diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py --- a/pypy/module/_io/interp_iobase.py +++ b/pypy/module/_io/interp_iobase.py @@ -326,8 +326,11 @@ try: space.call_method(w_iobase, 'flush') except OperationError, e: - # if it's an IOError, ignore it - if not e.match(space, space.w_IOError): + # if it's an IOError or ValueError, ignore it (ValueError is + # raised if by chance we are trying to flush a file 
which has + # already been closed) + if not (e.match(space, space.w_IOError) or + e.match(space, space.w_ValueError)): raise diff --git a/pypy/module/_io/test/test_fileio.py b/pypy/module/_io/test/test_fileio.py --- a/pypy/module/_io/test/test_fileio.py +++ b/pypy/module/_io/test/test_fileio.py @@ -178,7 +178,7 @@ space.finish() assert tmpfile.read() == '42' -def test_flush_at_exit_IOError(): +def test_flush_at_exit_IOError_and_ValueError(): from pypy import conftest from pypy.tool.option import make_config, make_objspace @@ -190,7 +190,12 @@ def flush(self): raise IOError + class MyStream2(io.IOBase): + def flush(self): + raise ValueError + s = MyStream() + s2 = MyStream2() import sys; sys._keepalivesomewhereobscure = s """) space.finish() # the IOError has been ignored diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -583,14 +583,32 @@ raise OperationError(space.w_ValueError, space.wrap(msg)) # Compare methods - def cmp__Array_ANY(space, self, other): + def _cmp_impl(space, self, other, space_fn): if isinstance(other, W_ArrayBase): w_lst1 = array_tolist__Array(space, self) w_lst2 = space.call_method(other, 'tolist') - return space.cmp(w_lst1, w_lst2) + return space_fn(w_lst1, w_lst2) else: return space.w_NotImplemented + def eq__Array_ANY(space, self, other): + return _cmp_impl(space, self, other, space.eq) + + def ne__Array_ANY(space, self, other): + return _cmp_impl(space, self, other, space.ne) + + def lt__Array_ANY(space, self, other): + return _cmp_impl(space, self, other, space.lt) + + def le__Array_ANY(space, self, other): + return _cmp_impl(space, self, other, space.le) + + def gt__Array_ANY(space, self, other): + return _cmp_impl(space, self, other, space.gt) + + def ge__Array_ANY(space, self, other): + return _cmp_impl(space, self, other, space.ge) + # Misc methods def buffer__Array(space, self): diff --git 
a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -536,12 +536,6 @@ assert (a >= c) is False assert (c >= a) is True - assert cmp(a, a) == 0 - assert cmp(a, b) == 0 - assert cmp(a, c) < 0 - assert cmp(b, a) == 0 - assert cmp(c, a) > 0 - def test_reduce(self): import pickle a = self.array('i', [1, 2, 3]) From noreply at buildbot.pypy.org Thu Mar 1 20:43:01 2012 From: noreply at buildbot.pypy.org (edelsohn) Date: Thu, 1 Mar 2012 20:43:01 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: a few grammar fixes. Message-ID: <20120301194301.EE8748204C@wyvern.cs.uni-duesseldorf.de> Author: edelsohn Branch: extradoc Changeset: r4112:f475bf882c28 Date: 2012-03-01 14:42 -0500 http://bitbucket.org/pypy/extradoc/changeset/f475bf882c28/ Log: a few grammar fixes. diff --git a/blog/draft/py3k-status-update-2.rst b/blog/draft/py3k-status-update-2.rst --- a/blog/draft/py3k-status-update-2.rst +++ b/blog/draft/py3k-status-update-2.rst @@ -2,9 +2,9 @@ --------------------- This is the second status update about my work on the `py3k branch`_, which I -can do thank to all the people who donated_ to the `py3k proposal`_. +can work on thanks to all of the people who donated_ to the `py3k proposal`_. -Since my previous `status update`_, things improved a lot: first of all, I +Since my previous `status update`_, things have improved a lot: first of all, I fixed the syntax of many more tests, which were failing on the branch because they used constructs which are no longer valid in Python 3, such as ``u''`` strings, the ``print`` statement or the old ``except Exception, e`` syntax. I From noreply at buildbot.pypy.org Thu Mar 1 20:45:54 2012 From: noreply at buildbot.pypy.org (edelsohn) Date: Thu, 1 Mar 2012 20:45:54 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: more fixes with the conclusion. 
Message-ID: <20120301194554.AA3FC8204C@wyvern.cs.uni-duesseldorf.de> Author: edelsohn Branch: extradoc Changeset: r4113:50725fba7b65 Date: 2012-03-01 14:45 -0500 http://bitbucket.org/pypy/extradoc/changeset/50725fba7b65/ Log: more fixes with the conclusion. diff --git a/blog/draft/py3k-status-update-2.rst b/blog/draft/py3k-status-update-2.rst --- a/blog/draft/py3k-status-update-2.rst +++ b/blog/draft/py3k-status-update-2.rst @@ -39,10 +39,10 @@ vs. byte files. Among these things, I fixed tons of smaller issues here and there. As a -result, there are much less failing tests than few weeks ago. Obviously the +result, there are many fewer failing tests than a few weeks ago. Obviously the number itself does not mean much, because sometimes fixing a single test takes -hours, and some other times by changing one line you fix tens of tests. But at -the end, seeing it dropping from 999 to 650_ is always nice :-). +hours, and some other times by changing one line one fixes tens of tests. But at +the end, seeing it dropping from 999 to 650_ always is nice and rewarding :-). The road for having a pypy3k is still long, but everything is going fine so far. Stay tuned for more updates! 
From noreply at buildbot.pypy.org Thu Mar 1 20:47:11 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Mar 2012 20:47:11 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: A typo left Message-ID: <20120301194711.628458204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r4114:35bfb98c11e0 Date: 2012-03-01 20:46 +0100 http://bitbucket.org/pypy/extradoc/changeset/35bfb98c11e0/ Log: A typo left diff --git a/blog/draft/py3k-status-update-2.rst b/blog/draft/py3k-status-update-2.rst --- a/blog/draft/py3k-status-update-2.rst +++ b/blog/draft/py3k-status-update-2.rst @@ -33,7 +33,7 @@ But the most important breakthrough was the removal of the ``_file`` module, containing the implementation of the ``file`` type in Python 2, which is now gone since in Python 3 files are handled by the ``_io`` module. Killing the -module was not straightforward, becase some of the importing logic was tightly +module was not straightforward, because some of the importing logic was tightly tied to the internal implementation of files, so it needed some refactoring. Finally, I had to fix the ``marshal`` module to correctly detect text files vs. byte files. 
From noreply at buildbot.pypy.org Thu Mar 1 20:48:31 2012 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 1 Mar 2012 20:48:31 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: typo Message-ID: <20120301194831.D6F648204C@wyvern.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: extradoc Changeset: r4115:3eba8a095ca2 Date: 2012-03-01 11:48 -0800 http://bitbucket.org/pypy/extradoc/changeset/3eba8a095ca2/ Log: typo diff --git a/blog/draft/py3k-status-update-2.rst b/blog/draft/py3k-status-update-2.rst --- a/blog/draft/py3k-status-update-2.rst +++ b/blog/draft/py3k-status-update-2.rst @@ -17,7 +17,7 @@ Some of them were easy and mechnical: for example, I removed all the function attributes such as ``func_code`` and ``func_closure``, which has been renamed to ``__code__`` and ``__closure__``, and then I had to find and fix all the -places which still expeted the old ones. +places which still expected the old ones. Some were trickier: I removed support for the ``cmp`` function and the ``__cmp__`` special method, but this also meant that I had to fix a few types From noreply at buildbot.pypy.org Thu Mar 1 21:07:26 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Mar 2012 21:07:26 +0100 (CET) Subject: [pypy-commit] pypy continulet-jit-2: Finish bridge support. XXX far too heavy for now. Message-ID: <20120301200726.705698204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-jit-2 Changeset: r53080:241e1f25e4f4 Date: 2012-03-01 21:07 +0100 http://bitbucket.org/pypy/pypy/changeset/241e1f25e4f4/ Log: Finish bridge support. XXX far too heavy for now. 
diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -514,7 +514,7 @@ operations, self.current_clt.allgcrefs) - #stackadjustpos = self._patchable_stackadjust() + frame_size_pos = self._enter_bridge_code() (frame_depth #, param_depth ) = self._assemble(regalloc, operations) codeendpos = self.mc.get_relative_pos() @@ -526,8 +526,8 @@ debug_print("bridge out of Guard %d has address %x to %x" % (descr_number, rawstart, rawstart + codeendpos)) debug_stop("jit-backend-addr") - #self._patch_stackadjust(rawstart + stackadjustpos, - # frame_depth + param_depth) + self._patch_stackadjust(rawstart + frame_size_pos, + frame_depth) # + param_depth) self.patch_pending_failure_recoveries(rawstart) if not we_are_translated(): # for the benefit of tests @@ -725,6 +725,55 @@ self.mc.LEA32_rb(esp.value, 0) return self.mc.get_relative_pos() - 4 + def _enter_bridge_code(self): + # XXX XXX far too heavy saving and restoring + j = 0 + if self.cpu.supports_floats: + for reg in self._regalloc.xrm.save_around_call_regs: + self.mc.MOVSD_sx(j, reg.value) + j += 8 + # + save_regs = self._regalloc.rm.save_around_call_regs + if IS_X86_32: + assert len(save_regs) == 3 + self.mc.MOV_sr(j, save_regs[0].value) + self.mc.PUSH_r(save_regs[1].value) + self.mc.PUSH_r(save_regs[2].value) + # 4 PUSHes in total, stack remains aligned + self.mc.PUSH_i32(0x77777777) # patched later + result = self.mc.get_relative_pos() - 4 + self.mc.LEA_rb(eax.value, -WORD * (FRAME_FIXED_SIZE-1)) + self.mc.PUSH_r(eax.value) + elif IS_X86_64: + # an even number of PUSHes, stack remains aligned + assert len(save_regs) & 1 == 0 + for reg in save_regs: + self.mc.PUSH_r(reg.value) + self.mc.LEA_rb(edi.value, -WORD * (FRAME_FIXED_SIZE-1)) + self.mc.MOV_riu32(esi.value, 0x77777777) # patched later + result = self.mc.get_relative_pos() - 4 + # + self.mc.CALL(imm(self.offstack_realloc_addr)) + # + self.mc.LEA_rm(ebp.value, 
(eax.value, WORD * (FRAME_FIXED_SIZE-1))) + # + if IS_X86_32: + self.mc.ADD_ri(esp.value, 2*WORD) + self.mc.POP_r(save_regs[2].value) + self.mc.POP_r(save_regs[1].value) + self.mc.MOV_rs(save_regs[0].value, j) + elif IS_X86_64: + for i in range(len(save_regs)-1, -1, -1): + self.mc.POP_r(save_regs[i].value) + # + if self.cpu.supports_floats: + j = 0 + for reg in self._regalloc.xrm.save_around_call_regs: + self.mc.MOVSD_xs(reg.value, j) + j += 8 + # + return result + def _patch_stackadjust(self, adr_to_fix, allocated_depth): # patch the requested size in the call to malloc/realloc mc = codebuf.MachineCodeBlockWrapper() From noreply at buildbot.pypy.org Thu Mar 1 21:45:02 2012 From: noreply at buildbot.pypy.org (hakanardo) Date: Thu, 1 Mar 2012 21:45:02 +0100 (CET) Subject: [pypy-commit] pypy default: try the crashing case first Message-ID: <20120301204502.538C28204C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: Changeset: r53081:d174264562db Date: 2012-03-01 18:58 +0100 http://bitbucket.org/pypy/pypy/changeset/d174264562db/ Log: try the crashing case first diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -288,10 +288,10 @@ if y&4 == 0: x1, x2 = x2, x1 return res + res = self.meta_interp(f, [6, sys.maxint, 32, 48]) + assert res == f(6, sys.maxint, 32, 48) res = self.meta_interp(f, [sys.maxint, 6, 32, 48]) assert res == f(sys.maxint, 6, 32, 48) - res = self.meta_interp(f, [6, sys.maxint, 32, 48]) - assert res == f(6, sys.maxint, 32, 48) def test_loop_invariant_intbox(self): From noreply at buildbot.pypy.org Thu Mar 1 21:45:03 2012 From: noreply at buildbot.pypy.org (hakanardo) Date: Thu, 1 Mar 2012 21:45:03 +0100 (CET) Subject: [pypy-commit] pypy default: Dont raise OverflowError when resuming from a guard_no_overflow from the short preamble (should fix issue 1072) Message-ID: 
<20120301204503.977768204C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: Changeset: r53082:90010b41b565 Date: 2012-03-01 21:43 +0100 http://bitbucket.org/pypy/pypy/changeset/90010b41b565/ Log: Dont raise OverflowError when resuming from a guard_no_overflow from the short preamble (should fix issue 1072) diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -1379,7 +1379,8 @@ elif opnum == rop.GUARD_NO_OVERFLOW: # Produced by int_xxx_ovf(). The pc is just after the opcode. # We get here because it did not used to overflow, but now it does. - return get_llexception(self.cpu, OverflowError()) + if not dont_change_position: + return get_llexception(self.cpu, OverflowError()) # elif opnum == rop.GUARD_OVERFLOW: # Produced by int_xxx_ovf(). The pc is just after the opcode. diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -2064,11 +2064,12 @@ pass # XXX we want to do something special in resume descr, # but not now elif opnum == rop.GUARD_NO_OVERFLOW: # an overflow now detected - self.execute_raised(OverflowError(), constant=True) - try: - self.finishframe_exception() - except ChangeFrame: - pass + if not dont_change_position: + self.execute_raised(OverflowError(), constant=True) + try: + self.finishframe_exception() + except ChangeFrame: + pass elif opnum == rop.GUARD_OVERFLOW: # no longer overflowing self.clear_exception() else: From noreply at buildbot.pypy.org Thu Mar 1 21:45:04 2012 From: noreply at buildbot.pypy.org (hakanardo) Date: Thu, 1 Mar 2012 21:45:04 +0100 (CET) Subject: [pypy-commit] pypy default: hg merge Message-ID: <20120301204504.D791A8204C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: Changeset: r53083:49afda04d4ce Date: 2012-03-01 21:44 +0100 http://bitbucket.org/pypy/pypy/changeset/49afda04d4ce/ Log: hg merge diff --git 
a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py --- a/pypy/interpreter/test/test_typedef.py +++ b/pypy/interpreter/test/test_typedef.py @@ -304,6 +304,42 @@ assert_method(w_o1, "c", True) assert_method(w_o2, "c", False) + def test_total_ordering(self): + class W_SomeType(Wrappable): + def __init__(self, space, x): + self.space = space + self.x = x + + def descr__lt(self, w_other): + assert isinstance(w_other, W_SomeType) + return self.space.wrap(self.x < w_other.x) + + def descr__eq(self, w_other): + assert isinstance(w_other, W_SomeType) + return self.space.wrap(self.x == w_other.x) + + W_SomeType.typedef = typedef.TypeDef( + 'some_type', + __total_ordering__ = 'auto', + __lt__ = interp2app(W_SomeType.descr__lt), + __eq__ = interp2app(W_SomeType.descr__eq), + ) + space = self.space + w_b = space.wrap(W_SomeType(space, 2)) + w_c = space.wrap(W_SomeType(space, 2)) + w_a = space.wrap(W_SomeType(space, 1)) + # explicitly defined + assert space.is_true(space.lt(w_a, w_b)) + assert not space.is_true(space.eq(w_a, w_b)) + assert space.is_true(space.eq(w_b, w_c)) + # automatically defined + assert space.is_true(space.le(w_a, w_b)) + assert space.is_true(space.le(w_b, w_c)) + assert space.is_true(space.gt(w_b, w_a)) + assert space.is_true(space.ge(w_b, w_a)) + assert space.is_true(space.ge(w_b, w_c)) + assert space.is_true(space.ne(w_a, w_b)) + assert not space.is_true(space.ne(w_b, w_c)) class AppTestTypeDef: diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -12,7 +12,7 @@ from pypy.rlib.jit import promote class TypeDef: - def __init__(self, __name, __base=None, **rawdict): + def __init__(self, __name, __base=None, __total_ordering__=None, **rawdict): "NOT_RPYTHON: initialization-time only" self.name = __name if __base is None: @@ -34,6 +34,9 @@ # xxx used by faking self.fakedcpytype = None self.add_entries(**rawdict) + assert __total_ordering__ in 
(None, 'auto'), "Unknown value for __total_ordering" + if __total_ordering__ == 'auto': + self.auto_total_ordering() def add_entries(self, **rawdict): # xxx fix the names of the methods to match what app-level expects @@ -41,7 +44,15 @@ if isinstance(value, (interp2app, GetSetProperty)): value.name = key self.rawdict.update(rawdict) - + + def auto_total_ordering(self): + assert '__lt__' in self.rawdict, "__total_ordering='auto' requires __lt__" + assert '__eq__' in self.rawdict, "__total_ordering='auto' requires __eq__" + self.add_entries(__le__ = auto__le__, + __gt__ = auto__gt__, + __ge__ = auto__ge__, + __ne__ = auto__ne__) + def _freeze_(self): # hint for the annotator: track individual constant instances of TypeDef return True @@ -50,6 +61,26 @@ return "<%s name=%r>" % (self.__class__.__name__, self.name) +# generic special cmp methods defined on top of __lt__ and __eq__, used by +# automatic total ordering + + at interp2app +def auto__le__(space, w_self, w_other): + return space.not_(space.lt(w_other, w_self)) + + at interp2app +def auto__gt__(space, w_self, w_other): + return space.lt(w_other, w_self) + + at interp2app +def auto__ge__(space, w_self, w_other): + return space.not_(space.lt(w_self, w_other)) + + at interp2app +def auto__ne__(space, w_self, w_other): + return space.not_(space.eq(w_self, w_other)) + + # ____________________________________________________________ # Hash support diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -266,6 +266,38 @@ res = self.cpu.get_latest_value_int(0) assert res == 20 + def test_compile_big_bridge_out_of_small_loop(self): + i0 = BoxInt() + faildescr1 = BasicFailDescr(1) + looptoken = JitCellToken() + operations = [ + ResOperation(rop.GUARD_FALSE, [i0], None, descr=faildescr1), + ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(2)), + ] + inputargs = [i0] + 
operations[0].setfailargs([i0]) + self.cpu.compile_loop(inputargs, operations, looptoken) + + i1list = [BoxInt() for i in range(1000)] + bridge = [] + iprev = i0 + for i1 in i1list: + bridge.append(ResOperation(rop.INT_ADD, [iprev, ConstInt(1)], i1)) + iprev = i1 + bridge.append(ResOperation(rop.GUARD_FALSE, [i0], None, + descr=BasicFailDescr(3))) + bridge.append(ResOperation(rop.FINISH, [], None, + descr=BasicFailDescr(4))) + bridge[-2].setfailargs(i1list) + + self.cpu.compile_bridge(faildescr1, [i0], bridge, looptoken) + + fail = self.cpu.execute_token(looptoken, 1) + assert fail.identifier == 3 + for i in range(1000): + res = self.cpu.get_latest_value_int(i) + assert res == 2 + i + def test_get_latest_value_count(self): i0 = BoxInt() i1 = BoxInt() diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py --- a/pypy/module/_io/interp_iobase.py +++ b/pypy/module/_io/interp_iobase.py @@ -326,8 +326,11 @@ try: space.call_method(w_iobase, 'flush') except OperationError, e: - # if it's an IOError, ignore it - if not e.match(space, space.w_IOError): + # if it's an IOError or ValueError, ignore it (ValueError is + # raised if by chance we are trying to flush a file which has + # already been closed) + if not (e.match(space, space.w_IOError) or + e.match(space, space.w_ValueError)): raise diff --git a/pypy/module/_io/test/test_fileio.py b/pypy/module/_io/test/test_fileio.py --- a/pypy/module/_io/test/test_fileio.py +++ b/pypy/module/_io/test/test_fileio.py @@ -178,7 +178,7 @@ space.finish() assert tmpfile.read() == '42' -def test_flush_at_exit_IOError(): +def test_flush_at_exit_IOError_and_ValueError(): from pypy import conftest from pypy.tool.option import make_config, make_objspace @@ -190,7 +190,12 @@ def flush(self): raise IOError + class MyStream2(io.IOBase): + def flush(self): + raise ValueError + s = MyStream() + s2 = MyStream2() import sys; sys._keepalivesomewhereobscure = s """) space.finish() # the IOError has been ignored diff --git 
a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -583,14 +583,32 @@ raise OperationError(space.w_ValueError, space.wrap(msg)) # Compare methods - def cmp__Array_ANY(space, self, other): + def _cmp_impl(space, self, other, space_fn): if isinstance(other, W_ArrayBase): w_lst1 = array_tolist__Array(space, self) w_lst2 = space.call_method(other, 'tolist') - return space.cmp(w_lst1, w_lst2) + return space_fn(w_lst1, w_lst2) else: return space.w_NotImplemented + def eq__Array_ANY(space, self, other): + return _cmp_impl(space, self, other, space.eq) + + def ne__Array_ANY(space, self, other): + return _cmp_impl(space, self, other, space.ne) + + def lt__Array_ANY(space, self, other): + return _cmp_impl(space, self, other, space.lt) + + def le__Array_ANY(space, self, other): + return _cmp_impl(space, self, other, space.le) + + def gt__Array_ANY(space, self, other): + return _cmp_impl(space, self, other, space.gt) + + def ge__Array_ANY(space, self, other): + return _cmp_impl(space, self, other, space.ge) + # Misc methods def buffer__Array(space, self): diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -536,12 +536,6 @@ assert (a >= c) is False assert (c >= a) is True - assert cmp(a, a) == 0 - assert cmp(a, b) == 0 - assert cmp(a, c) < 0 - assert cmp(b, a) == 0 - assert cmp(c, a) > 0 - def test_reduce(self): import pickle a = self.array('i', [1, 2, 3]) From noreply at buildbot.pypy.org Thu Mar 1 23:06:46 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 1 Mar 2012 23:06:46 +0100 (CET) Subject: [pypy-commit] pypy jit-tracehook: A branch to allow JITing of python when there's a tracehook installed. 
Message-ID: <20120301220646.EC5988204C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: jit-tracehook Changeset: r53084:e54e45812b77 Date: 2012-03-01 17:06 -0500 http://bitbucket.org/pypy/pypy/changeset/e54e45812b77/ Log: A branch to allow JITing of python when there's a tracehook installed. diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -37,26 +37,17 @@ def set_jitcell_at(newcell, next_instr, is_being_profiled, bytecode): bytecode.jit_cells[next_instr, is_being_profiled] = newcell -def confirm_enter_jit(next_instr, is_being_profiled, bytecode, frame, ec): - return (frame.w_f_trace is None and - ec.w_tracefunc is None) - -def can_never_inline(next_instr, is_being_profiled, bytecode): - return False - def should_unroll_one_iteration(next_instr, is_being_profiled, bytecode): return (bytecode.co_flags & CO_GENERATOR) != 0 class PyPyJitDriver(JitDriver): reds = ['frame', 'ec'] - greens = ['next_instr', 'is_being_profiled', 'pycode'] + greens = ['next_instr', 'is_being_profiled', 'pycode', 'w_f_trace', 'w_tracefunc'] virtualizables = ['frame'] pypyjitdriver = PyPyJitDriver(get_printable_location = get_printable_location, get_jitcell_at = get_jitcell_at, set_jitcell_at = set_jitcell_at, - confirm_enter_jit = confirm_enter_jit, - can_never_inline = can_never_inline, should_unroll_one_iteration = should_unroll_one_iteration, name='pypyjit') @@ -71,7 +62,7 @@ while True: pypyjitdriver.jit_merge_point(ec=ec, frame=self, next_instr=next_instr, pycode=pycode, - is_being_profiled=is_being_profiled) + is_being_profiled=is_being_profiled, w_f_trace=self.w_f_trace, w_tracefunc=ec.w_tracefunc) co_code = pycode.co_code self.valuestackdepth = hint(self.valuestackdepth, promote=True) next_instr = self.handle_bytecode(co_code, next_instr, ec) @@ -94,7 +85,7 @@ # pypyjitdriver.can_enter_jit(frame=self, ec=ec, next_instr=jumpto, pycode=self.getcode(), - 
is_being_profiled=self.is_being_profiled) + is_being_profiled=self.is_being_profiled, w_f_trace=self.w_f_trace, w_tracefunc=ec.w_tracefunc) return jumpto def _get_adapted_tick_counter(): From noreply at buildbot.pypy.org Thu Mar 1 23:15:19 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 1 Mar 2012 23:15:19 +0100 (CET) Subject: [pypy-commit] pypy jit-tracehook: fix for new greenkeys Message-ID: <20120301221519.697368204C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: jit-tracehook Changeset: r53085:a174f7421083 Date: 2012-03-01 17:15 -0500 http://bitbucket.org/pypy/pypy/changeset/a174f7421083/ Log: fix for new greenkeys diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -26,16 +26,16 @@ JUMP_ABSOLUTE = opmap['JUMP_ABSOLUTE'] -def get_printable_location(next_instr, is_being_profiled, bytecode): +def get_printable_location(next_instr, is_being_profiled, bytecode, w_f_trace, w_tracefunc): from pypy.tool.stdlib_opcode import opcode_method_names name = opcode_method_names[ord(bytecode.co_code[next_instr])] return '%s #%d %s' % (bytecode.get_repr(), next_instr, name) -def get_jitcell_at(next_instr, is_being_profiled, bytecode): - return bytecode.jit_cells.get((next_instr, is_being_profiled), None) +def get_jitcell_at(next_instr, is_being_profiled, bytecode, w_f_trace, w_tracefunc): + return bytecode.jit_cells.get((next_instr, is_being_profiled, w_f_trace, w_tracefunc), None) -def set_jitcell_at(newcell, next_instr, is_being_profiled, bytecode): - bytecode.jit_cells[next_instr, is_being_profiled] = newcell +def set_jitcell_at(newcell, next_instr, is_being_profiled, bytecode, w_f_trace, w_tracefunc): + bytecode.jit_cells[next_instr, is_being_profiled, w_f_trace, w_tracefunc] = newcell def should_unroll_one_iteration(next_instr, is_being_profiled, bytecode): return (bytecode.co_flags & CO_GENERATOR) != 0 From notifications-noreply at 
bitbucket.org Thu Mar 1 23:26:58 2012 From: notifications-noreply at bitbucket.org (Bitbucket) Date: Thu, 01 Mar 2012 22:26:58 -0000 Subject: [pypy-commit] Notification: lang-js Message-ID: <20120301222658.20120.80527@bitbucket03.managed.contegix.com> You have received a notification from Leonardo Santagada. Hi, I forked lang-js. My fork is at https://bitbucket.org/santagada/lang-js. -- Disable notifications at https://bitbucket.org/account/notifications/ From noreply at buildbot.pypy.org Thu Mar 1 23:37:40 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 1 Mar 2012 23:37:40 +0100 (CET) Subject: [pypy-commit] pypy jit-tracehook: update another function for the new greenkey Message-ID: <20120301223740.B7DFF8204C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: jit-tracehook Changeset: r53086:13bff91d31c5 Date: 2012-03-01 17:37 -0500 http://bitbucket.org/pypy/pypy/changeset/13bff91d31c5/ Log: update another function for the new greenkey diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -37,7 +37,7 @@ def set_jitcell_at(newcell, next_instr, is_being_profiled, bytecode, w_f_trace, w_tracefunc): bytecode.jit_cells[next_instr, is_being_profiled, w_f_trace, w_tracefunc] = newcell -def should_unroll_one_iteration(next_instr, is_being_profiled, bytecode): +def should_unroll_one_iteration(next_instr, is_being_profiled, bytecode, w_f_trace, w_tracefunc): return (bytecode.co_flags & CO_GENERATOR) != 0 class PyPyJitDriver(JitDriver): From noreply at buildbot.pypy.org Fri Mar 2 00:09:23 2012 From: noreply at buildbot.pypy.org (RonnyPfannschmidt) Date: Fri, 2 Mar 2012 00:09:23 +0100 (CET) Subject: [pypy-commit] pypy default: jittify_and_run: print the repr of the graph eval result, so raw bytes get readable Message-ID: <20120301230923.3525B8204C@wyvern.cs.uni-duesseldorf.de> Author: Ronny Pfannschmidt Branch: Changeset: r53087:2881f17e1ffc 
Date: 2012-03-02 00:07 +0100 http://bitbucket.org/pypy/pypy/changeset/2881f17e1ffc/ Log: jittify_and_run: print the repr of the graph eval result, so raw bytes get readable diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -100,7 +100,7 @@ if not kwds.get('translate_support_code', False): warmrunnerdesc.metainterp_sd.profiler.finish() warmrunnerdesc.metainterp_sd.cpu.finish_once() - print '~~~ return value:', res + print '~~~ return value:', repr(res) while repeat > 1: print '~' * 79 res1 = interp.eval_graph(graph, args) From noreply at buildbot.pypy.org Fri Mar 2 00:11:53 2012 From: noreply at buildbot.pypy.org (RonnyPfannschmidt) Date: Fri, 2 Mar 2012 00:11:53 +0100 (CET) Subject: [pypy-commit] pypy pytest: merge default Message-ID: <20120301231153.924138204C@wyvern.cs.uni-duesseldorf.de> Author: Ronny Pfannschmidt Branch: pytest Changeset: r53088:4010eecfac8d Date: 2012-03-02 00:11 +0100 http://bitbucket.org/pypy/pypy/changeset/4010eecfac8d/ Log: merge default diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py --- a/pypy/interpreter/test/test_typedef.py +++ b/pypy/interpreter/test/test_typedef.py @@ -304,6 +304,42 @@ assert_method(w_o1, "c", True) assert_method(w_o2, "c", False) + def test_total_ordering(self): + class W_SomeType(Wrappable): + def __init__(self, space, x): + self.space = space + self.x = x + + def descr__lt(self, w_other): + assert isinstance(w_other, W_SomeType) + return self.space.wrap(self.x < w_other.x) + + def descr__eq(self, w_other): + assert isinstance(w_other, W_SomeType) + return self.space.wrap(self.x == w_other.x) + + W_SomeType.typedef = typedef.TypeDef( + 'some_type', + __total_ordering__ = 'auto', + __lt__ = interp2app(W_SomeType.descr__lt), + __eq__ = interp2app(W_SomeType.descr__eq), + ) + space = self.space + w_b = space.wrap(W_SomeType(space, 2)) + w_c = 
space.wrap(W_SomeType(space, 2)) + w_a = space.wrap(W_SomeType(space, 1)) + # explicitly defined + assert space.is_true(space.lt(w_a, w_b)) + assert not space.is_true(space.eq(w_a, w_b)) + assert space.is_true(space.eq(w_b, w_c)) + # automatically defined + assert space.is_true(space.le(w_a, w_b)) + assert space.is_true(space.le(w_b, w_c)) + assert space.is_true(space.gt(w_b, w_a)) + assert space.is_true(space.ge(w_b, w_a)) + assert space.is_true(space.ge(w_b, w_c)) + assert space.is_true(space.ne(w_a, w_b)) + assert not space.is_true(space.ne(w_b, w_c)) class AppTestTypeDef: diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -12,7 +12,7 @@ from pypy.rlib.jit import promote class TypeDef: - def __init__(self, __name, __base=None, **rawdict): + def __init__(self, __name, __base=None, __total_ordering__=None, **rawdict): "NOT_RPYTHON: initialization-time only" self.name = __name if __base is None: @@ -34,6 +34,9 @@ # xxx used by faking self.fakedcpytype = None self.add_entries(**rawdict) + assert __total_ordering__ in (None, 'auto'), "Unknown value for __total_ordering" + if __total_ordering__ == 'auto': + self.auto_total_ordering() def add_entries(self, **rawdict): # xxx fix the names of the methods to match what app-level expects @@ -41,7 +44,15 @@ if isinstance(value, (interp2app, GetSetProperty)): value.name = key self.rawdict.update(rawdict) - + + def auto_total_ordering(self): + assert '__lt__' in self.rawdict, "__total_ordering='auto' requires __lt__" + assert '__eq__' in self.rawdict, "__total_ordering='auto' requires __eq__" + self.add_entries(__le__ = auto__le__, + __gt__ = auto__gt__, + __ge__ = auto__ge__, + __ne__ = auto__ne__) + def _freeze_(self): # hint for the annotator: track individual constant instances of TypeDef return True @@ -50,6 +61,26 @@ return "<%s name=%r>" % (self.__class__.__name__, self.name) +# generic special cmp methods defined on top of 
__lt__ and __eq__, used by +# automatic total ordering + + at interp2app +def auto__le__(space, w_self, w_other): + return space.not_(space.lt(w_other, w_self)) + + at interp2app +def auto__gt__(space, w_self, w_other): + return space.lt(w_other, w_self) + + at interp2app +def auto__ge__(space, w_self, w_other): + return space.not_(space.lt(w_self, w_other)) + + at interp2app +def auto__ne__(space, w_self, w_other): + return space.not_(space.eq(w_self, w_other)) + + # ____________________________________________________________ # Hash support diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -266,6 +266,38 @@ res = self.cpu.get_latest_value_int(0) assert res == 20 + def test_compile_big_bridge_out_of_small_loop(self): + i0 = BoxInt() + faildescr1 = BasicFailDescr(1) + looptoken = JitCellToken() + operations = [ + ResOperation(rop.GUARD_FALSE, [i0], None, descr=faildescr1), + ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(2)), + ] + inputargs = [i0] + operations[0].setfailargs([i0]) + self.cpu.compile_loop(inputargs, operations, looptoken) + + i1list = [BoxInt() for i in range(1000)] + bridge = [] + iprev = i0 + for i1 in i1list: + bridge.append(ResOperation(rop.INT_ADD, [iprev, ConstInt(1)], i1)) + iprev = i1 + bridge.append(ResOperation(rop.GUARD_FALSE, [i0], None, + descr=BasicFailDescr(3))) + bridge.append(ResOperation(rop.FINISH, [], None, + descr=BasicFailDescr(4))) + bridge[-2].setfailargs(i1list) + + self.cpu.compile_bridge(faildescr1, [i0], bridge, looptoken) + + fail = self.cpu.execute_token(looptoken, 1) + assert fail.identifier == 3 + for i in range(1000): + res = self.cpu.get_latest_value_int(i) + assert res == 2 + i + def test_get_latest_value_count(self): i0 = BoxInt() i1 = BoxInt() diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ 
b/pypy/jit/metainterp/blackhole.py @@ -1379,7 +1379,8 @@ elif opnum == rop.GUARD_NO_OVERFLOW: # Produced by int_xxx_ovf(). The pc is just after the opcode. # We get here because it did not used to overflow, but now it does. - return get_llexception(self.cpu, OverflowError()) + if not dont_change_position: + return get_llexception(self.cpu, OverflowError()) # elif opnum == rop.GUARD_OVERFLOW: # Produced by int_xxx_ovf(). The pc is just after the opcode. diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -2064,11 +2064,12 @@ pass # XXX we want to do something special in resume descr, # but not now elif opnum == rop.GUARD_NO_OVERFLOW: # an overflow now detected - self.execute_raised(OverflowError(), constant=True) - try: - self.finishframe_exception() - except ChangeFrame: - pass + if not dont_change_position: + self.execute_raised(OverflowError(), constant=True) + try: + self.finishframe_exception() + except ChangeFrame: + pass elif opnum == rop.GUARD_OVERFLOW: # no longer overflowing self.clear_exception() else: diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -288,10 +288,10 @@ if y&4 == 0: x1, x2 = x2, x1 return res + res = self.meta_interp(f, [6, sys.maxint, 32, 48]) + assert res == f(6, sys.maxint, 32, 48) res = self.meta_interp(f, [sys.maxint, 6, 32, 48]) assert res == f(sys.maxint, 6, 32, 48) - res = self.meta_interp(f, [6, sys.maxint, 32, 48]) - assert res == f(6, sys.maxint, 32, 48) def test_loop_invariant_intbox(self): diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -100,7 +100,7 @@ if not kwds.get('translate_support_code', False): warmrunnerdesc.metainterp_sd.profiler.finish() 
warmrunnerdesc.metainterp_sd.cpu.finish_once() - print '~~~ return value:', res + print '~~~ return value:', repr(res) while repeat > 1: print '~' * 79 res1 = interp.eval_graph(graph, args) diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -583,14 +583,32 @@ raise OperationError(space.w_ValueError, space.wrap(msg)) # Compare methods - def cmp__Array_ANY(space, self, other): + def _cmp_impl(space, self, other, space_fn): if isinstance(other, W_ArrayBase): w_lst1 = array_tolist__Array(space, self) w_lst2 = space.call_method(other, 'tolist') - return space.cmp(w_lst1, w_lst2) + return space_fn(w_lst1, w_lst2) else: return space.w_NotImplemented + def eq__Array_ANY(space, self, other): + return _cmp_impl(space, self, other, space.eq) + + def ne__Array_ANY(space, self, other): + return _cmp_impl(space, self, other, space.ne) + + def lt__Array_ANY(space, self, other): + return _cmp_impl(space, self, other, space.lt) + + def le__Array_ANY(space, self, other): + return _cmp_impl(space, self, other, space.le) + + def gt__Array_ANY(space, self, other): + return _cmp_impl(space, self, other, space.gt) + + def ge__Array_ANY(space, self, other): + return _cmp_impl(space, self, other, space.ge) + # Misc methods def buffer__Array(space, self): diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -536,12 +536,6 @@ assert (a >= c) is False assert (c >= a) is True - assert cmp(a, a) == 0 - assert cmp(a, b) == 0 - assert cmp(a, c) < 0 - assert cmp(b, a) == 0 - assert cmp(c, a) > 0 - def test_reduce(self): import pickle a = self.array('i', [1, 2, 3]) From noreply at buildbot.pypy.org Fri Mar 2 10:35:48 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 2 Mar 2012 10:35:48 +0100 (CET) Subject: [pypy-commit] pypy py3k: cmp is gone, use 
direct comparison instead Message-ID: <20120302093548.E22DB8204C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53089:ff55c5b2ae11 Date: 2012-03-02 10:15 +0100 http://bitbucket.org/pypy/pypy/changeset/ff55c5b2ae11/ Log: cmp is gone, use direct comparison instead diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -674,10 +674,10 @@ for k in self.other: self.failIf(k in d) #cmp - self.assertEqual(cmp(p,p), 0) - self.assertEqual(cmp(d,d), 0) - self.assertEqual(cmp(p,d), -1) - self.assertEqual(cmp(d,p), 1) + self.assert_(p == p) + self.assert_(d == d) + self.assert_(p < d) + self.assert_(d > p) #__non__zero__ if p: self.fail("Empty mapping must compare to False") if not d: self.fail("Full mapping must compare to True") From noreply at buildbot.pypy.org Fri Mar 2 10:35:50 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 2 Mar 2012 10:35:50 +0100 (CET) Subject: [pypy-commit] pypy py3k: kill tests about mixed sequences, they are unordeable now Message-ID: <20120302093550.2D6EC8204C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53090:a4c2a1793449 Date: 2012-03-02 10:27 +0100 http://bitbucket.org/pypy/pypy/changeset/a4c2a1793449/ Log: kill tests about mixed sequences, they are unordeable now diff --git a/pypy/module/__builtin__/test/test_minmax.py b/pypy/module/__builtin__/test/test_minmax.py --- a/pypy/module/__builtin__/test/test_minmax.py +++ b/pypy/module/__builtin__/test/test_minmax.py @@ -17,9 +17,6 @@ def test_min_strings(self): assert min('aaa', 'bbb', 'c') == 'aaa' - def test_min_mixed(self): - assert min('1', 2, 3, 'aa') == 2 - def test_min_noargs(self): raises(TypeError, min) @@ -43,9 +40,6 @@ def test_max_strings(self): assert max('aaa', 'bbb', 'c') == 'c' - def test_max_mixed(self): - assert max('1', 2, 3, 
'aa') == 'aa' - def test_max_noargs(self): raises(TypeError, max) @@ -66,9 +60,6 @@ def test_max_strings(self): assert max(('aaa', 'bbb', 'c')) == 'c' - def test_max_mixed(self): - assert max(('1', 2, 3, 'aa')) == 'aa' - class AppTestMinList: def test_min_usual(self): @@ -82,6 +73,3 @@ def test_min_strings(self): assert min(['aaa', 'bbb', 'c']) == 'aaa' - - def test_min_mixed(self): - assert min(['1', 2, 3, 'aa']) == 2 From noreply at buildbot.pypy.org Fri Mar 2 10:35:51 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 2 Mar 2012 10:35:51 +0100 (CET) Subject: [pypy-commit] pypy py3k: fix test_bisect: None is no longer comparable to ints, and map no longer returns a list Message-ID: <20120302093551.606678204C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53091:cb0485337b72 Date: 2012-03-02 10:30 +0100 http://bitbucket.org/pypy/pypy/changeset/cb0485337b72/ Log: fix test_bisect: None is no longer comparable to ints, and map no longer returns a list diff --git a/pypy/module/_bisect/test/test_bisect.py b/pypy/module/_bisect/test/test_bisect.py --- a/pypy/module/_bisect/test/test_bisect.py +++ b/pypy/module/_bisect/test/test_bisect.py @@ -9,7 +9,6 @@ def test_bisect_left(self): from _bisect import bisect_left a = [0, 5, 6, 6, 6, 7] - assert bisect_left(a, None) == 0 assert bisect_left(a, -3) == 0 assert bisect_left(a, 0) == 0 assert bisect_left(a, 3) == 1 @@ -47,7 +46,6 @@ def test_bisect_right(self): from _bisect import bisect_right a = [0, 5, 6, 6, 6, 7] - assert bisect_right(a, None) == 0 assert bisect_right(a, -3) == 0 assert bisect_right(a, 0) == 1 assert bisect_right(a, 3) == 1 @@ -85,11 +83,11 @@ a = [0, 5, 6, 6, 6, 7] insort_left(a, 6.0) assert a == [0, 5, 6.0, 6, 6, 6, 7] - assert map(type, a) == [int, int, float, int, int, int, int] + assert list(map(type, a)) == [int, int, float, int, int, int, int] def test_insort_right(self): from _bisect import insort_right a = [0, 5, 6, 6, 6, 7] insort_right(a, 6.0) assert a == 
[0, 5, 6, 6, 6, 6.0, 7] - assert map(type, a) == [int, int, int, int, int, float, int] + assert list(map(type, a)) == [int, int, int, int, int, float, int] From noreply at buildbot.pypy.org Fri Mar 2 10:35:52 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 2 Mar 2012 10:35:52 +0100 (CET) Subject: [pypy-commit] pypy py3k: kill this line, cmp no longer exists Message-ID: <20120302093552.98F898204C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53092:fcd3bb999210 Date: 2012-03-02 10:31 +0100 http://bitbucket.org/pypy/pypy/changeset/fcd3bb999210/ Log: kill this line, cmp no longer exists diff --git a/pypy/module/_collections/test/test_deque.py b/pypy/module/_collections/test/test_deque.py --- a/pypy/module/_collections/test/test_deque.py +++ b/pypy/module/_collections/test/test_deque.py @@ -108,7 +108,6 @@ assert (x <= y) == (list(x) <= list(y)) assert (x > y) == (list(x) > list(y)) assert (x >= y) == (list(x) >= list(y)) - assert cmp(x,y) == cmp(list(x),list(y)) def test_extend(self): from _collections import deque From noreply at buildbot.pypy.org Fri Mar 2 10:35:53 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 2 Mar 2012 10:35:53 +0100 (CET) Subject: [pypy-commit] pypy default: sys.version is a string, so this test has been evaluated to false for ages. Use sys.version_info instead Message-ID: <20120302093553.C7A098204C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r53093:32e1793c68ea Date: 2012-03-02 10:34 +0100 http://bitbucket.org/pypy/pypy/changeset/32e1793c68ea/ Log: sys.version is a string, so this test has been evaluated to false for ages. 
Use sys.version_info instead diff --git a/pypy/module/_md5/test/test_md5.py b/pypy/module/_md5/test/test_md5.py --- a/pypy/module/_md5/test/test_md5.py +++ b/pypy/module/_md5/test/test_md5.py @@ -28,7 +28,7 @@ assert self.md5.digest_size == 16 #assert self.md5.digestsize == 16 -- not on CPython assert self.md5.md5().digest_size == 16 - if sys.version >= (2, 5): + if sys.version_info >= (2, 5): assert self.md5.blocksize == 1 assert self.md5.md5().digestsize == 16 From noreply at buildbot.pypy.org Fri Mar 2 10:35:55 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 2 Mar 2012 10:35:55 +0100 (CET) Subject: [pypy-commit] pypy py3k: hg merge default Message-ID: <20120302093555.15F988204C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53094:7e9798e88c4b Date: 2012-03-02 10:34 +0100 http://bitbucket.org/pypy/pypy/changeset/7e9798e88c4b/ Log: hg merge default diff --git a/pypy/module/_md5/test/test_md5.py b/pypy/module/_md5/test/test_md5.py --- a/pypy/module/_md5/test/test_md5.py +++ b/pypy/module/_md5/test/test_md5.py @@ -26,7 +26,7 @@ """ import sys assert self.md5.md5().digest_size == 16 - if sys.version >= (2, 5): + if sys.version_info >= (2, 5): assert self.md5.blocksize == 1 assert self.md5.md5().digestsize == 16 diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -504,12 +504,6 @@ assert (a >= c) is False assert (c >= a) is True - assert a == a - assert a == b - assert a < c - assert b == a - assert c > a - def test_reduce(self): import pickle a = self.array('i', [1, 2, 3]) From noreply at buildbot.pypy.org Fri Mar 2 10:35:56 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 2 Mar 2012 10:35:56 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20120302093556.B9AF68204C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r53095:ee7e8bd31c25 Date: 
2012-03-02 10:35 +0100 http://bitbucket.org/pypy/pypy/changeset/ee7e8bd31c25/ Log: merge heads diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -266,6 +266,38 @@ res = self.cpu.get_latest_value_int(0) assert res == 20 + def test_compile_big_bridge_out_of_small_loop(self): + i0 = BoxInt() + faildescr1 = BasicFailDescr(1) + looptoken = JitCellToken() + operations = [ + ResOperation(rop.GUARD_FALSE, [i0], None, descr=faildescr1), + ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(2)), + ] + inputargs = [i0] + operations[0].setfailargs([i0]) + self.cpu.compile_loop(inputargs, operations, looptoken) + + i1list = [BoxInt() for i in range(1000)] + bridge = [] + iprev = i0 + for i1 in i1list: + bridge.append(ResOperation(rop.INT_ADD, [iprev, ConstInt(1)], i1)) + iprev = i1 + bridge.append(ResOperation(rop.GUARD_FALSE, [i0], None, + descr=BasicFailDescr(3))) + bridge.append(ResOperation(rop.FINISH, [], None, + descr=BasicFailDescr(4))) + bridge[-2].setfailargs(i1list) + + self.cpu.compile_bridge(faildescr1, [i0], bridge, looptoken) + + fail = self.cpu.execute_token(looptoken, 1) + assert fail.identifier == 3 + for i in range(1000): + res = self.cpu.get_latest_value_int(i) + assert res == 2 + i + def test_get_latest_value_count(self): i0 = BoxInt() i1 = BoxInt() diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -1379,7 +1379,8 @@ elif opnum == rop.GUARD_NO_OVERFLOW: # Produced by int_xxx_ovf(). The pc is just after the opcode. # We get here because it did not used to overflow, but now it does. - return get_llexception(self.cpu, OverflowError()) + if not dont_change_position: + return get_llexception(self.cpu, OverflowError()) # elif opnum == rop.GUARD_OVERFLOW: # Produced by int_xxx_ovf(). The pc is just after the opcode. 
diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -2064,11 +2064,12 @@ pass # XXX we want to do something special in resume descr, # but not now elif opnum == rop.GUARD_NO_OVERFLOW: # an overflow now detected - self.execute_raised(OverflowError(), constant=True) - try: - self.finishframe_exception() - except ChangeFrame: - pass + if not dont_change_position: + self.execute_raised(OverflowError(), constant=True) + try: + self.finishframe_exception() + except ChangeFrame: + pass elif opnum == rop.GUARD_OVERFLOW: # no longer overflowing self.clear_exception() else: diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -288,10 +288,10 @@ if y&4 == 0: x1, x2 = x2, x1 return res + res = self.meta_interp(f, [6, sys.maxint, 32, 48]) + assert res == f(6, sys.maxint, 32, 48) res = self.meta_interp(f, [sys.maxint, 6, 32, 48]) assert res == f(sys.maxint, 6, 32, 48) - res = self.meta_interp(f, [6, sys.maxint, 32, 48]) - assert res == f(6, sys.maxint, 32, 48) def test_loop_invariant_intbox(self): diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -100,7 +100,7 @@ if not kwds.get('translate_support_code', False): warmrunnerdesc.metainterp_sd.profiler.finish() warmrunnerdesc.metainterp_sd.cpu.finish_once() - print '~~~ return value:', res + print '~~~ return value:', repr(res) while repeat > 1: print '~' * 79 res1 = interp.eval_graph(graph, args) From noreply at buildbot.pypy.org Fri Mar 2 11:25:07 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 2 Mar 2012 11:25:07 +0100 (CET) Subject: [pypy-commit] pypy py3k: we can no longer sort mixed types. 
Use set() instead of sorted() to compare the content of the lists Message-ID: <20120302102507.860948204C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53096:3af950e78b9e Date: 2012-03-02 11:17 +0100 http://bitbucket.org/pypy/pypy/changeset/3af950e78b9e/ Log: we can no longer sort mixed types. Use set() instead of sorted() to compare the content of the lists diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -404,7 +404,7 @@ # this raised TypeError on ListStrategies l1 = ["a", "2", True, "a"] l2 = [1, "2", "a", "a"] - assert sorted(l1) == sorted(l2) + assert set(l1) == set(l2) def test_notequals(self): assert [1,2,3,4] != [1,2,5,4] From noreply at buildbot.pypy.org Fri Mar 2 11:25:08 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 2 Mar 2012 11:25:08 +0100 (CET) Subject: [pypy-commit] pypy py3k: kill the cmp argument to list.sort() and sorted() Message-ID: <20120302102508.C0D728204C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53097:084d689a182e Date: 2012-03-02 11:18 +0100 http://bitbucket.org/pypy/pypy/changeset/084d689a182e/ Log: kill the cmp argument to list.sort() and sorted() diff --git a/pypy/module/__builtin__/app_functional.py b/pypy/module/__builtin__/app_functional.py --- a/pypy/module/__builtin__/app_functional.py +++ b/pypy/module/__builtin__/app_functional.py @@ -11,10 +11,10 @@ # ____________________________________________________________ -def sorted(lst, cmp=None, key=None, reverse=None): - "sorted(iterable, cmp=None, key=None, reverse=False) --> new sorted list" +def sorted(lst, key=None, reverse=None): + "sorted(iterable, key=None, reverse=False) --> new sorted list" sorted_lst = list(lst) - sorted_lst.sort(cmp, key, reverse) + sorted_lst.sort(key=key, reverse=reverse) return sorted_lst def any(seq): diff --git 
a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -1364,20 +1364,6 @@ def lt(self, a, b): return a < b -class CustomCompareSort(SimpleSort): - def lt(self, a, b): - space = self.space - w_cmp = self.w_cmp - w_result = space.call_function(w_cmp, a, b) - try: - result = space.int_w(w_result) - except OperationError, e: - if e.match(space, space.w_TypeError): - raise OperationError(space.w_TypeError, - space.wrap("comparison function must return int")) - raise - return result < 0 - class CustomKeySort(SimpleSort): def lt(self, a, b): assert isinstance(a, KeyContainer) @@ -1385,24 +1371,16 @@ space = self.space return space.is_true(space.lt(a.w_key, b.w_key)) -class CustomKeyCompareSort(CustomCompareSort): - def lt(self, a, b): - assert isinstance(a, KeyContainer) - assert isinstance(b, KeyContainer) - return CustomCompareSort.lt(self, a.w_key, b.w_key) +def list_sort__List_ANY_ANY(space, w_list, w_keyfunc, w_reverse): -def list_sort__List_ANY_ANY_ANY(space, w_list, w_cmp, w_keyfunc, w_reverse): - - has_cmp = not space.is_w(w_cmp, space.w_None) has_key = not space.is_w(w_keyfunc, space.w_None) has_reverse = space.is_true(w_reverse) # create and setup a TimSort instance - if has_cmp: - if has_key: - sorterclass = CustomKeyCompareSort - else: - sorterclass = CustomCompareSort + if 0: + # this was the old "if has_cmp" path. 
We didn't remove the if not to + # diverge too much from default, to avoid spurious conflicts + pass else: if has_key: sorterclass = CustomKeySort @@ -1415,7 +1393,6 @@ sorter = sorterclass(w_list.getitems(), w_list.length()) sorter.space = space - sorter.w_cmp = w_cmp try: # The list is temporarily made empty, so that mutations performed diff --git a/pypy/objspace/std/listtype.py b/pypy/objspace/std/listtype.py --- a/pypy/objspace/std/listtype.py +++ b/pypy/objspace/std/listtype.py @@ -24,10 +24,10 @@ ' occurrences of value') list_reverse = SMM('reverse',1, doc='L.reverse() -- reverse *IN PLACE*') -list_sort = SMM('sort', 4, defaults=(None, None, False), - argnames=['cmp', 'key', 'reverse'], - doc='L.sort(cmp=None, key=None, reverse=False) -- stable' - ' sort *IN PLACE*;\ncmp(x, y) -> -1, 0, 1') +list_sort = SMM('sort', 3, defaults=(None, None, False), + argnames=['key', 'reverse'], + doc='L.sort(key=None, reverse=False) -- stable' + ' sort *IN PLACE*') list_reversed = SMM('__reversed__', 1, doc='L.__reversed__() -- return a reverse iterator over' ' the list') diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -543,18 +543,6 @@ l.sort() assert l == [1.1, 2.2, 3.1, 3.3, 4.4, 5.5] - def test_sort_cmp(self): - def lencmp(a,b): return cmp(len(a), len(b)) - l = [ 'a', 'fiver', 'tre', '' ] - l.sort(lencmp) - assert l == ['', 'a', 'tre', 'fiver'] - l = [] - l.sort(lencmp) - assert l == [] - l = [ 'a' ] - l.sort(lencmp) - assert l == [ 'a' ] - def test_sort_key(self): def lower(x): return x.lower() l = ['a', 'C', 'b'] From noreply at buildbot.pypy.org Fri Mar 2 11:25:09 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 2 Mar 2012 11:25:09 +0100 (CET) Subject: [pypy-commit] pypy py3k: we have one more test now, adapt the expected number of passed ones Message-ID: 
<20120302102509.F3A5D8204C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53098:e1d8fdaa2ed9 Date: 2012-03-02 11:20 +0100 http://bitbucket.org/pypy/pypy/changeset/e1d8fdaa2ed9/ Log: we have one more test now, adapt the expected number of passed ones diff --git a/pypy/tool/pytest/test/test_conftest1.py b/pypy/tool/pytest/test/test_conftest1.py --- a/pypy/tool/pytest/test/test_conftest1.py +++ b/pypy/tool/pytest/test/test_conftest1.py @@ -17,7 +17,7 @@ def test_selection_by_keyword_app(self, testdir): sorter = testdir.inline_run("-k", "applevel -docstring", innertest) passed, skipped, failed = sorter.listoutcomes() - assert len(passed) == 2 + assert len(passed) == 3 assert failed == [] assert skipped == [] assert "app_test_something" in passed[0].nodeid @@ -27,7 +27,7 @@ sorter = testdir.inline_run(innertest, '-k', 'applevel -docstring', '--runappdirect') passed, skipped, failed = sorter.listoutcomes() - assert len(passed) == 2 + assert len(passed) == 3 print passed assert "app_test_something" in passed[0].nodeid assert "test_method_app" in passed[1].nodeid From noreply at buildbot.pypy.org Fri Mar 2 11:25:11 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 2 Mar 2012 11:25:11 +0100 (CET) Subject: [pypy-commit] pypy py3k: add docs for the atexit module, and kill a clearly out-of-place comment Message-ID: <20120302102511.350E68204C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53099:5a558b1ae15d Date: 2012-03-02 11:24 +0100 http://bitbucket.org/pypy/pypy/changeset/5a558b1ae15d/ Log: add docs for the atexit module, and kill a clearly out-of-place comment diff --git a/pypy/doc/config/objspace.usemodules.atexit.txt b/pypy/doc/config/objspace.usemodules.atexit.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.usemodules.atexit.txt @@ -0,0 +1,2 @@ +Use the 'atexit' module. +Allow programmer to define multiple exit functions to be executed upon normal program termination. 
diff --git a/pypy/module/atexit/__init__.py b/pypy/module/atexit/__init__.py --- a/pypy/module/atexit/__init__.py +++ b/pypy/module/atexit/__init__.py @@ -1,8 +1,3 @@ -"""A _string module, to export formatter_parser and - formatter_field_name_split to the string.Formatter class - implemented in Python.""" - - from pypy.interpreter.mixedmodule import MixedModule class Module(MixedModule): From noreply at buildbot.pypy.org Fri Mar 2 11:37:56 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 2 Mar 2012 11:37:56 +0100 (CET) Subject: [pypy-commit] pypy core-only-tracing: bah, actually fix rpython Message-ID: <20120302103756.89C3A8204C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: core-only-tracing Changeset: r53100:5ecd8a0b0a52 Date: 2012-01-20 16:35 +0100 http://bitbucket.org/pypy/pypy/changeset/5ecd8a0b0a52/ Log: bah, actually fix rpython diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -904,8 +904,9 @@ funcname = op.args[2].value key = jd, funcname if key not in closures: + is_string = funcname in ('enable_opts', 'jitmode') closures[key] = make_closure(jd, 'set_param_' + funcname, - funcname == 'enable_opts') + is_string) op.opname = 'direct_call' op.args[:3] = [closures[key]] diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -151,10 +151,12 @@ for key, w_value in kwds_w.items(): if key == 'enable_opts': jit.set_param(None, 'enable_opts', space.str_w(w_value)) + elif key == 'jitmode': + jit.set_param(None, 'jitmode', space.str_w(w_value)) else: intval = space.int_w(w_value) for name, _ in unroll_parameters: - if name == key and name != 'enable_opts': + if name == key and name != 'enable_opts' and name != 'jitmode': jit.set_param(None, name, intval) break else: diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- 
a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -527,11 +527,13 @@ raise ValueError name = parts[0] value = parts[1] - if name == 'enable_opts' or name == 'jitmode': - set_param(driver, name, value) + if name == 'enable_opts': + set_param(driver, 'enable_opts', value) + elif name == 'jitmode': + set_param(driver, 'jitmode', value) else: for name1, _ in unroll_parameters: - if name1 == name and name1 != 'enable_opts': + if name1 == name and name1 != 'enable_opts' and name1 != 'jitmode': try: set_param(driver, name1, int(value)) except ValueError: @@ -717,7 +719,7 @@ from pypy.annotation import model as annmodel assert s_name.is_constant() if not self.bookkeeper.immutablevalue(DEFAULT).contains(s_value): - if s_name.const == 'enable_opts': + if s_name.const in ('enable_opts', 'jitmode'): assert annmodel.SomeString(can_be_None=True).contains(s_value) else: assert annmodel.SomeInteger().contains(s_value) @@ -731,7 +733,7 @@ hop.exception_cannot_occur() driver = hop.inputarg(lltype.Void, arg=0) name = hop.args_s[1].const - if name == 'enable_opts': + if name in ('enable_opts', 'jitmode'): repr = string_repr else: repr = lltype.Signed From noreply at buildbot.pypy.org Fri Mar 2 11:37:57 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 2 Mar 2012 11:37:57 +0100 (CET) Subject: [pypy-commit] pypy core-only-tracing: (antocuni, arigo): mark more modules as is_core, because they really contain extensions to pyopcode.py Message-ID: <20120302103757.B97338204C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: core-only-tracing Changeset: r53101:46d4d363c415 Date: 2012-01-20 17:26 +0100 http://bitbucket.org/pypy/pypy/changeset/46d4d363c415/ Log: (antocuni, arigo): mark more modules as is_core, because they really contain extensions to pyopcode.py diff --git a/pypy/module/pypyjit/policy.py b/pypy/module/pypyjit/policy.py --- a/pypy/module/pypyjit/policy.py +++ b/pypy/module/pypyjit/policy.py @@ -153,5 +153,10 @@ def is_core_function(self, func): mod = 
func.__module__ or '?' - is_interpreter = mod.startswith('pypy.interpreter.') - return is_interpreter or mod.startswith('pypy.module.pypyjit.') + fname = func.func_name + is_interpreter = (mod.startswith('pypy.interpreter.') or + mod.startswith('pypy.objspace.std.frame') or + mod.startswith('pypy.objspace.std.callmethod') or + (mod.startswith('pypy.objspace.std.mapdict') and + (fname.startswith('LOOKUP') or fname.startswith('LOAD')))) + return is_interpreter or mod.startswith('pypy.module.pypyjit') From noreply at buildbot.pypy.org Fri Mar 2 11:43:37 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 2 Mar 2012 11:43:37 +0100 (CET) Subject: [pypy-commit] pypy py3k: cStringIO no longer exists Message-ID: <20120302104337.470D78204C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53102:155b1c49f9e5 Date: 2012-03-02 11:42 +0100 http://bitbucket.org/pypy/pypy/changeset/155b1c49f9e5/ Log: cStringIO no longer exists diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -30,7 +30,7 @@ ["_socket", "unicodedata", "mmap", "fcntl", "_locale", "pwd", "rctime" , "select", "zipimport", "_lsprof", "crypt", "signal", "_rawffi", "termios", "zlib", "bz2", - "struct", "_hashlib", "_md5", "_sha", "_minimal_curses", "cStringIO", + "struct", "_hashlib", "_md5", "_sha", "_minimal_curses", "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array", "_bisect", "binascii", "_multiprocessing", '_warnings', "_collections", "_multibytecodec", "micronumpy", "_ffi", From noreply at buildbot.pypy.org Fri Mar 2 11:57:45 2012 From: noreply at buildbot.pypy.org (hager) Date: Fri, 2 Mar 2012 11:57:45 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: more cleanups Message-ID: <20120302105745.51D808204C@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r53103:7af263689381 Date: 2012-03-02 02:56 -0800 
http://bitbucket.org/pypy/pypy/changeset/7af263689381/ Log: more cleanups diff --git a/pypy/jit/backend/ppc/codebuilder.py b/pypy/jit/backend/ppc/codebuilder.py --- a/pypy/jit/backend/ppc/codebuilder.py +++ b/pypy/jit/backend/ppc/codebuilder.py @@ -5,7 +5,6 @@ from pypy.jit.backend.ppc.arch import (IS_PPC_32, WORD, IS_PPC_64) import pypy.jit.backend.ppc.register as r from pypy.jit.backend.llsupport.asmmemmgr import BlockBuilderMixin -from pypy.jit.backend.llsupport.regalloc import RegisterManager from pypy.rpython.lltypesystem import lltype, rffi from pypy.jit.metainterp.resoperation import rop from pypy.tool.udir import udir diff --git a/pypy/jit/backend/ppc/ppc_assembler.py b/pypy/jit/backend/ppc/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppc_assembler.py @@ -9,14 +9,12 @@ FPR_SAVE_AREA, FLOAT_INT_CONVERSION, FORCE_INDEX, SIZE_LOAD_IMM_PATCH_SP) -from pypy.jit.backend.ppc.helper.assembler import (gen_emit_cmp_op, - decode64, Saved_Volatiles) +from pypy.jit.backend.ppc.helper.assembler import Saved_Volatiles from pypy.jit.backend.ppc.helper.regalloc import _check_imm_arg import pypy.jit.backend.ppc.register as r import pypy.jit.backend.ppc.condition as c from pypy.jit.metainterp.history import AbstractFailDescr from pypy.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper -from pypy.jit.backend.llsupport.regalloc import compute_vars_longevity from pypy.jit.backend.model import CompiledLoopToken from pypy.rpython.lltypesystem import lltype, rffi, llmemory from pypy.jit.metainterp.resoperation import rop @@ -185,7 +183,6 @@ bytecode = rffi.cast(rffi.UCHARP, mem_loc) num = 0 value = 0 - fvalue = 0 code_inputarg = False while True: code = rffi.cast(lltype.Signed, bytecode[0]) @@ -522,11 +519,9 @@ operations = self.setup(looptoken, operations) self.startpos = self.mc.currpos() - longevity = compute_vars_longevity(inputargs, operations) regalloc = Regalloc(assembler=self, frame_manager=PPCFrameManager()) 
regalloc.prepare_loop(inputargs, operations) - regalloc_head = self.mc.currpos() start_pos = self.mc.currpos() looptoken._ppc_loop_code = start_pos @@ -584,7 +579,6 @@ operations = self.setup(looptoken, operations) assert isinstance(faildescr, AbstractFailDescr) code = self._find_failure_recovery_bytecode(faildescr) - frame_depth = faildescr._ppc_frame_depth arglocs = self.decode_inputargs(code) if not we_are_translated(): assert len(inputargs) == len(arglocs) diff --git a/pypy/jit/backend/ppc/regalloc.py b/pypy/jit/backend/ppc/regalloc.py --- a/pypy/jit/backend/ppc/regalloc.py +++ b/pypy/jit/backend/ppc/regalloc.py @@ -912,28 +912,6 @@ arglocs.append(t) return arglocs - # from ../x86/regalloc.py:791 - def _unpack_fielddescr(self, fielddescr): - assert isinstance(fielddescr, BaseFieldDescr) - ofs = fielddescr.offset - size = fielddescr.get_field_size(self.cpu.translate_support_code) - ptr = fielddescr.is_pointer_field() - return ofs, size, ptr - - # from ../x86/regalloc.py:779 - def _unpack_arraydescr(self, arraydescr): - assert isinstance(arraydescr, BaseArrayDescr) - cpu = self.cpu - ofs_length = arraydescr.get_ofs_length(cpu.translate_support_code) - ofs = arraydescr.get_base_size(cpu.translate_support_code) - size = arraydescr.get_item_size(cpu.translate_support_code) - ptr = arraydescr.is_array_of_pointers() - scale = 0 - while (1 << scale) < size: - scale += 1 - assert (1 << scale) == size - return size, scale, ofs, ofs_length, ptr - def prepare_force_spill(self, op): self.force_spill_var(op.getarg(0)) return [] From notifications-noreply at bitbucket.org Fri Mar 2 12:02:11 2012 From: notifications-noreply at bitbucket.org (Bitbucket) Date: Fri, 02 Mar 2012 11:02:11 -0000 Subject: [pypy-commit] Notification: pypy Message-ID: <20120302110211.22942.25998@bitbucket01.managed.contegix.com> You have received a notification from oberstet. Hi, I forked pypy. My fork is at https://bitbucket.org/oberstet/pypy. 
-- Disable notifications at https://bitbucket.org/account/notifications/ From noreply at buildbot.pypy.org Fri Mar 2 12:16:34 2012 From: noreply at buildbot.pypy.org (hager) Date: Fri, 2 Mar 2012 12:16:34 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: port test from ARM backend Message-ID: <20120302111634.5A86B8204C@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r53104:049cb55075c8 Date: 2012-03-02 03:15 -0800 http://bitbucket.org/pypy/pypy/changeset/049cb55075c8/ Log: port test from ARM backend diff --git a/pypy/jit/backend/ppc/test/test_generated.py b/pypy/jit/backend/ppc/test/test_generated.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/ppc/test/test_generated.py @@ -0,0 +1,525 @@ +import py +from pypy.jit.metainterp.history import (AbstractFailDescr, + AbstractDescr, + BasicFailDescr, + BoxInt, Box, BoxPtr, + ConstInt, ConstPtr, + BoxObj, Const, + ConstObj, BoxFloat, ConstFloat) +from pypy.jit.metainterp.history import JitCellToken +from pypy.jit.metainterp.resoperation import ResOperation, rop +from pypy.rpython.test.test_llinterp import interpret +from pypy.jit.backend.detect_cpu import getcpuclass + +CPU = getcpuclass() +class TestStuff(object): + + def test0(self): + faildescr1 = BasicFailDescr(1) + faildescr2 = BasicFailDescr(2) + v1 = BoxInt() + v2 = BoxInt() + v3 = BoxInt() + v4 = BoxInt() + v5 = BoxInt() + v6 = BoxInt() + v7 = BoxInt() + v8 = BoxInt() + v9 = BoxInt() + v10 = BoxInt() + v11 = BoxInt() + v12 = BoxInt() + cpu = CPU(None, None) + cpu.setup_once() + inputargs = [v1, v2, v3, v4, v5, v6, v7, v8, v9, v10] + operations = [ + ResOperation(rop.INT_SUB, [ConstInt(-1073741824), v7], v11), + ResOperation(rop.INT_GE, [v3, ConstInt(23)], v12), + ResOperation(rop.GUARD_TRUE, [v12], None, descr=faildescr1), + ResOperation(rop.FINISH, [v9, v6, v10, v2, v8, v5, v1, v4], None, descr=faildescr2), + ] + looptoken = JitCellToken() + operations[2].setfailargs([v12, v8, v3, v2, v1, v11]) + 
cpu.compile_loop(inputargs, operations, looptoken) + args = [-12 , -26 , -19 , 7 , -5 , -24 , -37 , 62 , 9 , 12] + op = cpu.execute_token(looptoken, *args) + assert cpu.get_latest_value_int(0) == 0 + assert cpu.get_latest_value_int(1) == 62 + assert cpu.get_latest_value_int(2) == -19 + assert cpu.get_latest_value_int(3) == -26 + assert cpu.get_latest_value_int(4) == -12 + assert cpu.get_latest_value_int(5) == -1073741787 + + def test_overflow(self): + faildescr1 = BasicFailDescr(1) + faildescr2 = BasicFailDescr(2) + faildescr3 = BasicFailDescr(3) + v1 = BoxInt() + v2 = BoxInt() + v3 = BoxInt() + v4 = BoxInt() + v5 = BoxInt() + v6 = BoxInt() + v7 = BoxInt() + v8 = BoxInt() + v9 = BoxInt() + v10 = BoxInt() + v11 = BoxInt() + v12 = BoxInt() + v13 = BoxInt() + v14 = BoxInt() + v15 = BoxInt() + v16 = BoxInt() + v17 = BoxInt() + v18 = BoxInt() + cpu = CPU(None, None) + cpu.setup_once() + inputargs = [v1, v2, v3, v4, v5, v6, v7, v8, v9, v10] + operations = [ + ResOperation(rop.INT_SUB, [ConstInt(21), v5], v11), + ResOperation(rop.INT_MUL_OVF, [v8, v4], v12), + ResOperation(rop.GUARD_NO_OVERFLOW, [], None, descr=faildescr1), + ResOperation(rop.UINT_LT, [v10, v3], v13), + ResOperation(rop.INT_IS_TRUE, [v3], v14), + ResOperation(rop.INT_XOR, [v9, v8], v15), + ResOperation(rop.INT_LE, [v12, v6], v16), + ResOperation(rop.UINT_GT, [v15, v5], v17), + ResOperation(rop.UINT_LE, [ConstInt(-9), v13], v18), + ResOperation(rop.GUARD_FALSE, [v13], None, descr=faildescr2), + ResOperation(rop.FINISH, [v7, v1, v2], None, descr=faildescr3), + ] + operations[2].setfailargs([v10, v6]) + operations[9].setfailargs([v15, v7, v10, v18, v4, v17, v1]) + looptoken = JitCellToken() + cpu.compile_loop(inputargs, operations, looptoken) + args = [16 , 5 , 5 , 16 , 46 , 6 , 63 , 39 , 78 , 0] + op = cpu.execute_token(looptoken, *args) + assert cpu.get_latest_value_int(0) == 105 + assert cpu.get_latest_value_int(1) == 63 + assert cpu.get_latest_value_int(2) == 0 + assert cpu.get_latest_value_int(3) == 0 + 
assert cpu.get_latest_value_int(4) == 16 + assert cpu.get_latest_value_int(5) == 1 + assert cpu.get_latest_value_int(6) == 16 + + def test_sub_with_neg_const_first_arg(self): + faildescr1 = BasicFailDescr(1) + faildescr2 = BasicFailDescr(2) + faildescr3 = BasicFailDescr(3) + v1 = BoxInt() + v2 = BoxInt() + v3 = BoxInt() + v4 = BoxInt() + v5 = BoxInt() + v6 = BoxInt() + v7 = BoxInt() + v8 = BoxInt() + v9 = BoxInt() + v10 = BoxInt() + v11 = BoxInt() + v12 = BoxInt() + tmp13 = BoxInt() + cpu = CPU(None, None) + cpu.setup_once() + inputargs = [v1, v2, v3, v4, v5, v6, v7, v8, v9, v10] + operations = [ + ResOperation(rop.INT_EQ, [ConstInt(17), v9], v11), + ResOperation(rop.INT_SUB_OVF, [ConstInt(-32), v7], v12), + ResOperation(rop.GUARD_NO_OVERFLOW, [], None, descr=faildescr1), + ResOperation(rop.INT_IS_ZERO, [v12], tmp13), + ResOperation(rop.GUARD_TRUE, [tmp13], None, descr=faildescr2), + ResOperation(rop.FINISH, [v5, v2, v1, v10, v3, v8, v4, v6], None, descr=faildescr3) + ] + operations[2].setfailargs([v8, v3]) + operations[4].setfailargs([v2, v12, v1, v3, v4]) + looptoken = JitCellToken() + cpu.compile_loop(inputargs, operations, looptoken) + args = [-5 , 24 , 46 , -15 , 13 , -8 , 0 , -6 , 6 , 6] + op = cpu.execute_token(looptoken, *args) + assert op.identifier == 2 + assert cpu.get_latest_value_int(0) == 24 + assert cpu.get_latest_value_int(1) == -32 + assert cpu.get_latest_value_int(2) == -5 + assert cpu.get_latest_value_int(3) == 46 + assert cpu.get_latest_value_int(4) == -15 + + def test_tempbox_spilling_in_sub(self): + faildescr1 = BasicFailDescr(1) + faildescr2 = BasicFailDescr(2) + v1 = BoxInt() + v2 = BoxInt() + v3 = BoxInt() + v4 = BoxInt() + v5 = BoxInt() + v6 = BoxInt() + v7 = BoxInt() + v8 = BoxInt() + v9 = BoxInt() + v10 = BoxInt() + v11 = BoxInt() + v12 = BoxInt() + v13 = BoxInt() + v14 = BoxInt() + v15 = BoxInt() + cpu = CPU(None, None) + cpu.setup_once() + inputargs = [v1, v2, v3, v4, v5, v6, v7, v8, v9, v10] + operations = [ + ResOperation(rop.INT_LT, 
[v9, v9], v11), + ResOperation(rop.INT_ADD, [ConstInt(715827882), v4], v12), + ResOperation(rop.INT_NEG, [v11], v13), + ResOperation(rop.INT_IS_TRUE, [v3], v14), + ResOperation(rop.INT_SUB_OVF, [v3, ConstInt(-95)], v15), + ResOperation(rop.GUARD_NO_OVERFLOW, [], None, descr=faildescr1), + ResOperation(rop.FINISH, [v8, v2, v6, v5, v7, v1, v10], None, descr=faildescr2), + ] + operations[5].setfailargs([]) + looptoken = JitCellToken() + cpu.compile_loop(inputargs, operations, looptoken) + args = [19 , -3 , -58 , -7 , 12 , 22 , -54 , -29 , -19 , -64] + op = cpu.execute_token(looptoken, *args) + assert cpu.get_latest_value_int(0) == -29 + assert cpu.get_latest_value_int(1) == -3 + assert cpu.get_latest_value_int(2) == 22 + assert cpu.get_latest_value_int(3) == 12 + assert cpu.get_latest_value_int(4) == -54 + assert cpu.get_latest_value_int(5) == 19 + assert cpu.get_latest_value_int(6) == -64 + + def test_tempbox2(self): + faildescr1 = BasicFailDescr(1) + faildescr2 = BasicFailDescr(2) + v1 = BoxInt() + v2 = BoxInt() + v3 = BoxInt() + v4 = BoxInt() + v5 = BoxInt() + v6 = BoxInt() + v7 = BoxInt() + v8 = BoxInt() + v9 = BoxInt() + v10 = BoxInt() + v11 = BoxInt() + v12 = BoxInt() + v13 = BoxInt() + v14 = BoxInt() + v15 = BoxInt() + cpu = CPU(None, None) + cpu.setup_once() + inputargs = [v1, v2, v3, v4, v5, v6, v7, v8, v9, v10] + operations = [ + ResOperation(rop.INT_LT, [v5, ConstInt(-67)], v11), + ResOperation(rop.INT_INVERT, [v2], v12), + ResOperation(rop.INT_SUB, [ConstInt(-45), v2], v13), + ResOperation(rop.INT_SUB, [ConstInt(99), v6], v14), + ResOperation(rop.INT_MUL_OVF, [v6, v9], v15), + ResOperation(rop.GUARD_NO_OVERFLOW, [], None, descr=faildescr1), + ResOperation(rop.FINISH, [v1, v4, v10, v8, v7, v3], None, descr=faildescr2), + ] + looptoken = JitCellToken() + operations[5].setfailargs([]) + cpu.compile_loop(inputargs, operations, looptoken) + args = [1073741824 , 95 , -16 , 5 , 92 , 12 , 32 , 17 , 37 , -63] + op = cpu.execute_token(looptoken, *args) + assert 
cpu.get_latest_value_int(0) == 1073741824 + assert cpu.get_latest_value_int(1) == 5 + assert cpu.get_latest_value_int(2) == -63 + assert cpu.get_latest_value_int(3) == 17 + assert cpu.get_latest_value_int(4) == 32 + assert cpu.get_latest_value_int(5) == -16 + + def test_wrong_guard(self): + # generated by: + # ../test/ test/test_zll_random.py -l -k arm -s --block-length=10 --random-seed=4338 + + faildescr1 = BasicFailDescr(1) + faildescr2 = BasicFailDescr(2) + faildescr3 = BasicFailDescr(3) + faildescr4 = BasicFailDescr(4) + v1 = BoxInt(32) + v2 = BoxInt(41) + v3 = BoxInt(-9) + v4 = BoxInt(12) + v5 = BoxInt(-18) + v6 = BoxInt(46) + v7 = BoxInt(15) + v8 = BoxInt(17) + v9 = BoxInt(10) + v10 = BoxInt(12) + v11 = BoxInt() + v12 = BoxInt() + v13 = BoxInt() + v14 = BoxInt() + tmp15 = BoxInt() + tmp16 = BoxInt() + tmp17 = BoxInt() + cpu = CPU(None, None) + cpu.setup_once() + inputargs = [v1, v2, v3, v4, v5, v6, v7, v8, v9, v10] + operations = [ + ResOperation(rop.INT_IS_TRUE, [v1], tmp15), + ResOperation(rop.GUARD_TRUE, [tmp15], None, descr=faildescr1), + ResOperation(rop.INT_GT, [v4, v5], v11), + ResOperation(rop.INT_XOR, [ConstInt(-4), v7], v12), + ResOperation(rop.INT_MUL, [ConstInt(23), v11], v13), + ResOperation(rop.UINT_GE, [ConstInt(1), v13], v14), + ResOperation(rop.INT_IS_ZERO, [v14], tmp16), + ResOperation(rop.GUARD_TRUE, [tmp16], None, descr=faildescr2), + ResOperation(rop.INT_IS_TRUE, [v12], tmp17), + ResOperation(rop.GUARD_FALSE, [tmp17], None, descr=faildescr3), + ResOperation(rop.FINISH, [v8, v10, v6, v3, v2, v9], None, descr=faildescr4), + ] + looptoken = JitCellToken() + operations[1].setfailargs([v8, v6, v1]) + operations[7].setfailargs([v4]) + operations[9].setfailargs([v10, v13]) + args = [32 , 41 , -9 , 12 , -18 , 46 , 15 , 17 , 10 , 12] + cpu.compile_loop(inputargs, operations, looptoken) + op = cpu.execute_token(looptoken, *args) + assert op.identifier == 3 + assert cpu.get_latest_value_int(0) == 12 + assert cpu.get_latest_value_int(1) == 23 + + def 
test_wrong_guard2(self): + # random seed: 8029 + # block length: 10 + faildescr1 = BasicFailDescr(1) + faildescr2 = BasicFailDescr(2) + faildescr3 = BasicFailDescr(3) + v1 = BoxInt() + v2 = BoxInt() + v3 = BoxInt() + v4 = BoxInt() + v5 = BoxInt() + v6 = BoxInt() + v7 = BoxInt() + v8 = BoxInt() + v9 = BoxInt() + v10 = BoxInt() + v11 = BoxInt() + v12 = BoxInt() + v13 = BoxInt() + v14 = BoxInt() + v15 = BoxInt() + v16 = BoxInt() + tmp17 = BoxInt() + cpu = CPU(None, None) + cpu.setup_once() + inputargs = [v1, v2, v3, v4, v5, v6, v7, v8, v9, v10] + operations = [ + ResOperation(rop.INT_ADD_OVF, [v8, ConstInt(-30)], v11), + ResOperation(rop.GUARD_NO_OVERFLOW, [], None, descr=faildescr1), + ResOperation(rop.UINT_LE, [v11, v1], v12), + ResOperation(rop.INT_AND, [v11, ConstInt(31)], tmp17), + ResOperation(rop.UINT_RSHIFT, [v12, tmp17], v13), + ResOperation(rop.INT_NE, [v3, v2], v14), + ResOperation(rop.INT_NE, [ConstInt(1), v11], v15), + ResOperation(rop.INT_NE, [ConstInt(23), v15], v16), + ResOperation(rop.GUARD_FALSE, [v15], None, descr=faildescr2), + ResOperation(rop.FINISH, [v4, v10, v6, v5, v9, v7], None, descr=faildescr3), + ] + operations[1].setfailargs([v6, v8, v1, v4]) + operations[8].setfailargs([v5, v9]) + looptoken = JitCellToken() + cpu.compile_loop(inputargs, operations, looptoken) + args = [-8 , 0 , 62 , 35 , 16 , 9 , 30 , 581610154 , -1 , 738197503] + op = cpu.execute_token(looptoken, *args) + assert op.identifier == 2 + assert cpu.get_latest_value_int(0) == 16 + assert cpu.get_latest_value_int(1) == -1 + + def test_wrong_guard3(self): + # random seed: 8029 + # block length: 10 + faildescr1 = BasicFailDescr(1) + faildescr2 = BasicFailDescr(2) + faildescr3 = BasicFailDescr(3) + faildescr4 = BasicFailDescr(4) + v1 = BoxInt() + v2 = BoxInt() + v3 = BoxInt() + v4 = BoxInt() + v5 = BoxInt() + v6 = BoxInt() + v7 = BoxInt() + v8 = BoxInt() + v9 = BoxInt() + v10 = BoxInt() + v11 = BoxInt() + v12 = BoxInt() + v13 = BoxInt() + v14 = BoxInt() + v15 = BoxInt() + v16 = 
BoxInt() + cpu = CPU(None, None) + cpu.setup_once() + inputargs = [v1, v2, v3, v4, v5, v6, v7, v8, v9, v10] + operations = [ + ResOperation(rop.UINT_LT, [ConstInt(-11), v7], v11), + ResOperation(rop.INT_GE, [v3, v5], v12), + ResOperation(rop.INT_INVERT, [v9], v13), + ResOperation(rop.GUARD_VALUE, [v13, ConstInt(14)], None, descr=faildescr3), + ResOperation(rop.INT_IS_ZERO, [v12], v14), + ResOperation(rop.INT_SUB, [v2, v13], v15), + ResOperation(rop.GUARD_VALUE, [v15, ConstInt(-32)], None, descr=faildescr4), + ResOperation(rop.INT_FLOORDIV, [v3, ConstInt(805306366)], v16), + ResOperation(rop.GUARD_VALUE, [v15, ConstInt(0)], None, descr=faildescr1), + ResOperation(rop.FINISH, [v10, v8, v1, v6, v4], None, descr=faildescr2), + ] + operations[3].setfailargs([]) + operations[-4].setfailargs([v15]) + operations[-2].setfailargs([v9, v4, v10, v11, v14]) + looptoken = JitCellToken() + cpu.compile_loop(inputargs, operations, looptoken) + args = [-39 , -18 , 1588243114 , -9 , -4 , 1252698794 , 0 , 715827882 , -15 , 536870912] + op = cpu.execute_token(looptoken, *args) + assert op.identifier == 1 + assert cpu.get_latest_value_int(0) == -15 + assert cpu.get_latest_value_int(1) == -9 + assert cpu.get_latest_value_int(2) == 536870912 + assert cpu.get_latest_value_int(3) == 0 + assert cpu.get_latest_value_int(4) == 0 + + def test_wrong_result(self): + # generated by: + # ../test/ test/test_zll_random.py -l -k arm -s --block-length=10 --random-seed=7389 + faildescr1 = BasicFailDescr(1) + faildescr2 = BasicFailDescr(2) + faildescr3 = BasicFailDescr(3) + faildescr4 = BasicFailDescr(4) + v1 = BoxInt() + v2 = BoxInt() + v3 = BoxInt() + v4 = BoxInt() + v5 = BoxInt() + v6 = BoxInt() + v7 = BoxInt() + v8 = BoxInt() + v9 = BoxInt() + v10 = BoxInt() + v11 = BoxInt() + v12 = BoxInt() + v13 = BoxInt() + v14 = BoxInt() + v15 = BoxInt() + tmp16 = BoxInt() + tmp17 = BoxInt() + cpu = CPU(None, None) + cpu.setup_once() + inputargs = [v1, v2, v3, v4, v5, v6, v7, v8, v9, v10] + operations = [ + 
ResOperation(rop.INT_IS_TRUE, [v3], tmp16), + ResOperation(rop.GUARD_TRUE, [tmp16], None, descr=faildescr1), + ResOperation(rop.INT_AND, [v7, ConstInt(31)], tmp17), + ResOperation(rop.INT_RSHIFT, [v5, tmp17], v11), + ResOperation(rop.INT_OR, [v6, v8], v12), + ResOperation(rop.GUARD_VALUE, [v11, ConstInt(-2)], None, descr=faildescr2), + ResOperation(rop.INT_LE, [ConstInt(1789569706), v10], v13), + ResOperation(rop.INT_IS_TRUE, [v4], v14), + ResOperation(rop.INT_XOR, [v14, v3], v15), + ResOperation(rop.GUARD_VALUE, [v8, ConstInt(-8)], None, descr=faildescr3), + ResOperation(rop.FINISH, [v1, v2, v9], None, descr=faildescr4), + ] + operations[1].setfailargs([v9, v1]) + operations[5].setfailargs([v10, v2, v11, v3]) + operations[9].setfailargs([v5, v7, v12, v14, v2, v13, v8]) + looptoken = JitCellToken() + cpu.compile_loop(inputargs, operations, looptoken) + args = [0 , -2 , 24 , 1 , -4 , 13 , -95 , 33 , 2 , -44] + op = cpu.execute_token(looptoken, *args) + assert op.identifier == 3 + assert cpu.get_latest_value_int(0) == -4 + assert cpu.get_latest_value_int(1) == -95 + assert cpu.get_latest_value_int(2) == 45 + assert cpu.get_latest_value_int(3) == 1 + assert cpu.get_latest_value_int(4) == -2 + assert cpu.get_latest_value_int(5) == 0 + assert cpu.get_latest_value_int(6) == 33 + + def test_int_add(self): + # random seed: 1202 + # block length: 4 + # AssertionError: Got 1431655764, expected 357913940 for value #3 + faildescr1 = BasicFailDescr(1) + faildescr2 = BasicFailDescr(2) + v1 = BoxInt() + v2 = BoxInt() + v3 = BoxInt() + v4 = BoxInt() + v5 = BoxInt() + v6 = BoxInt() + v7 = BoxInt() + v8 = BoxInt() + v9 = BoxInt() + v10 = BoxInt() + v11 = BoxInt() + tmp12 = BoxInt() + cpu = CPU(None, None) + cpu.setup_once() + inputargs = [v1, v2, v3, v4, v5, v6, v7, v8, v9, v10] + operations = [ + ResOperation(rop.INT_ADD, [ConstInt(-1073741825), v3], v11), + ResOperation(rop.INT_IS_TRUE, [v1], tmp12), + ResOperation(rop.GUARD_FALSE, [tmp12], None, descr=faildescr1), + 
ResOperation(rop.FINISH, [v8, v2, v10, v6, v7, v9, v5, v4], None, descr=faildescr2), + ] + operations[2].setfailargs([v10, v3, v6, v11, v9, v2]) + looptoken = JitCellToken() + cpu.compile_loop(inputargs, operations, looptoken) + args = [3 , -5 , 1431655765 , 47 , 12 , 1789569706 , 15 , 939524096 , 16 , -43] + op = cpu.execute_token(looptoken, *args) + assert op.identifier == 1 + assert cpu.get_latest_value_int(0) == -43 + assert cpu.get_latest_value_int(1) == 1431655765 + assert cpu.get_latest_value_int(2) == 1789569706 + assert cpu.get_latest_value_int(3) == 357913940 + assert cpu.get_latest_value_int(4) == 16 + assert cpu.get_latest_value_int(5) == -5 + + def test_wrong_result2(self): + # block length 10 + # random seed 1 + f1 = BasicFailDescr(1) + f2 = BasicFailDescr(2) + f3 = BasicFailDescr(3) + v1 = BoxInt() + v2 = BoxInt() + v3 = BoxInt() + v4 = BoxInt() + v5 = BoxInt() + v6 = BoxInt() + v7 = BoxInt() + v8 = BoxInt() + v9 = BoxInt() + v10 = BoxInt() + v11 = BoxInt() + v12 = BoxInt() + v13 = BoxInt() + v14 = BoxInt() + v15 = BoxInt() + cpu = CPU(None, None) + cpu.setup_once() + inputargs = [v1, v2, v3, v4, v5, v6, v7, v8, v9, v10] + operations = [ + ResOperation(rop.INT_LE, [v6, v1], v11), + ResOperation(rop.SAME_AS, [ConstInt(-14)], v12), + ResOperation(rop.INT_ADD, [ConstInt(24), v4], v13), + ResOperation(rop.UINT_RSHIFT, [v6, ConstInt(0)], v14), + ResOperation(rop.GUARD_VALUE, [v14, ConstInt(1)], None, descr=f3), + ResOperation(rop.INT_MUL, [v13, ConstInt(12)], v15), + ResOperation(rop.GUARD_FALSE, [v11], None, descr=f1), + ResOperation(rop.FINISH, [v2, v3, v5, v7, v10, v8, v9], None, descr=f2), + ] + operations[-2].setfailargs([v4, v10, v3, v9, v14, v2]) + operations[4].setfailargs([v14]) + looptoken = JitCellToken() + cpu.compile_loop(inputargs, operations, looptoken) + args = [14 , -20 , 18 , -2058005163 , 6 , 1 , -16 , 11 , 0 , 19] + op = cpu.execute_token(looptoken, *args) + assert op.identifier == 1 + assert cpu.get_latest_value_int(0) == -2058005163 
+ assert cpu.get_latest_value_int(1) == 19 + assert cpu.get_latest_value_int(2) == 18 + assert cpu.get_latest_value_int(3) == 0 + assert cpu.get_latest_value_int(4) == 1 + assert cpu.get_latest_value_int(5) == -20 From noreply at buildbot.pypy.org Fri Mar 2 12:36:32 2012 From: noreply at buildbot.pypy.org (hager) Date: Fri, 2 Mar 2012 12:36:32 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: remove obsolete information about old guard encoding style Message-ID: <20120302113632.D18E18204C@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r53105:4c2db371ef83 Date: 2012-03-02 03:35 -0800 http://bitbucket.org/pypy/pypy/changeset/4c2db371ef83/ Log: remove obsolete information about old guard encoding style diff --git a/pypy/jit/backend/ppc/ppc_assembler.py b/pypy/jit/backend/ppc/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppc_assembler.py @@ -66,16 +66,6 @@ class AssemblerPPC(OpAssembler): - FLOAT_TYPE = '\xED' - REF_TYPE = '\xEE' - INT_TYPE = '\xEF' - - STACK_LOC = '\xFC' - IMM_LOC = '\xFD' - # REG_LOC is empty - EMPTY_LOC = '\xFE' - END_OF_LOCS = '\xFF' - FORCE_INDEX_AREA = len(r.MANAGED_REGS) * WORD ENCODING_AREA = len(r.MANAGED_REGS) * WORD OFFSET_SPP_TO_GPR_SAVE_AREA = (FORCE_INDEX + FLOAT_INT_CONVERSION From noreply at buildbot.pypy.org Fri Mar 2 13:13:49 2012 From: noreply at buildbot.pypy.org (hager) Date: Fri, 2 Mar 2012 13:13:49 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: emit some information when hitting not implemented operations Message-ID: <20120302121349.05EE68204C@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r53106:285f2e51cbcc Date: 2012-03-02 04:12 -0800 http://bitbucket.org/pypy/pypy/changeset/285f2e51cbcc/ Log: emit some information when hitting not implemented operations diff --git a/pypy/jit/backend/ppc/ppc_assembler.py b/pypy/jit/backend/ppc/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppc_assembler.py +++ 
b/pypy/jit/backend/ppc/ppc_assembler.py @@ -1070,10 +1070,13 @@ assert 0, "not implemented yet" def notimplemented_op(self, op, arglocs, regalloc): - raise NotImplementedError, op + print "[PPC/asm] %s not implemented" % op.getopname() + raise NotImplementedError(op) def notimplemented_op_with_guard(self, op, guard_op, arglocs, regalloc): - raise NotImplementedError, op + print "[PPC/asm] %s with guard %s not implemented" % \ + (op.getopname(), guard_op.getopname()) + raise NotImplementedError(op) operations = [notimplemented_op] * (rop._LAST + 1) operations_with_guard = [notimplemented_op_with_guard] * (rop._LAST + 1) diff --git a/pypy/jit/backend/ppc/regalloc.py b/pypy/jit/backend/ppc/regalloc.py --- a/pypy/jit/backend/ppc/regalloc.py +++ b/pypy/jit/backend/ppc/regalloc.py @@ -920,11 +920,15 @@ return lambda self, op: fn(self, op, None) def notimplemented(self, op): - raise NotImplementedError, op + print "[PPC/regalloc] %s not implemented" % op.getopname() + raise NotImplementedError(op) def notimplemented_with_guard(self, op, guard_op): + print "[PPC/regalloc] %s with guard %s not implemented" % \ + (op.getopname(), guard_op.getopname()) + raise NotImplementedError(op) - raise NotImplementedError, op + operations = [notimplemented] * (rop._LAST + 1) operations_with_guard = [notimplemented_with_guard] * (rop._LAST + 1) From noreply at buildbot.pypy.org Fri Mar 2 13:33:00 2012 From: noreply at buildbot.pypy.org (hager) Date: Fri, 2 Mar 2012 13:33:00 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: use setup_method instead of setup_class Message-ID: <20120302123300.B45A08204C@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r53107:a3fb941acdca Date: 2012-03-02 04:32 -0800 http://bitbucket.org/pypy/pypy/changeset/a3fb941acdca/ Log: use setup_method instead of setup_class diff --git a/pypy/jit/backend/ppc/test/test_runner.py b/pypy/jit/backend/ppc/test/test_runner.py --- a/pypy/jit/backend/ppc/test/test_runner.py +++ 
b/pypy/jit/backend/ppc/test/test_runner.py @@ -19,9 +19,9 @@ class TestPPC(LLtypeBackendTest): - def setup_class(cls): - cls.cpu = PPC_64_CPU(rtyper=None, stats=FakeStats()) - cls.cpu.setup_once() + def setup_method(self, meth): + self.cpu = PPC_64_CPU(rtyper=None, stats=FakeStats()) + self.cpu.setup_once() def test_compile_loop_many_int_args(self): for numargs in range(2, 16): From noreply at buildbot.pypy.org Fri Mar 2 14:03:12 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 2 Mar 2012 14:03:12 +0100 (CET) Subject: [pypy-commit] pypy py3k: we cannot push frame.last_exception directly to the valuestack: instead, we wrap it inside a tiny operation error wrapper Message-ID: <20120302130312.A23088204C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53108:1466c3172b9b Date: 2012-03-02 14:02 +0100 http://bitbucket.org/pypy/pypy/changeset/1466c3172b9b/ Log: we cannot push frame.last_exception directly to the valuestack: instead, we wrap it inside a tiny operation error wrapper diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -469,3 +469,14 @@ def typed_unwrap_error_msg(space, expected, w_obj): type_name = space.type(w_obj).getname(space) return space.wrap("expected %s, got %s object" % (expected, type_name)) + + +from pypy.interpreter.baseobjspace import Wrappable + +class W_OperationError(Wrappable): + """ + Tiny applevel wrapper around an OperationError. + """ + + def __init__(self, operr): + self.operr = operr diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -529,10 +529,16 @@ self.setdictscope(w_locals) def POP_EXCEPT(self, oparg, next_instr): + from pypy.interpreter.error import W_OperationError assert self.space.py3k # on CPython, POP_EXCEPT also pops the block. 
Here, the block is # automatically popped by unrollstack() - self.last_exception = self.popvalue() + w_last_exception = self.popvalue() + if not isinstance(w_last_exception, W_OperationError): + msg = "expected an OperationError, got %s" % ( + self.space.str_w(w_last_exception)) + raise BytecodeCorruption(msg) + self.last_exception = w_last_exception.operr def POP_BLOCK(self, oparg, next_instr): block = self.pop_block() @@ -1277,8 +1283,11 @@ # instead of the traceback, we store the unroller object, # wrapped. if frame.space.py3k: + from pypy.interpreter.error import W_OperationError # this is popped by POP_EXCEPT, which is present only in py3k - frame.pushvalue(frame.last_exception) + w_last_exception = W_OperationError(frame.last_exception) + w_last_exception = frame.space.wrap(w_last_exception) + frame.pushvalue(w_last_exception) frame.pushvalue(frame.space.wrap(unroller)) frame.pushvalue(operationerr.get_w_value(frame.space)) frame.pushvalue(operationerr.w_type) diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -6,7 +6,7 @@ from pypy.interpreter.gateway import interp2app, BuiltinCode from pypy.interpreter.argument import Arguments from pypy.interpreter.baseobjspace import Wrappable, DescrMismatch -from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.error import OperationError, operationerrfmt, W_OperationError from pypy.tool.sourcetools import compile2, func_with_new_name from pypy.rlib.objectmodel import instantiate, compute_identity_hash, specialize from pypy.rlib.jit import promote @@ -940,3 +940,5 @@ SuspendedUnroller.typedef = TypeDef("SuspendedUnroller") SuspendedUnroller.typedef.acceptable_as_base_class = False + +W_OperationError.typedef = TypeDef("OperationError") From noreply at buildbot.pypy.org Fri Mar 2 14:40:12 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 2 Mar 2012 14:40:12 +0100 (CET) Subject: 
[pypy-commit] pypy py3k: after 145994f7f8f9 we pass the result of getattr(..., '__traceback__') to set_traceback(), which means that the annotator things it's a W_Root. Force the field to be annotated as PyTraceback Message-ID: <20120302134012.B22CE8204C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53109:0f407c6962c0 Date: 2012-03-02 14:39 +0100 http://bitbucket.org/pypy/pypy/changeset/0f407c6962c0/ Log: after 145994f7f8f9 we pass the result of getattr(..., '__traceback__') to set_traceback(), which means that the annotator things it's a W_Root. Force the field to be annotated as PyTraceback diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -283,6 +283,8 @@ only if the exception really propagates out of this frame, by executioncontext.leave() being called with got_exception=True. """ + from pypy.interpreter.pytraceback import PyTraceback + assert isinstance(traceback, PyTraceback) self._application_traceback = traceback # ____________________________________________________________ From noreply at buildbot.pypy.org Fri Mar 2 14:54:45 2012 From: noreply at buildbot.pypy.org (hager) Date: Fri, 2 Mar 2012 14:54:45 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: kill unused encoding/decoding functions Message-ID: <20120302135446.009238204C@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r53110:62a9e5c0e0be Date: 2012-03-02 05:53 -0800 http://bitbucket.org/pypy/pypy/changeset/62a9e5c0e0be/ Log: kill unused encoding/decoding functions diff --git a/pypy/jit/backend/ppc/helper/assembler.py b/pypy/jit/backend/ppc/helper/assembler.py --- a/pypy/jit/backend/ppc/helper/assembler.py +++ b/pypy/jit/backend/ppc/helper/assembler.py @@ -52,45 +52,6 @@ self.mc.rlwinm(res.value, res.value, 1, 31, 31) return f -def encode32(mem, i, n): - mem[i+3] = chr(n & 0xFF) - mem[i+2] = chr((n >> 8) & 0xFF) - mem[i+1] = chr((n >> 16) & 0xFF) - 
mem[i] = chr((n >> 24) & 0xFF) - -# XXX this sign extension looks a bit strange ... -# It is important for PPC64. -def decode32(mem, index): - value = ( ord(mem[index+3]) - | ord(mem[index+2]) << 8 - | ord(mem[index+1]) << 16 - | ord(mem[index]) << 24) - - rffi_value = rffi.cast(rffi.INT, value) - # do sign extension - return rffi.cast(lltype.Signed, rffi_value) - -def encode64(mem, i, n): - mem[i+7] = chr(n & 0xFF) - mem[i+6] = chr((n >> 8) & 0xFF) - mem[i+5] = chr((n >> 16) & 0xFF) - mem[i+4] = chr((n >> 24) & 0xFF) - mem[i+3] = chr((n >> 32) & 0xFF) - mem[i+2] = chr((n >> 40) & 0xFF) - mem[i+1] = chr((n >> 48) & 0xFF) - mem[i] = chr((n >> 56) & 0xFF) - -def decode64(mem, index): - value = ( ord(mem[index+7]) - | ord(mem[index+6]) << 8 - | ord(mem[index+5]) << 16 - | ord(mem[index+4]) << 24 - | ord(mem[index+3]) << 32 - | ord(mem[index+2]) << 40 - | ord(mem[index+1]) << 48 - | ord(mem[index]) << 56) - return intmask(value) - def count_reg_args(args): reg_args = 0 words = 0 diff --git a/pypy/jit/backend/ppc/test/test_helper.py b/pypy/jit/backend/ppc/test/test_helper.py deleted file mode 100644 --- a/pypy/jit/backend/ppc/test/test_helper.py +++ /dev/null @@ -1,25 +0,0 @@ -from pypy.jit.backend.ppc.helper.assembler import (encode32, decode32) - #encode64, decode64) - -def test_encode32(): - mem = [None]*4 - encode32(mem, 0, 1234567) - assert ''.join(mem) == '\x00\x12\xd6\x87' - mem = [None]*4 - encode32(mem, 0, 983040) - assert ''.join(mem) == '\x00\x0F\x00\x00' - -def test_decode32(): - mem = list('\x00\x12\xd6\x87') - assert decode32(mem, 0) == 1234567 - mem = list('\x00\x0F\x00\x00') - assert decode32(mem, 0) == 983040 - mem = list("\x00\x00\x00\x03") - assert decode32(mem, 0) == 3 - -def test_encode32_and_decode32(): - mem = [None] * 4 - for val in [1, 45654, -456456, 123, 99999]: - encode32(mem, 0, val) - assert decode32(mem, 0) == val - From noreply at buildbot.pypy.org Fri Mar 2 15:40:29 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 2 Mar 2012 
15:40:29 +0100 (CET) Subject: [pypy-commit] pypy default: we cannot mix space.{eq, lt, gt, ...}: we need to specialize the function Message-ID: <20120302144029.224F38204C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r53111:228da252ffd5 Date: 2012-03-02 15:39 +0100 http://bitbucket.org/pypy/pypy/changeset/228da252ffd5/ Log: we cannot mix space.{eq,lt,gt,...}: we need to specialize the function diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -11,6 +11,7 @@ from pypy.objspace.std.register_all import register_all from pypy.rlib.rarithmetic import ovfcheck from pypy.rlib.unroll import unrolling_iterable +from pypy.rlib.objectmodel import specialize from pypy.rpython.lltypesystem import lltype, rffi @@ -583,6 +584,7 @@ raise OperationError(space.w_ValueError, space.wrap(msg)) # Compare methods + @specialize.arg(3) def _cmp_impl(space, self, other, space_fn): if isinstance(other, W_ArrayBase): w_lst1 = array_tolist__Array(space, self) From noreply at buildbot.pypy.org Fri Mar 2 15:40:30 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 2 Mar 2012 15:40:30 +0100 (CET) Subject: [pypy-commit] pypy py3k: hg merge default Message-ID: <20120302144030.6FE608204C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53112:88183ba02b87 Date: 2012-03-02 15:40 +0100 http://bitbucket.org/pypy/pypy/changeset/88183ba02b87/ Log: hg merge default diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -266,6 +266,38 @@ res = self.cpu.get_latest_value_int(0) assert res == 20 + def test_compile_big_bridge_out_of_small_loop(self): + i0 = BoxInt() + faildescr1 = BasicFailDescr(1) + looptoken = JitCellToken() + operations = [ + ResOperation(rop.GUARD_FALSE, [i0], None, descr=faildescr1), + 
ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(2)), + ] + inputargs = [i0] + operations[0].setfailargs([i0]) + self.cpu.compile_loop(inputargs, operations, looptoken) + + i1list = [BoxInt() for i in range(1000)] + bridge = [] + iprev = i0 + for i1 in i1list: + bridge.append(ResOperation(rop.INT_ADD, [iprev, ConstInt(1)], i1)) + iprev = i1 + bridge.append(ResOperation(rop.GUARD_FALSE, [i0], None, + descr=BasicFailDescr(3))) + bridge.append(ResOperation(rop.FINISH, [], None, + descr=BasicFailDescr(4))) + bridge[-2].setfailargs(i1list) + + self.cpu.compile_bridge(faildescr1, [i0], bridge, looptoken) + + fail = self.cpu.execute_token(looptoken, 1) + assert fail.identifier == 3 + for i in range(1000): + res = self.cpu.get_latest_value_int(i) + assert res == 2 + i + def test_get_latest_value_count(self): i0 = BoxInt() i1 = BoxInt() diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -1379,7 +1379,8 @@ elif opnum == rop.GUARD_NO_OVERFLOW: # Produced by int_xxx_ovf(). The pc is just after the opcode. # We get here because it did not used to overflow, but now it does. - return get_llexception(self.cpu, OverflowError()) + if not dont_change_position: + return get_llexception(self.cpu, OverflowError()) # elif opnum == rop.GUARD_OVERFLOW: # Produced by int_xxx_ovf(). The pc is just after the opcode. 
diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -2064,11 +2064,12 @@ pass # XXX we want to do something special in resume descr, # but not now elif opnum == rop.GUARD_NO_OVERFLOW: # an overflow now detected - self.execute_raised(OverflowError(), constant=True) - try: - self.finishframe_exception() - except ChangeFrame: - pass + if not dont_change_position: + self.execute_raised(OverflowError(), constant=True) + try: + self.finishframe_exception() + except ChangeFrame: + pass elif opnum == rop.GUARD_OVERFLOW: # no longer overflowing self.clear_exception() else: diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -288,10 +288,10 @@ if y&4 == 0: x1, x2 = x2, x1 return res + res = self.meta_interp(f, [6, sys.maxint, 32, 48]) + assert res == f(6, sys.maxint, 32, 48) res = self.meta_interp(f, [sys.maxint, 6, 32, 48]) assert res == f(sys.maxint, 6, 32, 48) - res = self.meta_interp(f, [6, sys.maxint, 32, 48]) - assert res == f(6, sys.maxint, 32, 48) def test_loop_invariant_intbox(self): diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -100,7 +100,7 @@ if not kwds.get('translate_support_code', False): warmrunnerdesc.metainterp_sd.profiler.finish() warmrunnerdesc.metainterp_sd.cpu.finish_once() - print '~~~ return value:', res + print '~~~ return value:', repr(res) while repeat > 1: print '~' * 79 res1 = interp.eval_graph(graph, args) diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -10,6 +10,7 @@ from pypy.objspace.std.register_all import register_all from pypy.rlib.rarithmetic import ovfcheck from pypy.rlib.unroll 
import unrolling_iterable +from pypy.rlib.objectmodel import specialize from pypy.rpython.lltypesystem import lltype, rffi @@ -572,6 +573,7 @@ raise OperationError(space.w_ValueError, space.wrap(msg)) # Compare methods + @specialize.arg(3) def _cmp_impl(space, self, other, space_fn): if isinstance(other, W_ArrayBase): w_lst1 = array_tolist__Array(space, self) From noreply at buildbot.pypy.org Fri Mar 2 16:00:38 2012 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Mar 2012 16:00:38 +0100 (CET) Subject: [pypy-commit] pypy default: Test and fix. Message-ID: <20120302150038.4BB698204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r53113:18eccde6f1a4 Date: 2012-03-02 16:00 +0100 http://bitbucket.org/pypy/pypy/changeset/18eccde6f1a4/ Log: Test and fix. diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -160,13 +160,15 @@ def make_array(mytype): + W_ArrayBase = globals()['W_ArrayBase'] + class W_Array(W_ArrayBase): itemsize = mytype.bytes typecode = mytype.typecode @staticmethod def register(typeorder): - typeorder[W_Array] = [] + typeorder[W_Array] = [(W_ArrayBase, None)] def __init__(self, space): self.space = space @@ -586,29 +588,26 @@ # Compare methods @specialize.arg(3) def _cmp_impl(space, self, other, space_fn): - if isinstance(other, W_ArrayBase): - w_lst1 = array_tolist__Array(space, self) - w_lst2 = space.call_method(other, 'tolist') - return space_fn(w_lst1, w_lst2) - else: - return space.w_NotImplemented + w_lst1 = array_tolist__Array(space, self) + w_lst2 = space.call_method(other, 'tolist') + return space_fn(w_lst1, w_lst2) - def eq__Array_ANY(space, self, other): + def eq__Array_ArrayBase(space, self, other): return _cmp_impl(space, self, other, space.eq) - def ne__Array_ANY(space, self, other): + def ne__Array_ArrayBase(space, self, other): return _cmp_impl(space, self, other, space.ne) - def lt__Array_ANY(space, 
self, other): + def lt__Array_ArrayBase(space, self, other): return _cmp_impl(space, self, other, space.lt) - def le__Array_ANY(space, self, other): + def le__Array_ArrayBase(space, self, other): return _cmp_impl(space, self, other, space.le) - def gt__Array_ANY(space, self, other): + def gt__Array_ArrayBase(space, self, other): return _cmp_impl(space, self, other, space.gt) - def ge__Array_ANY(space, self, other): + def ge__Array_ArrayBase(space, self, other): return _cmp_impl(space, self, other, space.ge) # Misc methods diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -845,8 +845,11 @@ cls.maxint = sys.maxint class AppTestArray(BaseArrayTests): + OPTIONS = {} + def setup_class(cls): - cls.space = gettestobjspace(usemodules=('array', 'struct', '_rawffi')) + cls.space = gettestobjspace(usemodules=('array', 'struct', '_rawffi'), + **cls.OPTIONS) cls.w_array = cls.space.appexec([], """(): import array return array.array @@ -868,3 +871,7 @@ a = self.array('b', range(4)) a[::-1] = a assert a == self.array('b', [3, 2, 1, 0]) + + +class AppTestArrayBuiltinShortcut(AppTestArray): + OPTIONS = {'objspace.std.builtinshortcut': True} From noreply at buildbot.pypy.org Fri Mar 2 16:01:45 2012 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Mar 2012 16:01:45 +0100 (CET) Subject: [pypy-commit] pypy default: Fix the test. Message-ID: <20120302150145.637DE8204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r53114:6f07c52e6ee9 Date: 2012-03-02 16:01 +0100 http://bitbucket.org/pypy/pypy/changeset/6f07c52e6ee9/ Log: Fix the test. 
diff --git a/pypy/jit/metainterp/test/test_compile.py b/pypy/jit/metainterp/test/test_compile.py --- a/pypy/jit/metainterp/test/test_compile.py +++ b/pypy/jit/metainterp/test/test_compile.py @@ -14,7 +14,7 @@ ts = typesystem.llhelper def __init__(self): self.seen = [] - def compile_loop(self, inputargs, operations, token, name=''): + def compile_loop(self, inputargs, operations, token, log=True, name=''): self.seen.append((inputargs, operations, token)) class FakeLogger(object): From noreply at buildbot.pypy.org Fri Mar 2 16:43:13 2012 From: noreply at buildbot.pypy.org (hager) Date: Fri, 2 Mar 2012 16:43:13 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: add debug information Message-ID: <20120302154313.281FA8204C@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r53115:5e0e0edb387e Date: 2012-03-02 07:42 -0800 http://bitbucket.org/pypy/pypy/changeset/5e0e0edb387e/ Log: add debug information diff --git a/pypy/jit/backend/ppc/ppc_assembler.py b/pypy/jit/backend/ppc/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppc_assembler.py @@ -14,20 +14,22 @@ import pypy.jit.backend.ppc.register as r import pypy.jit.backend.ppc.condition as c from pypy.jit.metainterp.history import AbstractFailDescr +from pypy.jit.metainterp.history import ConstInt, BoxInt from pypy.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper from pypy.jit.backend.model import CompiledLoopToken from pypy.rpython.lltypesystem import lltype, rffi, llmemory -from pypy.jit.metainterp.resoperation import rop +from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.metainterp.history import (INT, REF, FLOAT) from pypy.jit.backend.x86.support import values_array from pypy.rlib.debug import (debug_print, debug_start, debug_stop, have_debug_prints) from pypy.rlib import rgc from pypy.rpython.annlowlevel import llhelper -from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.objectmodel 
import we_are_translated, specialize from pypy.rpython.lltypesystem.lloperation import llop from pypy.jit.backend.ppc.locations import StackLocation, get_spp_offset from pypy.rlib.jit import AsmInfo +from pypy.rlib.objectmodel import compute_unique_id memcpy_fn = rffi.llexternal('memcpy', [llmemory.Address, llmemory.Address, rffi.SIZE_T], lltype.Void, @@ -475,6 +477,55 @@ debug_print(prefix + ':' + str(struct.i)) debug_stop('jit-backend-counts') + # XXX: merge with x86 + def _register_counter(self, tp, number, token): + # YYY very minor leak -- we need the counters to stay alive + # forever, just because we want to report them at the end + # of the process + struct = lltype.malloc(DEBUG_COUNTER, flavor='raw', + track_allocation=False) + struct.i = 0 + struct.type = tp + if tp == 'b' or tp == 'e': + struct.number = number + else: + assert token + struct.number = compute_unique_id(token) + self.loop_run_counters.append(struct) + return struct + + def _append_debugging_code(self, operations, tp, number, token): + counter = self._register_counter(tp, number, token) + c_adr = ConstInt(rffi.cast(lltype.Signed, counter)) + box = BoxInt() + box2 = BoxInt() + ops = [ResOperation(rop.GETFIELD_RAW, [c_adr], + box, descr=self.debug_counter_descr), + ResOperation(rop.INT_ADD, [box, ConstInt(1)], box2), + ResOperation(rop.SETFIELD_RAW, [c_adr, box2], + None, descr=self.debug_counter_descr)] + operations.extend(ops) + + @specialize.argtype(1) + def _inject_debugging_code(self, looptoken, operations, tp, number): + if self._debug: + # before doing anything, let's increase a counter + s = 0 + for op in operations: + s += op.getopnum() + looptoken._arm_debug_checksum = s + + newoperations = [] + self._append_debugging_code(newoperations, tp, number, + None) + for op in operations: + newoperations.append(op) + if op.getopnum() == rop.LABEL: + self._append_debugging_code(newoperations, 'l', number, + op.getdescr()) + operations = newoperations + return operations + @staticmethod def 
_release_gil_shadowstack(): before = rffi.aroundstate.before @@ -508,6 +559,10 @@ assert len(set(inputargs)) == len(inputargs) operations = self.setup(looptoken, operations) + + if log: + operations = self._inject_debugging_code(looptoken, operations, + 'e', looptoken.number) self.startpos = self.mc.currpos() regalloc = Regalloc(assembler=self, frame_manager=PPCFrameManager()) @@ -541,10 +596,11 @@ looptoken._ppc_func_addr = fdescr self.process_pending_guards(loop_start) - if not we_are_translated(): - print 'Loop', inputargs, operations - self.mc._dump_trace(loop_start, 'loop_%s.asm' % self.cpu.total_compiled_loops) - print 'Done assembling loop with token %r' % looptoken + + if log and not we_are_translated(): + self.mc._dump_trace(real_start, + 'loop_%s.asm' % self.cpu.total_compiled_loops) + ops_offset = self.mc.ops_offset self._teardown() @@ -567,6 +623,10 @@ def assemble_bridge(self, faildescr, inputargs, operations, looptoken, log): operations = self.setup(looptoken, operations) + descr_number = self.cpu.get_fail_descr_number(faildescr) + if log: + operations = self._inject_debugging_code(faildescr, operations, + 'b', descr_number) assert isinstance(faildescr, AbstractFailDescr) code = self._find_failure_recovery_bytecode(faildescr) arglocs = self.decode_inputargs(code) @@ -595,12 +655,15 @@ # for the benefit of tests faildescr._ppc_bridge_frame_depth = self.current_clt.frame_depth faildescr._ppc_bridge_param_depth = self.current_clt.param_depth + if log: + self.mc._dump_trace(rawstart, 'bridge_%d.asm' % + self.cpu.total_compiled_bridges) self._patch_sp_offset(sp_patch_location, rawstart) - if not we_are_translated(): - print 'Loop', inputargs, operations - self.mc._dump_trace(rawstart, 'bridge_%s.asm' % self.cpu.total_compiled_loops) - print 'Done assembling bridge with token %r' % looptoken + #if not we_are_translated(): + # print 'Loop', inputargs, operations + # self.mc._dump_trace(rawstart, 'bridge_%s.asm' % self.cpu.total_compiled_loops) + # print 
'Done assembling bridge with token %r' % looptoken ops_offset = self.mc.ops_offset self._teardown() diff --git a/pypy/jit/backend/ppc/test/test_runner.py b/pypy/jit/backend/ppc/test/test_runner.py --- a/pypy/jit/backend/ppc/test/test_runner.py +++ b/pypy/jit/backend/ppc/test/test_runner.py @@ -124,3 +124,44 @@ assert fail.identifier == 1 for i in range(10): assert self.cpu.get_latest_value_int(i) == args[i] + + def test_debugger_on(self): + from pypy.rlib import debug + + targettoken, preambletoken = TargetToken(), TargetToken() + loop = """ + [i0] + label(i0, descr=preambletoken) + debug_merge_point('xyz', 0) + i1 = int_add(i0, 1) + i2 = int_ge(i1, 10) + guard_false(i2) [] + label(i1, descr=targettoken) + debug_merge_point('xyz', 0) + i11 = int_add(i1, 1) + i12 = int_ge(i11, 10) + guard_false(i12) [] + jump(i11, descr=targettoken) + """ + ops = parse(loop, namespace={'targettoken': targettoken, + 'preambletoken': preambletoken}) + debug._log = dlog = debug.DebugLog() + try: + self.cpu.asm.set_debug(True) + looptoken = JitCellToken() + self.cpu.compile_loop(ops.inputargs, ops.operations, looptoken) + self.cpu.execute_token(looptoken, 0) + # check debugging info + struct = self.cpu.asm.loop_run_counters[0] + assert struct.i == 1 + struct = self.cpu.asm.loop_run_counters[1] + assert struct.i == 1 + struct = self.cpu.asm.loop_run_counters[2] + assert struct.i == 9 + self.cpu.finish_once() + finally: + debug._log = None + l0 = ('debug_print', 'entry -1:1') + l1 = ('debug_print', preambletoken.repr_of_descr() + ':1') + l2 = ('debug_print', targettoken.repr_of_descr() + ':9') + assert ('jit-backend-counts', [l0, l1, l2]) in dlog From noreply at buildbot.pypy.org Fri Mar 2 16:50:30 2012 From: noreply at buildbot.pypy.org (hager) Date: Fri, 2 Mar 2012 16:50:30 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: kill outcommented lines Message-ID: <20120302155030.711C58204C@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: 
r53116:e1151080d3a0 Date: 2012-03-02 07:49 -0800 http://bitbucket.org/pypy/pypy/changeset/e1151080d3a0/ Log: kill outcommented lines diff --git a/pypy/jit/backend/ppc/ppc_assembler.py b/pypy/jit/backend/ppc/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppc_assembler.py @@ -660,10 +660,6 @@ self.cpu.total_compiled_bridges) self._patch_sp_offset(sp_patch_location, rawstart) - #if not we_are_translated(): - # print 'Loop', inputargs, operations - # self.mc._dump_trace(rawstart, 'bridge_%s.asm' % self.cpu.total_compiled_loops) - # print 'Done assembling bridge with token %r' % looptoken ops_offset = self.mc.ops_offset self._teardown() From noreply at buildbot.pypy.org Fri Mar 2 17:10:38 2012 From: noreply at buildbot.pypy.org (RonnyPfannschmidt) Date: Fri, 2 Mar 2012 17:10:38 +0100 (CET) Subject: [pypy-commit] pypy pytest: testrunner: only do the monkeypatching in scratchbox if its required Message-ID: <20120302161038.34A5A8204C@wyvern.cs.uni-duesseldorf.de> Author: Ronny Pfannschmidt Branch: pytest Changeset: r53117:cea99ad451e6 Date: 2012-03-02 17:04 +0100 http://bitbucket.org/pypy/pypy/changeset/cea99ad451e6/ Log: testrunner: only do the monkeypatching in scratchbox if its required diff --git a/testrunner/scratchbox_runner.py b/testrunner/scratchbox_runner.py --- a/testrunner/scratchbox_runner.py +++ b/testrunner/scratchbox_runner.py @@ -14,14 +14,14 @@ def dry_run_scratchbox(args, cwd, out, timeout=None): return dry_run(args_for_scratchbox(cwd, args), cwd, out, timeout) -import runner -# XXX hack hack hack -dry_run = runner.dry_run -run = runner.run +if __name__ == '__main__': + import runner + # XXX hack hack hack + dry_run = runner.dry_run + run = runner.run -runner.dry_run = dry_run_scratchbox -runner.run = run_scratchbox + runner.dry_run = dry_run_scratchbox + runner.run = run_scratchbox -if __name__ == '__main__': import sys runner.main(sys.argv) From noreply at buildbot.pypy.org Fri Mar 2 17:10:39 2012 From: noreply 
at buildbot.pypy.org (RonnyPfannschmidt) Date: Fri, 2 Mar 2012 17:10:39 +0100 (CET) Subject: [pypy-commit] pypy pytest: testrunner: also generate junitxml for each test driver instance Message-ID: <20120302161039.663CB8204C@wyvern.cs.uni-duesseldorf.de> Author: Ronny Pfannschmidt Branch: pytest Changeset: r53118:017c3d40ee44 Date: 2012-03-02 17:08 +0100 http://bitbucket.org/pypy/pypy/changeset/017c3d40ee44/ Log: testrunner: also generate junitxml for each test driver instance this is meant to give stdout/err later for use in the buildbot diff --git a/testrunner/runner.py b/testrunner/runner.py --- a/testrunner/runner.py +++ b/testrunner/runner.py @@ -107,7 +107,10 @@ do_dry_run=False, timeout=None, _win32=(sys.platform=='win32')): args = interp + test_driver - args += ['-p', 'resultlog', '--resultlog=%s' % logfname, test] + args += ['-p', 'resultlog', + '--resultlog=%s' % logfname, + '--junitxml=%s.junit' % logfname, + test] args = map(str, args) interp0 = args[0] diff --git a/testrunner/test/test_runner.py b/testrunner/test/test_runner.py --- a/testrunner/test/test_runner.py +++ b/testrunner/test/test_runner.py @@ -119,6 +119,8 @@ 'driver', 'darg', '-p', 'resultlog', '--resultlog=LOGFILE', + '--junitxml=LOGFILE.junit', + 'test_one'] assert self.called == (expected, '/wd', 'out', 'secs') @@ -136,8 +138,9 @@ 'driver', 'darg', '-p', 'resultlog', '--resultlog=LOGFILE', + '--junitxml=LOGFILE.junit', 'test_one'] - + assert self.called[0] == expected assert self.called == (expected, '/wd', 'out', 'secs') assert res == 0 From noreply at buildbot.pypy.org Fri Mar 2 17:11:29 2012 From: noreply at buildbot.pypy.org (hager) Date: Fri, 2 Mar 2012 17:11:29 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: add test from ARM backend Message-ID: <20120302161129.C97878204C@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r53119:9028367e2e13 Date: 2012-03-02 08:10 -0800 http://bitbucket.org/pypy/pypy/changeset/9028367e2e13/ Log: add test from ARM 
backend diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -714,6 +714,27 @@ descr=calldescr) assert res.getfloat() == 4.0 + def test_call_box_func(self): + def a(a1, a2): + return a1 + a2 + def b(b1, b2): + return b1 * b2 + + arg1 = 40 + arg2 = 2 + for f in [a, b]: + TP = lltype.Signed + FPTR = self.Ptr(self.FuncType([TP, TP], TP)) + func_ptr = llhelper(FPTR, f) + FUNC = deref(FPTR) + funcconst = self.get_funcbox(self.cpu, func_ptr) + funcbox = funcconst.clonebox() + calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo.MOST_GENERAL) + res = self.execute_operation(rop.CALL, + [funcbox, BoxInt(arg1), BoxInt(arg2)], + 'int', descr=calldescr) + assert res.getint() == f(arg1, arg2) def test_field_basic(self): t_box, T_box = self.alloc_instance(self.T) From notifications-noreply at bitbucket.org Fri Mar 2 17:16:22 2012 From: notifications-noreply at bitbucket.org (Bitbucket) Date: Fri, 02 Mar 2012 16:16:22 -0000 Subject: [pypy-commit] Notification: lang-js Message-ID: <20120302161622.30567.64023@bitbucket01.managed.contegix.com> You have received a notification from Rafael Caricio. Hi, I forked lang-js. My fork is at https://bitbucket.org/rjcf/lang-js. -- Disable notifications at https://bitbucket.org/account/notifications/ From notifications-noreply at bitbucket.org Fri Mar 2 17:49:51 2012 From: notifications-noreply at bitbucket.org (Bitbucket) Date: Fri, 02 Mar 2012 16:49:51 -0000 Subject: [pypy-commit] Notification: pypy-llvm Message-ID: <20120302164951.6877.31157@bitbucket05.managed.contegix.com> You have received a notification from Manuel Jacob. Hi, I forked pypy. My fork is at https://bitbucket.org/mjacob/pypy-llvm. 
-- Disable notifications at https://bitbucket.org/account/notifications/ From noreply at buildbot.pypy.org Fri Mar 2 21:24:41 2012 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Mar 2012 21:24:41 +0100 (CET) Subject: [pypy-commit] pypy continulet-jit-2: hg merge default Message-ID: <20120302202441.0DB638204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-jit-2 Changeset: r53120:bb90c26d0cd1 Date: 2012-03-02 16:02 +0100 http://bitbucket.org/pypy/pypy/changeset/bb90c26d0cd1/ Log: hg merge default diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -1379,7 +1379,8 @@ elif opnum == rop.GUARD_NO_OVERFLOW: # Produced by int_xxx_ovf(). The pc is just after the opcode. # We get here because it did not used to overflow, but now it does. - return get_llexception(self.cpu, OverflowError()) + if not dont_change_position: + return get_llexception(self.cpu, OverflowError()) # elif opnum == rop.GUARD_OVERFLOW: # Produced by int_xxx_ovf(). The pc is just after the opcode. 
diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -2064,11 +2064,12 @@ pass # XXX we want to do something special in resume descr, # but not now elif opnum == rop.GUARD_NO_OVERFLOW: # an overflow now detected - self.execute_raised(OverflowError(), constant=True) - try: - self.finishframe_exception() - except ChangeFrame: - pass + if not dont_change_position: + self.execute_raised(OverflowError(), constant=True) + try: + self.finishframe_exception() + except ChangeFrame: + pass elif opnum == rop.GUARD_OVERFLOW: # no longer overflowing self.clear_exception() else: diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -288,10 +288,10 @@ if y&4 == 0: x1, x2 = x2, x1 return res + res = self.meta_interp(f, [6, sys.maxint, 32, 48]) + assert res == f(6, sys.maxint, 32, 48) res = self.meta_interp(f, [sys.maxint, 6, 32, 48]) assert res == f(sys.maxint, 6, 32, 48) - res = self.meta_interp(f, [6, sys.maxint, 32, 48]) - assert res == f(6, sys.maxint, 32, 48) def test_loop_invariant_intbox(self): diff --git a/pypy/jit/metainterp/test/test_compile.py b/pypy/jit/metainterp/test/test_compile.py --- a/pypy/jit/metainterp/test/test_compile.py +++ b/pypy/jit/metainterp/test/test_compile.py @@ -14,7 +14,7 @@ ts = typesystem.llhelper def __init__(self): self.seen = [] - def compile_loop(self, inputargs, operations, token, name=''): + def compile_loop(self, inputargs, operations, token, log=True, name=''): self.seen.append((inputargs, operations, token)) class FakeLogger(object): diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -100,7 +100,7 @@ if not kwds.get('translate_support_code', False): warmrunnerdesc.metainterp_sd.profiler.finish() 
warmrunnerdesc.metainterp_sd.cpu.finish_once() - print '~~~ return value:', res + print '~~~ return value:', repr(res) while repeat > 1: print '~' * 79 res1 = interp.eval_graph(graph, args) diff --git a/pypy/module/_md5/test/test_md5.py b/pypy/module/_md5/test/test_md5.py --- a/pypy/module/_md5/test/test_md5.py +++ b/pypy/module/_md5/test/test_md5.py @@ -28,7 +28,7 @@ assert self.md5.digest_size == 16 #assert self.md5.digestsize == 16 -- not on CPython assert self.md5.md5().digest_size == 16 - if sys.version >= (2, 5): + if sys.version_info >= (2, 5): assert self.md5.blocksize == 1 assert self.md5.md5().digestsize == 16 diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -11,6 +11,7 @@ from pypy.objspace.std.register_all import register_all from pypy.rlib.rarithmetic import ovfcheck from pypy.rlib.unroll import unrolling_iterable +from pypy.rlib.objectmodel import specialize from pypy.rpython.lltypesystem import lltype, rffi @@ -159,13 +160,15 @@ def make_array(mytype): + W_ArrayBase = globals()['W_ArrayBase'] + class W_Array(W_ArrayBase): itemsize = mytype.bytes typecode = mytype.typecode @staticmethod def register(typeorder): - typeorder[W_Array] = [] + typeorder[W_Array] = [(W_ArrayBase, None)] def __init__(self, space): self.space = space @@ -583,30 +586,28 @@ raise OperationError(space.w_ValueError, space.wrap(msg)) # Compare methods + @specialize.arg(3) def _cmp_impl(space, self, other, space_fn): - if isinstance(other, W_ArrayBase): - w_lst1 = array_tolist__Array(space, self) - w_lst2 = space.call_method(other, 'tolist') - return space_fn(w_lst1, w_lst2) - else: - return space.w_NotImplemented + w_lst1 = array_tolist__Array(space, self) + w_lst2 = space.call_method(other, 'tolist') + return space_fn(w_lst1, w_lst2) - def eq__Array_ANY(space, self, other): + def eq__Array_ArrayBase(space, self, other): return _cmp_impl(space, self, other, 
space.eq) - def ne__Array_ANY(space, self, other): + def ne__Array_ArrayBase(space, self, other): return _cmp_impl(space, self, other, space.ne) - def lt__Array_ANY(space, self, other): + def lt__Array_ArrayBase(space, self, other): return _cmp_impl(space, self, other, space.lt) - def le__Array_ANY(space, self, other): + def le__Array_ArrayBase(space, self, other): return _cmp_impl(space, self, other, space.le) - def gt__Array_ANY(space, self, other): + def gt__Array_ArrayBase(space, self, other): return _cmp_impl(space, self, other, space.gt) - def ge__Array_ANY(space, self, other): + def ge__Array_ArrayBase(space, self, other): return _cmp_impl(space, self, other, space.ge) # Misc methods diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -845,8 +845,11 @@ cls.maxint = sys.maxint class AppTestArray(BaseArrayTests): + OPTIONS = {} + def setup_class(cls): - cls.space = gettestobjspace(usemodules=('array', 'struct', '_rawffi')) + cls.space = gettestobjspace(usemodules=('array', 'struct', '_rawffi'), + **cls.OPTIONS) cls.w_array = cls.space.appexec([], """(): import array return array.array @@ -868,3 +871,7 @@ a = self.array('b', range(4)) a[::-1] = a assert a == self.array('b', [3, 2, 1, 0]) + + +class AppTestArrayBuiltinShortcut(AppTestArray): + OPTIONS = {'objspace.std.builtinshortcut': True} From noreply at buildbot.pypy.org Fri Mar 2 21:24:42 2012 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Mar 2012 21:24:42 +0100 (CET) Subject: [pypy-commit] pypy continulet-jit-2: Fix. Message-ID: <20120302202442.4AA278204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-jit-2 Changeset: r53121:3436f7e6b821 Date: 2012-03-02 15:42 +0000 http://bitbucket.org/pypy/pypy/changeset/3436f7e6b821/ Log: Fix. 
diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -2282,8 +2282,7 @@ use_words = (2 + max(asmgcroot.INDEX_OF_EBP, asmgcroot.FRAME_PTR) + 1) pos = self._regalloc.fm.reserve_location_in_frame(use_words) - css = get_ebp_ofs(pos + use_words - 1) - xxxxxxxx # ^^^^ + css = get_ebp_ofs(pos) self._regalloc.close_stack_struct = css # The location where the future CALL will put its return address # will be [ESP-WORD]. But we can't use that as the next frame's From noreply at buildbot.pypy.org Fri Mar 2 21:24:43 2012 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Mar 2012 21:24:43 +0100 (CET) Subject: [pypy-commit] pypy continulet-jit-2: Turn this magic constant "2" into a CONSTANT. Message-ID: <20120302202443.878A08204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-jit-2 Changeset: r53122:4d76aae321b7 Date: 2012-03-02 16:46 +0100 http://bitbucket.org/pypy/pypy/changeset/4d76aae321b7/ Log: Turn this magic constant "2" into a CONSTANT. diff --git a/pypy/jit/backend/x86/arch.py b/pypy/jit/backend/x86/arch.py --- a/pypy/jit/backend/x86/arch.py +++ b/pypy/jit/backend/x86/arch.py @@ -37,6 +37,7 @@ # order as the one in the comments above); but whereas the real stack would # have the spilled values stored in (ebp-20), (ebp-24), etc., the off-stack # has them stored in (ebp+8), (ebp+12), etc. +OFFSTACK_START_AT_WORD = 2 # # In stacklet mode, the real frame contains always just OFFSTACK_REAL_FRAME # words reserved for temporary usage like call arguments. 
To maintain diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -24,6 +24,7 @@ from pypy.jit.backend.x86.arch import WORD, FRAME_FIXED_SIZE from pypy.jit.backend.x86.arch import IS_X86_32, IS_X86_64, MY_COPY_OF_REGS from pypy.jit.backend.x86.arch import OFFSTACK_REAL_FRAME +from pypy.jit.backend.x86.arch import OFFSTACK_START_AT_WORD from pypy.rlib.rarithmetic import r_longlong class X86RegisterManager(RegisterManager): @@ -1550,7 +1551,7 @@ def get_ebp_ofs(position): # Argument is a frame position (0, 1, 2...). # Returns (ebp+8), (ebp+12), (ebp+16)... - return WORD * (2 + position) + return WORD * (OFFSTACK_START_AT_WORD + position) def _valid_addressing_size(size): return size == 1 or size == 2 or size == 4 or size == 8 From noreply at buildbot.pypy.org Fri Mar 2 21:24:44 2012 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Mar 2012 21:24:44 +0100 (CET) Subject: [pypy-commit] pypy continulet-jit-2: Fix. Message-ID: <20120302202444.B93008204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-jit-2 Changeset: r53123:4d38fc264f1e Date: 2012-03-02 16:57 +0100 http://bitbucket.org/pypy/pypy/changeset/4d38fc264f1e/ Log: Fix. 
diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -514,7 +514,7 @@ operations, self.current_clt.allgcrefs) - frame_size_pos = self._enter_bridge_code() + frame_size_pos = self._enter_bridge_code(regalloc) (frame_depth #, param_depth ) = self._assemble(regalloc, operations) codeendpos = self.mc.get_relative_pos() @@ -725,15 +725,15 @@ self.mc.LEA32_rb(esp.value, 0) return self.mc.get_relative_pos() - 4 - def _enter_bridge_code(self): + def _enter_bridge_code(self, regalloc): # XXX XXX far too heavy saving and restoring j = 0 if self.cpu.supports_floats: - for reg in self._regalloc.xrm.save_around_call_regs: + for reg in regalloc.xrm.save_around_call_regs: self.mc.MOVSD_sx(j, reg.value) j += 8 # - save_regs = self._regalloc.rm.save_around_call_regs + save_regs = regalloc.rm.save_around_call_regs if IS_X86_32: assert len(save_regs) == 3 self.mc.MOV_sr(j, save_regs[0].value) @@ -768,7 +768,7 @@ # if self.cpu.supports_floats: j = 0 - for reg in self._regalloc.xrm.save_around_call_regs: + for reg in regalloc.xrm.save_around_call_regs: self.mc.MOVSD_xs(reg.value, j) j += 8 # From noreply at buildbot.pypy.org Fri Mar 2 21:24:45 2012 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Mar 2012 21:24:45 +0100 (CET) Subject: [pypy-commit] pypy default: Rework a bit the "--help" format, and split the JIT-related help Message-ID: <20120302202445.EF9D48204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r53124:935c947b80cf Date: 2012-03-02 20:14 +0100 http://bitbucket.org/pypy/pypy/changeset/935c947b80cf/ Log: Rework a bit the "--help" format, and split the JIT-related help in its own subpage obtained from "--jit help". 
diff --git a/pypy/jit/metainterp/optimizeopt/__init__.py b/pypy/jit/metainterp/optimizeopt/__init__.py --- a/pypy/jit/metainterp/optimizeopt/__init__.py +++ b/pypy/jit/metainterp/optimizeopt/__init__.py @@ -9,7 +9,7 @@ from pypy.jit.metainterp.optimizeopt.simplify import OptSimplify from pypy.jit.metainterp.optimizeopt.pure import OptPure from pypy.jit.metainterp.optimizeopt.earlyforce import OptEarlyForce -from pypy.rlib.jit import PARAMETERS +from pypy.rlib.jit import PARAMETERS, ENABLE_ALL_OPTS from pypy.rlib.unroll import unrolling_iterable from pypy.rlib.debug import debug_start, debug_stop, debug_print @@ -30,6 +30,9 @@ ALL_OPTS_LIST = [name for name, _ in ALL_OPTS] ALL_OPTS_NAMES = ':'.join([name for name, _ in ALL_OPTS]) +assert ENABLE_ALL_OPTS == ALL_OPTS_NAMES, ( + 'please fix rlib/jit.py to say ENABLE_ALL_OPTS = %r' % (ALL_OPTS_NAMES,)) + def build_opt_chain(metainterp_sd, enable_opts): config = metainterp_sd.config optimizations = [] diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -392,6 +392,9 @@ class JitHintError(Exception): """Inconsistency in the JIT hints.""" +ENABLE_ALL_OPTS = ( + 'intbounds:rewrite:virtualize:string:earlyforce:pure:heap:ffi:unroll') + PARAMETER_DOCS = { 'threshold': 'number of times a loop has to run for it to become hot', 'function_threshold': 'number of times a function must run for it to become traced from start', @@ -402,7 +405,8 @@ 'retrace_limit': 'how many times we can try retracing before giving up', 'max_retrace_guards': 'number of extra guards a retrace can cause', 'max_unroll_loops': 'number of extra unrollings a loop can cause', - 'enable_opts': 'optimizations to enable or all, INTERNAL USE ONLY' + 'enable_opts': 'INTERNAL USE ONLY: optimizations to enable, or all = %s' % + ENABLE_ALL_OPTS, } PARAMETERS = {'threshold': 1039, # just above 1024, prime diff --git a/pypy/translator/goal/app_main.py b/pypy/translator/goal/app_main.py --- a/pypy/translator/goal/app_main.py 
+++ b/pypy/translator/goal/app_main.py @@ -130,30 +130,46 @@ sys.executable,) print __doc__.rstrip() if 'pypyjit' in sys.builtin_module_names: - _print_jit_help() + print " --jit OPTIONS advanced JIT options: try 'off' or 'help'" print raise SystemExit def _print_jit_help(): - import pypyjit + try: + import pypyjit + except ImportError: + print >> sys.stderr, "No jit support in %s" % (sys.executable,) + return items = pypyjit.defaults.items() items.sort() + print 'Advanced JIT options: a comma-separated list of OPTION=VALUE:' for key, value in items: - prefix = ' --jit %s=N %s' % (key, ' '*(18-len(key))) + print + print ' %s=N' % (key,) doc = '%s (default %s)' % (pypyjit.PARAMETER_DOCS[key], value) - while len(doc) > 51: - i = doc[:51].rfind(' ') - print prefix + doc[:i] + while len(doc) > 72: + i = doc[:74].rfind(' ') + if i < 0: + i = doc.find(' ') + if i < 0: + i = len(doc) + print ' ' + doc[:i] doc = doc[i+1:] - prefix = ' '*len(prefix) - print prefix + doc - print ' --jit off turn off the JIT' + print ' ' + doc + print + print ' off' + print ' turn off the JIT' + print ' help' + print ' print this page' def print_version(*args): print >> sys.stderr, "Python", sys.version raise SystemExit def set_jit_option(options, jitparam, *args): + if jitparam == 'help': + _print_jit_help() + raise SystemExit if 'pypyjit' not in sys.builtin_module_names: print >> sys.stderr, ("Warning: No jit support in %s" % (sys.executable,)) From noreply at buildbot.pypy.org Fri Mar 2 21:24:47 2012 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Mar 2012 21:24:47 +0100 (CET) Subject: [pypy-commit] pypy continulet-jit-2: Found the other place that depends on this constant. Message-ID: <20120302202447.33C3A8204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-jit-2 Changeset: r53125:22efc5bf3868 Date: 2012-03-02 20:28 +0100 http://bitbucket.org/pypy/pypy/changeset/22efc5bf3868/ Log: Found the other place that depends on this constant. 
diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -14,7 +14,8 @@ from pypy.jit.backend.x86.arch import (FRAME_FIXED_SIZE, FORCE_INDEX_OFS, WORD, IS_X86_32, IS_X86_64, - OFFSTACK_REAL_FRAME) + OFFSTACK_REAL_FRAME, + OFFSTACK_START_AT_WORD) from pypy.jit.backend.x86.regloc import (eax, ecx, edx, ebx, esp, ebp, esi, edi, @@ -777,7 +778,7 @@ def _patch_stackadjust(self, adr_to_fix, allocated_depth): # patch the requested size in the call to malloc/realloc mc = codebuf.MachineCodeBlockWrapper() - words = FRAME_FIXED_SIZE + 1 + allocated_depth + words = FRAME_FIXED_SIZE-1 + OFFSTACK_START_AT_WORD + allocated_depth mc.writeimm32(words * WORD) mc.copy_to_raw_memory(adr_to_fix) return From noreply at buildbot.pypy.org Fri Mar 2 21:24:48 2012 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Mar 2012 21:24:48 +0100 (CET) Subject: [pypy-commit] pypy continulet-jit-2: Found and fix an issue with shadowstack. Hard to test :-( Message-ID: <20120302202448.64DD88204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-jit-2 Changeset: r53126:7c389cf9b418 Date: 2012-03-02 20:44 +0100 http://bitbucket.org/pypy/pypy/changeset/7c389cf9b418/ Log: Found and fix an issue with shadowstack. 
Hard to test :-( diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -758,6 +758,10 @@ # self.mc.LEA_rm(ebp.value, (eax.value, WORD * (FRAME_FIXED_SIZE-1))) # + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + if gcrootmap is not None and gcrootmap.is_shadow_stack: + self._fixup_shadowstack_location(gcrootmap) + # if IS_X86_32: self.mc.ADD_ri(esp.value, 2*WORD) self.mc.POP_r(save_regs[2].value) @@ -898,6 +902,15 @@ else: self.mc.MOV_mr((r13.value, 0), ebx.value) # MOV [r13], ebx + def _fixup_shadowstack_location(self, gcrootmap): + rst = gcrootmap.get_root_stack_top_addr() + if rx86.fits_in_32bits(rst): + self.mc.MOV_rj(eax.value, rst) # MOV eax, [rootstacktop] + else: + self.mc.MOV_ri(eax.value, rst) # MOV eax, rootstacktop + self.mc.MOV_rm(eax.value, (eax.value, 0)) # MOV eax, [eax] + self.mc.MOV_mr((eax.value, -2*WORD), ebp.value)# MOV [eax-2*WORD], ebp + def _call_footer_shadowstack(self, gcrootmap): rst = gcrootmap.get_root_stack_top_addr() if rx86.fits_in_32bits(rst): From noreply at buildbot.pypy.org Fri Mar 2 21:24:49 2012 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Mar 2012 21:24:49 +0100 (CET) Subject: [pypy-commit] pypy default: Small improvement on 64-bit asmgcc: addresses are always multiple of 8, so we can save one bit here and make twice as many values take one byte less. Message-ID: <20120302202449.ACE018204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r53127:21d7882b8571 Date: 2012-03-02 21:24 +0100 http://bitbucket.org/pypy/pypy/changeset/21d7882b8571/ Log: Small improvement on 64-bit asmgcc: addresses are always multiple of 8, so we can save one bit here and make twice as many values take one byte less. 
diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -208,6 +208,7 @@ This is the class supporting --gcrootfinder=asmgcc. """ is_shadow_stack = False + is_64_bit = (WORD == 8) LOC_REG = 0 LOC_ESP_PLUS = 1 @@ -336,17 +337,17 @@ self._gcmap_deadentries += 1 item += asmgcroot.arrayitemsize - def get_basic_shape(self, is_64_bit=False): + def get_basic_shape(self): # XXX: Should this code even really know about stack frame layout of # the JIT? - if is_64_bit: - return [chr(self.LOC_EBP_PLUS | 8), - chr(self.LOC_EBP_MINUS | 8), - chr(self.LOC_EBP_MINUS | 16), - chr(self.LOC_EBP_MINUS | 24), - chr(self.LOC_EBP_MINUS | 32), - chr(self.LOC_EBP_MINUS | 40), - chr(self.LOC_EBP_PLUS | 0), + if self.is_64_bit: + return [chr(self.LOC_EBP_PLUS | 4), # return addr: at 8(%rbp) + chr(self.LOC_EBP_MINUS | 4), # saved %rbx: at -8(%rbp) + chr(self.LOC_EBP_MINUS | 8), # saved %r12: at -16(%rbp) + chr(self.LOC_EBP_MINUS | 12), # saved %r13: at -24(%rbp) + chr(self.LOC_EBP_MINUS | 16), # saved %r14: at -32(%rbp) + chr(self.LOC_EBP_MINUS | 20), # saved %r15: at -40(%rbp) + chr(self.LOC_EBP_PLUS | 0), # saved %rbp: at (%rbp) chr(0)] else: return [chr(self.LOC_EBP_PLUS | 4), # return addr: at 4(%ebp) @@ -366,7 +367,11 @@ shape.append(chr(number | flag)) def add_frame_offset(self, shape, offset): - assert (offset & 3) == 0 + if self.is_64_bit: + assert (offset & 7) == 0 + offset >>= 1 + else: + assert (offset & 3) == 0 if offset >= 0: num = self.LOC_EBP_PLUS | offset else: @@ -518,7 +523,7 @@ def initialize(self): pass - def get_basic_shape(self, is_64_bit=False): + def get_basic_shape(self): return [] def add_frame_offset(self, shape, offset): diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -57,6 +57,7 @@ def frame_pos(n): return 
-4*(4+n) gcrootmap = GcRootMap_asmgcc() + gcrootmap.is_64_bit = False num1 = frame_pos(-5) num1a = num1|2 num2 = frame_pos(55) diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -1393,7 +1393,7 @@ self.force_spill_var(op.getarg(0)) def get_mark_gc_roots(self, gcrootmap, use_copy_area=False): - shape = gcrootmap.get_basic_shape(IS_X86_64) + shape = gcrootmap.get_basic_shape() for v, val in self.fm.bindings.items(): if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): assert isinstance(val, StackLoc) diff --git a/pypy/rpython/memory/gctransform/asmgcroot.py b/pypy/rpython/memory/gctransform/asmgcroot.py --- a/pypy/rpython/memory/gctransform/asmgcroot.py +++ b/pypy/rpython/memory/gctransform/asmgcroot.py @@ -442,6 +442,8 @@ ll_assert(location >= 0, "negative location") kind = location & LOC_MASK offset = location & ~ LOC_MASK + if IS_64_BITS: + offset <<= 1 if kind == LOC_REG: # register if location == LOC_NOWHERE: return llmemory.NULL diff --git a/pypy/translator/c/gcc/instruction.py b/pypy/translator/c/gcc/instruction.py --- a/pypy/translator/c/gcc/instruction.py +++ b/pypy/translator/c/gcc/instruction.py @@ -13,13 +13,17 @@ ARGUMENT_REGISTERS_64 = ('%rdi', '%rsi', '%rdx', '%rcx', '%r8', '%r9') -def frameloc_esp(offset): +def frameloc_esp(offset, wordsize): assert offset >= 0 - assert offset % 4 == 0 + assert offset % wordsize == 0 + if wordsize == 8: # in this case, there are 3 null bits, but we + offset >>= 1 # only need 2 of them return LOC_ESP_PLUS | offset -def frameloc_ebp(offset): - assert offset % 4 == 0 +def frameloc_ebp(offset, wordsize): + assert offset % wordsize == 0 + if wordsize == 8: # in this case, there are 3 null bits, but we + offset >>= 1 # only need 2 of them if offset >= 0: return LOC_EBP_PLUS | offset else: @@ -57,12 +61,12 @@ # try to use esp-relative addressing ofs_from_esp = framesize + self.ofs_from_frame_end if ofs_from_esp % 
2 == 0: - return frameloc_esp(ofs_from_esp) + return frameloc_esp(ofs_from_esp, wordsize) # we can get an odd value if the framesize is marked as bogus # by visit_andl() assert uses_frame_pointer ofs_from_ebp = self.ofs_from_frame_end + wordsize - return frameloc_ebp(ofs_from_ebp) + return frameloc_ebp(ofs_from_ebp, wordsize) class Insn(object): diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -78,9 +78,9 @@ if self.is_stack_bottom: retaddr = LOC_NOWHERE # end marker for asmgcroot.py elif self.uses_frame_pointer: - retaddr = frameloc_ebp(self.WORD) + retaddr = frameloc_ebp(self.WORD, self.WORD) else: - retaddr = frameloc_esp(insn.framesize) + retaddr = frameloc_esp(insn.framesize, self.WORD) shape = [retaddr] # the first gcroots are always the ones corresponding to # the callee-saved registers @@ -894,6 +894,8 @@ return '%' + cls.CALLEE_SAVE_REGISTERS[reg].replace("%", "") else: offset = loc & ~ LOC_MASK + if cls.WORD == 8: + offset <<= 1 if kind == LOC_EBP_PLUS: result = '(%' + cls.EBP.replace("%", "") + ')' elif kind == LOC_EBP_MINUS: From noreply at buildbot.pypy.org Fri Mar 2 23:06:13 2012 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Mar 2012 23:06:13 +0100 (CET) Subject: [pypy-commit] pypy continulet-jit-2: hg merge default Message-ID: <20120302220613.396048204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-jit-2 Changeset: r53128:6c61aace1349 Date: 2012-03-02 21:24 +0100 http://bitbucket.org/pypy/pypy/changeset/6c61aace1349/ Log: hg merge default diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -208,6 +208,7 @@ This is the class supporting --gcrootfinder=asmgcc. 
""" is_shadow_stack = False + is_64_bit = (WORD == 8) LOC_REG = 0 LOC_ESP_PLUS = 1 @@ -336,17 +337,17 @@ self._gcmap_deadentries += 1 item += asmgcroot.arrayitemsize - def get_basic_shape(self, is_64_bit=False): + def get_basic_shape(self): # XXX: Should this code even really know about stack frame layout of # the JIT? - if is_64_bit: - return [chr(self.LOC_EBP_PLUS | 8), - chr(self.LOC_EBP_MINUS | 8), - chr(self.LOC_EBP_MINUS | 16), - chr(self.LOC_EBP_MINUS | 24), - chr(self.LOC_EBP_MINUS | 32), - chr(self.LOC_EBP_MINUS | 40), - chr(self.LOC_EBP_PLUS | 0), + if self.is_64_bit: + return [chr(self.LOC_EBP_PLUS | 4), # return addr: at 8(%rbp) + chr(self.LOC_EBP_MINUS | 4), # saved %rbx: at -8(%rbp) + chr(self.LOC_EBP_MINUS | 8), # saved %r12: at -16(%rbp) + chr(self.LOC_EBP_MINUS | 12), # saved %r13: at -24(%rbp) + chr(self.LOC_EBP_MINUS | 16), # saved %r14: at -32(%rbp) + chr(self.LOC_EBP_MINUS | 20), # saved %r15: at -40(%rbp) + chr(self.LOC_EBP_PLUS | 0), # saved %rbp: at (%rbp) chr(0)] else: return [chr(self.LOC_EBP_PLUS | 4), # return addr: at 4(%ebp) @@ -366,7 +367,11 @@ shape.append(chr(number | flag)) def add_frame_offset(self, shape, offset): - assert (offset & 3) == 0 + if self.is_64_bit: + assert (offset & 7) == 0 + offset >>= 1 + else: + assert (offset & 3) == 0 if offset >= 0: num = self.LOC_EBP_PLUS | offset else: @@ -518,7 +523,7 @@ def initialize(self): pass - def get_basic_shape(self, is_64_bit=False): + def get_basic_shape(self): return [] def add_frame_offset(self, shape, offset): diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -57,6 +57,7 @@ def frame_pos(n): return -4*(4+n) gcrootmap = GcRootMap_asmgcc() + gcrootmap.is_64_bit = False num1 = frame_pos(-5) num1a = num1|2 num2 = frame_pos(55) diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- 
a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -1418,7 +1418,7 @@ self.force_spill_var(op.getarg(0)) def get_mark_gc_roots(self, gcrootmap, use_copy_area=False): - shape = gcrootmap.get_basic_shape(IS_X86_64) + shape = gcrootmap.get_basic_shape() for v, val in self.fm.bindings.items(): if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): assert isinstance(val, StackLoc) diff --git a/pypy/jit/metainterp/optimizeopt/__init__.py b/pypy/jit/metainterp/optimizeopt/__init__.py --- a/pypy/jit/metainterp/optimizeopt/__init__.py +++ b/pypy/jit/metainterp/optimizeopt/__init__.py @@ -9,7 +9,7 @@ from pypy.jit.metainterp.optimizeopt.simplify import OptSimplify from pypy.jit.metainterp.optimizeopt.pure import OptPure from pypy.jit.metainterp.optimizeopt.earlyforce import OptEarlyForce -from pypy.rlib.jit import PARAMETERS +from pypy.rlib.jit import PARAMETERS, ENABLE_ALL_OPTS from pypy.rlib.unroll import unrolling_iterable from pypy.rlib.debug import debug_start, debug_stop, debug_print @@ -30,6 +30,9 @@ ALL_OPTS_LIST = [name for name, _ in ALL_OPTS] ALL_OPTS_NAMES = ':'.join([name for name, _ in ALL_OPTS]) +assert ENABLE_ALL_OPTS == ALL_OPTS_NAMES, ( + 'please fix rlib/jit.py to say ENABLE_ALL_OPTS = %r' % (ALL_OPTS_NAMES,)) + def build_opt_chain(metainterp_sd, enable_opts): config = metainterp_sd.config optimizations = [] diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -392,6 +392,9 @@ class JitHintError(Exception): """Inconsistency in the JIT hints.""" +ENABLE_ALL_OPTS = ( + 'intbounds:rewrite:virtualize:string:earlyforce:pure:heap:ffi:unroll') + PARAMETER_DOCS = { 'threshold': 'number of times a loop has to run for it to become hot', 'function_threshold': 'number of times a function must run for it to become traced from start', @@ -402,7 +405,8 @@ 'retrace_limit': 'how many times we can try retracing before giving up', 'max_retrace_guards': 'number of extra guards a retrace can cause', 
'max_unroll_loops': 'number of extra unrollings a loop can cause', - 'enable_opts': 'optimizations to enable or all, INTERNAL USE ONLY' + 'enable_opts': 'INTERNAL USE ONLY: optimizations to enable, or all = %s' % + ENABLE_ALL_OPTS, } PARAMETERS = {'threshold': 1039, # just above 1024, prime diff --git a/pypy/rpython/memory/gctransform/asmgcroot.py b/pypy/rpython/memory/gctransform/asmgcroot.py --- a/pypy/rpython/memory/gctransform/asmgcroot.py +++ b/pypy/rpython/memory/gctransform/asmgcroot.py @@ -442,6 +442,8 @@ ll_assert(location >= 0, "negative location") kind = location & LOC_MASK offset = location & ~ LOC_MASK + if IS_64_BITS: + offset <<= 1 if kind == LOC_REG: # register if location == LOC_NOWHERE: return llmemory.NULL diff --git a/pypy/translator/c/gcc/instruction.py b/pypy/translator/c/gcc/instruction.py --- a/pypy/translator/c/gcc/instruction.py +++ b/pypy/translator/c/gcc/instruction.py @@ -13,13 +13,17 @@ ARGUMENT_REGISTERS_64 = ('%rdi', '%rsi', '%rdx', '%rcx', '%r8', '%r9') -def frameloc_esp(offset): +def frameloc_esp(offset, wordsize): assert offset >= 0 - assert offset % 4 == 0 + assert offset % wordsize == 0 + if wordsize == 8: # in this case, there are 3 null bits, but we + offset >>= 1 # only need 2 of them return LOC_ESP_PLUS | offset -def frameloc_ebp(offset): - assert offset % 4 == 0 +def frameloc_ebp(offset, wordsize): + assert offset % wordsize == 0 + if wordsize == 8: # in this case, there are 3 null bits, but we + offset >>= 1 # only need 2 of them if offset >= 0: return LOC_EBP_PLUS | offset else: @@ -57,12 +61,12 @@ # try to use esp-relative addressing ofs_from_esp = framesize + self.ofs_from_frame_end if ofs_from_esp % 2 == 0: - return frameloc_esp(ofs_from_esp) + return frameloc_esp(ofs_from_esp, wordsize) # we can get an odd value if the framesize is marked as bogus # by visit_andl() assert uses_frame_pointer ofs_from_ebp = self.ofs_from_frame_end + wordsize - return frameloc_ebp(ofs_from_ebp) + return frameloc_ebp(ofs_from_ebp, 
wordsize) class Insn(object): diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -78,9 +78,9 @@ if self.is_stack_bottom: retaddr = LOC_NOWHERE # end marker for asmgcroot.py elif self.uses_frame_pointer: - retaddr = frameloc_ebp(self.WORD) + retaddr = frameloc_ebp(self.WORD, self.WORD) else: - retaddr = frameloc_esp(insn.framesize) + retaddr = frameloc_esp(insn.framesize, self.WORD) shape = [retaddr] # the first gcroots are always the ones corresponding to # the callee-saved registers @@ -894,6 +894,8 @@ return '%' + cls.CALLEE_SAVE_REGISTERS[reg].replace("%", "") else: offset = loc & ~ LOC_MASK + if cls.WORD == 8: + offset <<= 1 if kind == LOC_EBP_PLUS: result = '(%' + cls.EBP.replace("%", "") + ')' elif kind == LOC_EBP_MINUS: diff --git a/pypy/translator/goal/app_main.py b/pypy/translator/goal/app_main.py --- a/pypy/translator/goal/app_main.py +++ b/pypy/translator/goal/app_main.py @@ -130,30 +130,46 @@ sys.executable,) print __doc__.rstrip() if 'pypyjit' in sys.builtin_module_names: - _print_jit_help() + print " --jit OPTIONS advanced JIT options: try 'off' or 'help'" print raise SystemExit def _print_jit_help(): - import pypyjit + try: + import pypyjit + except ImportError: + print >> sys.stderr, "No jit support in %s" % (sys.executable,) + return items = pypyjit.defaults.items() items.sort() + print 'Advanced JIT options: a comma-separated list of OPTION=VALUE:' for key, value in items: - prefix = ' --jit %s=N %s' % (key, ' '*(18-len(key))) + print + print ' %s=N' % (key,) doc = '%s (default %s)' % (pypyjit.PARAMETER_DOCS[key], value) - while len(doc) > 51: - i = doc[:51].rfind(' ') - print prefix + doc[:i] + while len(doc) > 72: + i = doc[:74].rfind(' ') + if i < 0: + i = doc.find(' ') + if i < 0: + i = len(doc) + print ' ' + doc[:i] doc = doc[i+1:] - prefix = ' '*len(prefix) - print prefix + doc - print ' --jit off turn off the JIT' + 
print ' ' + doc + print + print ' off' + print ' turn off the JIT' + print ' help' + print ' print this page' def print_version(*args): print >> sys.stderr, "Python", sys.version raise SystemExit def set_jit_option(options, jitparam, *args): + if jitparam == 'help': + _print_jit_help() + raise SystemExit if 'pypyjit' not in sys.builtin_module_names: print >> sys.stderr, ("Warning: No jit support in %s" % (sys.executable,)) From noreply at buildbot.pypy.org Fri Mar 2 23:06:14 2012 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Mar 2012 23:06:14 +0100 (CET) Subject: [pypy-commit] pypy continulet-jit-2: Yay. This is enough at least to have "test_zrpy_gc -k AsmGcc" pass. Message-ID: <20120302220614.71852820D1@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-jit-2 Changeset: r53129:a852412187c3 Date: 2012-03-02 22:17 +0100 http://bitbucket.org/pypy/pypy/changeset/a852412187c3/ Log: Yay. This is enough at least to have "test_zrpy_gc -k AsmGcc" pass. diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -337,11 +337,18 @@ self._gcmap_deadentries += 1 item += asmgcroot.arrayitemsize - def get_basic_shape(self): + def get_basic_shape(self, return_addr_words_from_esp=0): # XXX: Should this code even really know about stack frame layout of # the JIT? 
+ if return_addr_words_from_esp == 0: + retaddr = chr(self.LOC_EBP_PLUS | 4) # return addr: at WORD(%rbp) + else: + x = return_addr_words_from_esp * 4 + assert 0 < x < 128 + retaddr = chr(self.LOC_ESP_PLUS | x) + # if self.is_64_bit: - return [chr(self.LOC_EBP_PLUS | 4), # return addr: at 8(%rbp) + return [retaddr, chr(self.LOC_EBP_MINUS | 4), # saved %rbx: at -8(%rbp) chr(self.LOC_EBP_MINUS | 8), # saved %r12: at -16(%rbp) chr(self.LOC_EBP_MINUS | 12), # saved %r13: at -24(%rbp) @@ -350,7 +357,7 @@ chr(self.LOC_EBP_PLUS | 0), # saved %rbp: at (%rbp) chr(0)] else: - return [chr(self.LOC_EBP_PLUS | 4), # return addr: at 4(%ebp) + return [retaddr, chr(self.LOC_EBP_MINUS | 4), # saved %ebx: at -4(%ebp) chr(self.LOC_EBP_MINUS | 8), # saved %esi: at -8(%ebp) chr(self.LOC_EBP_MINUS | 12), # saved %edi: at -12(%ebp) @@ -523,7 +530,7 @@ def initialize(self): pass - def get_basic_shape(self): + def get_basic_shape(self, return_addr_words_from_esp=0): return [] def add_frame_offset(self, shape, offset): diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -1418,7 +1418,8 @@ self.force_spill_var(op.getarg(0)) def get_mark_gc_roots(self, gcrootmap, use_copy_area=False): - shape = gcrootmap.get_basic_shape() + orf = OFFSTACK_REAL_FRAME + shape = gcrootmap.get_basic_shape(return_addr_words_from_esp=orf) for v, val in self.fm.bindings.items(): if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): assert isinstance(val, StackLoc) From noreply at buildbot.pypy.org Sat Mar 3 00:06:56 2012 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 3 Mar 2012 00:06:56 +0100 (CET) Subject: [pypy-commit] pypy default: disable those prints for now Message-ID: <20120302230656.965588204C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r53130:c7407123b4a0 Date: 2012-03-02 15:06 -0800 http://bitbucket.org/pypy/pypy/changeset/c7407123b4a0/ Log: disable those 
prints for now diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py --- a/pypy/jit/metainterp/optimizeopt/virtualstate.py +++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py @@ -681,13 +681,14 @@ self.synthetic[op] = True def debug_print(self, logops): - debug_start('jit-short-boxes') - for box, op in self.short_boxes.items(): - if op: - debug_print(logops.repr_of_arg(box) + ': ' + logops.repr_of_resop(op)) - else: - debug_print(logops.repr_of_arg(box) + ': None') - debug_stop('jit-short-boxes') + if 0: + debug_start('jit-short-boxes') + for box, op in self.short_boxes.items(): + if op: + debug_print(logops.repr_of_arg(box) + ': ' + logops.repr_of_resop(op)) + else: + debug_print(logops.repr_of_arg(box) + ': None') + debug_stop('jit-short-boxes') def operations(self): if not we_are_translated(): # For tests From noreply at buildbot.pypy.org Sat Mar 3 00:11:28 2012 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 3 Mar 2012 00:11:28 +0100 (CET) Subject: [pypy-commit] pypy default: disable more debug prints Message-ID: <20120302231128.AEFEF8204C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r53131:08f324aa11f4 Date: 2012-03-02 15:11 -0800 http://bitbucket.org/pypy/pypy/changeset/08f324aa11f4/ Log: disable more debug prints diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -9,7 +9,6 @@ from pypy.jit.metainterp.inliner import Inliner from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.metainterp.resume import Snapshot -from pypy.rlib.debug import debug_print import sys, os # FIXME: Introduce some VirtualOptimizer super class instead @@ -121,9 +120,9 @@ limit = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.retrace_limit if cell_token.retraced_count < limit: cell_token.retraced_count += 1 - 
debug_print('Retracing (%d/%d)' % (cell_token.retraced_count, limit)) + #debug_print('Retracing (%d/%d)' % (cell_token.retraced_count, limit)) else: - debug_print("Retrace count reached, jumping to preamble") + #debug_print("Retrace count reached, jumping to preamble") assert cell_token.target_tokens[0].virtual_state is None jumpop.setdescr(cell_token.target_tokens[0]) self.optimizer.send_extra_operation(jumpop) @@ -273,9 +272,9 @@ not newvalue.is_constant(): op = ResOperation(rop.SAME_AS, [op.result], newresult) self.optimizer._newoperations.append(op) - if self.optimizer.loop.logops: - debug_print(' Falling back to add extra: ' + - self.optimizer.loop.logops.repr_of_resop(op)) + #if self.optimizer.loop.logops: + # debug_print(' Falling back to add extra: ' + + # self.optimizer.loop.logops.repr_of_resop(op)) self.optimizer.flush() self.optimizer.emitting_dissabled = False @@ -341,8 +340,8 @@ if i == len(newoperations): while j < len(jumpargs): a = jumpargs[j] - if self.optimizer.loop.logops: - debug_print('J: ' + self.optimizer.loop.logops.repr_of_arg(a)) + #if self.optimizer.loop.logops: + # debug_print('J: ' + self.optimizer.loop.logops.repr_of_arg(a)) self.import_box(a, inputargs, short_jumpargs, jumpargs) j += 1 else: @@ -353,11 +352,11 @@ if op.is_guard(): args = args + op.getfailargs() - if self.optimizer.loop.logops: - debug_print('OP: ' + self.optimizer.loop.logops.repr_of_resop(op)) + #if self.optimizer.loop.logops: + # debug_print('OP: ' + self.optimizer.loop.logops.repr_of_resop(op)) for a in args: - if self.optimizer.loop.logops: - debug_print('A: ' + self.optimizer.loop.logops.repr_of_arg(a)) + #if self.optimizer.loop.logops: + # debug_print('A: ' + self.optimizer.loop.logops.repr_of_arg(a)) self.import_box(a, inputargs, short_jumpargs, jumpargs) i += 1 newoperations = self.optimizer.get_newoperations() @@ -370,18 +369,18 @@ # that is compatible with the virtual state at the start of the loop modifier = VirtualStateAdder(self.optimizer) 
final_virtual_state = modifier.get_virtual_state(original_jumpargs) - debug_start('jit-log-virtualstate') - virtual_state.debug_print('Closed loop with ') + #debug_start('jit-log-virtualstate') + #virtual_state.debug_print('Closed loop with ') bad = {} if not virtual_state.generalization_of(final_virtual_state, bad): # We ended up with a virtual state that is not compatible # and we are thus unable to jump to the start of the loop - final_virtual_state.debug_print("Bad virtual state at end of loop, ", - bad) - debug_stop('jit-log-virtualstate') + #final_virtual_state.debug_print("Bad virtual state at end of loop, ", + # bad) + #debug_stop('jit-log-virtualstate') raise InvalidLoop - debug_stop('jit-log-virtualstate') + #debug_stop('jit-log-virtualstate') maxguards = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.max_retrace_guards if self.optimizer.emitted_guards > maxguards: @@ -444,9 +443,9 @@ self.ensure_short_op_emitted(self.short_boxes.producer(a), optimizer, seen) - if self.optimizer.loop.logops: - debug_print(' Emitting short op: ' + - self.optimizer.loop.logops.repr_of_resop(op)) + #if self.optimizer.loop.logops: + # debug_print(' Emitting short op: ' + + # self.optimizer.loop.logops.repr_of_resop(op)) optimizer.send_extra_operation(op) seen[op.result] = True @@ -527,8 +526,8 @@ args = jumpop.getarglist() modifier = VirtualStateAdder(self.optimizer) virtual_state = modifier.get_virtual_state(args) - debug_start('jit-log-virtualstate') - virtual_state.debug_print("Looking for ") + #debug_start('jit-log-virtualstate') + #virtual_state.debug_print("Looking for ") for target in cell_token.target_tokens: if not target.virtual_state: @@ -537,10 +536,10 @@ extra_guards = [] bad = {} - debugmsg = 'Did not match ' + #debugmsg = 'Did not match ' if target.virtual_state.generalization_of(virtual_state, bad): ok = True - debugmsg = 'Matched ' + #debugmsg = 'Matched ' else: try: cpu = self.optimizer.cpu @@ -549,13 +548,13 @@ extra_guards) ok = True - debugmsg 
= 'Guarded to match ' + #debugmsg = 'Guarded to match ' except InvalidLoop: pass - target.virtual_state.debug_print(debugmsg, bad) + #target.virtual_state.debug_print(debugmsg, bad) if ok: - debug_stop('jit-log-virtualstate') + #debug_stop('jit-log-virtualstate') values = [self.getvalue(arg) for arg in jumpop.getarglist()] @@ -576,13 +575,13 @@ newop = inliner.inline_op(shop) self.optimizer.send_extra_operation(newop) except InvalidLoop: - debug_print("Inlining failed unexpectedly", - "jumping to preamble instead") + #debug_print("Inlining failed unexpectedly", + # "jumping to preamble instead") assert cell_token.target_tokens[0].virtual_state is None jumpop.setdescr(cell_token.target_tokens[0]) self.optimizer.send_extra_operation(jumpop) return True - debug_stop('jit-log-virtualstate') + #debug_stop('jit-log-virtualstate') return False class ValueImporter(object): From notifications-noreply at bitbucket.org Sat Mar 3 06:56:29 2012 From: notifications-noreply at bitbucket.org (Bitbucket) Date: Sat, 03 Mar 2012 05:56:29 -0000 Subject: [pypy-commit] Notification: pypy Message-ID: <20120303055629.32490.79569@bitbucket02.managed.contegix.com> You have received a notification from Ross Lagerwall. Hi, I forked pypy. My fork is at https://bitbucket.org/rosslagerwall/pypy. 
-- Disable notifications at https://bitbucket.org/account/notifications/ From noreply at buildbot.pypy.org Sat Mar 3 07:27:39 2012 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 3 Mar 2012 07:27:39 +0100 (CET) Subject: [pypy-commit] pypy numpy-record-dtypes: merge default Message-ID: <20120303062739.CBBBB8204C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: numpy-record-dtypes Changeset: r53132:a79aafc3d857 Date: 2012-03-02 19:41 -0800 http://bitbucket.org/pypy/pypy/changeset/a79aafc3d857/ Log: merge default diff --git a/lib-python/modified-2.7/ctypes/test/test_arrays.py b/lib-python/modified-2.7/ctypes/test/test_arrays.py --- a/lib-python/modified-2.7/ctypes/test/test_arrays.py +++ b/lib-python/modified-2.7/ctypes/test/test_arrays.py @@ -1,12 +1,23 @@ import unittest from ctypes import * +from test.test_support import impl_detail formats = "bBhHiIlLqQfd" +# c_longdouble commented out for PyPy, look at the commend in test_longdouble formats = c_byte, c_ubyte, c_short, c_ushort, c_int, c_uint, \ - c_long, c_ulonglong, c_float, c_double, c_longdouble + c_long, c_ulonglong, c_float, c_double #, c_longdouble class ArrayTestCase(unittest.TestCase): + + @impl_detail('long double not supported by PyPy', pypy=False) + def test_longdouble(self): + """ + This test is empty. It's just here to remind that we commented out + c_longdouble in "formats". If pypy will ever supports c_longdouble, we + should kill this test and uncomment c_longdouble inside formats. + """ + def test_simple(self): # create classes holding simple numeric types, and check # various properties. 
diff --git a/lib_pypy/_ctypes/array.py b/lib_pypy/_ctypes/array.py --- a/lib_pypy/_ctypes/array.py +++ b/lib_pypy/_ctypes/array.py @@ -1,9 +1,9 @@ - +import _ffi import _rawffi from _ctypes.basics import _CData, cdata_from_address, _CDataMeta, sizeof from _ctypes.basics import keepalive_key, store_reference, ensure_objects -from _ctypes.basics import CArgObject +from _ctypes.basics import CArgObject, as_ffi_pointer class ArrayMeta(_CDataMeta): def __new__(self, name, cls, typedict): @@ -211,6 +211,9 @@ def _to_ffi_param(self): return self._get_buffer_value() + def _as_ffi_pointer_(self, ffitype): + return as_ffi_pointer(self, ffitype) + ARRAY_CACHE = {} def create_array_type(base, length): @@ -228,5 +231,6 @@ _type_ = base ) cls = ArrayMeta(name, (Array,), tpdict) + cls._ffiargtype = _ffi.types.Pointer(base.get_ffi_argtype()) ARRAY_CACHE[key] = cls return cls diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -230,5 +230,16 @@ } +# called from primitive.py, pointer.py, array.py +def as_ffi_pointer(value, ffitype): + my_ffitype = type(value).get_ffi_argtype() + # for now, we always allow types.pointer, else a lot of tests + # break. 
We need to rethink how pointers are represented, though + if my_ffitype is not ffitype and ffitype is not _ffi.types.void_p: + raise ArgumentError("expected %s instance, got %s" % (type(value), + ffitype)) + return value._get_buffer_value() + + # used by "byref" from _ctypes.pointer import pointer diff --git a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py --- a/lib_pypy/_ctypes/pointer.py +++ b/lib_pypy/_ctypes/pointer.py @@ -3,7 +3,7 @@ import _ffi from _ctypes.basics import _CData, _CDataMeta, cdata_from_address, ArgumentError from _ctypes.basics import keepalive_key, store_reference, ensure_objects -from _ctypes.basics import sizeof, byref +from _ctypes.basics import sizeof, byref, as_ffi_pointer from _ctypes.array import Array, array_get_slice_params, array_slice_getitem,\ array_slice_setitem @@ -119,14 +119,6 @@ def _as_ffi_pointer_(self, ffitype): return as_ffi_pointer(self, ffitype) -def as_ffi_pointer(value, ffitype): - my_ffitype = type(value).get_ffi_argtype() - # for now, we always allow types.pointer, else a lot of tests - # break. 
We need to rethink how pointers are represented, though - if my_ffitype is not ffitype and ffitype is not _ffi.types.void_p: - raise ArgumentError("expected %s instance, got %s" % (type(value), - ffitype)) - return value._get_buffer_value() def _cast_addr(obj, _, tp): if not (isinstance(tp, _CDataMeta) and tp._is_pointer_like()): diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -13,7 +13,7 @@ and not p.basename.startswith('test')] essential_modules = dict.fromkeys( - ["exceptions", "_file", "sys", "__builtin__", "posix"] + ["exceptions", "_file", "sys", "__builtin__", "posix", "_warnings"] ) default_modules = essential_modules.copy() diff --git a/pypy/config/translationoption.py b/pypy/config/translationoption.py --- a/pypy/config/translationoption.py +++ b/pypy/config/translationoption.py @@ -105,7 +105,8 @@ BoolOption("sandbox", "Produce a fully-sandboxed executable", default=False, cmdline="--sandbox", requires=[("translation.thread", False)], - suggests=[("translation.gc", "generation")]), + suggests=[("translation.gc", "generation"), + ("translation.gcrootfinder", "shadowstack")]), BoolOption("rweakref", "The backend supports RPython-level weakrefs", default=True), diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -313,5 +313,10 @@ implementation detail that shows up because of internal C-level slots that PyPy does not have. +* the ``__dict__`` attribute of new-style classes returns a normal dict, as + opposed to a dict proxy like in CPython. Mutating the dict will change the + type and vice versa. For builtin types, a dictionary will be returned that + cannot be changed (but still looks and behaves like a normal dictionary). + .. 
include:: _ref.txt diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -328,7 +328,7 @@ raise modname = self.str_w(w_modname) mod = self.interpclass_w(w_mod) - if isinstance(mod, Module): + if isinstance(mod, Module) and not mod.startup_called: self.timer.start("startup " + modname) mod.init(self) self.timer.stop("startup " + modname) @@ -1471,8 +1471,8 @@ def warn(self, msg, w_warningcls): self.appexec([self.wrap(msg), w_warningcls], """(msg, warningcls): - import warnings - warnings.warn(msg, warningcls, stacklevel=2) + import _warnings + _warnings.warn(msg, warningcls, stacklevel=2) """) def resolve_target(self, w_obj): diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -60,11 +60,10 @@ self.pycode = code eval.Frame.__init__(self, space, w_globals) self.locals_stack_w = [None] * (code.co_nlocals + code.co_stacksize) - self.nlocals = code.co_nlocals self.valuestackdepth = code.co_nlocals self.lastblock = None make_sure_not_resized(self.locals_stack_w) - check_nonneg(self.nlocals) + check_nonneg(self.valuestackdepth) # if space.config.objspace.honor__builtins__: self.builtin = space.builtin.pick_builtin(w_globals) @@ -144,8 +143,8 @@ def execute_frame(self, w_inputvalue=None, operr=None): """Execute this frame. Main entry point to the interpreter. The optional arguments are there to handle a generator's frame: - w_inputvalue is for generator.send()) and operr is for - generator.throw()). + w_inputvalue is for generator.send() and operr is for + generator.throw(). 
""" # the following 'assert' is an annotation hint: it hides from # the annotator all methods that are defined in PyFrame but @@ -195,7 +194,7 @@ def popvalue(self): depth = self.valuestackdepth - 1 - assert depth >= self.nlocals, "pop from empty value stack" + assert depth >= self.pycode.co_nlocals, "pop from empty value stack" w_object = self.locals_stack_w[depth] self.locals_stack_w[depth] = None self.valuestackdepth = depth @@ -223,7 +222,7 @@ def peekvalues(self, n): values_w = [None] * n base = self.valuestackdepth - n - assert base >= self.nlocals + assert base >= self.pycode.co_nlocals while True: n -= 1 if n < 0: @@ -235,7 +234,8 @@ def dropvalues(self, n): n = hint(n, promote=True) finaldepth = self.valuestackdepth - n - assert finaldepth >= self.nlocals, "stack underflow in dropvalues()" + assert finaldepth >= self.pycode.co_nlocals, ( + "stack underflow in dropvalues()") while True: n -= 1 if n < 0: @@ -267,13 +267,15 @@ # Contrast this with CPython where it's PEEK(-1). index_from_top = hint(index_from_top, promote=True) index = self.valuestackdepth + ~index_from_top - assert index >= self.nlocals, "peek past the bottom of the stack" + assert index >= self.pycode.co_nlocals, ( + "peek past the bottom of the stack") return self.locals_stack_w[index] def settopvalue(self, w_object, index_from_top=0): index_from_top = hint(index_from_top, promote=True) index = self.valuestackdepth + ~index_from_top - assert index >= self.nlocals, "settop past the bottom of the stack" + assert index >= self.pycode.co_nlocals, ( + "settop past the bottom of the stack") self.locals_stack_w[index] = w_object @jit.unroll_safe @@ -320,12 +322,13 @@ else: f_lineno = self.f_lineno - values_w = self.locals_stack_w[self.nlocals:self.valuestackdepth] + nlocals = self.pycode.co_nlocals + values_w = self.locals_stack_w[nlocals:self.valuestackdepth] w_valuestack = maker.slp_into_tuple_with_nulls(space, values_w) w_blockstack = nt([block._get_state_(space) for block in 
self.get_blocklist()]) w_fastlocals = maker.slp_into_tuple_with_nulls( - space, self.locals_stack_w[:self.nlocals]) + space, self.locals_stack_w[:nlocals]) if self.last_exception is None: w_exc_value = space.w_None w_tb = space.w_None @@ -442,7 +445,7 @@ """Initialize the fast locals from a list of values, where the order is according to self.pycode.signature().""" scope_len = len(scope_w) - if scope_len > self.nlocals: + if scope_len > self.pycode.co_nlocals: raise ValueError, "new fastscope is longer than the allocated area" # don't assign directly to 'locals_stack_w[:scope_len]' to be # virtualizable-friendly @@ -456,7 +459,7 @@ pass def getfastscopelength(self): - return self.nlocals + return self.pycode.co_nlocals def getclosure(self): return None diff --git a/pypy/interpreter/pyparser/parsestring.py b/pypy/interpreter/pyparser/parsestring.py --- a/pypy/interpreter/pyparser/parsestring.py +++ b/pypy/interpreter/pyparser/parsestring.py @@ -1,5 +1,6 @@ from pypy.interpreter.error import OperationError from pypy.interpreter import unicodehelper +from pypy.rlib.rstring import StringBuilder def parsestr(space, encoding, s, unicode_literals=False): # compiler.transformer.Transformer.decode_literal depends on what @@ -115,21 +116,23 @@ the string is UTF-8 encoded and should be re-encoded in the specified encoding. """ - lis = [] + builder = StringBuilder(len(s)) ps = 0 end = len(s) - while ps < end: - if s[ps] != '\\': - # note that the C code has a label here. - # the logic is the same. + while 1: + ps2 = ps + while ps < end and s[ps] != '\\': if recode_encoding and ord(s[ps]) & 0x80: w, ps = decode_utf8(space, s, ps, end, recode_encoding) - # Append bytes to output buffer. 
- lis.append(w) + builder.append(w) + ps2 = ps else: - lis.append(s[ps]) ps += 1 - continue + if ps > ps2: + builder.append_slice(s, ps2, ps) + if ps == end: + break + ps += 1 if ps == end: raise_app_valueerror(space, 'Trailing \\ in string') @@ -140,25 +143,25 @@ if ch == '\n': pass elif ch == '\\': - lis.append('\\') + builder.append('\\') elif ch == "'": - lis.append("'") + builder.append("'") elif ch == '"': - lis.append('"') + builder.append('"') elif ch == 'b': - lis.append("\010") + builder.append("\010") elif ch == 'f': - lis.append('\014') # FF + builder.append('\014') # FF elif ch == 't': - lis.append('\t') + builder.append('\t') elif ch == 'n': - lis.append('\n') + builder.append('\n') elif ch == 'r': - lis.append('\r') + builder.append('\r') elif ch == 'v': - lis.append('\013') # VT + builder.append('\013') # VT elif ch == 'a': - lis.append('\007') # BEL, not classic C + builder.append('\007') # BEL, not classic C elif ch in '01234567': # Look for up to two more octal digits span = ps @@ -168,13 +171,13 @@ # emulate a strange wrap-around behavior of CPython: # \400 is the same as \000 because 0400 == 256 num = int(octal, 8) & 0xFF - lis.append(chr(num)) + builder.append(chr(num)) ps = span elif ch == 'x': if ps+2 <= end and isxdigit(s[ps]) and isxdigit(s[ps + 1]): hexa = s[ps : ps + 2] num = int(hexa, 16) - lis.append(chr(num)) + builder.append(chr(num)) ps += 2 else: raise_app_valueerror(space, 'invalid \\x escape') @@ -184,13 +187,13 @@ # this was not an escape, so the backslash # has to be added, and we start over in # non-escape mode. - lis.append('\\') + builder.append('\\') ps -= 1 assert ps >= 0 continue # an arbitry number of unescaped UTF-8 bytes may follow. 
- buf = ''.join(lis) + buf = builder.build() return buf diff --git a/pypy/interpreter/streamutil.py b/pypy/interpreter/streamutil.py new file mode 100644 --- /dev/null +++ b/pypy/interpreter/streamutil.py @@ -0,0 +1,17 @@ +from pypy.rlib.streamio import StreamError +from pypy.interpreter.error import OperationError, wrap_oserror2 + +def wrap_streamerror(space, e, w_filename=None): + if isinstance(e, StreamError): + return OperationError(space.w_ValueError, + space.wrap(e.message)) + elif isinstance(e, OSError): + return wrap_oserror_as_ioerror(space, e, w_filename) + else: + # should not happen: wrap_streamerror() is only called when + # StreamErrors = (OSError, StreamError) are raised + return OperationError(space.w_IOError, space.w_None) + +def wrap_oserror_as_ioerror(space, e, w_filename=None): + return wrap_oserror2(space, e, w_filename, + w_exception_class=space.w_IOError) diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py --- a/pypy/interpreter/test/test_objspace.py +++ b/pypy/interpreter/test/test_objspace.py @@ -322,3 +322,14 @@ space.ALL_BUILTIN_MODULES.pop() del space._builtinmodule_list mods = space.get_builtinmodule_to_install() + + def test_dont_reload_builtin_mods_on_startup(self): + from pypy.tool.option import make_config, make_objspace + config = make_config(None) + space = make_objspace(config) + w_executable = space.wrap('executable') + assert space.str_w(space.getattr(space.sys, w_executable)) == 'py.py' + space.setattr(space.sys, w_executable, space.wrap('foobar')) + assert space.str_w(space.getattr(space.sys, w_executable)) == 'foobar' + space.startup() + assert space.str_w(space.getattr(space.sys, w_executable)) == 'foobar' diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py --- a/pypy/interpreter/test/test_typedef.py +++ b/pypy/interpreter/test/test_typedef.py @@ -304,6 +304,42 @@ assert_method(w_o1, "c", True) assert_method(w_o2, "c", False) + def 
test_total_ordering(self): + class W_SomeType(Wrappable): + def __init__(self, space, x): + self.space = space + self.x = x + + def descr__lt(self, w_other): + assert isinstance(w_other, W_SomeType) + return self.space.wrap(self.x < w_other.x) + + def descr__eq(self, w_other): + assert isinstance(w_other, W_SomeType) + return self.space.wrap(self.x == w_other.x) + + W_SomeType.typedef = typedef.TypeDef( + 'some_type', + __total_ordering__ = 'auto', + __lt__ = interp2app(W_SomeType.descr__lt), + __eq__ = interp2app(W_SomeType.descr__eq), + ) + space = self.space + w_b = space.wrap(W_SomeType(space, 2)) + w_c = space.wrap(W_SomeType(space, 2)) + w_a = space.wrap(W_SomeType(space, 1)) + # explicitly defined + assert space.is_true(space.lt(w_a, w_b)) + assert not space.is_true(space.eq(w_a, w_b)) + assert space.is_true(space.eq(w_b, w_c)) + # automatically defined + assert space.is_true(space.le(w_a, w_b)) + assert space.is_true(space.le(w_b, w_c)) + assert space.is_true(space.gt(w_b, w_a)) + assert space.is_true(space.ge(w_b, w_a)) + assert space.is_true(space.ge(w_b, w_c)) + assert space.is_true(space.ne(w_a, w_b)) + assert not space.is_true(space.ne(w_b, w_c)) class AppTestTypeDef: diff --git a/pypy/interpreter/test/test_zpy.py b/pypy/interpreter/test/test_zpy.py --- a/pypy/interpreter/test/test_zpy.py +++ b/pypy/interpreter/test/test_zpy.py @@ -17,14 +17,14 @@ def test_executable(): """Ensures sys.executable points to the py.py script""" # TODO : watch out for spaces/special chars in pypypath - output = run(sys.executable, pypypath, + output = run(sys.executable, pypypath, '-S', "-c", "import sys;print sys.executable") assert output.splitlines()[-1] == pypypath def test_special_names(): """Test the __name__ and __file__ special global names""" cmd = "print __name__; print '__file__' in globals()" - output = run(sys.executable, pypypath, '-c', cmd) + output = run(sys.executable, pypypath, '-S', '-c', cmd) assert output.splitlines()[-2] == '__main__' assert 
output.splitlines()[-1] == 'False' @@ -33,24 +33,24 @@ tmpfile.write("print __name__; print __file__\n") tmpfile.close() - output = run(sys.executable, pypypath, tmpfilepath) + output = run(sys.executable, pypypath, '-S', tmpfilepath) assert output.splitlines()[-2] == '__main__' assert output.splitlines()[-1] == str(tmpfilepath) def test_argv_command(): """Some tests on argv""" # test 1 : no arguments - output = run(sys.executable, pypypath, + output = run(sys.executable, pypypath, '-S', "-c", "import sys;print sys.argv") assert output.splitlines()[-1] == str(['-c']) # test 2 : some arguments after - output = run(sys.executable, pypypath, + output = run(sys.executable, pypypath, '-S', "-c", "import sys;print sys.argv", "hello") assert output.splitlines()[-1] == str(['-c','hello']) # test 3 : additionnal pypy parameters - output = run(sys.executable, pypypath, + output = run(sys.executable, pypypath, '-S', "-O", "-c", "import sys;print sys.argv", "hello") assert output.splitlines()[-1] == str(['-c','hello']) @@ -65,15 +65,15 @@ tmpfile.close() # test 1 : no arguments - output = run(sys.executable, pypypath, tmpfilepath) + output = run(sys.executable, pypypath, '-S', tmpfilepath) assert output.splitlines()[-1] == str([tmpfilepath]) # test 2 : some arguments after - output = run(sys.executable, pypypath, tmpfilepath, "hello") + output = run(sys.executable, pypypath, '-S', tmpfilepath, "hello") assert output.splitlines()[-1] == str([tmpfilepath,'hello']) # test 3 : additionnal pypy parameters - output = run(sys.executable, pypypath, "-O", tmpfilepath, "hello") + output = run(sys.executable, pypypath, '-S', "-O", tmpfilepath, "hello") assert output.splitlines()[-1] == str([tmpfilepath,'hello']) @@ -95,7 +95,7 @@ tmpfile.write(TB_NORMALIZATION_CHK) tmpfile.close() - popen = subprocess.Popen([sys.executable, str(pypypath), tmpfilepath], + popen = subprocess.Popen([sys.executable, str(pypypath), '-S', tmpfilepath], stderr=subprocess.PIPE) _, stderr = popen.communicate() 
assert stderr.endswith('KeyError: \n') diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -12,7 +12,7 @@ from pypy.rlib.jit import promote class TypeDef: - def __init__(self, __name, __base=None, **rawdict): + def __init__(self, __name, __base=None, __total_ordering__=None, **rawdict): "NOT_RPYTHON: initialization-time only" self.name = __name if __base is None: @@ -34,6 +34,9 @@ # xxx used by faking self.fakedcpytype = None self.add_entries(**rawdict) + assert __total_ordering__ in (None, 'auto'), "Unknown value for __total_ordering" + if __total_ordering__ == 'auto': + self.auto_total_ordering() def add_entries(self, **rawdict): # xxx fix the names of the methods to match what app-level expects @@ -41,7 +44,15 @@ if isinstance(value, (interp2app, GetSetProperty)): value.name = key self.rawdict.update(rawdict) - + + def auto_total_ordering(self): + assert '__lt__' in self.rawdict, "__total_ordering='auto' requires __lt__" + assert '__eq__' in self.rawdict, "__total_ordering='auto' requires __eq__" + self.add_entries(__le__ = auto__le__, + __gt__ = auto__gt__, + __ge__ = auto__ge__, + __ne__ = auto__ne__) + def _freeze_(self): # hint for the annotator: track individual constant instances of TypeDef return True @@ -50,6 +61,26 @@ return "<%s name=%r>" % (self.__class__.__name__, self.name) +# generic special cmp methods defined on top of __lt__ and __eq__, used by +# automatic total ordering + + at interp2app +def auto__le__(space, w_self, w_other): + return space.not_(space.lt(w_other, w_self)) + + at interp2app +def auto__gt__(space, w_self, w_other): + return space.lt(w_other, w_self) + + at interp2app +def auto__ge__(space, w_self, w_other): + return space.not_(space.lt(w_self, w_other)) + + at interp2app +def auto__ne__(space, w_self, w_other): + return space.not_(space.eq(w_self, w_other)) + + # ____________________________________________________________ # Hash support 
diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -1,7 +1,6 @@ import os from pypy.rlib import rgc from pypy.rlib.objectmodel import we_are_translated, specialize -from pypy.rlib.debug import fatalerror from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rclass, rstr from pypy.rpython.lltypesystem import llgroup @@ -209,6 +208,7 @@ This is the class supporting --gcrootfinder=asmgcc. """ is_shadow_stack = False + is_64_bit = (WORD == 8) LOC_REG = 0 LOC_ESP_PLUS = 1 @@ -337,17 +337,17 @@ self._gcmap_deadentries += 1 item += asmgcroot.arrayitemsize - def get_basic_shape(self, is_64_bit=False): + def get_basic_shape(self): # XXX: Should this code even really know about stack frame layout of # the JIT? - if is_64_bit: - return [chr(self.LOC_EBP_PLUS | 8), - chr(self.LOC_EBP_MINUS | 8), - chr(self.LOC_EBP_MINUS | 16), - chr(self.LOC_EBP_MINUS | 24), - chr(self.LOC_EBP_MINUS | 32), - chr(self.LOC_EBP_MINUS | 40), - chr(self.LOC_EBP_PLUS | 0), + if self.is_64_bit: + return [chr(self.LOC_EBP_PLUS | 4), # return addr: at 8(%rbp) + chr(self.LOC_EBP_MINUS | 4), # saved %rbx: at -8(%rbp) + chr(self.LOC_EBP_MINUS | 8), # saved %r12: at -16(%rbp) + chr(self.LOC_EBP_MINUS | 12), # saved %r13: at -24(%rbp) + chr(self.LOC_EBP_MINUS | 16), # saved %r14: at -32(%rbp) + chr(self.LOC_EBP_MINUS | 20), # saved %r15: at -40(%rbp) + chr(self.LOC_EBP_PLUS | 0), # saved %rbp: at (%rbp) chr(0)] else: return [chr(self.LOC_EBP_PLUS | 4), # return addr: at 4(%ebp) @@ -367,7 +367,11 @@ shape.append(chr(number | flag)) def add_frame_offset(self, shape, offset): - assert (offset & 3) == 0 + if self.is_64_bit: + assert (offset & 7) == 0 + offset >>= 1 + else: + assert (offset & 3) == 0 if offset >= 0: num = self.LOC_EBP_PLUS | offset else: @@ -519,7 +523,7 @@ def initialize(self): pass - def get_basic_shape(self, is_64_bit=False): + def 
get_basic_shape(self): return [] def add_frame_offset(self, shape, offset): @@ -770,11 +774,19 @@ self.generate_function('malloc_unicode', malloc_unicode, [lltype.Signed]) - # Rarely called: allocate a fixed-size amount of bytes, but - # not in the nursery, because it is too big. Implemented like - # malloc_nursery_slowpath() above. - self.generate_function('malloc_fixedsize', malloc_nursery_slowpath, - [lltype.Signed]) + # Never called as far as I can tell, but there for completeness: + # allocate a fixed-size object, but not in the nursery, because + # it is too big. + def malloc_big_fixedsize(size, tid): + if self.DEBUG: + self._random_usage_of_xmm_registers() + type_id = llop.extract_ushort(llgroup.HALFWORD, tid) + check_typeid(type_id) + return llop1.do_malloc_fixedsize_clear(llmemory.GCREF, + type_id, size, + False, False, False) + self.generate_function('malloc_big_fixedsize', malloc_big_fixedsize, + [lltype.Signed] * 2) def _bh_malloc(self, sizedescr): from pypy.rpython.memory.gctypelayout import check_typeid diff --git a/pypy/jit/backend/llsupport/rewrite.py b/pypy/jit/backend/llsupport/rewrite.py --- a/pypy/jit/backend/llsupport/rewrite.py +++ b/pypy/jit/backend/llsupport/rewrite.py @@ -96,8 +96,10 @@ def handle_new_fixedsize(self, descr, op): assert isinstance(descr, SizeDescr) size = descr.size - self.gen_malloc_nursery(size, op.result) - self.gen_initialize_tid(op.result, descr.tid) + if self.gen_malloc_nursery(size, op.result): + self.gen_initialize_tid(op.result, descr.tid) + else: + self.gen_malloc_fixedsize(size, descr.tid, op.result) def handle_new_array(self, arraydescr, op): v_length = op.getarg(0) @@ -112,8 +114,8 @@ pass # total_size is still -1 elif arraydescr.itemsize == 0: total_size = arraydescr.basesize - if 0 <= total_size <= 0xffffff: # up to 16MB, arbitrarily - self.gen_malloc_nursery(total_size, op.result) + if (total_size >= 0 and + self.gen_malloc_nursery(total_size, op.result)): self.gen_initialize_tid(op.result, arraydescr.tid) 
self.gen_initialize_len(op.result, v_length, arraydescr.lendescr) elif self.gc_ll_descr.kind == 'boehm': @@ -147,13 +149,22 @@ # mark 'v_result' as freshly malloced self.recent_mallocs[v_result] = None - def gen_malloc_fixedsize(self, size, v_result): - """Generate a CALL_MALLOC_GC(malloc_fixedsize_fn, Const(size)). - Note that with the framework GC, this should be called very rarely. + def gen_malloc_fixedsize(self, size, typeid, v_result): + """Generate a CALL_MALLOC_GC(malloc_fixedsize_fn, ...). + Used on Boehm, and on the framework GC for large fixed-size + mallocs. (For all I know this latter case never occurs in + practice, but better safe than sorry.) """ - addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_fixedsize') - self._gen_call_malloc_gc([ConstInt(addr), ConstInt(size)], v_result, - self.gc_ll_descr.malloc_fixedsize_descr) + if self.gc_ll_descr.fielddescr_tid is not None: # framework GC + assert (size & (WORD-1)) == 0, "size not aligned?" + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_big_fixedsize') + args = [ConstInt(addr), ConstInt(size), ConstInt(typeid)] + descr = self.gc_ll_descr.malloc_big_fixedsize_descr + else: # Boehm + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_fixedsize') + args = [ConstInt(addr), ConstInt(size)] + descr = self.gc_ll_descr.malloc_fixedsize_descr + self._gen_call_malloc_gc(args, v_result, descr) def gen_boehm_malloc_array(self, arraydescr, v_num_elem, v_result): """Generate a CALL_MALLOC_GC(malloc_array_fn, ...) 
for Boehm.""" @@ -211,8 +222,7 @@ """ size = self.round_up_for_allocation(size) if not self.gc_ll_descr.can_use_nursery_malloc(size): - self.gen_malloc_fixedsize(size, v_result) - return + return False # op = None if self._op_malloc_nursery is not None: @@ -238,6 +248,7 @@ self._previous_size = size self._v_last_malloced_nursery = v_result self.recent_mallocs[v_result] = None + return True def gen_initialize_tid(self, v_newgcobj, tid): if self.gc_ll_descr.fielddescr_tid is not None: diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -57,6 +57,7 @@ def frame_pos(n): return -4*(4+n) gcrootmap = GcRootMap_asmgcc() + gcrootmap.is_64_bit = False num1 = frame_pos(-5) num1a = num1|2 num2 = frame_pos(55) diff --git a/pypy/jit/backend/llsupport/test/test_rewrite.py b/pypy/jit/backend/llsupport/test/test_rewrite.py --- a/pypy/jit/backend/llsupport/test/test_rewrite.py +++ b/pypy/jit/backend/llsupport/test/test_rewrite.py @@ -119,12 +119,19 @@ jump() """, """ [] - p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ - %(adescr.basesize + 10 * adescr.itemsize)d, \ - descr=malloc_fixedsize_descr) - setfield_gc(p0, 10, descr=alendescr) + p0 = call_malloc_gc(ConstClass(malloc_array), \ + %(adescr.basesize)d, \ + 10, \ + %(adescr.itemsize)d, \ + %(adescr.lendescr.offset)d, \ + descr=malloc_array_descr) jump() """) +## should ideally be: +## p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ +## %(adescr.basesize + 10 * adescr.itemsize)d, \ +## descr=malloc_fixedsize_descr) +## setfield_gc(p0, 10, descr=alendescr) def test_new_array_variable(self): self.check_rewrite(""" @@ -178,13 +185,20 @@ jump() """, """ [i1] - p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ - %(unicodedescr.basesize + \ - 10 * unicodedescr.itemsize)d, \ - descr=malloc_fixedsize_descr) - setfield_gc(p0, 10, descr=unicodelendescr) + p0 = 
call_malloc_gc(ConstClass(malloc_array), \ + %(unicodedescr.basesize)d, \ + 10, \ + %(unicodedescr.itemsize)d, \ + %(unicodelendescr.offset)d, \ + descr=malloc_array_descr) jump() """) +## should ideally be: +## p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ +## %(unicodedescr.basesize + \ +## 10 * unicodedescr.itemsize)d, \ +## descr=malloc_fixedsize_descr) +## setfield_gc(p0, 10, descr=unicodelendescr) class TestFramework(RewriteTests): @@ -203,7 +217,7 @@ # class FakeCPU(object): def sizeof(self, STRUCT): - descr = SizeDescrWithVTable(102) + descr = SizeDescrWithVTable(104) descr.tid = 9315 return descr self.cpu = FakeCPU() @@ -368,11 +382,9 @@ jump() """, """ [] - p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ - %(bdescr.basesize + 104)d, \ - descr=malloc_fixedsize_descr) - setfield_gc(p0, 8765, descr=tiddescr) - setfield_gc(p0, 103, descr=blendescr) + p0 = call_malloc_gc(ConstClass(malloc_array), 1, \ + %(bdescr.tid)d, 103, \ + descr=malloc_array_descr) jump() """) @@ -435,9 +447,8 @@ jump() """, """ [p1] - p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 104, \ - descr=malloc_fixedsize_descr) - setfield_gc(p0, 9315, descr=tiddescr) + p0 = call_malloc_gc(ConstClass(malloc_big_fixedsize), 104, 9315, \ + descr=malloc_big_fixedsize_descr) setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) jump() """) diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -266,6 +266,38 @@ res = self.cpu.get_latest_value_int(0) assert res == 20 + def test_compile_big_bridge_out_of_small_loop(self): + i0 = BoxInt() + faildescr1 = BasicFailDescr(1) + looptoken = JitCellToken() + operations = [ + ResOperation(rop.GUARD_FALSE, [i0], None, descr=faildescr1), + ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(2)), + ] + inputargs = [i0] + operations[0].setfailargs([i0]) + self.cpu.compile_loop(inputargs, operations, looptoken) + + i1list = 
[BoxInt() for i in range(1000)] + bridge = [] + iprev = i0 + for i1 in i1list: + bridge.append(ResOperation(rop.INT_ADD, [iprev, ConstInt(1)], i1)) + iprev = i1 + bridge.append(ResOperation(rop.GUARD_FALSE, [i0], None, + descr=BasicFailDescr(3))) + bridge.append(ResOperation(rop.FINISH, [], None, + descr=BasicFailDescr(4))) + bridge[-2].setfailargs(i1list) + + self.cpu.compile_bridge(faildescr1, [i0], bridge, looptoken) + + fail = self.cpu.execute_token(looptoken, 1) + assert fail.identifier == 3 + for i in range(1000): + res = self.cpu.get_latest_value_int(i) + assert res == 2 + i + def test_get_latest_value_count(self): i0 = BoxInt() i1 = BoxInt() @@ -2221,6 +2253,35 @@ print 'step 4 ok' print '-'*79 + def test_guard_not_invalidated_and_label(self): + # test that the guard_not_invalidated reserves enough room before + # the label. If it doesn't, then in this example after we invalidate + # the guard, jumping to the label will hit the invalidation code too + cpu = self.cpu + i0 = BoxInt() + faildescr = BasicFailDescr(1) + labeldescr = TargetToken() + ops = [ + ResOperation(rop.GUARD_NOT_INVALIDATED, [], None, descr=faildescr), + ResOperation(rop.LABEL, [i0], None, descr=labeldescr), + ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(3)), + ] + ops[0].setfailargs([]) + looptoken = JitCellToken() + self.cpu.compile_loop([i0], ops, looptoken) + # mark as failing + self.cpu.invalidate_loop(looptoken) + # attach a bridge + i2 = BoxInt() + ops = [ + ResOperation(rop.JUMP, [ConstInt(333)], None, descr=labeldescr), + ] + self.cpu.compile_bridge(faildescr, [], ops, looptoken) + # run: must not be caught in an infinite loop + fail = self.cpu.execute_token(looptoken, 16) + assert fail.identifier == 3 + assert self.cpu.get_latest_value_int(0) == 333 + # pure do_ / descr features def test_do_operations(self): diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ 
-165,7 +165,6 @@ self.jump_target_descr = None self.close_stack_struct = 0 self.final_jump_op = None - self.min_bytes_before_label = 0 def _prepare(self, inputargs, operations, allgcrefs): self.fm = X86FrameManager() @@ -199,8 +198,13 @@ operations = self._prepare(inputargs, operations, allgcrefs) self._update_bindings(arglocs, inputargs) self.param_depth = prev_depths[1] + self.min_bytes_before_label = 0 return operations + def ensure_next_label_is_at_least_at_position(self, at_least_position): + self.min_bytes_before_label = max(self.min_bytes_before_label, + at_least_position) + def reserve_param(self, n): self.param_depth = max(self.param_depth, n) @@ -468,7 +472,11 @@ self.assembler.mc.mark_op(None) # end of the loop def flush_loop(self): - # rare case: if the loop is too short, pad with NOPs + # rare case: if the loop is too short, or if we are just after + # a GUARD_NOT_INVALIDATED, pad with NOPs. Important! This must + # be called to ensure that there are enough bytes produced, + # because GUARD_NOT_INVALIDATED or redirect_call_assembler() + # will maybe overwrite them. mc = self.assembler.mc while mc.get_relative_pos() < self.min_bytes_before_label: mc.NOP() @@ -558,7 +566,15 @@ def consider_guard_no_exception(self, op): self.perform_guard(op, [], None) - consider_guard_not_invalidated = consider_guard_no_exception + def consider_guard_not_invalidated(self, op): + mc = self.assembler.mc + n = mc.get_relative_pos() + self.perform_guard(op, [], None) + assert n == mc.get_relative_pos() + # ensure that the next label is at least 5 bytes farther than + # the current position. Otherwise, when invalidating the guard, + # we would overwrite randomly the next label's position. 
+ self.ensure_next_label_is_at_least_at_position(n + 5) def consider_guard_exception(self, op): loc = self.rm.make_sure_var_in_reg(op.getarg(0)) @@ -1377,7 +1393,7 @@ self.force_spill_var(op.getarg(0)) def get_mark_gc_roots(self, gcrootmap, use_copy_area=False): - shape = gcrootmap.get_basic_shape(IS_X86_64) + shape = gcrootmap.get_basic_shape() for v, val in self.fm.bindings.items(): if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): assert isinstance(val, StackLoc) diff --git a/pypy/jit/backend/x86/support.py b/pypy/jit/backend/x86/support.py --- a/pypy/jit/backend/x86/support.py +++ b/pypy/jit/backend/x86/support.py @@ -1,6 +1,7 @@ import sys from pypy.rpython.lltypesystem import lltype, rffi, llmemory from pypy.translator.tool.cbuild import ExternalCompilationInfo +from pypy.jit.backend.x86.arch import WORD def values_array(TP, size): @@ -37,8 +38,13 @@ if sys.platform == 'win32': ensure_sse2_floats = lambda : None + # XXX check for SSE2 on win32 too else: + if WORD == 4: + extra = ['-DPYPY_X86_CHECK_SSE2'] + else: + extra = [] ensure_sse2_floats = rffi.llexternal_use_eci(ExternalCompilationInfo( compile_extra = ['-msse2', '-mfpmath=sse', - '-DPYPY_CPU_HAS_STANDARD_PRECISION'], + '-DPYPY_CPU_HAS_STANDARD_PRECISION'] + extra, )) diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -184,6 +184,8 @@ self.addrs[1] = self.addrs[0] + 64 self.calls = [] def malloc_slowpath(size): + if self.gcrootmap is not None: # hook + self.gcrootmap.hook_malloc_slowpath() self.calls.append(size) # reset the nursery nadr = rffi.cast(lltype.Signed, self.nursery) @@ -257,3 +259,218 @@ assert gc_ll_descr.addrs[0] == nurs_adr + 24 # this should call slow path once assert gc_ll_descr.calls == [24] + + def test_save_regs_around_malloc(self): + S1 = lltype.GcStruct('S1') + S2 = lltype.GcStruct('S2', ('s0', 
lltype.Ptr(S1)), + ('s1', lltype.Ptr(S1)), + ('s2', lltype.Ptr(S1)), + ('s3', lltype.Ptr(S1)), + ('s4', lltype.Ptr(S1)), + ('s5', lltype.Ptr(S1)), + ('s6', lltype.Ptr(S1)), + ('s7', lltype.Ptr(S1)), + ('s8', lltype.Ptr(S1)), + ('s9', lltype.Ptr(S1)), + ('s10', lltype.Ptr(S1)), + ('s11', lltype.Ptr(S1)), + ('s12', lltype.Ptr(S1)), + ('s13', lltype.Ptr(S1)), + ('s14', lltype.Ptr(S1)), + ('s15', lltype.Ptr(S1))) + cpu = self.cpu + self.namespace = self.namespace.copy() + for i in range(16): + self.namespace['ds%i' % i] = cpu.fielddescrof(S2, 's%d' % i) + ops = ''' + [p0] + p1 = getfield_gc(p0, descr=ds0) + p2 = getfield_gc(p0, descr=ds1) + p3 = getfield_gc(p0, descr=ds2) + p4 = getfield_gc(p0, descr=ds3) + p5 = getfield_gc(p0, descr=ds4) + p6 = getfield_gc(p0, descr=ds5) + p7 = getfield_gc(p0, descr=ds6) + p8 = getfield_gc(p0, descr=ds7) + p9 = getfield_gc(p0, descr=ds8) + p10 = getfield_gc(p0, descr=ds9) + p11 = getfield_gc(p0, descr=ds10) + p12 = getfield_gc(p0, descr=ds11) + p13 = getfield_gc(p0, descr=ds12) + p14 = getfield_gc(p0, descr=ds13) + p15 = getfield_gc(p0, descr=ds14) + p16 = getfield_gc(p0, descr=ds15) + # + # now all registers are in use + p17 = call_malloc_nursery(40) + p18 = call_malloc_nursery(40) # overflow + # + finish(p1, p2, p3, p4, p5, p6, p7, p8, \ + p9, p10, p11, p12, p13, p14, p15, p16) + ''' + s2 = lltype.malloc(S2) + for i in range(16): + setattr(s2, 's%d' % i, lltype.malloc(S1)) + s2ref = lltype.cast_opaque_ptr(llmemory.GCREF, s2) + # + self.interpret(ops, [s2ref]) + gc_ll_descr = cpu.gc_ll_descr + gc_ll_descr.check_nothing_in_nursery() + assert gc_ll_descr.calls == [40] + # check the returned pointers + for i in range(16): + s1ref = self.cpu.get_latest_value_ref(i) + s1 = lltype.cast_opaque_ptr(lltype.Ptr(S1), s1ref) + assert s1 == getattr(s2, 's%d' % i) + + +class MockShadowStackRootMap(MockGcRootMap): + is_shadow_stack = True + MARKER_FRAME = 88 # this marker follows the frame addr + S1 = lltype.GcStruct('S1') + + def __init__(self): + 
self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 20, + flavor='raw') + # root_stack_top + self.addrs[0] = rffi.cast(lltype.Signed, self.addrs) + 3*WORD + # random stuff + self.addrs[1] = 123456 + self.addrs[2] = 654321 + self.check_initial_and_final_state() + self.callshapes = {} + self.should_see = [] + + def check_initial_and_final_state(self): + assert self.addrs[0] == rffi.cast(lltype.Signed, self.addrs) + 3*WORD + assert self.addrs[1] == 123456 + assert self.addrs[2] == 654321 + + def get_root_stack_top_addr(self): + return rffi.cast(lltype.Signed, self.addrs) + + def compress_callshape(self, shape, datablockwrapper): + assert shape[0] == 'shape' + return ['compressed'] + shape[1:] + + def write_callshape(self, mark, force_index): + assert mark[0] == 'compressed' + assert force_index not in self.callshapes + assert force_index == 42 + len(self.callshapes) + self.callshapes[force_index] = mark + + def hook_malloc_slowpath(self): + num_entries = self.addrs[0] - rffi.cast(lltype.Signed, self.addrs) + assert num_entries == 5*WORD # 3 initially, plus 2 by the asm frame + assert self.addrs[1] == 123456 # unchanged + assert self.addrs[2] == 654321 # unchanged + frame_addr = self.addrs[3] # pushed by the asm frame + assert self.addrs[4] == self.MARKER_FRAME # pushed by the asm frame + # + from pypy.jit.backend.x86.arch import FORCE_INDEX_OFS + addr = rffi.cast(rffi.CArrayPtr(lltype.Signed), + frame_addr + FORCE_INDEX_OFS) + force_index = addr[0] + assert force_index == 43 # in this test: the 2nd call_malloc_nursery + # + # The callshapes[43] saved above should list addresses both in the + # COPY_AREA and in the "normal" stack, where all the 16 values p1-p16 + # of test_save_regs_at_correct_place should have been stored. Here + # we replace them with new addresses, to emulate a moving GC. 
+ shape = self.callshapes[force_index] + assert len(shape[1:]) == len(self.should_see) + new_objects = [None] * len(self.should_see) + for ofs in shape[1:]: + assert isinstance(ofs, int) # not a register at all here + addr = rffi.cast(rffi.CArrayPtr(lltype.Signed), frame_addr + ofs) + contains = addr[0] + for j in range(len(self.should_see)): + obj = self.should_see[j] + if contains == rffi.cast(lltype.Signed, obj): + assert new_objects[j] is None # duplicate? + break + else: + assert 0 # the value read from the stack looks random? + new_objects[j] = lltype.malloc(self.S1) + addr[0] = rffi.cast(lltype.Signed, new_objects[j]) + self.should_see[:] = new_objects + + +class TestMallocShadowStack(BaseTestRegalloc): + + def setup_method(self, method): + cpu = CPU(None, None) + cpu.gc_ll_descr = GCDescrFastpathMalloc() + cpu.gc_ll_descr.gcrootmap = MockShadowStackRootMap() + cpu.setup_once() + for i in range(42): + cpu.reserve_some_free_fail_descr_number() + self.cpu = cpu + + def test_save_regs_at_correct_place(self): + cpu = self.cpu + gc_ll_descr = cpu.gc_ll_descr + S1 = gc_ll_descr.gcrootmap.S1 + S2 = lltype.GcStruct('S2', ('s0', lltype.Ptr(S1)), + ('s1', lltype.Ptr(S1)), + ('s2', lltype.Ptr(S1)), + ('s3', lltype.Ptr(S1)), + ('s4', lltype.Ptr(S1)), + ('s5', lltype.Ptr(S1)), + ('s6', lltype.Ptr(S1)), + ('s7', lltype.Ptr(S1)), + ('s8', lltype.Ptr(S1)), + ('s9', lltype.Ptr(S1)), + ('s10', lltype.Ptr(S1)), + ('s11', lltype.Ptr(S1)), + ('s12', lltype.Ptr(S1)), + ('s13', lltype.Ptr(S1)), + ('s14', lltype.Ptr(S1)), + ('s15', lltype.Ptr(S1))) + self.namespace = self.namespace.copy() + for i in range(16): + self.namespace['ds%i' % i] = cpu.fielddescrof(S2, 's%d' % i) + ops = ''' + [p0] + p1 = getfield_gc(p0, descr=ds0) + p2 = getfield_gc(p0, descr=ds1) + p3 = getfield_gc(p0, descr=ds2) + p4 = getfield_gc(p0, descr=ds3) + p5 = getfield_gc(p0, descr=ds4) + p6 = getfield_gc(p0, descr=ds5) + p7 = getfield_gc(p0, descr=ds6) + p8 = getfield_gc(p0, descr=ds7) + p9 = getfield_gc(p0, 
descr=ds8) + p10 = getfield_gc(p0, descr=ds9) + p11 = getfield_gc(p0, descr=ds10) + p12 = getfield_gc(p0, descr=ds11) + p13 = getfield_gc(p0, descr=ds12) + p14 = getfield_gc(p0, descr=ds13) + p15 = getfield_gc(p0, descr=ds14) + p16 = getfield_gc(p0, descr=ds15) + # + # now all registers are in use + p17 = call_malloc_nursery(40) + p18 = call_malloc_nursery(40) # overflow + # + finish(p1, p2, p3, p4, p5, p6, p7, p8, \ + p9, p10, p11, p12, p13, p14, p15, p16) + ''' + s2 = lltype.malloc(S2) + for i in range(16): + s1 = lltype.malloc(S1) + setattr(s2, 's%d' % i, s1) + gc_ll_descr.gcrootmap.should_see.append(s1) + s2ref = lltype.cast_opaque_ptr(llmemory.GCREF, s2) + # + self.interpret(ops, [s2ref]) + gc_ll_descr.check_nothing_in_nursery() + assert gc_ll_descr.calls == [40] + gc_ll_descr.gcrootmap.check_initial_and_final_state() + # check the returned pointers + for i in range(16): + s1ref = self.cpu.get_latest_value_ref(i) + s1 = lltype.cast_opaque_ptr(lltype.Ptr(S1), s1ref) + for j in range(16): + assert s1 != getattr(s2, 's%d' % j) + assert s1 == gc_ll_descr.gcrootmap.should_see[i] diff --git a/pypy/jit/backend/x86/test/test_ztranslation.py b/pypy/jit/backend/x86/test/test_ztranslation.py --- a/pypy/jit/backend/x86/test/test_ztranslation.py +++ b/pypy/jit/backend/x86/test/test_ztranslation.py @@ -52,6 +52,7 @@ set_param(jitdriver, "trace_eagerness", 2) total = 0 frame = Frame(i) + j = float(j) while frame.i > 3: jitdriver.can_enter_jit(frame=frame, total=total, j=j) jitdriver.jit_merge_point(frame=frame, total=total, j=j) diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -1379,7 +1379,8 @@ elif opnum == rop.GUARD_NO_OVERFLOW: # Produced by int_xxx_ovf(). The pc is just after the opcode. # We get here because it did not used to overflow, but now it does. 
- return get_llexception(self.cpu, OverflowError()) + if not dont_change_position: + return get_llexception(self.cpu, OverflowError()) # elif opnum == rop.GUARD_OVERFLOW: # Produced by int_xxx_ovf(). The pc is just after the opcode. diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -289,8 +289,21 @@ assert isinstance(token, TargetToken) assert token.original_jitcell_token is None token.original_jitcell_token = trace.original_jitcell_token - - + + +def do_compile_loop(metainterp_sd, inputargs, operations, looptoken, + log=True, name=''): + metainterp_sd.logger_ops.log_loop(inputargs, operations, -2, + 'compiling', name=name) + return metainterp_sd.cpu.compile_loop(inputargs, operations, looptoken, + log=log, name=name) + +def do_compile_bridge(metainterp_sd, faildescr, inputargs, operations, + original_loop_token, log=True): + metainterp_sd.logger_ops.log_bridge(inputargs, operations, -2) + return metainterp_sd.cpu.compile_bridge(faildescr, inputargs, operations, + original_loop_token, log=log) + def send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, type): vinfo = jitdriver_sd.virtualizable_info if vinfo is not None: @@ -319,9 +332,9 @@ metainterp_sd.profiler.start_backend() debug_start("jit-backend") try: - asminfo = metainterp_sd.cpu.compile_loop(loop.inputargs, operations, - original_jitcell_token, - name=loopname) + asminfo = do_compile_loop(metainterp_sd, loop.inputargs, + operations, original_jitcell_token, + name=loopname) finally: debug_stop("jit-backend") metainterp_sd.profiler.end_backend() @@ -333,7 +346,6 @@ metainterp_sd.stats.compiled() metainterp_sd.log("compiled new " + type) # - loopname = jitdriver_sd.warmstate.get_location_str(greenkey) if asminfo is not None: ops_offset = asminfo.ops_offset else: @@ -365,9 +377,9 @@ metainterp_sd.profiler.start_backend() debug_start("jit-backend") try: - asminfo = 
metainterp_sd.cpu.compile_bridge(faildescr, inputargs, - operations, - original_loop_token) + asminfo = do_compile_bridge(metainterp_sd, faildescr, inputargs, + operations, + original_loop_token) finally: debug_stop("jit-backend") metainterp_sd.profiler.end_backend() diff --git a/pypy/jit/metainterp/logger.py b/pypy/jit/metainterp/logger.py --- a/pypy/jit/metainterp/logger.py +++ b/pypy/jit/metainterp/logger.py @@ -18,6 +18,10 @@ debug_start("jit-log-noopt-loop") logops = self._log_operations(inputargs, operations, ops_offset) debug_stop("jit-log-noopt-loop") + elif number == -2: + debug_start("jit-log-compiling-loop") + logops = self._log_operations(inputargs, operations, ops_offset) + debug_stop("jit-log-compiling-loop") else: debug_start("jit-log-opt-loop") debug_print("# Loop", number, '(%s)' % name , ":", type, @@ -31,6 +35,10 @@ debug_start("jit-log-noopt-bridge") logops = self._log_operations(inputargs, operations, ops_offset) debug_stop("jit-log-noopt-bridge") + elif number == -2: + debug_start("jit-log-compiling-bridge") + logops = self._log_operations(inputargs, operations, ops_offset) + debug_stop("jit-log-compiling-bridge") else: debug_start("jit-log-opt-bridge") debug_print("# bridge out of Guard", number, diff --git a/pypy/jit/metainterp/optimizeopt/__init__.py b/pypy/jit/metainterp/optimizeopt/__init__.py --- a/pypy/jit/metainterp/optimizeopt/__init__.py +++ b/pypy/jit/metainterp/optimizeopt/__init__.py @@ -9,7 +9,7 @@ from pypy.jit.metainterp.optimizeopt.simplify import OptSimplify from pypy.jit.metainterp.optimizeopt.pure import OptPure from pypy.jit.metainterp.optimizeopt.earlyforce import OptEarlyForce -from pypy.rlib.jit import PARAMETERS +from pypy.rlib.jit import PARAMETERS, ENABLE_ALL_OPTS from pypy.rlib.unroll import unrolling_iterable from pypy.rlib.debug import debug_start, debug_stop, debug_print @@ -30,6 +30,9 @@ ALL_OPTS_LIST = [name for name, _ in ALL_OPTS] ALL_OPTS_NAMES = ':'.join([name for name, _ in ALL_OPTS]) +assert 
ENABLE_ALL_OPTS == ALL_OPTS_NAMES, ( + 'please fix rlib/jit.py to say ENABLE_ALL_OPTS = %r' % (ALL_OPTS_NAMES,)) + def build_opt_chain(metainterp_sd, enable_opts): config = metainterp_sd.config optimizations = [] diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -567,7 +567,7 @@ assert isinstance(descr, compile.ResumeGuardDescr) modifier = resume.ResumeDataVirtualAdder(descr, self.resumedata_memo) try: - newboxes = modifier.finish(self.values, self.pendingfields) + newboxes = modifier.finish(self, self.pendingfields) if len(newboxes) > self.metainterp_sd.options.failargs_limit: raise resume.TagOverflow except resume.TagOverflow: diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py --- a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -398,6 +398,40 @@ with raises(InvalidLoop): self.optimize_loop(ops, ops) + def test_issue1045(self): + ops = """ + [i55] + i73 = int_mod(i55, 2) + i75 = int_rshift(i73, 63) + i76 = int_and(2, i75) + i77 = int_add(i73, i76) + i81 = int_eq(i77, 1) + i0 = int_ge(i55, 1) + guard_true(i0) [] + label(i55) + i3 = int_mod(i55, 2) + i5 = int_rshift(i3, 63) + i6 = int_and(2, i5) + i7 = int_add(i3, i6) + i8 = int_eq(i7, 1) + escape(i8) + jump(i55) + """ + expected = """ + [i55] + i73 = int_mod(i55, 2) + i75 = int_rshift(i73, 63) + i76 = int_and(2, i75) + i77 = int_add(i73, i76) + i81 = int_eq(i77, 1) + i0 = int_ge(i55, 1) + guard_true(i0) [] + label(i55, i81) + escape(i81) + jump(i55, i81) + """ + self.optimize_loop(ops, expected) + class OptRenameStrlen(Optimization): def propagate_forward(self, op): dispatch_opt(self, op) @@ -423,7 +457,7 @@ metainterp_sd = FakeMetaInterpStaticData(self.cpu) optimize_unroll(metainterp_sd, loop, 
[OptRenameStrlen(), OptPure()], True) - def test_optimizer_renaming_boxes(self): + def test_optimizer_renaming_boxes1(self): ops = """ [p1] i1 = strlen(p1) @@ -457,7 +491,6 @@ jump(p1, i11) """ self.optimize_loop(ops, expected) - class TestLLtype(OptimizeoptTestMultiLabel, LLtypeMixin): diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -7760,6 +7760,59 @@ """ self.optimize_loop(ops, expected) + def test_constant_failargs(self): + ops = """ + [p1, i2, i3] + setfield_gc(p1, ConstPtr(myptr), descr=nextdescr) + p16 = getfield_gc(p1, descr=nextdescr) + guard_true(i2) [p16, i3] + jump(p1, i3, i2) + """ + preamble = """ + [p1, i2, i3] + setfield_gc(p1, ConstPtr(myptr), descr=nextdescr) + guard_true(i2) [i3] + jump(p1, i3) + """ + expected = """ + [p1, i3] + guard_true(i3) [] + jump(p1, 1) + """ + self.optimize_loop(ops, expected, preamble) + + def test_issue1048(self): + ops = """ + [p1, i2, i3] + p16 = getfield_gc(p1, descr=nextdescr) + guard_true(i2) [p16] + setfield_gc(p1, ConstPtr(myptr), descr=nextdescr) + jump(p1, i3, i2) + """ + expected = """ + [p1, i3] + guard_true(i3) [] + jump(p1, 1) + """ + self.optimize_loop(ops, expected) + + def test_issue1048_ok(self): + ops = """ + [p1, i2, i3] + p16 = getfield_gc(p1, descr=nextdescr) + call(p16, descr=nonwritedescr) + guard_true(i2) [p16] + setfield_gc(p1, ConstPtr(myptr), descr=nextdescr) + jump(p1, i3, i2) + """ + expected = """ + [p1, i3] + call(ConstPtr(myptr), descr=nonwritedescr) + guard_true(i3) [] + jump(p1, 1) + """ + self.optimize_loop(ops, expected) + class TestLLtype(OptimizeOptTest, LLtypeMixin): pass diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -9,7 
+9,6 @@ from pypy.jit.metainterp.inliner import Inliner from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.metainterp.resume import Snapshot -from pypy.rlib.debug import debug_print import sys, os # FIXME: Introduce some VirtualOptimizer super class instead @@ -121,9 +120,9 @@ limit = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.retrace_limit if cell_token.retraced_count < limit: cell_token.retraced_count += 1 - debug_print('Retracing (%d/%d)' % (cell_token.retraced_count, limit)) + #debug_print('Retracing (%d/%d)' % (cell_token.retraced_count, limit)) else: - debug_print("Retrace count reached, jumping to preamble") + #debug_print("Retrace count reached, jumping to preamble") assert cell_token.target_tokens[0].virtual_state is None jumpop.setdescr(cell_token.target_tokens[0]) self.optimizer.send_extra_operation(jumpop) @@ -260,7 +259,7 @@ if op and op.result: preamble_value = exported_state.exported_values[op.result] value = self.optimizer.getvalue(op.result) - if not value.is_virtual(): + if not value.is_virtual() and not value.is_constant(): imp = ValueImporter(self, preamble_value, op) self.optimizer.importable_values[value] = imp newvalue = self.optimizer.getvalue(op.result) @@ -268,12 +267,14 @@ # note that emitting here SAME_AS should not happen, but # in case it does, we would prefer to be suboptimal in asm # to a fatal RPython exception. 
- if newresult is not op.result and not newvalue.is_constant(): + if newresult is not op.result and \ + not self.short_boxes.has_producer(newresult) and \ + not newvalue.is_constant(): op = ResOperation(rop.SAME_AS, [op.result], newresult) self.optimizer._newoperations.append(op) - if self.optimizer.loop.logops: - debug_print(' Falling back to add extra: ' + - self.optimizer.loop.logops.repr_of_resop(op)) + #if self.optimizer.loop.logops: + # debug_print(' Falling back to add extra: ' + + # self.optimizer.loop.logops.repr_of_resop(op)) self.optimizer.flush() self.optimizer.emitting_dissabled = False @@ -339,8 +340,8 @@ if i == len(newoperations): while j < len(jumpargs): a = jumpargs[j] - if self.optimizer.loop.logops: - debug_print('J: ' + self.optimizer.loop.logops.repr_of_arg(a)) + #if self.optimizer.loop.logops: + # debug_print('J: ' + self.optimizer.loop.logops.repr_of_arg(a)) self.import_box(a, inputargs, short_jumpargs, jumpargs) j += 1 else: @@ -351,11 +352,11 @@ if op.is_guard(): args = args + op.getfailargs() - if self.optimizer.loop.logops: - debug_print('OP: ' + self.optimizer.loop.logops.repr_of_resop(op)) + #if self.optimizer.loop.logops: + # debug_print('OP: ' + self.optimizer.loop.logops.repr_of_resop(op)) for a in args: - if self.optimizer.loop.logops: - debug_print('A: ' + self.optimizer.loop.logops.repr_of_arg(a)) + #if self.optimizer.loop.logops: + # debug_print('A: ' + self.optimizer.loop.logops.repr_of_arg(a)) self.import_box(a, inputargs, short_jumpargs, jumpargs) i += 1 newoperations = self.optimizer.get_newoperations() @@ -368,18 +369,18 @@ # that is compatible with the virtual state at the start of the loop modifier = VirtualStateAdder(self.optimizer) final_virtual_state = modifier.get_virtual_state(original_jumpargs) - debug_start('jit-log-virtualstate') - virtual_state.debug_print('Closed loop with ') + #debug_start('jit-log-virtualstate') + #virtual_state.debug_print('Closed loop with ') bad = {} if not 
virtual_state.generalization_of(final_virtual_state, bad): # We ended up with a virtual state that is not compatible # and we are thus unable to jump to the start of the loop - final_virtual_state.debug_print("Bad virtual state at end of loop, ", - bad) - debug_stop('jit-log-virtualstate') + #final_virtual_state.debug_print("Bad virtual state at end of loop, ", + # bad) + #debug_stop('jit-log-virtualstate') raise InvalidLoop - debug_stop('jit-log-virtualstate') + #debug_stop('jit-log-virtualstate') maxguards = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.max_retrace_guards if self.optimizer.emitted_guards > maxguards: @@ -442,9 +443,9 @@ self.ensure_short_op_emitted(self.short_boxes.producer(a), optimizer, seen) - if self.optimizer.loop.logops: - debug_print(' Emitting short op: ' + - self.optimizer.loop.logops.repr_of_resop(op)) + #if self.optimizer.loop.logops: + # debug_print(' Emitting short op: ' + + # self.optimizer.loop.logops.repr_of_resop(op)) optimizer.send_extra_operation(op) seen[op.result] = True @@ -525,8 +526,8 @@ args = jumpop.getarglist() modifier = VirtualStateAdder(self.optimizer) virtual_state = modifier.get_virtual_state(args) - debug_start('jit-log-virtualstate') - virtual_state.debug_print("Looking for ") + #debug_start('jit-log-virtualstate') + #virtual_state.debug_print("Looking for ") for target in cell_token.target_tokens: if not target.virtual_state: @@ -535,10 +536,10 @@ extra_guards = [] bad = {} - debugmsg = 'Did not match ' + #debugmsg = 'Did not match ' if target.virtual_state.generalization_of(virtual_state, bad): ok = True - debugmsg = 'Matched ' + #debugmsg = 'Matched ' else: try: cpu = self.optimizer.cpu @@ -547,13 +548,13 @@ extra_guards) ok = True - debugmsg = 'Guarded to match ' + #debugmsg = 'Guarded to match ' except InvalidLoop: pass - target.virtual_state.debug_print(debugmsg, bad) + #target.virtual_state.debug_print(debugmsg, bad) if ok: - debug_stop('jit-log-virtualstate') + 
#debug_stop('jit-log-virtualstate') values = [self.getvalue(arg) for arg in jumpop.getarglist()] @@ -574,13 +575,13 @@ newop = inliner.inline_op(shop) self.optimizer.send_extra_operation(newop) except InvalidLoop: - debug_print("Inlining failed unexpectedly", - "jumping to preamble instead") + #debug_print("Inlining failed unexpectedly", + # "jumping to preamble instead") assert cell_token.target_tokens[0].virtual_state is None jumpop.setdescr(cell_token.target_tokens[0]) self.optimizer.send_extra_operation(jumpop) return True - debug_stop('jit-log-virtualstate') + #debug_stop('jit-log-virtualstate') return False class ValueImporter(object): diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py --- a/pypy/jit/metainterp/optimizeopt/virtualstate.py +++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py @@ -681,13 +681,14 @@ self.synthetic[op] = True def debug_print(self, logops): - debug_start('jit-short-boxes') - for box, op in self.short_boxes.items(): - if op: - debug_print(logops.repr_of_arg(box) + ': ' + logops.repr_of_resop(op)) - else: - debug_print(logops.repr_of_arg(box) + ': None') - debug_stop('jit-short-boxes') + if 0: + debug_start('jit-short-boxes') + for box, op in self.short_boxes.items(): + if op: + debug_print(logops.repr_of_arg(box) + ': ' + logops.repr_of_resop(op)) + else: + debug_print(logops.repr_of_arg(box) + ': None') + debug_stop('jit-short-boxes') def operations(self): if not we_are_translated(): # For tests diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -2064,11 +2064,12 @@ pass # XXX we want to do something special in resume descr, # but not now elif opnum == rop.GUARD_NO_OVERFLOW: # an overflow now detected - self.execute_raised(OverflowError(), constant=True) - try: - self.finishframe_exception() - except ChangeFrame: - pass + if not dont_change_position: + 
self.execute_raised(OverflowError(), constant=True) + try: + self.finishframe_exception() + except ChangeFrame: + pass elif opnum == rop.GUARD_OVERFLOW: # no longer overflowing self.clear_exception() else: @@ -2349,7 +2350,7 @@ # warmstate.py. virtualizable_box = self.virtualizable_boxes[-1] virtualizable = vinfo.unwrap_virtualizable_box(virtualizable_box) - assert not vinfo.gettoken(virtualizable) + assert not vinfo.is_token_nonnull_gcref(virtualizable) # fill the virtualizable with the local boxes self.synchronize_virtualizable() # diff --git a/pypy/jit/metainterp/resume.py b/pypy/jit/metainterp/resume.py --- a/pypy/jit/metainterp/resume.py +++ b/pypy/jit/metainterp/resume.py @@ -182,23 +182,22 @@ # env numbering - def number(self, values, snapshot): + def number(self, optimizer, snapshot): if snapshot is None: return lltype.nullptr(NUMBERING), {}, 0 if snapshot in self.numberings: numb, liveboxes, v = self.numberings[snapshot] return numb, liveboxes.copy(), v - numb1, liveboxes, v = self.number(values, snapshot.prev) + numb1, liveboxes, v = self.number(optimizer, snapshot.prev) n = len(liveboxes)-v boxes = snapshot.boxes length = len(boxes) numb = lltype.malloc(NUMBERING, length) for i in range(length): box = boxes[i] - value = values.get(box, None) - if value is not None: - box = value.get_key_box() + value = optimizer.getvalue(box) + box = value.get_key_box() if isinstance(box, Const): tagged = self.getconst(box) @@ -318,14 +317,14 @@ _, tagbits = untag(tagged) return tagbits == TAGVIRTUAL - def finish(self, values, pending_setfields=[]): + def finish(self, optimizer, pending_setfields=[]): # compute the numbering storage = self.storage # make sure that nobody attached resume data to this guard yet assert not storage.rd_numb snapshot = storage.rd_snapshot assert snapshot is not None # is that true? 
- numb, liveboxes_from_env, v = self.memo.number(values, snapshot) + numb, liveboxes_from_env, v = self.memo.number(optimizer, snapshot) self.liveboxes_from_env = liveboxes_from_env self.liveboxes = {} storage.rd_numb = numb @@ -341,23 +340,23 @@ liveboxes[i] = box else: assert tagbits == TAGVIRTUAL - value = values[box] + value = optimizer.getvalue(box) value.get_args_for_fail(self) for _, box, fieldbox, _ in pending_setfields: self.register_box(box) self.register_box(fieldbox) - value = values[fieldbox] + value = optimizer.getvalue(fieldbox) value.get_args_for_fail(self) - self._number_virtuals(liveboxes, values, v) + self._number_virtuals(liveboxes, optimizer, v) self._add_pending_fields(pending_setfields) storage.rd_consts = self.memo.consts dump_storage(storage, liveboxes) return liveboxes[:] - def _number_virtuals(self, liveboxes, values, num_env_virtuals): + def _number_virtuals(self, liveboxes, optimizer, num_env_virtuals): # !! 'liveboxes' is a list that is extend()ed in-place !! memo = self.memo new_liveboxes = [None] * memo.num_cached_boxes() @@ -397,7 +396,7 @@ memo.nvholes += length - len(vfieldboxes) for virtualbox, fieldboxes in vfieldboxes.iteritems(): num, _ = untag(self.liveboxes[virtualbox]) - value = values[virtualbox] + value = optimizer.getvalue(virtualbox) fieldnums = [self._gettagged(box) for box in fieldboxes] vinfo = value.make_virtual_info(self, fieldnums) @@ -1102,14 +1101,14 @@ virtualizable = self.decode_ref(numb.nums[index]) if self.resume_after_guard_not_forced == 1: # in the middle of handle_async_forcing() - assert vinfo.gettoken(virtualizable) - vinfo.settoken(virtualizable, vinfo.TOKEN_NONE) + assert vinfo.is_token_nonnull_gcref(virtualizable) + vinfo.reset_token_gcref(virtualizable) else: # just jumped away from assembler (case 4 in the comment in # virtualizable.py) into tracing (case 2); check that vable_token # is and stays 0. Note the call to reset_vable_token() in # warmstate.py. 
- assert not vinfo.gettoken(virtualizable) + assert not vinfo.is_token_nonnull_gcref(virtualizable) return vinfo.write_from_resume_data_partial(virtualizable, self, numb) def load_value_of_type(self, TYPE, tagged): diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -144,7 +144,7 @@ 'int_mul': 1, 'guard_true': 2, 'int_sub': 2}) - def test_loop_invariant_mul_ovf(self): + def test_loop_invariant_mul_ovf1(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) def f(x, y): res = 0 @@ -235,6 +235,65 @@ 'guard_true': 4, 'int_sub': 4, 'jump': 3, 'int_mul': 3, 'int_add': 4}) + def test_loop_invariant_mul_ovf2(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) + def f(x, y): + res = 0 + while y > 0: + myjitdriver.can_enter_jit(x=x, y=y, res=res) + myjitdriver.jit_merge_point(x=x, y=y, res=res) + b = y * 2 + try: + res += ovfcheck(x * x) + b + except OverflowError: + res += 1 + y -= 1 + return res + res = self.meta_interp(f, [sys.maxint, 7]) + assert res == f(sys.maxint, 7) + self.check_trace_count(1) + res = self.meta_interp(f, [6, 7]) + assert res == 308 + + def test_loop_invariant_mul_bridge_ovf1(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x1', 'x2']) + def f(x1, x2, y): + res = 0 + while y > 0: + myjitdriver.can_enter_jit(x1=x1, x2=x2, y=y, res=res) + myjitdriver.jit_merge_point(x1=x1, x2=x2, y=y, res=res) + try: + res += ovfcheck(x1 * x1) + except OverflowError: + res += 1 + if y<32 and (y>>2)&1==0: + x1, x2 = x2, x1 + y -= 1 + return res + res = self.meta_interp(f, [6, sys.maxint, 48]) + assert res == f(6, sys.maxint, 48) + + def test_loop_invariant_mul_bridge_ovf2(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x1', 'x2', 'n']) + def f(x1, x2, n, y): + res = 0 + while y > 0: + myjitdriver.can_enter_jit(x1=x1, x2=x2, y=y, res=res, n=n) + 
myjitdriver.jit_merge_point(x1=x1, x2=x2, y=y, res=res, n=n) + try: + res += ovfcheck(x1 * x1) + except OverflowError: + res += 1 + y -= 1 + if y&4 == 0: + x1, x2 = x2, x1 + return res + res = self.meta_interp(f, [6, sys.maxint, 32, 48]) + assert res == f(6, sys.maxint, 32, 48) + res = self.meta_interp(f, [sys.maxint, 6, 32, 48]) + assert res == f(sys.maxint, 6, 32, 48) + + def test_loop_invariant_intbox(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) class I: @@ -2943,11 +3002,18 @@ self.check_resops(arraylen_gc=3) def test_ulonglong_mod(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'sa', 'i']) + myjitdriver = JitDriver(greens = [], reds = ['n', 'a']) + class A: + pass def f(n): sa = i = rffi.cast(rffi.ULONGLONG, 1) + a = A() while i < rffi.cast(rffi.ULONGLONG, n): - myjitdriver.jit_merge_point(sa=sa, n=n, i=i) + a.sa = sa + a.i = i + myjitdriver.jit_merge_point(n=n, a=a) + sa = a.sa + i = a.i sa += sa % i i += 1 res = self.meta_interp(f, [32]) diff --git a/pypy/jit/metainterp/test/test_compile.py b/pypy/jit/metainterp/test/test_compile.py --- a/pypy/jit/metainterp/test/test_compile.py +++ b/pypy/jit/metainterp/test/test_compile.py @@ -14,7 +14,7 @@ ts = typesystem.llhelper def __init__(self): self.seen = [] - def compile_loop(self, inputargs, operations, token, name=''): + def compile_loop(self, inputargs, operations, token, log=True, name=''): self.seen.append((inputargs, operations, token)) class FakeLogger(object): diff --git a/pypy/jit/metainterp/test/test_quasiimmut.py b/pypy/jit/metainterp/test/test_quasiimmut.py --- a/pypy/jit/metainterp/test/test_quasiimmut.py +++ b/pypy/jit/metainterp/test/test_quasiimmut.py @@ -8,7 +8,7 @@ from pypy.jit.metainterp.quasiimmut import get_current_qmut_instance from pypy.jit.metainterp.test.support import LLJitMixin from pypy.jit.codewriter.policy import StopAtXPolicy -from pypy.rlib.jit import JitDriver, dont_look_inside +from pypy.rlib.jit import JitDriver, dont_look_inside, unroll_safe 
def test_get_current_qmut_instance(): @@ -480,6 +480,32 @@ assert res == 1 self.check_jitcell_token_count(2) + def test_for_loop_array(self): + myjitdriver = JitDriver(greens=[], reds=["n", "i"]) + class Foo(object): + _immutable_fields_ = ["x?[*]"] + def __init__(self, x): + self.x = x + f = Foo([1, 3, 5, 6]) + @unroll_safe + def g(v): + for x in f.x: + if x & 1 == 0: + v += 1 + return v + def main(n): + i = 0 + while i < n: + myjitdriver.jit_merge_point(n=n, i=i) + i = g(i) + return i + res = self.meta_interp(main, [10]) + assert res == 10 + self.check_resops({ + "int_add": 2, "int_lt": 2, "jump": 1, "guard_true": 2, + "guard_not_invalidated": 2 + }) + class TestLLtypeGreenFieldsTests(QuasiImmutTests, LLJitMixin): pass diff --git a/pypy/jit/metainterp/test/test_resume.py b/pypy/jit/metainterp/test/test_resume.py --- a/pypy/jit/metainterp/test/test_resume.py +++ b/pypy/jit/metainterp/test/test_resume.py @@ -18,6 +18,19 @@ rd_virtuals = None rd_pendingfields = None + +class FakeOptimizer(object): + def __init__(self, values): + self.values = values + + def getvalue(self, box): + try: + value = self.values[box] + except KeyError: + value = self.values[box] = OptValue(box) + return value + + def test_tag(): assert tag(3, 1) == rffi.r_short(3<<2|1) assert tag(-3, 2) == rffi.r_short(-3<<2|2) @@ -500,7 +513,7 @@ capture_resumedata(fs, None, [], storage) memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) modifier = ResumeDataVirtualAdder(storage, memo) - liveboxes = modifier.finish({}) + liveboxes = modifier.finish(FakeOptimizer({})) metainterp = MyMetaInterp() b1t, b2t, b3t = [BoxInt(), BoxPtr(), BoxInt()] @@ -524,7 +537,7 @@ capture_resumedata(fs, [b4], [], storage) memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) modifier = ResumeDataVirtualAdder(storage, memo) - liveboxes = modifier.finish({}) + liveboxes = modifier.finish(FakeOptimizer({})) metainterp = MyMetaInterp() b1t, b2t, b3t, b4t = [BoxInt(), BoxPtr(), BoxInt(), BoxPtr()] @@ -553,10 +566,10 @@ memo = 
ResumeDataLoopMemo(FakeMetaInterpStaticData()) modifier = ResumeDataVirtualAdder(storage, memo) - liveboxes = modifier.finish({}) + liveboxes = modifier.finish(FakeOptimizer({})) modifier = ResumeDataVirtualAdder(storage2, memo) - liveboxes2 = modifier.finish({}) + liveboxes2 = modifier.finish(FakeOptimizer({})) metainterp = MyMetaInterp() @@ -617,7 +630,7 @@ memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) values = {b2: virtual_value(b2, b5, c4)} modifier = ResumeDataVirtualAdder(storage, memo) - liveboxes = modifier.finish(values) + liveboxes = modifier.finish(FakeOptimizer(values)) assert len(storage.rd_virtuals) == 1 assert storage.rd_virtuals[0].fieldnums == [tag(-1, TAGBOX), tag(0, TAGCONST)] @@ -628,7 +641,7 @@ values = {b2: virtual_value(b2, b4, v6), b6: v6} memo.clear_box_virtual_numbers() modifier = ResumeDataVirtualAdder(storage2, memo) - liveboxes2 = modifier.finish(values) + liveboxes2 = modifier.finish(FakeOptimizer(values)) assert len(storage2.rd_virtuals) == 2 assert storage2.rd_virtuals[0].fieldnums == [tag(len(liveboxes2)-1, TAGBOX), tag(-1, TAGVIRTUAL)] @@ -674,7 +687,7 @@ memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) values = {b2: virtual_value(b2, b5, c4)} modifier = ResumeDataVirtualAdder(storage, memo) - liveboxes = modifier.finish(values) + liveboxes = modifier.finish(FakeOptimizer(values)) assert len(storage.rd_virtuals) == 1 assert storage.rd_virtuals[0].fieldnums == [tag(-1, TAGBOX), tag(0, TAGCONST)] @@ -684,7 +697,7 @@ capture_resumedata(fs, None, [], storage2) values[b4] = virtual_value(b4, b6, c4) modifier = ResumeDataVirtualAdder(storage2, memo) - liveboxes = modifier.finish(values) + liveboxes = modifier.finish(FakeOptimizer(values)) assert len(storage2.rd_virtuals) == 2 assert storage2.rd_virtuals[1].fieldnums == storage.rd_virtuals[0].fieldnums assert storage2.rd_virtuals[1] is storage.rd_virtuals[0] @@ -703,7 +716,7 @@ v1.setfield(LLtypeMixin.nextdescr, v2) values = {b1: v1, b2: v2} modifier = 
ResumeDataVirtualAdder(storage, memo) - liveboxes = modifier.finish(values) + liveboxes = modifier.finish(FakeOptimizer(values)) assert liveboxes == [b3] assert len(storage.rd_virtuals) == 2 assert storage.rd_virtuals[0].fieldnums == [tag(-1, TAGBOX), @@ -776,7 +789,7 @@ memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) - numb, liveboxes, v = memo.number({}, snap1) + numb, liveboxes, v = memo.number(FakeOptimizer({}), snap1) assert v == 0 assert liveboxes == {b1: tag(0, TAGBOX), b2: tag(1, TAGBOX), @@ -788,7 +801,7 @@ tag(0, TAGBOX), tag(2, TAGINT)] assert not numb.prev.prev - numb2, liveboxes2, v = memo.number({}, snap2) + numb2, liveboxes2, v = memo.number(FakeOptimizer({}), snap2) assert v == 0 assert liveboxes2 == {b1: tag(0, TAGBOX), b2: tag(1, TAGBOX), @@ -813,7 +826,8 @@ return self.virt # renamed - numb3, liveboxes3, v = memo.number({b3: FakeValue(False, c4)}, snap3) + numb3, liveboxes3, v = memo.number(FakeOptimizer({b3: FakeValue(False, c4)}), + snap3) assert v == 0 assert liveboxes3 == {b1: tag(0, TAGBOX), b2: tag(1, TAGBOX)} @@ -825,7 +839,8 @@ env4 = [c3, b4, b1, c3] snap4 = Snapshot(snap, env4) - numb4, liveboxes4, v = memo.number({b4: FakeValue(True, b4)}, snap4) + numb4, liveboxes4, v = memo.number(FakeOptimizer({b4: FakeValue(True, b4)}), + snap4) assert v == 1 assert liveboxes4 == {b1: tag(0, TAGBOX), b2: tag(1, TAGBOX), @@ -837,8 +852,9 @@ env5 = [b1, b4, b5] snap5 = Snapshot(snap4, env5) - numb5, liveboxes5, v = memo.number({b4: FakeValue(True, b4), - b5: FakeValue(True, b5)}, snap5) + numb5, liveboxes5, v = memo.number(FakeOptimizer({b4: FakeValue(True, b4), + b5: FakeValue(True, b5)}), + snap5) assert v == 2 assert liveboxes5 == {b1: tag(0, TAGBOX), b2: tag(1, TAGBOX), @@ -940,7 +956,7 @@ storage = make_storage(b1s, b2s, b3s) memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) modifier = ResumeDataVirtualAdder(storage, memo) - liveboxes = modifier.finish({}) + liveboxes = modifier.finish(FakeOptimizer({})) assert storage.rd_snapshot is 
None cpu = MyCPU([]) reader = ResumeDataDirectReader(MyMetaInterp(cpu), storage) @@ -954,14 +970,14 @@ storage = make_storage(b1s, b2s, b3s) memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) modifier = ResumeDataVirtualAdder(storage, memo) - modifier.finish({}) + modifier.finish(FakeOptimizer({})) assert len(memo.consts) == 2 assert storage.rd_consts is memo.consts b1s, b2s, b3s = [ConstInt(sys.maxint), ConstInt(2**17), ConstInt(-65)] storage2 = make_storage(b1s, b2s, b3s) modifier2 = ResumeDataVirtualAdder(storage2, memo) - modifier2.finish({}) + modifier2.finish(FakeOptimizer({})) assert len(memo.consts) == 3 assert storage2.rd_consts is memo.consts @@ -1022,7 +1038,7 @@ val = FakeValue() values = {b1s: val, b2s: val} - liveboxes = modifier.finish(values) + liveboxes = modifier.finish(FakeOptimizer(values)) assert storage.rd_snapshot is None b1t, b3t = [BoxInt(11), BoxInt(33)] newboxes = _resume_remap(liveboxes, [b1_2, b3s], b1t, b3t) @@ -1043,7 +1059,7 @@ storage = make_storage(b1s, b2s, b3s) memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) modifier = ResumeDataVirtualAdder(storage, memo) - liveboxes = modifier.finish({}) + liveboxes = modifier.finish(FakeOptimizer({})) b2t, b3t = [BoxPtr(demo55o), BoxInt(33)] newboxes = _resume_remap(liveboxes, [b2s, b3s], b2t, b3t) metainterp = MyMetaInterp() @@ -1086,7 +1102,7 @@ values = {b2s: v2, b4s: v4} liveboxes = [] - modifier._number_virtuals(liveboxes, values, 0) + modifier._number_virtuals(liveboxes, FakeOptimizer(values), 0) storage.rd_consts = memo.consts[:] storage.rd_numb = None # resume @@ -1156,7 +1172,7 @@ modifier.register_virtual_fields(b2s, [b4s, c1s]) liveboxes = [] values = {b2s: v2} - modifier._number_virtuals(liveboxes, values, 0) + modifier._number_virtuals(liveboxes, FakeOptimizer(values), 0) dump_storage(storage, liveboxes) storage.rd_consts = memo.consts[:] storage.rd_numb = None @@ -1203,7 +1219,7 @@ v2.setfield(LLtypeMixin.bdescr, OptValue(b4s)) modifier.register_virtual_fields(b2s, [c1s, 
b4s]) liveboxes = [] - modifier._number_virtuals(liveboxes, {b2s: v2}, 0) + modifier._number_virtuals(liveboxes, FakeOptimizer({b2s: v2}), 0) dump_storage(storage, liveboxes) storage.rd_consts = memo.consts[:] storage.rd_numb = None @@ -1249,7 +1265,7 @@ values = {b4s: v4, b2s: v2} liveboxes = [] - modifier._number_virtuals(liveboxes, values, 0) + modifier._number_virtuals(liveboxes, FakeOptimizer(values), 0) assert liveboxes == [b2s, b4s] or liveboxes == [b4s, b2s] modifier._add_pending_fields([(LLtypeMixin.nextdescr, b2s, b4s, -1)]) storage.rd_consts = memo.consts[:] diff --git a/pypy/jit/metainterp/virtualizable.py b/pypy/jit/metainterp/virtualizable.py --- a/pypy/jit/metainterp/virtualizable.py +++ b/pypy/jit/metainterp/virtualizable.py @@ -262,15 +262,15 @@ force_now._dont_inline_ = True self.force_now = force_now - def gettoken(virtualizable): + def is_token_nonnull_gcref(virtualizable): virtualizable = cast_gcref_to_vtype(virtualizable) - return virtualizable.vable_token - self.gettoken = gettoken + return bool(virtualizable.vable_token) + self.is_token_nonnull_gcref = is_token_nonnull_gcref - def settoken(virtualizable, token): + def reset_token_gcref(virtualizable): virtualizable = cast_gcref_to_vtype(virtualizable) - virtualizable.vable_token = token - self.settoken = settoken + virtualizable.vable_token = VirtualizableInfo.TOKEN_NONE + self.reset_token_gcref = reset_token_gcref def _freeze_(self): return True diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -100,7 +100,7 @@ if not kwds.get('translate_support_code', False): warmrunnerdesc.metainterp_sd.profiler.finish() warmrunnerdesc.metainterp_sd.cpu.finish_once() - print '~~~ return value:', res + print '~~~ return value:', repr(res) while repeat > 1: print '~' * 79 res1 = interp.eval_graph(graph, args) @@ -453,7 +453,7 @@ if sys.stdout == sys.__stdout__: import pdb; pdb.post_mortem(tb) raise 
e.__class__, e, tb - fatalerror('~~~ Crash in JIT! %s' % (e,), traceback=True) + fatalerror('~~~ Crash in JIT! %s' % (e,)) crash_in_jit._dont_inline_ = True if self.translator.rtyper.type_system.name == 'lltypesystem': diff --git a/pypy/jit/tl/tinyframe/tinyframe.py b/pypy/jit/tl/tinyframe/tinyframe.py --- a/pypy/jit/tl/tinyframe/tinyframe.py +++ b/pypy/jit/tl/tinyframe/tinyframe.py @@ -210,7 +210,7 @@ def repr(self): return "" % (self.outer.repr(), self.inner.repr()) -driver = JitDriver(greens = ['code', 'i'], reds = ['self'], +driver = JitDriver(greens = ['i', 'code'], reds = ['self'], virtualizables = ['self']) class Frame(object): diff --git a/pypy/module/_demo/test/test_sieve.py b/pypy/module/_demo/test/test_sieve.py new file mode 100644 --- /dev/null +++ b/pypy/module/_demo/test/test_sieve.py @@ -0,0 +1,12 @@ +from pypy.conftest import gettestobjspace + + +class AppTestSieve: + def setup_class(cls): + cls.space = gettestobjspace(usemodules=('_demo',)) + + def test_sieve(self): + import _demo + lst = _demo.sieve(100) + assert lst == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, + 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97] diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -5,14 +5,13 @@ from pypy.rlib import streamio from pypy.rlib.rarithmetic import r_longlong from pypy.rlib.rstring import StringBuilder -from pypy.module._file.interp_stream import (W_AbstractStream, StreamErrors, - wrap_streamerror, wrap_oserror_as_ioerror) +from pypy.module._file.interp_stream import W_AbstractStream, StreamErrors from pypy.module.posix.interp_posix import dispatch_filename from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.typedef import (TypeDef, GetSetProperty, interp_attrproperty, make_weakref_descr, interp_attrproperty_w) from pypy.interpreter.gateway import interp2app, unwrap_spec - +from 
pypy.interpreter.streamutil import wrap_streamerror, wrap_oserror_as_ioerror class W_File(W_AbstractStream): """An interp-level file object. This implements the same interface than diff --git a/pypy/module/_file/interp_stream.py b/pypy/module/_file/interp_stream.py --- a/pypy/module/_file/interp_stream.py +++ b/pypy/module/_file/interp_stream.py @@ -2,27 +2,13 @@ from pypy.rlib import streamio from pypy.rlib.streamio import StreamErrors -from pypy.interpreter.error import OperationError, wrap_oserror2 +from pypy.interpreter.error import OperationError from pypy.interpreter.baseobjspace import ObjSpace, Wrappable from pypy.interpreter.typedef import TypeDef from pypy.interpreter.gateway import interp2app +from pypy.interpreter.streamutil import wrap_streamerror, wrap_oserror_as_ioerror -def wrap_streamerror(space, e, w_filename=None): - if isinstance(e, streamio.StreamError): - return OperationError(space.w_ValueError, - space.wrap(e.message)) - elif isinstance(e, OSError): - return wrap_oserror_as_ioerror(space, e, w_filename) - else: - # should not happen: wrap_streamerror() is only called when - # StreamErrors = (OSError, StreamError) are raised - return OperationError(space.w_IOError, space.w_None) - -def wrap_oserror_as_ioerror(space, e, w_filename=None): - return wrap_oserror2(space, e, w_filename, - w_exception_class=space.w_IOError) - class W_AbstractStream(Wrappable): """Base class for interp-level objects that expose streams to app-level""" slock = None diff --git a/pypy/module/_io/__init__.py b/pypy/module/_io/__init__.py --- a/pypy/module/_io/__init__.py +++ b/pypy/module/_io/__init__.py @@ -28,6 +28,7 @@ } def init(self, space): + MixedModule.init(self, space) w_UnsupportedOperation = space.call_function( space.w_type, space.wrap('UnsupportedOperation'), @@ -35,3 +36,9 @@ space.newdict()) space.setattr(self, space.wrap('UnsupportedOperation'), w_UnsupportedOperation) + + def shutdown(self, space): + # at shutdown, flush all open streams. 
Ignore I/O errors. + from pypy.module._io.interp_iobase import get_autoflushher + get_autoflushher(space).flush_all(space) + diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py --- a/pypy/module/_io/interp_iobase.py +++ b/pypy/module/_io/interp_iobase.py @@ -5,6 +5,8 @@ from pypy.interpreter.gateway import interp2app from pypy.interpreter.error import OperationError, operationerrfmt from pypy.rlib.rstring import StringBuilder +from pypy.rlib import rweakref + DEFAULT_BUFFER_SIZE = 8192 @@ -43,6 +45,8 @@ self.space = space self.w_dict = space.newdict() self.__IOBase_closed = False + self.streamholder = None # needed by AutoFlusher + get_autoflushher(space).add(self) def getdict(self, space): return self.w_dict @@ -98,6 +102,7 @@ space.call_method(self, "flush") finally: self.__IOBase_closed = True + get_autoflushher(space).remove(self) def flush_w(self, space): if self._CLOSED(): @@ -303,3 +308,60 @@ read = interp2app(W_RawIOBase.read_w), readall = interp2app(W_RawIOBase.readall_w), ) + + +# ------------------------------------------------------------ +# functions to make sure that all streams are flushed on exit +# ------------------------------------------------------------ + +class StreamHolder(object): + + def __init__(self, w_iobase): + self.w_iobase_ref = rweakref.ref(w_iobase) + w_iobase.autoflusher = self + + def autoflush(self, space): + w_iobase = self.w_iobase_ref() + if w_iobase is not None: + try: + space.call_method(w_iobase, 'flush') + except OperationError, e: + # if it's an IOError or ValueError, ignore it (ValueError is + # raised if by chance we are trying to flush a file which has + # already been closed) + if not (e.match(space, space.w_IOError) or + e.match(space, space.w_ValueError)): + raise + + +class AutoFlusher(object): + + def __init__(self, space): + self.streams = {} + + def add(self, w_iobase): + assert w_iobase.streamholder is None + holder = StreamHolder(w_iobase) + w_iobase.streamholder = holder + 
self.streams[holder] = None + + def remove(self, w_iobase): + holder = w_iobase.streamholder + if holder is not None: + del self.streams[holder] + + def flush_all(self, space): + while self.streams: + for streamholder in self.streams.keys(): + try: + del self.streams[streamholder] + except KeyError: + pass # key was removed in the meantime + else: + streamholder.autoflush(space) + + +def get_autoflushher(space): + return space.fromcache(AutoFlusher) + + diff --git a/pypy/module/_io/test/test_fileio.py b/pypy/module/_io/test/test_fileio.py --- a/pypy/module/_io/test/test_fileio.py +++ b/pypy/module/_io/test/test_fileio.py @@ -160,3 +160,42 @@ f.close() assert repr(f) == "<_io.FileIO [closed]>" +def test_flush_at_exit(): + from pypy import conftest + from pypy.tool.option import make_config, make_objspace + from pypy.tool.udir import udir + + tmpfile = udir.join('test_flush_at_exit') + config = make_config(conftest.option) + space = make_objspace(config) + space.appexec([space.wrap(str(tmpfile))], """(tmpfile): + import io + f = io.open(tmpfile, 'w', encoding='ascii') + f.write('42') + # no flush() and no close() + import sys; sys._keepalivesomewhereobscure = f + """) + space.finish() + assert tmpfile.read() == '42' + +def test_flush_at_exit_IOError_and_ValueError(): + from pypy import conftest + from pypy.tool.option import make_config, make_objspace + + config = make_config(conftest.option) + space = make_objspace(config) + space.appexec([], """(): + import io + class MyStream(io.IOBase): + def flush(self): + raise IOError + + class MyStream2(io.IOBase): + def flush(self): + raise ValueError + + s = MyStream() + s2 = MyStream2() + import sys; sys._keepalivesomewhereobscure = s + """) + space.finish() # the IOError has been ignored diff --git a/pypy/module/_md5/test/test_md5.py b/pypy/module/_md5/test/test_md5.py --- a/pypy/module/_md5/test/test_md5.py +++ b/pypy/module/_md5/test/test_md5.py @@ -28,7 +28,7 @@ assert self.md5.digest_size == 16 #assert 
self.md5.digestsize == 16 -- not on CPython assert self.md5.md5().digest_size == 16 - if sys.version >= (2, 5): + if sys.version_info >= (2, 5): assert self.md5.blocksize == 1 assert self.md5.md5().digestsize == 16 diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -11,6 +11,7 @@ from pypy.objspace.std.register_all import register_all from pypy.rlib.rarithmetic import ovfcheck from pypy.rlib.unroll import unrolling_iterable +from pypy.rlib.objectmodel import specialize from pypy.rpython.lltypesystem import lltype, rffi @@ -159,13 +160,15 @@ def make_array(mytype): + W_ArrayBase = globals()['W_ArrayBase'] + class W_Array(W_ArrayBase): itemsize = mytype.bytes typecode = mytype.typecode @staticmethod def register(typeorder): - typeorder[W_Array] = [] + typeorder[W_Array] = [(W_ArrayBase, None)] def __init__(self, space): self.space = space @@ -583,13 +586,29 @@ raise OperationError(space.w_ValueError, space.wrap(msg)) # Compare methods - def cmp__Array_ANY(space, self, other): - if isinstance(other, W_ArrayBase): - w_lst1 = array_tolist__Array(space, self) - w_lst2 = space.call_method(other, 'tolist') - return space.cmp(w_lst1, w_lst2) - else: - return space.w_NotImplemented + @specialize.arg(3) + def _cmp_impl(space, self, other, space_fn): + w_lst1 = array_tolist__Array(space, self) + w_lst2 = space.call_method(other, 'tolist') + return space_fn(w_lst1, w_lst2) + + def eq__Array_ArrayBase(space, self, other): + return _cmp_impl(space, self, other, space.eq) + + def ne__Array_ArrayBase(space, self, other): + return _cmp_impl(space, self, other, space.ne) + + def lt__Array_ArrayBase(space, self, other): + return _cmp_impl(space, self, other, space.lt) + + def le__Array_ArrayBase(space, self, other): + return _cmp_impl(space, self, other, space.le) + + def gt__Array_ArrayBase(space, self, other): + return _cmp_impl(space, self, other, space.gt) + + def 
ge__Array_ArrayBase(space, self, other): + return _cmp_impl(space, self, other, space.ge) # Misc methods diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -536,12 +536,6 @@ assert (a >= c) is False assert (c >= a) is True - assert cmp(a, a) == 0 - assert cmp(a, b) == 0 - assert cmp(a, c) < 0 - assert cmp(b, a) == 0 - assert cmp(c, a) > 0 - def test_reduce(self): import pickle a = self.array('i', [1, 2, 3]) @@ -851,8 +845,11 @@ cls.maxint = sys.maxint class AppTestArray(BaseArrayTests): + OPTIONS = {} + def setup_class(cls): - cls.space = gettestobjspace(usemodules=('array', 'struct', '_rawffi')) + cls.space = gettestobjspace(usemodules=('array', 'struct', '_rawffi'), + **cls.OPTIONS) cls.w_array = cls.space.appexec([], """(): import array return array.array @@ -874,3 +871,7 @@ a = self.array('b', range(4)) a[::-1] = a assert a == self.array('b', [3, 2, 1, 0]) + + +class AppTestArrayBuiltinShortcut(AppTestArray): + OPTIONS = {'objspace.std.builtinshortcut': True} diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -384,6 +384,8 @@ "Dict": "space.w_dict", "Tuple": "space.w_tuple", "List": "space.w_list", + "Set": "space.w_set", + "FrozenSet": "space.w_frozenset", "Int": "space.w_int", "Bool": "space.w_bool", "Float": "space.w_float", @@ -405,7 +407,7 @@ }.items(): GLOBALS['Py%s_Type#' % (cpyname, )] = ('PyTypeObject*', pypyexpr) - for cpyname in 'Method List Int Long Dict Tuple Class'.split(): + for cpyname in 'Method List Long Dict Tuple Class'.split(): FORWARD_DECLS.append('typedef struct { PyObject_HEAD } ' 'Py%sObject' % (cpyname, )) build_exported_objects() @@ -434,16 +436,16 @@ ('buf', rffi.VOIDP), ('obj', PyObject), ('len', Py_ssize_t), - # ('itemsize', Py_ssize_t), + ('itemsize', Py_ssize_t), - # ('readonly', lltype.Signed), - # ('ndim', lltype.Signed), 
- # ('format', rffi.CCHARP), - # ('shape', Py_ssize_tP), - # ('strides', Py_ssize_tP), - # ('suboffets', Py_ssize_tP), - # ('smalltable', rffi.CFixedArray(Py_ssize_t, 2)), - # ('internal', rffi.VOIDP) + ('readonly', lltype.Signed), + ('ndim', lltype.Signed), + ('format', rffi.CCHARP), + ('shape', Py_ssize_tP), + ('strides', Py_ssize_tP), + ('suboffsets', Py_ssize_tP), + #('smalltable', rffi.CFixedArray(Py_ssize_t, 2)), + ('internal', rffi.VOIDP) )) @specialize.memo() diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py --- a/pypy/module/cpyext/dictobject.py +++ b/pypy/module/cpyext/dictobject.py @@ -6,6 +6,7 @@ from pypy.module.cpyext.pyobject import RefcountState from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall from pypy.interpreter.error import OperationError +from pypy.rlib.objectmodel import specialize @cpython_api([], PyObject) def PyDict_New(space): @@ -183,11 +184,34 @@ w_item = space.call_method(w_iter, "next") w_key, w_value = space.fixedview(w_item, 2) state = space.fromcache(RefcountState) - pkey[0] = state.make_borrowed(w_dict, w_key) - pvalue[0] = state.make_borrowed(w_dict, w_value) + if pkey: + pkey[0] = state.make_borrowed(w_dict, w_key) + if pvalue: + pvalue[0] = state.make_borrowed(w_dict, w_value) ppos[0] += 1 except OperationError, e: if not e.match(space, space.w_StopIteration): raise return 0 return 1 + + at specialize.memo() +def make_frozendict(space): + return space.appexec([], '''(): + import collections + class FrozenDict(collections.Mapping): + def __init__(self, *args, **kwargs): + self._d = dict(*args, **kwargs) + def __iter__(self): + return iter(self._d) + def __len__(self): + return len(self._d) + def __getitem__(self, key): + return self._d[key] + return FrozenDict''') + + at cpython_api([PyObject], PyObject) +def PyDictProxy_New(space, w_dict): + w_frozendict = make_frozendict(space) + return space.call_function(w_frozendict, w_dict) + diff --git a/pypy/module/cpyext/eval.py 
b/pypy/module/cpyext/eval.py --- a/pypy/module/cpyext/eval.py +++ b/pypy/module/cpyext/eval.py @@ -1,16 +1,24 @@ from pypy.interpreter.error import OperationError +from pypy.interpreter.astcompiler import consts from pypy.rpython.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( cpython_api, CANNOT_FAIL, CONST_STRING, FILEP, fread, feof, Py_ssize_tP, cpython_struct) from pypy.module.cpyext.pyobject import PyObject, borrow_from from pypy.module.cpyext.pyerrors import PyErr_SetFromErrno +from pypy.module.cpyext.funcobject import PyCodeObject from pypy.module.__builtin__ import compiling PyCompilerFlags = cpython_struct( - "PyCompilerFlags", ()) + "PyCompilerFlags", (("cf_flags", rffi.INT),)) PyCompilerFlagsPtr = lltype.Ptr(PyCompilerFlags) +PyCF_MASK = (consts.CO_FUTURE_DIVISION | + consts.CO_FUTURE_ABSOLUTE_IMPORT | + consts.CO_FUTURE_WITH_STATEMENT | + consts.CO_FUTURE_PRINT_FUNCTION | + consts.CO_FUTURE_UNICODE_LITERALS) + @cpython_api([PyObject, PyObject, PyObject], PyObject) def PyEval_CallObjectWithKeywords(space, w_obj, w_arg, w_kwds): return space.call(w_obj, w_arg, w_kwds) @@ -48,6 +56,17 @@ return None return borrow_from(None, caller.w_globals) + at cpython_api([PyCodeObject, PyObject, PyObject], PyObject) +def PyEval_EvalCode(space, w_code, w_globals, w_locals): + """This is a simplified interface to PyEval_EvalCodeEx(), with just + the code object, and the dictionaries of global and local variables. 
+ The other arguments are set to NULL.""" + if w_globals is None: + w_globals = space.w_None + if w_locals is None: + w_locals = space.w_None + return compiling.eval(space, w_code, w_globals, w_locals) + @cpython_api([PyObject, PyObject], PyObject) def PyObject_CallObject(space, w_obj, w_arg): """ @@ -74,7 +93,7 @@ Py_file_input = 257 Py_eval_input = 258 -def compile_string(space, source, filename, start): +def compile_string(space, source, filename, start, flags=0): w_source = space.wrap(source) start = rffi.cast(lltype.Signed, start) if start == Py_file_input: @@ -86,7 +105,7 @@ else: raise OperationError(space.w_ValueError, space.wrap( "invalid mode parameter for compilation")) - return compiling.compile(space, w_source, filename, mode) + return compiling.compile(space, w_source, filename, mode, flags) def run_string(space, source, filename, start, w_globals, w_locals): w_code = compile_string(space, source, filename, start) @@ -109,6 +128,24 @@ filename = "" return run_string(space, source, filename, start, w_globals, w_locals) + at cpython_api([rffi.CCHARP, rffi.INT_real, PyObject, PyObject, + PyCompilerFlagsPtr], PyObject) +def PyRun_StringFlags(space, source, start, w_globals, w_locals, flagsptr): + """Execute Python source code from str in the context specified by the + dictionaries globals and locals with the compiler flags specified by + flags. The parameter start specifies the start token that should be used to + parse the source code. 
+ + Returns the result of executing the code as a Python object, or NULL if an + exception was raised.""" + source = rffi.charp2str(source) + if flagsptr: + flags = rffi.cast(lltype.Signed, flagsptr.c_cf_flags) + else: + flags = 0 + w_code = compile_string(space, source, "", start, flags) + return compiling.eval(space, w_code, w_globals, w_locals) + @cpython_api([FILEP, CONST_STRING, rffi.INT_real, PyObject, PyObject], PyObject) def PyRun_File(space, fp, filename, start, w_globals, w_locals): """This is a simplified interface to PyRun_FileExFlags() below, leaving @@ -150,7 +187,7 @@ @cpython_api([rffi.CCHARP, rffi.CCHARP, rffi.INT_real, PyCompilerFlagsPtr], PyObject) -def Py_CompileStringFlags(space, source, filename, start, flags): +def Py_CompileStringFlags(space, source, filename, start, flagsptr): """Parse and compile the Python source code in str, returning the resulting code object. The start token is given by start; this can be used to constrain the code which can be compiled and should @@ -160,7 +197,30 @@ returns NULL if the code cannot be parsed or compiled.""" source = rffi.charp2str(source) filename = rffi.charp2str(filename) - if flags: - raise OperationError(space.w_NotImplementedError, space.wrap( - "cpyext Py_CompileStringFlags does not accept flags")) - return compile_string(space, source, filename, start) + if flagsptr: + flags = rffi.cast(lltype.Signed, flagsptr.c_cf_flags) + else: + flags = 0 + return compile_string(space, source, filename, start, flags) + + at cpython_api([PyCompilerFlagsPtr], rffi.INT_real, error=CANNOT_FAIL) +def PyEval_MergeCompilerFlags(space, cf): + """This function changes the flags of the current evaluation + frame, and returns true on success, false on failure.""" + flags = rffi.cast(lltype.Signed, cf.c_cf_flags) + result = flags != 0 + current_frame = space.getexecutioncontext().gettopframe_nohidden() + if current_frame: + codeflags = current_frame.pycode.co_flags + compilerflags = codeflags & PyCF_MASK + if 
compilerflags: + result = 1 + flags |= compilerflags + # No future keyword at the moment + # if codeflags & CO_GENERATOR_ALLOWED: + # result = 1 + # flags |= CO_GENERATOR_ALLOWED + cf.c_cf_flags = rffi.cast(rffi.INT, flags) + return result + + diff --git a/pypy/module/cpyext/funcobject.py b/pypy/module/cpyext/funcobject.py --- a/pypy/module/cpyext/funcobject.py +++ b/pypy/module/cpyext/funcobject.py @@ -1,6 +1,6 @@ from pypy.rpython.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( - PyObjectFields, generic_cpy_call, CONST_STRING, + PyObjectFields, generic_cpy_call, CONST_STRING, CANNOT_FAIL, cpython_api, bootstrap_function, cpython_struct, build_type_checkers) from pypy.module.cpyext.pyobject import ( PyObject, make_ref, from_ref, Py_DecRef, make_typedescr, borrow_from) @@ -48,6 +48,7 @@ PyFunction_Check, PyFunction_CheckExact = build_type_checkers("Function", Function) PyMethod_Check, PyMethod_CheckExact = build_type_checkers("Method", Method) +PyCode_Check, PyCode_CheckExact = build_type_checkers("Code", PyCode) def function_attach(space, py_obj, w_obj): py_func = rffi.cast(PyFunctionObject, py_obj) @@ -167,3 +168,9 @@ freevars=[], cellvars=[])) + at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) +def PyCode_GetNumFree(space, w_co): + """Return the number of free variables in co.""" + co = space.interp_w(PyCode, w_co) + return len(co.co_freevars) + diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h --- a/pypy/module/cpyext/include/Python.h +++ b/pypy/module/cpyext/include/Python.h @@ -113,6 +113,7 @@ #include "compile.h" #include "frameobject.h" #include "eval.h" +#include "pymath.h" #include "pymem.h" #include "pycobject.h" #include "pycapsule.h" diff --git a/pypy/module/cpyext/include/code.h b/pypy/module/cpyext/include/code.h --- a/pypy/module/cpyext/include/code.h +++ b/pypy/module/cpyext/include/code.h @@ -13,13 +13,19 @@ /* Masks for co_flags above */ /* These values are also in 
funcobject.py */ -#define CO_OPTIMIZED 0x0001 -#define CO_NEWLOCALS 0x0002 -#define CO_VARARGS 0x0004 -#define CO_VARKEYWORDS 0x0008 +#define CO_OPTIMIZED 0x0001 +#define CO_NEWLOCALS 0x0002 +#define CO_VARARGS 0x0004 +#define CO_VARKEYWORDS 0x0008 #define CO_NESTED 0x0010 #define CO_GENERATOR 0x0020 +#define CO_FUTURE_DIVISION 0x02000 +#define CO_FUTURE_ABSOLUTE_IMPORT 0x04000 +#define CO_FUTURE_WITH_STATEMENT 0x08000 +#define CO_FUTURE_PRINT_FUNCTION 0x10000 +#define CO_FUTURE_UNICODE_LITERALS 0x20000 + #ifdef __cplusplus } #endif diff --git a/pypy/module/cpyext/include/intobject.h b/pypy/module/cpyext/include/intobject.h --- a/pypy/module/cpyext/include/intobject.h +++ b/pypy/module/cpyext/include/intobject.h @@ -7,6 +7,11 @@ extern "C" { #endif +typedef struct { + PyObject_HEAD + long ob_ival; +} PyIntObject; + #ifdef __cplusplus } #endif diff --git a/pypy/module/cpyext/include/methodobject.h b/pypy/module/cpyext/include/methodobject.h --- a/pypy/module/cpyext/include/methodobject.h +++ b/pypy/module/cpyext/include/methodobject.h @@ -26,6 +26,7 @@ PyObject_HEAD PyMethodDef *m_ml; /* Description of the C function to call */ PyObject *m_self; /* Passed as 'self' arg to the C func, can be NULL */ + PyObject *m_module; /* The __module__ attribute, can be anything */ } PyCFunctionObject; /* Flag passed to newmethodobject */ diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h --- a/pypy/module/cpyext/include/object.h +++ b/pypy/module/cpyext/include/object.h @@ -56,6 +56,8 @@ #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) +#define _Py_ForgetReference(ob) /* nothing */ + #define Py_None (&_Py_NoneStruct) /* @@ -131,18 +133,18 @@ /* This is Py_ssize_t so it can be pointed to by strides in simple case.*/ - /* Py_ssize_t itemsize; */ - /* int readonly; */ - /* int ndim; */ - /* char *format; */ - /* Py_ssize_t *shape; */ - /* Py_ssize_t *strides; */ - /* Py_ssize_t *suboffsets; */ + 
Py_ssize_t itemsize; + int readonly; + int ndim; + char *format; + Py_ssize_t *shape; + Py_ssize_t *strides; + Py_ssize_t *suboffsets; /* static store for shape and strides of mono-dimensional buffers. */ /* Py_ssize_t smalltable[2]; */ - /* void *internal; */ + void *internal; } Py_buffer; diff --git a/pypy/module/cpyext/include/pymath.h b/pypy/module/cpyext/include/pymath.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/pymath.h @@ -0,0 +1,20 @@ +#ifndef Py_PYMATH_H +#define Py_PYMATH_H + +/************************************************************************** +Symbols and macros to supply platform-independent interfaces to mathematical +functions and constants +**************************************************************************/ + +/* HUGE_VAL is supposed to expand to a positive double infinity. Python + * uses Py_HUGE_VAL instead because some platforms are broken in this + * respect. We used to embed code in pyport.h to try to worm around that, + * but different platforms are broken in conflicting ways. If you're on + * a platform where HUGE_VAL is defined incorrectly, fiddle your Python + * config to #define Py_HUGE_VAL to something that works on your platform. 
+ */ +#ifndef Py_HUGE_VAL +#define Py_HUGE_VAL HUGE_VAL +#endif + +#endif /* Py_PYMATH_H */ diff --git a/pypy/module/cpyext/include/pystate.h b/pypy/module/cpyext/include/pystate.h --- a/pypy/module/cpyext/include/pystate.h +++ b/pypy/module/cpyext/include/pystate.h @@ -10,6 +10,7 @@ typedef struct _ts { PyInterpreterState *interp; + PyObject *dict; /* Stores per-thread state */ } PyThreadState; #define Py_BEGIN_ALLOW_THREADS { \ @@ -24,4 +25,6 @@ enum {PyGILState_LOCKED, PyGILState_UNLOCKED} PyGILState_STATE; +#define PyThreadState_GET() PyThreadState_Get() + #endif /* !Py_PYSTATE_H */ diff --git a/pypy/module/cpyext/include/pythonrun.h b/pypy/module/cpyext/include/pythonrun.h --- a/pypy/module/cpyext/include/pythonrun.h +++ b/pypy/module/cpyext/include/pythonrun.h @@ -19,6 +19,14 @@ int cf_flags; /* bitmask of CO_xxx flags relevant to future */ } PyCompilerFlags; +#define PyCF_MASK (CO_FUTURE_DIVISION | CO_FUTURE_ABSOLUTE_IMPORT | \ + CO_FUTURE_WITH_STATEMENT | CO_FUTURE_PRINT_FUNCTION | \ + CO_FUTURE_UNICODE_LITERALS) +#define PyCF_MASK_OBSOLETE (CO_NESTED) +#define PyCF_SOURCE_IS_UTF8 0x0100 +#define PyCF_DONT_IMPLY_DEDENT 0x0200 +#define PyCF_ONLY_AST 0x0400 + #define Py_CompileString(str, filename, start) Py_CompileStringFlags(str, filename, start, NULL) #ifdef __cplusplus diff --git a/pypy/module/cpyext/include/pythread.h b/pypy/module/cpyext/include/pythread.h --- a/pypy/module/cpyext/include/pythread.h +++ b/pypy/module/cpyext/include/pythread.h @@ -1,6 +1,8 @@ #ifndef Py_PYTHREAD_H #define Py_PYTHREAD_H +#define WITH_THREAD + typedef void *PyThread_type_lock; #define WAIT_LOCK 1 #define NOWAIT_LOCK 0 diff --git a/pypy/module/cpyext/include/structmember.h b/pypy/module/cpyext/include/structmember.h --- a/pypy/module/cpyext/include/structmember.h +++ b/pypy/module/cpyext/include/structmember.h @@ -20,7 +20,7 @@ } PyMemberDef; -/* Types */ +/* Types. These constants are also in structmemberdefs.py. 
*/ #define T_SHORT 0 #define T_INT 1 #define T_LONG 2 @@ -42,9 +42,12 @@ #define T_LONGLONG 17 #define T_ULONGLONG 18 -/* Flags */ +/* Flags. These constants are also in structmemberdefs.py. */ #define READONLY 1 #define RO READONLY /* Shorthand */ +#define READ_RESTRICTED 2 +#define PY_WRITE_RESTRICTED 4 +#define RESTRICTED (READ_RESTRICTED | PY_WRITE_RESTRICTED) #ifdef __cplusplus diff --git a/pypy/module/cpyext/intobject.py b/pypy/module/cpyext/intobject.py --- a/pypy/module/cpyext/intobject.py +++ b/pypy/module/cpyext/intobject.py @@ -2,11 +2,37 @@ from pypy.rpython.lltypesystem import rffi, lltype from pypy.interpreter.error import OperationError from pypy.module.cpyext.api import ( - cpython_api, build_type_checkers, PyObject, - CONST_STRING, CANNOT_FAIL, Py_ssize_t) + cpython_api, cpython_struct, build_type_checkers, bootstrap_function, + PyObject, PyObjectFields, CONST_STRING, CANNOT_FAIL, Py_ssize_t) +from pypy.module.cpyext.pyobject import ( + make_typedescr, track_reference, RefcountState, from_ref) from pypy.rlib.rarithmetic import r_uint, intmask, LONG_TEST +from pypy.objspace.std.intobject import W_IntObject import sys +PyIntObjectStruct = lltype.ForwardReference() +PyIntObject = lltype.Ptr(PyIntObjectStruct) +PyIntObjectFields = PyObjectFields + \ + (("ob_ival", rffi.LONG),) +cpython_struct("PyIntObject", PyIntObjectFields, PyIntObjectStruct) + + at bootstrap_function +def init_intobject(space): + "Type description of PyIntObject" + make_typedescr(space.w_int.instancetypedef, + basestruct=PyIntObject.TO, + realize=int_realize) + +def int_realize(space, obj): + intval = rffi.cast(lltype.Signed, rffi.cast(PyIntObject, obj).c_ob_ival) + w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type)) + w_obj = space.allocate_instance(W_IntObject, w_type) + w_obj.__init__(intval) + track_reference(space, obj, w_obj) + state = space.fromcache(RefcountState) + state.set_lifeline(w_obj, obj) + return w_obj + PyInt_Check, PyInt_CheckExact = 
build_type_checkers("Int") @cpython_api([], lltype.Signed, error=CANNOT_FAIL) diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py --- a/pypy/module/cpyext/methodobject.py +++ b/pypy/module/cpyext/methodobject.py @@ -32,6 +32,7 @@ PyObjectFields + ( ('m_ml', lltype.Ptr(PyMethodDef)), ('m_self', PyObject), + ('m_module', PyObject), )) PyCFunctionObject = lltype.Ptr(PyCFunctionObjectStruct) @@ -47,11 +48,13 @@ assert isinstance(w_obj, W_PyCFunctionObject) py_func.c_m_ml = w_obj.ml py_func.c_m_self = make_ref(space, w_obj.w_self) + py_func.c_m_module = make_ref(space, w_obj.w_module) @cpython_api([PyObject], lltype.Void, external=False) def cfunction_dealloc(space, py_obj): py_func = rffi.cast(PyCFunctionObject, py_obj) Py_DecRef(space, py_func.c_m_self) + Py_DecRef(space, py_func.c_m_module) from pypy.module.cpyext.object import PyObject_dealloc PyObject_dealloc(space, py_obj) diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -193,7 +193,7 @@ if not obj: PyErr_NoMemory(space) obj.c_ob_type = type - _Py_NewReference(space, obj) + obj.c_ob_refcnt = 1 return obj @cpython_api([PyVarObject, PyTypeObjectPtr, Py_ssize_t], PyObject) @@ -381,6 +381,15 @@ This is the equivalent of the Python expression hash(o).""" return space.int_w(space.hash(w_obj)) + at cpython_api([PyObject], PyObject) +def PyObject_Dir(space, w_o): + """This is equivalent to the Python expression dir(o), returning a (possibly + empty) list of strings appropriate for the object argument, or NULL if there + was an error. 
If the argument is NULL, this is like the Python dir(), + returning the names of the current locals; in this case, if no execution frame + is active then NULL is returned but PyErr_Occurred() will return false.""" + return space.call_function(space.builtin.get('dir'), w_o) + @cpython_api([PyObject, rffi.CCHARPP, Py_ssize_tP], rffi.INT_real, error=-1) def PyObject_AsCharBuffer(space, obj, bufferp, sizep): """Returns a pointer to a read-only memory location usable as @@ -430,6 +439,8 @@ return 0 +PyBUF_WRITABLE = 0x0001 # Copied from object.h + @cpython_api([lltype.Ptr(Py_buffer), PyObject, rffi.VOIDP, Py_ssize_t, lltype.Signed, lltype.Signed], rffi.INT, error=CANNOT_FAIL) def PyBuffer_FillInfo(space, view, obj, buf, length, readonly, flags): @@ -445,6 +456,18 @@ view.c_len = length view.c_obj = obj Py_IncRef(space, obj) + view.c_itemsize = 1 + if flags & PyBUF_WRITABLE: + rffi.setintfield(view, 'c_readonly', 0) + else: + rffi.setintfield(view, 'c_readonly', 1) + rffi.setintfield(view, 'c_ndim', 0) + view.c_format = lltype.nullptr(rffi.CCHARP.TO) + view.c_shape = lltype.nullptr(Py_ssize_tP.TO) + view.c_strides = lltype.nullptr(Py_ssize_tP.TO) + view.c_suboffsets = lltype.nullptr(Py_ssize_tP.TO) + view.c_internal = lltype.nullptr(rffi.VOIDP.TO) + return 0 diff --git a/pypy/module/cpyext/pyfile.py b/pypy/module/cpyext/pyfile.py --- a/pypy/module/cpyext/pyfile.py +++ b/pypy/module/cpyext/pyfile.py @@ -1,7 +1,8 @@ from pypy.rpython.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( - cpython_api, CONST_STRING, FILEP, build_type_checkers) + cpython_api, CANNOT_FAIL, CONST_STRING, FILEP, build_type_checkers) from pypy.module.cpyext.pyobject import PyObject, borrow_from +from pypy.module.cpyext.object import Py_PRINT_RAW from pypy.interpreter.error import OperationError from pypy.module._file.interp_file import W_File @@ -61,11 +62,49 @@ def PyFile_WriteString(space, s, w_p): """Write string s to file object p. 
Return 0 on success or -1 on failure; the appropriate exception will be set.""" - w_s = space.wrap(rffi.charp2str(s)) - space.call_method(w_p, "write", w_s) + w_str = space.wrap(rffi.charp2str(s)) + space.call_method(w_p, "write", w_str) + return 0 + + at cpython_api([PyObject, PyObject, rffi.INT_real], rffi.INT_real, error=-1) +def PyFile_WriteObject(space, w_obj, w_p, flags): + """ + Write object obj to file object p. The only supported flag for flags is + Py_PRINT_RAW; if given, the str() of the object is written + instead of the repr(). Return 0 on success or -1 on failure; the + appropriate exception will be set.""" + if rffi.cast(lltype.Signed, flags) & Py_PRINT_RAW: + w_str = space.str(w_obj) + else: + w_str = space.repr(w_obj) + space.call_method(w_p, "write", w_str) return 0 @cpython_api([PyObject], PyObject) def PyFile_Name(space, w_p): """Return the name of the file specified by p as a string object.""" - return borrow_from(w_p, space.getattr(w_p, space.wrap("name"))) \ No newline at end of file + return borrow_from(w_p, space.getattr(w_p, space.wrap("name"))) + + at cpython_api([PyObject, rffi.INT_real], rffi.INT_real, error=CANNOT_FAIL) +def PyFile_SoftSpace(space, w_p, newflag): + """ + This function exists for internal use by the interpreter. Set the + softspace attribute of p to newflag and return the previous value. + p does not have to be a file object for this function to work + properly; any object is supported (thought its only interesting if + the softspace attribute can be set). This function clears any + errors, and will return 0 as the previous value if the attribute + either does not exist or if there were errors in retrieving it. 
+ There is no way to detect errors from this function, but doing so + should not be needed.""" + try: + if rffi.cast(lltype.Signed, newflag): + w_newflag = space.w_True + else: + w_newflag = space.w_False + oldflag = space.int_w(space.getattr(w_p, space.wrap("softspace"))) + space.setattr(w_p, space.wrap("softspace"), w_newflag) + return oldflag + except OperationError, e: + return 0 + diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -17,6 +17,7 @@ class BaseCpyTypedescr(object): basestruct = PyObject.TO + W_BaseObject = W_ObjectObject def get_dealloc(self, space): from pypy.module.cpyext.typeobject import subtype_dealloc @@ -51,10 +52,14 @@ def attach(self, space, pyobj, w_obj): pass - def realize(self, space, ref): - # For most types, a reference cannot exist without - # a real interpreter object - raise InvalidPointerException(str(ref)) + def realize(self, space, obj): + w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type)) + w_obj = space.allocate_instance(self.W_BaseObject, w_type) + track_reference(space, obj, w_obj) + if w_type is not space.gettypefor(self.W_BaseObject): + state = space.fromcache(RefcountState) + state.set_lifeline(w_obj, obj) + return w_obj typedescr_cache = {} @@ -369,13 +374,7 @@ obj.c_ob_refcnt = 1 w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type)) assert isinstance(w_type, W_TypeObject) - if w_type.is_cpytype(): - w_obj = space.allocate_instance(W_ObjectObject, w_type) - track_reference(space, obj, w_obj) - state = space.fromcache(RefcountState) - state.set_lifeline(w_obj, obj) - else: - assert False, "Please add more cases in _Py_NewReference()" + get_typedescr(w_type.instancetypedef).realize(space, obj) def _Py_Dealloc(space, obj): from pypy.module.cpyext.api import generic_cpy_call_dont_decref diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ 
b/pypy/module/cpyext/pystate.py @@ -1,12 +1,19 @@ from pypy.module.cpyext.api import ( cpython_api, generic_cpy_call, CANNOT_FAIL, CConfig, cpython_struct) +from pypy.module.cpyext.pyobject import PyObject, Py_DecRef, make_ref from pypy.rpython.lltypesystem import rffi, lltype PyInterpreterStateStruct = lltype.ForwardReference() PyInterpreterState = lltype.Ptr(PyInterpreterStateStruct) cpython_struct( - "PyInterpreterState", [('next', PyInterpreterState)], PyInterpreterStateStruct) -PyThreadState = lltype.Ptr(cpython_struct("PyThreadState", [('interp', PyInterpreterState)])) + "PyInterpreterState", + [('next', PyInterpreterState)], + PyInterpreterStateStruct) +PyThreadState = lltype.Ptr(cpython_struct( + "PyThreadState", + [('interp', PyInterpreterState), + ('dict', PyObject), + ])) @cpython_api([], PyThreadState, error=CANNOT_FAIL) def PyEval_SaveThread(space): @@ -38,41 +45,49 @@ return 1 # XXX: might be generally useful -def encapsulator(T, flavor='raw'): +def encapsulator(T, flavor='raw', dealloc=None): class MemoryCapsule(object): - def __init__(self, alloc=True): - if alloc: + def __init__(self, space): + self.space = space + if space is not None: self.memory = lltype.malloc(T, flavor=flavor) else: self.memory = lltype.nullptr(T) def __del__(self): if self.memory: + if dealloc and self.space: + dealloc(self.memory, self.space) lltype.free(self.memory, flavor=flavor) return MemoryCapsule -ThreadStateCapsule = encapsulator(PyThreadState.TO) +def ThreadState_dealloc(ts, space): + assert space is not None + Py_DecRef(space, ts.c_dict) +ThreadStateCapsule = encapsulator(PyThreadState.TO, + dealloc=ThreadState_dealloc) from pypy.interpreter.executioncontext import ExecutionContext -ExecutionContext.cpyext_threadstate = ThreadStateCapsule(alloc=False) +ExecutionContext.cpyext_threadstate = ThreadStateCapsule(None) class InterpreterState(object): def __init__(self, space): self.interpreter_state = lltype.malloc( PyInterpreterState.TO, flavor='raw', zero=True, 
immortal=True) - def new_thread_state(self): - capsule = ThreadStateCapsule() + def new_thread_state(self, space): + capsule = ThreadStateCapsule(space) ts = capsule.memory ts.c_interp = self.interpreter_state + ts.c_dict = make_ref(space, space.newdict()) return capsule def get_thread_state(self, space): ec = space.getexecutioncontext() - return self._get_thread_state(ec).memory + return self._get_thread_state(space, ec).memory - def _get_thread_state(self, ec): + def _get_thread_state(self, space, ec): if ec.cpyext_threadstate.memory == lltype.nullptr(PyThreadState.TO): - ec.cpyext_threadstate = self.new_thread_state() + ec.cpyext_threadstate = self.new_thread_state(space) return ec.cpyext_threadstate @@ -81,6 +96,11 @@ state = space.fromcache(InterpreterState) return state.get_thread_state(space) + at cpython_api([], PyObject, error=CANNOT_FAIL) +def PyThreadState_GetDict(space): + state = space.fromcache(InterpreterState) + return state.get_thread_state(space).c_dict + @cpython_api([PyThreadState], PyThreadState, error=CANNOT_FAIL) def PyThreadState_Swap(space, tstate): """Swap the current thread state with the thread state given by the argument diff --git a/pypy/module/cpyext/pythonrun.py b/pypy/module/cpyext/pythonrun.py --- a/pypy/module/cpyext/pythonrun.py +++ b/pypy/module/cpyext/pythonrun.py @@ -14,6 +14,20 @@ value.""" return space.fromcache(State).get_programname() + at cpython_api([], rffi.CCHARP) +def Py_GetVersion(space): + """Return the version of this Python interpreter. This is a + string that looks something like + + "1.5 (\#67, Dec 31 1997, 22:34:28) [GCC 2.7.2.2]" + + The first word (up to the first space character) is the current + Python version; the first three characters are the major and minor + version separated by a period. The returned string points into + static storage; the caller should not modify its value. 
The value + is available to Python code as sys.version.""" + return space.fromcache(State).get_version() + @cpython_api([lltype.Ptr(lltype.FuncType([], lltype.Void))], rffi.INT_real, error=-1) def Py_AtExit(space, func_ptr): """Register a cleanup function to be called by Py_Finalize(). The cleanup diff --git a/pypy/module/cpyext/setobject.py b/pypy/module/cpyext/setobject.py --- a/pypy/module/cpyext/setobject.py +++ b/pypy/module/cpyext/setobject.py @@ -54,6 +54,20 @@ return 0 + at cpython_api([PyObject], PyObject) +def PySet_Pop(space, w_set): + """Return a new reference to an arbitrary object in the set, and removes the + object from the set. Return NULL on failure. Raise KeyError if the + set is empty. Raise a SystemError if set is an not an instance of + set or its subtype.""" + return space.call_method(w_set, "pop") + + at cpython_api([PyObject], rffi.INT_real, error=-1) +def PySet_Clear(space, w_set): + """Empty an existing set of all elements.""" + space.call_method(w_set, 'clear') + return 0 + @cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL) def PySet_GET_SIZE(space, w_s): """Macro form of PySet_Size() without error checking.""" diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -185,6 +185,15 @@ space.fromcache(State).check_and_raise_exception(always=True) return space.wrap(res) +def wrap_delitem(space, w_self, w_args, func): + func_target = rffi.cast(objobjargproc, func) + check_num_args(space, w_args, 1) + w_key, = space.fixedview(w_args) + res = generic_cpy_call(space, func_target, w_self, w_key, None) + if rffi.cast(lltype.Signed, res) == -1: + space.fromcache(State).check_and_raise_exception(always=True) + return space.w_None + def wrap_ssizessizeargfunc(space, w_self, w_args, func): func_target = rffi.cast(ssizessizeargfunc, func) check_num_args(space, w_args, 2) @@ -291,6 +300,14 @@ def slot_nb_int(space, w_self): return space.int(w_self) + 
at cpython_api([PyObject], PyObject, external=False) +def slot_tp_iter(space, w_self): + return space.iter(w_self) + + at cpython_api([PyObject], PyObject, external=False) +def slot_tp_iternext(space, w_self): + return space.next(w_self) + from pypy.rlib.nonconst import NonConstant SLOTS = {} @@ -632,6 +649,19 @@ TPSLOT("__buffer__", "tp_as_buffer.c_bf_getreadbuffer", None, "wrap_getreadbuffer", ""), ) +# partial sort to solve some slot conflicts: +# Number slots before Mapping slots before Sequence slots. +# These are the only conflicts between __name__ methods +def slotdef_sort_key(slotdef): + if slotdef.slot_name.startswith('tp_as_number'): + return 1 + if slotdef.slot_name.startswith('tp_as_mapping'): + return 2 + if slotdef.slot_name.startswith('tp_as_sequence'): + return 3 + return 0 +slotdefs = sorted(slotdefs, key=slotdef_sort_key) + slotdefs_for_tp_slots = unrolling_iterable( [(x.method_name, x.slot_name, x.slot_names, x.slot_func) for x in slotdefs]) diff --git a/pypy/module/cpyext/state.py b/pypy/module/cpyext/state.py --- a/pypy/module/cpyext/state.py +++ b/pypy/module/cpyext/state.py @@ -10,6 +10,7 @@ self.space = space self.reset() self.programname = lltype.nullptr(rffi.CCHARP.TO) + self.version = lltype.nullptr(rffi.CCHARP.TO) def reset(self): from pypy.module.cpyext.modsupport import PyMethodDef @@ -102,6 +103,15 @@ lltype.render_immortal(self.programname) return self.programname + def get_version(self): + if not self.version: + space = self.space + w_version = space.sys.get('version') + version = space.str_w(w_version) + self.version = rffi.str2charp(version) + lltype.render_immortal(self.version) + return self.version + def find_extension(self, name, path): from pypy.module.cpyext.modsupport import PyImport_AddModule from pypy.interpreter.module import Module diff --git a/pypy/module/cpyext/stringobject.py b/pypy/module/cpyext/stringobject.py --- a/pypy/module/cpyext/stringobject.py +++ b/pypy/module/cpyext/stringobject.py @@ -250,6 +250,26 @@ s = 
rffi.charp2str(string) return space.new_interned_str(s) + at cpython_api([PyObjectP], lltype.Void) +def PyString_InternInPlace(space, string): + """Intern the argument *string in place. The argument must be the + address of a pointer variable pointing to a Python string object. + If there is an existing interned string that is the same as + *string, it sets *string to it (decrementing the reference count + of the old string object and incrementing the reference count of + the interned string object), otherwise it leaves *string alone and + interns it (incrementing its reference count). (Clarification: + even though there is a lot of talk about reference counts, think + of this function as reference-count-neutral; you own the object + after the call if and only if you owned it before the call.) + + This function is not available in 3.x and does not have a PyBytes + alias.""" + w_str = from_ref(space, string[0]) + w_str = space.new_interned_w_str(w_str) + Py_DecRef(space, string[0]) + string[0] = make_ref(space, w_str) + @cpython_api([PyObject, rffi.CCHARP, rffi.CCHARP], PyObject) def PyString_AsEncodedObject(space, w_str, encoding, errors): """Encode a string object using the codec registered for encoding and return diff --git a/pypy/module/cpyext/structmemberdefs.py b/pypy/module/cpyext/structmemberdefs.py --- a/pypy/module/cpyext/structmemberdefs.py +++ b/pypy/module/cpyext/structmemberdefs.py @@ -1,3 +1,5 @@ +# These constants are also in include/structmember.h + T_SHORT = 0 T_INT = 1 T_LONG = 2 @@ -18,3 +20,6 @@ T_ULONGLONG = 18 READONLY = RO = 1 +READ_RESTRICTED = 2 +WRITE_RESTRICTED = 4 +RESTRICTED = READ_RESTRICTED | WRITE_RESTRICTED diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py --- a/pypy/module/cpyext/stubs.py +++ b/pypy/module/cpyext/stubs.py @@ -1,5 +1,5 @@ from pypy.module.cpyext.api import ( - cpython_api, PyObject, PyObjectP, CANNOT_FAIL, Py_buffer + cpython_api, PyObject, PyObjectP, CANNOT_FAIL ) from 
pypy.module.cpyext.complexobject import Py_complex_ptr as Py_complex from pypy.rpython.lltypesystem import rffi, lltype @@ -10,6 +10,7 @@ PyMethodDef = rffi.VOIDP PyGetSetDef = rffi.VOIDP PyMemberDef = rffi.VOIDP +Py_buffer = rffi.VOIDP va_list = rffi.VOIDP PyDateTime_Date = rffi.VOIDP PyDateTime_DateTime = rffi.VOIDP @@ -32,10 +33,6 @@ def _PyObject_Del(space, op): raise NotImplementedError - at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) -def PyObject_CheckBuffer(space, obj): - raise NotImplementedError - @cpython_api([rffi.CCHARP], Py_ssize_t, error=CANNOT_FAIL) def PyBuffer_SizeFromFormat(space, format): """Return the implied ~Py_buffer.itemsize from the struct-stype @@ -185,16 +182,6 @@ used as the positional and keyword parameters to the object's constructor.""" raise NotImplementedError - at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) -def PyCode_Check(space, co): - """Return true if co is a code object""" - raise NotImplementedError - - at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) -def PyCode_GetNumFree(space, co): - """Return the number of free variables in co.""" - raise NotImplementedError - @cpython_api([PyObject], rffi.INT_real, error=-1) def PyCodec_Register(space, search_function): """Register a new codec search function. @@ -684,28 +671,6 @@ """ raise NotImplementedError - at cpython_api([PyObject, rffi.INT_real], rffi.INT_real, error=CANNOT_FAIL) -def PyFile_SoftSpace(space, p, newflag): - """ - This function exists for internal use by the interpreter. Set the - softspace attribute of p to newflag and return the previous value. - p does not have to be a file object for this function to work properly; any - object is supported (thought its only interesting if the softspace - attribute can be set). This function clears any errors, and will return 0 - as the previous value if the attribute either does not exist or if there were - errors in retrieving it. 
There is no way to detect errors from this function, - but doing so should not be needed.""" - raise NotImplementedError - - at cpython_api([PyObject, PyObject, rffi.INT_real], rffi.INT_real, error=-1) -def PyFile_WriteObject(space, obj, p, flags): - """ - Write object obj to file object p. The only supported flag for flags is - Py_PRINT_RAW; if given, the str() of the object is written - instead of the repr(). Return 0 on success or -1 on failure; the - appropriate exception will be set.""" - raise NotImplementedError - @cpython_api([], PyObject) def PyFloat_GetInfo(space): """Return a structseq instance which contains information about the @@ -1097,19 +1062,6 @@ raise NotImplementedError @cpython_api([], rffi.CCHARP) -def Py_GetVersion(space): - """Return the version of this Python interpreter. This is a string that looks - something like - - "1.5 (\#67, Dec 31 1997, 22:34:28) [GCC 2.7.2.2]" - - The first word (up to the first space character) is the current Python version; - the first three characters are the major and minor version separated by a - period. The returned string points into static storage; the caller should not - modify its value. The value is available to Python code as sys.version.""" - raise NotImplementedError - - at cpython_api([], rffi.CCHARP) def Py_GetPlatform(space): """Return the platform identifier for the current platform. On Unix, this is formed from the"official" name of the operating system, converted to lower @@ -1331,28 +1283,6 @@ that haven't been explicitly destroyed at that point.""" raise NotImplementedError - at cpython_api([rffi.VOIDP], lltype.Void) -def Py_AddPendingCall(space, func): - """Post a notification to the Python main thread. If successful, func will - be called with the argument arg at the earliest convenience. func will be - called having the global interpreter lock held and can thus use the full - Python API and can take any action such as setting object attributes to - signal IO completion. 
It must return 0 on success, or -1 signalling an - exception. The notification function won't be interrupted to perform another - asynchronous notification recursively, but it can still be interrupted to - switch threads if the global interpreter lock is released, for example, if it - calls back into Python code. - - This function returns 0 on success in which case the notification has been - scheduled. Otherwise, for example if the notification buffer is full, it - returns -1 without setting any exception. - - This function can be called on any thread, be it a Python thread or some - other system thread. If it is a Python thread, it doesn't matter if it holds - the global interpreter lock or not. - """ - raise NotImplementedError - @cpython_api([Py_tracefunc, PyObject], lltype.Void) def PyEval_SetProfile(space, func, obj): """Set the profiler function to func. The obj parameter is passed to the @@ -1685,15 +1615,6 @@ """ raise NotImplementedError - at cpython_api([PyObject], PyObject) -def PyObject_Dir(space, o): - """This is equivalent to the Python expression dir(o), returning a (possibly - empty) list of strings appropriate for the object argument, or NULL if there - was an error. If the argument is NULL, this is like the Python dir(), - returning the names of the current locals; in this case, if no execution frame - is active then NULL is returned but PyErr_Occurred() will return false.""" - raise NotImplementedError - @cpython_api([], PyFrameObject) def PyEval_GetFrame(space): """Return the current thread state's frame, which is NULL if no frame is @@ -1802,34 +1723,6 @@ building-up new frozensets with PySet_Add().""" raise NotImplementedError - at cpython_api([PyObject], PyObject) -def PySet_Pop(space, set): - """Return a new reference to an arbitrary object in the set, and removes the - object from the set. Return NULL on failure. Raise KeyError if the - set is empty. 
Raise a SystemError if set is an not an instance of - set or its subtype.""" - raise NotImplementedError - - at cpython_api([PyObject], rffi.INT_real, error=-1) -def PySet_Clear(space, set): - """Empty an existing set of all elements.""" - raise NotImplementedError - - at cpython_api([PyObjectP], lltype.Void) -def PyString_InternInPlace(space, string): - """Intern the argument *string in place. The argument must be the address of a - pointer variable pointing to a Python string object. If there is an existing - interned string that is the same as *string, it sets *string to it - (decrementing the reference count of the old string object and incrementing the - reference count of the interned string object), otherwise it leaves *string - alone and interns it (incrementing its reference count). (Clarification: even - though there is a lot of talk about reference counts, think of this function as - reference-count-neutral; you own the object after the call if and only if you - owned it before the call.) 
- - This function is not available in 3.x and does not have a PyBytes alias.""" - raise NotImplementedError - @cpython_api([rffi.CCHARP, Py_ssize_t, rffi.CCHARP, rffi.CCHARP], PyObject) def PyString_Decode(space, s, size, encoding, errors): """Create an object by decoding size bytes of the encoded buffer s using the @@ -1950,26 +1843,6 @@ """ raise NotImplementedError - at cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) -def Py_UNICODE_ISTITLE(space, ch): - """Return 1 or 0 depending on whether ch is a titlecase character.""" - raise NotImplementedError - - at cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) -def Py_UNICODE_ISDIGIT(space, ch): - """Return 1 or 0 depending on whether ch is a digit character.""" - raise NotImplementedError - - at cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) -def Py_UNICODE_ISNUMERIC(space, ch): - """Return 1 or 0 depending on whether ch is a numeric character.""" - raise NotImplementedError - - at cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) -def Py_UNICODE_ISALPHA(space, ch): - """Return 1 or 0 depending on whether ch is an alphabetic character.""" - raise NotImplementedError - @cpython_api([rffi.CCHARP], PyObject) def PyUnicode_FromFormat(space, format): """Take a C printf()-style format string and a variable number of @@ -2414,17 +2287,6 @@ use the default error handling.""" raise NotImplementedError - at cpython_api([PyObject, PyObject, Py_ssize_t, Py_ssize_t, rffi.INT_real], rffi.INT_real, error=-1) -def PyUnicode_Tailmatch(space, str, substr, start, end, direction): - """Return 1 if substr matches str*[*start:end] at the given tail end - (direction == -1 means to do a prefix match, direction == 1 a suffix match), - 0 otherwise. Return -1 if an error occurred. - - This function used an int type for start and end. 
This - might require changes in your code for properly supporting 64-bit - systems.""" - raise NotImplementedError - @cpython_api([PyObject, PyObject, Py_ssize_t, Py_ssize_t, rffi.INT_real], Py_ssize_t, error=-2) def PyUnicode_Find(space, str, substr, start, end, direction): """Return the first position of substr in str*[*start:end] using the given @@ -2448,16 +2310,6 @@ properly supporting 64-bit systems.""" raise NotImplementedError - at cpython_api([PyObject, PyObject, PyObject, Py_ssize_t], PyObject) -def PyUnicode_Replace(space, str, substr, replstr, maxcount): - """Replace at most maxcount occurrences of substr in str with replstr and - return the resulting Unicode object. maxcount == -1 means replace all - occurrences. - - This function used an int type for maxcount. This might - require changes in your code for properly supporting 64-bit systems.""" - raise NotImplementedError - @cpython_api([PyObject, PyObject, rffi.INT_real], PyObject) def PyUnicode_RichCompare(space, left, right, op): """Rich compare two unicode strings and return one of the following: @@ -2631,17 +2483,6 @@ source code is read from fp instead of an in-memory string.""" raise NotImplementedError - at cpython_api([rffi.CCHARP, rffi.INT_real, PyObject, PyObject, PyCompilerFlags], PyObject) -def PyRun_StringFlags(space, str, start, globals, locals, flags): - """Execute Python source code from str in the context specified by the - dictionaries globals and locals with the compiler flags specified by - flags. The parameter start specifies the start token that should be used to - parse the source code. 
- - Returns the result of executing the code as a Python object, or NULL if an - exception was raised.""" - raise NotImplementedError - @cpython_api([FILE, rffi.CCHARP, rffi.INT_real, PyObject, PyObject, rffi.INT_real], PyObject) def PyRun_FileEx(space, fp, filename, start, globals, locals, closeit): """This is a simplified interface to PyRun_FileExFlags() below, leaving @@ -2662,13 +2503,6 @@ returns.""" raise NotImplementedError - at cpython_api([PyCodeObject, PyObject, PyObject], PyObject) -def PyEval_EvalCode(space, co, globals, locals): - """This is a simplified interface to PyEval_EvalCodeEx(), with just - the code object, and the dictionaries of global and local variables. - The other arguments are set to NULL.""" - raise NotImplementedError - @cpython_api([PyCodeObject, PyObject, PyObject, PyObjectP, rffi.INT_real, PyObjectP, rffi.INT_real, PyObjectP, rffi.INT_real, PyObject], PyObject) def PyEval_EvalCodeEx(space, co, globals, locals, args, argcount, kws, kwcount, defs, defcount, closure): """Evaluate a precompiled code object, given a particular environment for its @@ -2693,12 +2527,6 @@ throw() methods of generator objects.""" raise NotImplementedError - at cpython_api([PyCompilerFlags], rffi.INT_real, error=CANNOT_FAIL) -def PyEval_MergeCompilerFlags(space, cf): - """This function changes the flags of the current evaluation frame, and returns - true on success, false on failure.""" - raise NotImplementedError - @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) def PyWeakref_Check(space, ob): """Return true if ob is either a reference or proxy object. 
diff --git a/pypy/module/cpyext/stubsactive.py b/pypy/module/cpyext/stubsactive.py --- a/pypy/module/cpyext/stubsactive.py +++ b/pypy/module/cpyext/stubsactive.py @@ -38,3 +38,31 @@ def Py_MakePendingCalls(space): return 0 +pending_call = lltype.Ptr(lltype.FuncType([rffi.VOIDP], rffi.INT_real)) + at cpython_api([pending_call, rffi.VOIDP], rffi.INT_real, error=-1) +def Py_AddPendingCall(space, func, arg): + """Post a notification to the Python main thread. If successful, + func will be called with the argument arg at the earliest + convenience. func will be called having the global interpreter + lock held and can thus use the full Python API and can take any + action such as setting object attributes to signal IO completion. + It must return 0 on success, or -1 signalling an exception. The + notification function won't be interrupted to perform another + asynchronous notification recursively, but it can still be + interrupted to switch threads if the global interpreter lock is + released, for example, if it calls back into Python code. + + This function returns 0 on success in which case the notification + has been scheduled. Otherwise, for example if the notification + buffer is full, it returns -1 without setting any exception. + + This function can be called on any thread, be it a Python thread + or some other system thread. If it is a Python thread, it doesn't + matter if it holds the global interpreter lock or not. 
+ """ + return -1 + +thread_func = lltype.Ptr(lltype.FuncType([rffi.VOIDP], lltype.Void)) + at cpython_api([thread_func, rffi.VOIDP], rffi.INT_real, error=-1) +def PyThread_start_new_thread(space, func, arg): + return -1 diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -43,6 +43,15 @@ assert arr[:2].tolist() == [1,2] assert arr[1:3].tolist() == [2,3] + def test_slice_object(self): + module = self.import_module(name='array') + arr = module.array('i', [1,2,3,4]) + assert arr[slice(1,3)].tolist() == [2,3] + arr[slice(1,3)] = module.array('i', [21, 22, 23]) + assert arr.tolist() == [1, 21, 22, 23, 4] + del arr[slice(1, 3)] + assert arr.tolist() == [1, 23, 4] + def test_buffer(self): module = self.import_module(name='array') arr = module.array('i', [1,2,3,4]) diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -744,6 +744,22 @@ print p assert 'py' in p + def test_get_version(self): + mod = self.import_extension('foo', [ + ('get_version', 'METH_NOARGS', + ''' + char* name1 = Py_GetVersion(); + char* name2 = Py_GetVersion(); + if (name1 != name2) + Py_RETURN_FALSE; + return PyString_FromString(name1); + ''' + ), + ]) + p = mod.get_version() + print p + assert 'PyPy' in p + def test_no_double_imports(self): import sys, os try: diff --git a/pypy/module/cpyext/test/test_dictobject.py b/pypy/module/cpyext/test/test_dictobject.py --- a/pypy/module/cpyext/test/test_dictobject.py +++ b/pypy/module/cpyext/test/test_dictobject.py @@ -2,6 +2,7 @@ from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.api import Py_ssize_tP, PyObjectP from pypy.module.cpyext.pyobject import make_ref, from_ref +from pypy.interpreter.error import OperationError class 
TestDictObject(BaseApiTest): def test_dict(self, space, api): @@ -110,3 +111,44 @@ assert space.eq_w(space.len(w_copy), space.len(w_dict)) assert space.eq_w(w_copy, w_dict) + + def test_iterkeys(self, space, api): + w_dict = space.sys.getdict(space) + py_dict = make_ref(space, w_dict) + + ppos = lltype.malloc(Py_ssize_tP.TO, 1, flavor='raw') + pkey = lltype.malloc(PyObjectP.TO, 1, flavor='raw') + pvalue = lltype.malloc(PyObjectP.TO, 1, flavor='raw') + + keys_w = [] + values_w = [] + try: + ppos[0] = 0 + while api.PyDict_Next(w_dict, ppos, pkey, None): + w_key = from_ref(space, pkey[0]) + keys_w.append(w_key) + ppos[0] = 0 + while api.PyDict_Next(w_dict, ppos, None, pvalue): + w_value = from_ref(space, pvalue[0]) + values_w.append(w_value) + finally: + lltype.free(ppos, flavor='raw') + lltype.free(pkey, flavor='raw') + lltype.free(pvalue, flavor='raw') + + api.Py_DecRef(py_dict) # release borrowed references + + assert space.eq_w(space.newlist(keys_w), + space.call_method(w_dict, "keys")) + assert space.eq_w(space.newlist(values_w), + space.call_method(w_dict, "values")) + + def test_dictproxy(self, space, api): + w_dict = space.sys.get('modules') + w_proxy = api.PyDictProxy_New(w_dict) + assert space.is_true(space.contains(w_proxy, space.wrap('sys'))) + raises(OperationError, space.setitem, + w_proxy, space.wrap('sys'), space.w_None) + raises(OperationError, space.delitem, + w_proxy, space.wrap('sys')) + raises(OperationError, space.call_method, w_proxy, 'clear') diff --git a/pypy/module/cpyext/test/test_eval.py b/pypy/module/cpyext/test/test_eval.py --- a/pypy/module/cpyext/test/test_eval.py +++ b/pypy/module/cpyext/test/test_eval.py @@ -2,9 +2,10 @@ from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.eval import ( - Py_single_input, Py_file_input, Py_eval_input) + Py_single_input, Py_file_input, Py_eval_input, PyCompilerFlags) from pypy.module.cpyext.api 
import fopen, fclose, fileno, Py_ssize_tP from pypy.interpreter.gateway import interp2app +from pypy.interpreter.astcompiler import consts from pypy.tool.udir import udir import sys, os @@ -63,6 +64,22 @@ assert space.int_w(w_res) == 10 + def test_evalcode(self, space, api): + w_f = space.appexec([], """(): + def f(*args): + assert isinstance(args, tuple) + return len(args) + 8 + return f + """) + + w_t = space.newtuple([space.wrap(1), space.wrap(2)]) + w_globals = space.newdict() + w_locals = space.newdict() + space.setitem(w_locals, space.wrap("args"), w_t) + w_res = api.PyEval_EvalCode(w_f.code, w_globals, w_locals) + + assert space.int_w(w_res) == 10 + def test_run_simple_string(self, space, api): def run(code): buf = rffi.str2charp(code) @@ -96,6 +113,20 @@ assert 42 * 43 == space.unwrap( api.PyObject_GetItem(w_globals, space.wrap("a"))) + def test_run_string_flags(self, space, api): + flags = lltype.malloc(PyCompilerFlags, flavor='raw') + flags.c_cf_flags = rffi.cast(rffi.INT, consts.PyCF_SOURCE_IS_UTF8) + w_globals = space.newdict() + buf = rffi.str2charp("a = u'caf\xc3\xa9'") + try: + api.PyRun_StringFlags(buf, Py_single_input, + w_globals, w_globals, flags) + finally: + rffi.free_charp(buf) + w_a = space.getitem(w_globals, space.wrap("a")) + assert space.unwrap(w_a) == u'caf\xe9' + lltype.free(flags, flavor='raw') + def test_run_file(self, space, api): filepath = udir / "cpyext_test_runfile.py" filepath.write("raise ZeroDivisionError") @@ -256,3 +287,21 @@ print dir(mod) print mod.__dict__ assert mod.f(42) == 47 + + def test_merge_compiler_flags(self): + module = self.import_extension('foo', [ + ("get_flags", "METH_NOARGS", + """ + PyCompilerFlags flags; + flags.cf_flags = 0; + int result = PyEval_MergeCompilerFlags(&flags); + return Py_BuildValue("ii", result, flags.cf_flags); + """), + ]) + assert module.get_flags() == (0, 0) + + ns = {'module':module} + exec """from __future__ import division \nif 1: + def nested_flags(): + return module.get_flags()""" 
in ns + assert ns['nested_flags']() == (1, 0x2000) # CO_FUTURE_DIVISION diff --git a/pypy/module/cpyext/test/test_funcobject.py b/pypy/module/cpyext/test/test_funcobject.py --- a/pypy/module/cpyext/test/test_funcobject.py +++ b/pypy/module/cpyext/test/test_funcobject.py @@ -81,6 +81,14 @@ rffi.free_charp(filename) rffi.free_charp(funcname) + def test_getnumfree(self, space, api): + w_function = space.appexec([], """(): + a = 5 + def method(x): return a, x + return method + """) + assert api.PyCode_GetNumFree(w_function.code) == 1 + def test_classmethod(self, space, api): w_function = space.appexec([], """(): def method(x): return x diff --git a/pypy/module/cpyext/test/test_intobject.py b/pypy/module/cpyext/test/test_intobject.py --- a/pypy/module/cpyext/test/test_intobject.py +++ b/pypy/module/cpyext/test/test_intobject.py @@ -65,4 +65,97 @@ values = module.values() types = [type(x) for x in values] assert types == [int, long, int, int] - + + def test_int_subtype(self): + module = self.import_extension( + 'foo', [ + ("newEnum", "METH_VARARGS", + """ + EnumObject *enumObj; + long intval; + PyObject *name; + + if (!PyArg_ParseTuple(args, "Oi", &name, &intval)) + return NULL; + + PyType_Ready(&Enum_Type); + enumObj = PyObject_New(EnumObject, &Enum_Type); + if (!enumObj) { + return NULL; + } + + enumObj->ob_ival = intval; + Py_INCREF(name); + enumObj->ob_name = name; + + return (PyObject *)enumObj; + """), + ], + prologue=""" + typedef struct + { + PyObject_HEAD + long ob_ival; + PyObject* ob_name; + } EnumObject; + + static void + enum_dealloc(EnumObject *op) + { + Py_DECREF(op->ob_name); + Py_TYPE(op)->tp_free((PyObject *)op); + } + + static PyMemberDef enum_members[] = { + {"name", T_OBJECT, offsetof(EnumObject, ob_name), 0, NULL}, + {NULL} /* Sentinel */ + }; + + PyTypeObject Enum_Type = { + PyObject_HEAD_INIT(0) + /*ob_size*/ 0, + /*tp_name*/ "Enum", + /*tp_basicsize*/ sizeof(EnumObject), + /*tp_itemsize*/ 0, + /*tp_dealloc*/ enum_dealloc, + /*tp_print*/ 0, + 
/*tp_getattr*/ 0, + /*tp_setattr*/ 0, + /*tp_compare*/ 0, + /*tp_repr*/ 0, + /*tp_as_number*/ 0, + /*tp_as_sequence*/ 0, + /*tp_as_mapping*/ 0, + /*tp_hash*/ 0, + /*tp_call*/ 0, + /*tp_str*/ 0, + /*tp_getattro*/ 0, + /*tp_setattro*/ 0, + /*tp_as_buffer*/ 0, + /*tp_flags*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, + /*tp_doc*/ 0, + /*tp_traverse*/ 0, + /*tp_clear*/ 0, + /*tp_richcompare*/ 0, + /*tp_weaklistoffset*/ 0, + /*tp_iter*/ 0, + /*tp_iternext*/ 0, + /*tp_methods*/ 0, + /*tp_members*/ enum_members, + /*tp_getset*/ 0, + /*tp_base*/ &PyInt_Type, + /*tp_dict*/ 0, + /*tp_descr_get*/ 0, + /*tp_descr_set*/ 0, + /*tp_dictoffset*/ 0, + /*tp_init*/ 0, + /*tp_alloc*/ 0, + /*tp_new*/ 0 + }; + """) + + a = module.newEnum("ULTIMATE_ANSWER", 42) + assert type(a).__name__ == "Enum" + assert isinstance(a, int) + assert a == int(a) == 42 + assert a.name == "ULTIMATE_ANSWER" diff --git a/pypy/module/cpyext/test/test_methodobject.py b/pypy/module/cpyext/test/test_methodobject.py --- a/pypy/module/cpyext/test/test_methodobject.py +++ b/pypy/module/cpyext/test/test_methodobject.py @@ -9,7 +9,7 @@ class AppTestMethodObject(AppTestCpythonExtensionBase): def test_call_METH(self): - mod = self.import_extension('foo', [ + mod = self.import_extension('MyModule', [ ('getarg_O', 'METH_O', ''' Py_INCREF(args); @@ -51,11 +51,23 @@ } ''' ), + ('getModule', 'METH_O', + ''' + if(PyCFunction_Check(args)) { + PyCFunctionObject* func = (PyCFunctionObject*)args; + Py_INCREF(func->m_module); + return func->m_module; + } + else { + Py_RETURN_FALSE; + } + ''' + ), ('isSameFunction', 'METH_O', ''' PyCFunction ptr = PyCFunction_GetFunction(args); if (!ptr) return NULL; - if (ptr == foo_getarg_O) + if (ptr == MyModule_getarg_O) Py_RETURN_TRUE; else Py_RETURN_FALSE; @@ -76,6 +88,7 @@ assert mod.getarg_OLD(1, 2) == (1, 2) assert mod.isCFunction(mod.getarg_O) == "getarg_O" + assert mod.getModule(mod.getarg_O) == 'MyModule' assert mod.isSameFunction(mod.getarg_O) raises(TypeError, mod.isSameFunction, 1) diff 
--git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py --- a/pypy/module/cpyext/test/test_object.py +++ b/pypy/module/cpyext/test/test_object.py @@ -191,6 +191,11 @@ assert api.PyObject_Unicode(space.wrap("\xe9")) is None api.PyErr_Clear() + def test_dir(self, space, api): + w_dir = api.PyObject_Dir(space.sys) + assert space.isinstance_w(w_dir, space.w_list) + assert space.is_true(space.contains(w_dir, space.wrap('modules'))) + class AppTestObject(AppTestCpythonExtensionBase): def setup_class(cls): AppTestCpythonExtensionBase.setup_class.im_func(cls) diff --git a/pypy/module/cpyext/test/test_pyfile.py b/pypy/module/cpyext/test/test_pyfile.py --- a/pypy/module/cpyext/test/test_pyfile.py +++ b/pypy/module/cpyext/test/test_pyfile.py @@ -1,5 +1,6 @@ from pypy.module.cpyext.api import fopen, fclose, fwrite from pypy.module.cpyext.test.test_api import BaseApiTest +from pypy.module.cpyext.object import Py_PRINT_RAW from pypy.rpython.lltypesystem import rffi, lltype from pypy.tool.udir import udir import pytest @@ -77,3 +78,28 @@ out = out.replace('\r\n', '\n') assert out == "test\n" + def test_file_writeobject(self, space, api, capfd): + w_obj = space.wrap("test\n") + w_stdout = space.sys.get("stdout") + api.PyFile_WriteObject(w_obj, w_stdout, Py_PRINT_RAW) + api.PyFile_WriteObject(w_obj, w_stdout, 0) + space.call_method(w_stdout, "flush") + out, err = capfd.readouterr() + out = out.replace('\r\n', '\n') + assert out == "test\n'test\\n'" + + def test_file_softspace(self, space, api, capfd): + w_stdout = space.sys.get("stdout") + assert api.PyFile_SoftSpace(w_stdout, 1) == 0 + assert api.PyFile_SoftSpace(w_stdout, 0) == 1 + + api.PyFile_SoftSpace(w_stdout, 1) + w_ns = space.newdict() + space.exec_("print 1,", w_ns, w_ns) + space.exec_("print 2,", w_ns, w_ns) + api.PyFile_SoftSpace(w_stdout, 0) + space.exec_("print 3", w_ns, w_ns) + space.call_method(w_stdout, "flush") + out, err = capfd.readouterr() + out = out.replace('\r\n', '\n') + 
assert out == " 1 23\n" diff --git a/pypy/module/cpyext/test/test_pystate.py b/pypy/module/cpyext/test/test_pystate.py --- a/pypy/module/cpyext/test/test_pystate.py +++ b/pypy/module/cpyext/test/test_pystate.py @@ -2,6 +2,7 @@ from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.rpython.lltypesystem.lltype import nullptr from pypy.module.cpyext.pystate import PyInterpreterState, PyThreadState +from pypy.module.cpyext.pyobject import from_ref class AppTestThreads(AppTestCpythonExtensionBase): def test_allow_threads(self): @@ -49,3 +50,10 @@ api.PyEval_AcquireThread(tstate) api.PyEval_ReleaseThread(tstate) + + def test_threadstate_dict(self, space, api): + ts = api.PyThreadState_Get() + ref = ts.c_dict + assert ref == api.PyThreadState_GetDict() + w_obj = from_ref(space, ref) + assert space.isinstance_w(w_obj, space.w_dict) diff --git a/pypy/module/cpyext/test/test_setobject.py b/pypy/module/cpyext/test/test_setobject.py --- a/pypy/module/cpyext/test/test_setobject.py +++ b/pypy/module/cpyext/test/test_setobject.py @@ -32,3 +32,13 @@ w_set = api.PySet_New(space.wrap([1,2,3,4])) assert api.PySet_Contains(w_set, space.wrap(1)) assert not api.PySet_Contains(w_set, space.wrap(0)) + + def test_set_pop_clear(self, space, api): + w_set = api.PySet_New(space.wrap([1,2,3,4])) + w_obj = api.PySet_Pop(w_set) + assert space.int_w(w_obj) in (1,2,3,4) + assert space.len_w(w_set) == 3 + api.PySet_Clear(w_set) + assert space.len_w(w_set) == 0 + + diff --git a/pypy/module/cpyext/test/test_stringobject.py b/pypy/module/cpyext/test/test_stringobject.py --- a/pypy/module/cpyext/test/test_stringobject.py +++ b/pypy/module/cpyext/test/test_stringobject.py @@ -166,6 +166,20 @@ res = module.test_string_format(1, "xyz") assert res == "bla 1 ble xyz\n" + def test_intern_inplace(self): + module = self.import_extension('foo', [ + ("test_intern_inplace", "METH_O", + ''' + PyObject *s = args; + Py_INCREF(s); + PyString_InternInPlace(&s); + return s; + ''' + ) + ]) + # This does not 
test much, but at least the refcounts are checked. + assert module.test_intern_inplace('s') == 's' + class TestString(BaseApiTest): def test_string_resize(self, space, api): py_str = new_empty_str(space, 10) diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -425,3 +425,32 @@ ''') obj = module.new_obj() raises(ZeroDivisionError, obj.__setitem__, 5, None) + + def test_tp_iter(self): + module = self.import_extension('foo', [ + ("tp_iter", "METH_O", + ''' + if (!args->ob_type->tp_iter) + { + PyErr_SetNone(PyExc_ValueError); + return NULL; + } + return args->ob_type->tp_iter(args); + ''' + ), + ("tp_iternext", "METH_O", + ''' + if (!args->ob_type->tp_iternext) + { + PyErr_SetNone(PyExc_ValueError); + return NULL; + } + return args->ob_type->tp_iternext(args); + ''' + ) + ]) + l = [1] + it = module.tp_iter(l) + assert type(it) is type(iter([])) + assert module.tp_iternext(it) == 1 + raises(StopIteration, module.tp_iternext, it) diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py --- a/pypy/module/cpyext/test/test_unicodeobject.py +++ b/pypy/module/cpyext/test/test_unicodeobject.py @@ -204,8 +204,18 @@ assert api.Py_UNICODE_ISSPACE(unichr(char)) assert not api.Py_UNICODE_ISSPACE(u'a') + assert api.Py_UNICODE_ISALPHA(u'a') + assert not api.Py_UNICODE_ISALPHA(u'0') + assert api.Py_UNICODE_ISALNUM(u'a') + assert api.Py_UNICODE_ISALNUM(u'0') + assert not api.Py_UNICODE_ISALNUM(u'+') + assert api.Py_UNICODE_ISDECIMAL(u'\u0660') assert not api.Py_UNICODE_ISDECIMAL(u'a') + assert api.Py_UNICODE_ISDIGIT(u'9') + assert not api.Py_UNICODE_ISDIGIT(u'@') + assert api.Py_UNICODE_ISNUMERIC(u'9') + assert not api.Py_UNICODE_ISNUMERIC(u'@') for char in [0x0a, 0x0d, 0x1c, 0x1d, 0x1e, 0x85, 0x2028, 0x2029]: assert api.Py_UNICODE_ISLINEBREAK(unichr(char)) @@ -216,6 +226,9 @@ assert not 
api.Py_UNICODE_ISUPPER(u'a') assert not api.Py_UNICODE_ISLOWER(u'�') assert api.Py_UNICODE_ISUPPER(u'�') + assert not api.Py_UNICODE_ISTITLE(u'A') + assert api.Py_UNICODE_ISTITLE( + u'\N{LATIN CAPITAL LETTER L WITH SMALL LETTER J}') def test_TOLOWER(self, space, api): assert api.Py_UNICODE_TOLOWER(u'�') == u'�' @@ -420,3 +433,27 @@ w_seq = space.wrap([u'a', u'b']) w_joined = api.PyUnicode_Join(w_sep, w_seq) assert space.unwrap(w_joined) == u'ab' + + def test_fromordinal(self, space, api): + w_char = api.PyUnicode_FromOrdinal(65) + assert space.unwrap(w_char) == u'A' + w_char = api.PyUnicode_FromOrdinal(0) + assert space.unwrap(w_char) == u'\0' + w_char = api.PyUnicode_FromOrdinal(0xFFFF) + assert space.unwrap(w_char) == u'\uFFFF' + + def test_replace(self, space, api): + w_str = space.wrap(u"abababab") + w_substr = space.wrap(u"a") + w_replstr = space.wrap(u"z") + assert u"zbzbabab" == space.unwrap( + api.PyUnicode_Replace(w_str, w_substr, w_replstr, 2)) + assert u"zbzbzbzb" == space.unwrap( + api.PyUnicode_Replace(w_str, w_substr, w_replstr, -1)) + + def test_tailmatch(self, space, api): + w_str = space.wrap(u"abcdef") + assert api.PyUnicode_Tailmatch(w_str, space.wrap("cde"), 2, 10, 1) == 1 + assert api.PyUnicode_Tailmatch(w_str, space.wrap("cde"), 1, 5, -1) == 1 + self.raises(space, api, TypeError, + api.PyUnicode_Tailmatch, w_str, space.wrap(3), 2, 10, 1) diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -12,7 +12,7 @@ make_typedescr, get_typedescr) from pypy.module.cpyext.stringobject import PyString_Check from pypy.module.sys.interp_encoding import setdefaultencoding -from pypy.objspace.std import unicodeobject, unicodetype +from pypy.objspace.std import unicodeobject, unicodetype, stringtype from pypy.rlib import runicode from pypy.tool.sourcetools import func_renamer import sys @@ -89,6 +89,11 @@ return unicodedb.isspace(ord(ch)) 
@cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) +def Py_UNICODE_ISALPHA(space, ch): + """Return 1 or 0 depending on whether ch is an alphabetic character.""" + return unicodedb.isalpha(ord(ch)) + + at cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) def Py_UNICODE_ISALNUM(space, ch): """Return 1 or 0 depending on whether ch is an alphanumeric character.""" return unicodedb.isalnum(ord(ch)) @@ -104,6 +109,16 @@ return unicodedb.isdecimal(ord(ch)) @cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) +def Py_UNICODE_ISDIGIT(space, ch): + """Return 1 or 0 depending on whether ch is a digit character.""" + return unicodedb.isdigit(ord(ch)) + + at cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) +def Py_UNICODE_ISNUMERIC(space, ch): + """Return 1 or 0 depending on whether ch is a numeric character.""" + return unicodedb.isnumeric(ord(ch)) + + at cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) def Py_UNICODE_ISLOWER(space, ch): """Return 1 or 0 depending on whether ch is a lowercase character.""" return unicodedb.islower(ord(ch)) @@ -113,6 +128,11 @@ """Return 1 or 0 depending on whether ch is an uppercase character.""" return unicodedb.isupper(ord(ch)) + at cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) +def Py_UNICODE_ISTITLE(space, ch): + """Return 1 or 0 depending on whether ch is a titlecase character.""" + return unicodedb.istitle(ord(ch)) + @cpython_api([Py_UNICODE], Py_UNICODE, error=CANNOT_FAIL) def Py_UNICODE_TOLOWER(space, ch): """Return the character ch converted to lower case.""" @@ -155,6 +175,11 @@ except KeyError: return -1.0 + at cpython_api([], Py_UNICODE, error=CANNOT_FAIL) +def PyUnicode_GetMax(space): + """Get the maximum ordinal for a Unicode character.""" + return unichr(runicode.MAXUNICODE) + @cpython_api([PyObject], rffi.CCHARP, error=CANNOT_FAIL) def PyUnicode_AS_DATA(space, ref): """Return a pointer to the internal buffer of the object. 
o has to be a @@ -395,6 +420,16 @@ w_str = space.wrap(rffi.charpsize2str(s, size)) return space.call_method(w_str, 'decode', space.wrap("utf-8")) + at cpython_api([rffi.INT_real], PyObject) +def PyUnicode_FromOrdinal(space, ordinal): + """Create a Unicode Object from the given Unicode code point ordinal. + + The ordinal must be in range(0x10000) on narrow Python builds + (UCS2), and range(0x110000) on wide builds (UCS4). A ValueError is + raised in case it is not.""" + w_ordinal = space.wrap(rffi.cast(lltype.Signed, ordinal)) + return space.call_function(space.builtin.get('unichr'), w_ordinal) + @cpython_api([PyObjectP, Py_ssize_t], rffi.INT_real, error=-1) def PyUnicode_Resize(space, ref, newsize): # XXX always create a new string so far @@ -538,6 +573,28 @@ @cpython_api([PyObject, PyObject], PyObject) def PyUnicode_Join(space, w_sep, w_seq): - """Join a sequence of strings using the given separator and return the resulting - Unicode string.""" + """Join a sequence of strings using the given separator and return + the resulting Unicode string.""" return space.call_method(w_sep, 'join', w_seq) + + at cpython_api([PyObject, PyObject, PyObject, Py_ssize_t], PyObject) +def PyUnicode_Replace(space, w_str, w_substr, w_replstr, maxcount): + """Replace at most maxcount occurrences of substr in str with replstr and + return the resulting Unicode object. maxcount == -1 means replace all + occurrences.""" + return space.call_method(w_str, "replace", w_substr, w_replstr, + space.wrap(maxcount)) + + at cpython_api([PyObject, PyObject, Py_ssize_t, Py_ssize_t, rffi.INT_real], + rffi.INT_real, error=-1) +def PyUnicode_Tailmatch(space, w_str, w_substr, start, end, direction): + """Return 1 if substr matches str[start:end] at the given tail end + (direction == -1 means to do a prefix match, direction == 1 a + suffix match), 0 otherwise. 
Return -1 if an error occurred.""" + str = space.unicode_w(w_str) + substr = space.unicode_w(w_substr) + if rffi.cast(lltype.Signed, direction) >= 0: + return stringtype.stringstartswith(str, substr, start, end) + else: + return stringtype.stringendswith(str, substr, start, end) + diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -1,10 +1,11 @@ from pypy.module.imp import importing from pypy.module._file.interp_file import W_File from pypy.rlib import streamio +from pypy.rlib.streamio import StreamErrors from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.module import Module from pypy.interpreter.gateway import unwrap_spec -from pypy.module._file.interp_stream import StreamErrors, wrap_streamerror +from pypy.interpreter.streamutil import wrap_streamerror def get_suffixes(space): diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -357,7 +357,7 @@ def test_cannot_write_pyc(self): import sys, os - p = os.path.join(sys.path[-1], 'readonly') + p = os.path.join(sys.path[0], 'readonly') try: os.chmod(p, 0555) except: diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -85,10 +85,12 @@ ("arccos", "arccos"), ("arcsin", "arcsin"), ("arctan", "arctan"), + ("arccosh", "arccosh"), ("arcsinh", "arcsinh"), ("arctanh", "arctanh"), ("copysign", "copysign"), ("cos", "cos"), + ("cosh", "cosh"), ("divide", "divide"), ("true_divide", "true_divide"), ("equal", "equal"), @@ -108,9 +110,11 @@ ("reciprocal", "reciprocal"), ("sign", "sign"), ("sin", "sin"), + ("sinh", "sinh"), ("subtract", "subtract"), ('sqrt', 'sqrt'), ("tan", "tan"), + ("tanh", "tanh"), ('bitwise_and', 'bitwise_and'), ('bitwise_or', 'bitwise_or'), 
('bitwise_xor', 'bitwise_xor'), diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -42,10 +42,10 @@ return self._get_dtype(space) def descr_str(self, space): - return self.descr_repr(space) + return space.wrap(self.get_dtype(space).itemtype.str_format(self)) - def descr_repr(self, space): - return space.wrap(self.get_dtype(space).itemtype.str_format(self)) + def descr_format(self, space, w_spec): + return space.format(self.item(space), w_spec) def descr_int(self, space): box = self.convert_to(W_LongBox._get_dtype(space)) @@ -204,6 +204,10 @@ def get_dtype(self, space): return self.arr.dtype + at unwrap_spec(self=W_GenericBox) +def descr_index(space, self): + return space.index(self.item(space)) + class W_VoidBox(W_FlexibleBox): @unwrap_spec(item=str) def descr_getitem(self, space, item): @@ -255,7 +259,8 @@ __new__ = interp2app(W_GenericBox.descr__new__.im_func), __str__ = interp2app(W_GenericBox.descr_str), - __repr__ = interp2app(W_GenericBox.descr_repr), + __repr__ = interp2app(W_GenericBox.descr_str), + __format__ = interp2app(W_GenericBox.descr_format), __int__ = interp2app(W_GenericBox.descr_int), __float__ = interp2app(W_GenericBox.descr_float), __nonzero__ = interp2app(W_GenericBox.descr_nonzero), @@ -306,6 +311,8 @@ W_BoolBox.typedef = TypeDef("bool_", W_GenericBox.typedef, __module__ = "numpypy", __new__ = interp2app(W_BoolBox.descr__new__.im_func), + + __index__ = interp2app(descr_index), ) W_NumberBox.typedef = TypeDef("number", W_GenericBox.typedef, @@ -327,36 +334,43 @@ W_Int8Box.typedef = TypeDef("int8", W_SignedIntegerBox.typedef, __module__ = "numpypy", __new__ = interp2app(W_Int8Box.descr__new__.im_func), + __index__ = interp2app(descr_index), ) W_UInt8Box.typedef = TypeDef("uint8", W_UnsignedIntegerBox.typedef, __module__ = "numpypy", __new__ = interp2app(W_UInt8Box.descr__new__.im_func), + __index__ = 
interp2app(descr_index), ) W_Int16Box.typedef = TypeDef("int16", W_SignedIntegerBox.typedef, __module__ = "numpypy", __new__ = interp2app(W_Int16Box.descr__new__.im_func), + __index__ = interp2app(descr_index), ) W_UInt16Box.typedef = TypeDef("uint16", W_UnsignedIntegerBox.typedef, __module__ = "numpypy", __new__ = interp2app(W_UInt16Box.descr__new__.im_func), + __index__ = interp2app(descr_index), ) W_Int32Box.typedef = TypeDef("int32", (W_SignedIntegerBox.typedef,) + MIXIN_32, __module__ = "numpypy", __new__ = interp2app(W_Int32Box.descr__new__.im_func), + __index__ = interp2app(descr_index), ) W_UInt32Box.typedef = TypeDef("uint32", W_UnsignedIntegerBox.typedef, __module__ = "numpypy", __new__ = interp2app(W_UInt32Box.descr__new__.im_func), + __index__ = interp2app(descr_index), ) W_Int64Box.typedef = TypeDef("int64", (W_SignedIntegerBox.typedef,) + MIXIN_64, __module__ = "numpypy", __new__ = interp2app(W_Int64Box.descr__new__.im_func), + __index__ = interp2app(descr_index), ) if LONG_BIT == 32: @@ -369,6 +383,7 @@ W_UInt64Box.typedef = TypeDef("uint64", W_UnsignedIntegerBox.typedef, __module__ = "numpypy", __new__ = interp2app(W_UInt64Box.descr__new__.im_func), + __index__ = interp2app(descr_index), ) W_InexactBox.typedef = TypeDef("inexact", W_NumberBox.typedef, diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -776,8 +776,6 @@ """ Intermediate class for performing binary operations. 
""" - _immutable_fields_ = ['left', 'right'] - def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, left, right): VirtualArray.__init__(self, name, shape, res_dtype) self.ufunc = ufunc @@ -867,8 +865,6 @@ self.right.create_sig(), done_func) class AxisReduce(Call2): - _immutable_fields_ = ['left', 'right'] - def __init__(self, ufunc, name, identity, shape, dtype, left, right, dim): Call2.__init__(self, ufunc, name, shape, dtype, dtype, left, right) diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -3,7 +3,7 @@ from pypy.rpython.lltypesystem import lltype, rffi from pypy.module.micronumpy import interp_dtype from pypy.objspace.std.strutil import strip_spaces - +from pypy.rlib import jit FLOAT_SIZE = rffi.sizeof(lltype.Float) @@ -74,17 +74,22 @@ raise OperationError(space.w_ValueError, space.wrap( "string is smaller than requested size")) - a = W_NDimArray([count], dtype=dtype) - ai = a.create_iter() - for i in range(count): - start = i*itemsize - assert start >= 0 - val = dtype.itemtype.runpack_str(s[start:start + itemsize]) - a.dtype.setitem(a, ai.offset, val) - ai = ai.next(1) - + a = W_NDimArray(count, [count], dtype=dtype) + fromstring_loop(a, count, dtype, itemsize, s) return space.wrap(a) +fromstring_driver = jit.JitDriver(greens=[], reds=['count', 'i', 'itemsize', + 'dtype', 's', 'a']) + +def fromstring_loop(a, count, dtype, itemsize, s): + i = 0 + while i < count: + fromstring_driver.jit_merge_point(a=a, count=count, dtype=dtype, + itemsize=itemsize, s=s, i=i) + val = dtype.itemtype.runpack_str(s[i*itemsize:i*itemsize + itemsize]) + a.dtype.setitem(a.storage, i, val) + i += itemsize + @unwrap_spec(s=str, count=int, sep=str) def fromstring(space, s, w_dtype=None, count=-1, sep=''): dtype = space.interp_w(interp_dtype.W_Dtype, diff --git a/pypy/module/micronumpy/interp_ufuncs.py 
b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -435,7 +435,11 @@ ("arcsin", "arcsin", 1, {"promote_to_float": True}), ("arccos", "arccos", 1, {"promote_to_float": True}), ("arctan", "arctan", 1, {"promote_to_float": True}), + ("sinh", "sinh", 1, {"promote_to_float": True}), + ("cosh", "cosh", 1, {"promote_to_float": True}), + ("tanh", "tanh", 1, {"promote_to_float": True}), ("arcsinh", "arcsinh", 1, {"promote_to_float": True}), + ("arccosh", "arccosh", 1, {"promote_to_float": True}), ("arctanh", "arctanh", 1, {"promote_to_float": True}), ]: self.add_ufunc(space, *ufunc_def) diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -390,6 +390,8 @@ assert type(a[1]) is numpy.float64 assert numpy.dtype(float).type is numpy.float64 + assert "{:3f}".format(numpy.float64(3)) == "3.000000" + assert numpy.float64(2.0) == 2.0 assert numpy.float64('23.4') == numpy.float64(23.4) raises(ValueError, numpy.float64, '23.2df') @@ -406,9 +408,9 @@ assert b.m() == 12 def test_long_as_index(self): - skip("waiting for removal of multimethods of __index__") - from _numpypy import int_ + from _numpypy import int_, float64 assert (1, 2, 3)[int_(1)] == 2 + raises(TypeError, lambda: (1, 2, 3)[float64(1)]) def test_int(self): import sys diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -310,6 +310,33 @@ b = arctan(a) assert math.isnan(b[0]) + def test_sinh(self): + import math + from _numpypy import array, sinh + + a = array([-1, 0, 1, float('inf'), float('-inf')]) + b = sinh(a) + for i in range(len(a)): + assert b[i] == math.sinh(a[i]) + + def test_cosh(self): + import math + from _numpypy import array, cosh + 
+ a = array([-1, 0, 1, float('inf'), float('-inf')]) + b = cosh(a) + for i in range(len(a)): + assert b[i] == math.cosh(a[i]) + + def test_tanh(self): + import math + from _numpypy import array, tanh + + a = array([-1, 0, 1, float('inf'), float('-inf')]) + b = tanh(a) + for i in range(len(a)): + assert b[i] == math.tanh(a[i]) + def test_arcsinh(self): import math from _numpypy import arcsinh @@ -318,6 +345,15 @@ assert math.asinh(v) == arcsinh(v) assert math.isnan(arcsinh(float("nan"))) + def test_arccosh(self): + import math + from _numpypy import arccosh + + for v in [1.0, 1.1, 2]: + assert math.acosh(v) == arccosh(v) + for v in [-1.0, 0, .99]: + assert math.isnan(arccosh(v)) + def test_arctanh(self): import math from _numpypy import arctanh diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -479,38 +479,3 @@ 'int_sub': 3, 'jump': 1, 'setinteriorfield_raw': 1}) - - -class TestNumpyOld(LLJitMixin): - def setup_class(cls): - py.test.skip("old") - from pypy.module.micronumpy.compile import FakeSpace - from pypy.module.micronumpy.interp_dtype import get_dtype_cache - - cls.space = FakeSpace() - cls.float64_dtype = get_dtype_cache(cls.space).w_float64dtype - - def test_int32_sum(self): - py.test.skip("pypy/jit/backend/llimpl.py needs to be changed to " - "deal correctly with int dtypes for this test to " - "work. 
skip for now until someone feels up to the task") - space = self.space - float64_dtype = self.float64_dtype - int32_dtype = self.int32_dtype - - def f(n): - if NonConstant(False): - dtype = float64_dtype - else: - dtype = int32_dtype - ar = W_NDimArray([n], dtype=dtype) - i = 0 - while i < n: - ar.get_concrete().setitem(i, int32_dtype.box(7)) - i += 1 - v = ar.descr_add(space, ar).descr_sum(space) - assert isinstance(v, IntObject) - return v.intval - - result = self.meta_interp(f, [5], listops=True, backendopt=True) - assert result == f(5) diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -574,10 +574,28 @@ return math.atan(v) @simple_unary_op + def sinh(self, v): + return math.sinh(v) + + @simple_unary_op + def cosh(self, v): + return math.cosh(v) + + @simple_unary_op + def tanh(self, v): + return math.tanh(v) + + @simple_unary_op def arcsinh(self, v): return math.asinh(v) @simple_unary_op + def arccosh(self, v): + if v < 1.0: + return rfloat.NAN + return math.acosh(v) + + @simple_unary_op def arctanh(self, v): if v == 1.0 or v == -1.0: return math.copysign(rfloat.INFINITY, v) diff --git a/pypy/module/oracle/interp_error.py b/pypy/module/oracle/interp_error.py --- a/pypy/module/oracle/interp_error.py +++ b/pypy/module/oracle/interp_error.py @@ -72,7 +72,7 @@ get(space).w_InternalError, space.wrap("No Oracle error?")) - self.code = codeptr[0] + self.code = rffi.cast(lltype.Signed, codeptr[0]) self.w_message = config.w_string(space, textbuf) finally: lltype.free(codeptr, flavor='raw') diff --git a/pypy/module/oracle/interp_variable.py b/pypy/module/oracle/interp_variable.py --- a/pypy/module/oracle/interp_variable.py +++ b/pypy/module/oracle/interp_variable.py @@ -359,14 +359,14 @@ # Verifies that truncation or other problems did not take place on # retrieve. 
if self.isVariableLength: - if rffi.cast(lltype.Signed, self.returnCode[pos]) != 0: + error_code = rffi.cast(lltype.Signed, self.returnCode[pos]) + if error_code != 0: error = W_Error(space, self.environment, "Variable_VerifyFetch()", 0) - error.code = self.returnCode[pos] + error.code = error_code error.message = space.wrap( "column at array pos %d fetched with error: %d" % - (pos, - rffi.cast(lltype.Signed, self.returnCode[pos]))) + (pos, error_code)) w_error = get(space).w_DatabaseError raise OperationError(get(space).w_DatabaseError, diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -60,6 +60,9 @@ stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = pipe.communicate() + if getattr(pipe, 'returncode', 0) < 0: + raise IOError("subprocess was killed by signal %d" % ( + pipe.returncode,)) if stderr.startswith('SKIP:'): py.test.skip(stderr) if stderr.startswith('debug_alloc.h:'): # lldebug builds diff --git a/pypy/module/pypyjit/test_pypy_c/test_alloc.py b/pypy/module/pypyjit/test_pypy_c/test_alloc.py new file mode 100644 --- /dev/null +++ b/pypy/module/pypyjit/test_pypy_c/test_alloc.py @@ -0,0 +1,26 @@ +import py, sys +from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC + +class TestAlloc(BaseTestPyPyC): + + SIZES = dict.fromkeys([2 ** n for n in range(26)] + # up to 32MB + [2 ** n - 1 for n in range(26)]) + + def test_newstr_constant_size(self): + for size in TestAlloc.SIZES: + yield self.newstr_constant_size, size + + def newstr_constant_size(self, size): + src = """if 1: + N = %(size)d + part_a = 'a' * N + part_b = 'b' * N + for i in xrange(20): + ao = '%%s%%s' %% (part_a, part_b) + def main(): + return 42 +""" % {'size': size} + log = self.run(src, [], threshold=10) + assert log.result == 42 + loop, = log.loops_by_filename(self.filepath) + # assert did not 
crash diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -201,3 +201,28 @@ loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("compare", "") # optimized away + def test_super(self): + def main(): + class A(object): + def m(self, x): + return x + 1 + class B(A): + def m(self, x): + return super(B, self).m(x) + i = 0 + while i < 300: + i = B().m(i) + return i + + log = self.run(main, []) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i78 = int_lt(i72, 300) + guard_true(i78, descr=...) + guard_not_invalidated(descr=...) + i79 = force_token() + i80 = force_token() + i81 = int_add(i72, 1) + --TICK-- + jump(..., descr=...) + """) diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_fastpath.py b/pypy/module/test_lib_pypy/ctypes_tests/test_fastpath.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_fastpath.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_fastpath.py @@ -97,6 +97,16 @@ tf_b.errcheck = errcheck assert tf_b(-126) == 'hello' + def test_array_to_ptr(self): + ARRAY = c_int * 8 + func = dll._testfunc_ai8 + func.restype = POINTER(c_int) + func.argtypes = [ARRAY] + array = ARRAY(1, 2, 3, 4, 5, 6, 7, 8) + ptr = func(array) + assert ptr[0] == 1 + assert ptr[7] == 8 + class TestFallbackToSlowpath(BaseCTypesTestChecker): diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_prototypes.py b/pypy/module/test_lib_pypy/ctypes_tests/test_prototypes.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_prototypes.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_prototypes.py @@ -246,6 +246,14 @@ def func(): pass CFUNCTYPE(None, c_int * 3)(func) + def test_array_to_ptr_wrongtype(self): + ARRAY = c_byte * 8 + func = testdll._testfunc_ai8 + func.restype = POINTER(c_int) + func.argtypes = [c_int * 8] + array = ARRAY(1, 2, 3, 4, 5, 6, 7, 8) + 
py.test.raises(ArgumentError, "func(array)") + ################################################################ if __name__ == '__main__': diff --git a/pypy/module/test_lib_pypy/test_collections.py b/pypy/module/test_lib_pypy/test_collections.py --- a/pypy/module/test_lib_pypy/test_collections.py +++ b/pypy/module/test_lib_pypy/test_collections.py @@ -6,7 +6,7 @@ from pypy.conftest import gettestobjspace -class AppTestcStringIO: +class AppTestCollections: def test_copy(self): import _collections def f(): diff --git a/pypy/module/test_lib_pypy/test_datetime.py b/pypy/module/test_lib_pypy/test_datetime.py --- a/pypy/module/test_lib_pypy/test_datetime.py +++ b/pypy/module/test_lib_pypy/test_datetime.py @@ -3,7 +3,7 @@ import py import time -import datetime +from lib_pypy import datetime import copy import os @@ -43,4 +43,4 @@ dt = datetime.datetime.utcnow() assert type(dt.microsecond) is int - copy.copy(dt) \ No newline at end of file + copy.copy(dt) diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -326,4 +326,5 @@ return w_some_obj() FakeObjSpace.sys = FakeModule() FakeObjSpace.sys.filesystemencoding = 'foobar' +FakeObjSpace.sys.defaultencoding = 'ascii' FakeObjSpace.builtin = FakeModule() diff --git a/pypy/objspace/flow/flowcontext.py b/pypy/objspace/flow/flowcontext.py --- a/pypy/objspace/flow/flowcontext.py +++ b/pypy/objspace/flow/flowcontext.py @@ -410,7 +410,7 @@ w_new = Constant(newvalue) f = self.crnt_frame stack_items_w = f.locals_stack_w - for i in range(f.valuestackdepth-1, f.nlocals-1, -1): + for i in range(f.valuestackdepth-1, f.pycode.co_nlocals-1, -1): w_v = stack_items_w[i] if isinstance(w_v, Constant): if w_v.value is oldvalue: diff --git a/pypy/objspace/flow/test/test_framestate.py b/pypy/objspace/flow/test/test_framestate.py --- a/pypy/objspace/flow/test/test_framestate.py +++ b/pypy/objspace/flow/test/test_framestate.py @@ -25,7 +25,7 @@ dummy 
= Constant(None) #dummy.dummy = True arg_list = ([Variable() for i in range(formalargcount)] + - [dummy] * (frame.nlocals - formalargcount)) + [dummy] * (frame.pycode.co_nlocals - formalargcount)) frame.setfastscope(arg_list) return frame @@ -42,7 +42,7 @@ def test_neq_hacked_framestate(self): frame = self.getframe(self.func_simple) fs1 = FrameState(frame) - frame.locals_stack_w[frame.nlocals-1] = Variable() + frame.locals_stack_w[frame.pycode.co_nlocals-1] = Variable() fs2 = FrameState(frame) assert fs1 != fs2 @@ -55,7 +55,7 @@ def test_union_on_hacked_framestates(self): frame = self.getframe(self.func_simple) fs1 = FrameState(frame) - frame.locals_stack_w[frame.nlocals-1] = Variable() + frame.locals_stack_w[frame.pycode.co_nlocals-1] = Variable() fs2 = FrameState(frame) assert fs1.union(fs2) == fs2 # fs2 is more general assert fs2.union(fs1) == fs2 # fs2 is more general @@ -63,7 +63,7 @@ def test_restore_frame(self): frame = self.getframe(self.func_simple) fs1 = FrameState(frame) - frame.locals_stack_w[frame.nlocals-1] = Variable() + frame.locals_stack_w[frame.pycode.co_nlocals-1] = Variable() fs1.restoreframe(frame) assert fs1 == FrameState(frame) @@ -82,7 +82,7 @@ def test_getoutputargs(self): frame = self.getframe(self.func_simple) fs1 = FrameState(frame) - frame.locals_stack_w[frame.nlocals-1] = Variable() + frame.locals_stack_w[frame.pycode.co_nlocals-1] = Variable() fs2 = FrameState(frame) outputargs = fs1.getoutputargs(fs2) # 'x' -> 'x' is a Variable @@ -92,16 +92,16 @@ def test_union_different_constants(self): frame = self.getframe(self.func_simple) fs1 = FrameState(frame) - frame.locals_stack_w[frame.nlocals-1] = Constant(42) + frame.locals_stack_w[frame.pycode.co_nlocals-1] = Constant(42) fs2 = FrameState(frame) fs3 = fs1.union(fs2) fs3.restoreframe(frame) - assert isinstance(frame.locals_stack_w[frame.nlocals-1], Variable) - # ^^^ generalized + assert isinstance(frame.locals_stack_w[frame.pycode.co_nlocals-1], + Variable) # generalized def 
test_union_spectag(self): frame = self.getframe(self.func_simple) fs1 = FrameState(frame) - frame.locals_stack_w[frame.nlocals-1] = Constant(SpecTag()) + frame.locals_stack_w[frame.pycode.co_nlocals-1] = Constant(SpecTag()) fs2 = FrameState(frame) assert fs1.union(fs2) is None # UnionError diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -142,6 +142,17 @@ else: return result + def popitem(self, w_dict): + # this is a bad implementation: if we call popitem() repeatedly, + # it ends up taking n**2 time, because the next() calls below + # will take longer and longer. But all interesting strategies + # provide a better one. + space = self.space + iterator = self.iter(w_dict) + w_key, w_value = iterator.next() + self.delitem(w_dict, w_key) + return (w_key, w_value) + def clear(self, w_dict): strategy = self.space.fromcache(EmptyDictStrategy) storage = strategy.get_empty_storage() diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py --- a/pypy/objspace/std/dictproxyobject.py +++ b/pypy/objspace/std/dictproxyobject.py @@ -3,7 +3,7 @@ from pypy.objspace.std.dictmultiobject import W_DictMultiObject, IteratorImplementation from pypy.objspace.std.dictmultiobject import DictStrategy from pypy.objspace.std.typeobject import unwrap_cell -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, operationerrfmt from pypy.rlib import rerased @@ -44,7 +44,8 @@ raise if not w_type.is_cpytype(): raise - # xxx obscure workaround: allow cpyext to write to type->tp_dict. + # xxx obscure workaround: allow cpyext to write to type->tp_dict + # xxx even in the case of a builtin type. # xxx like CPython, we assume that this is only done early after # xxx the type is created, and we don't invalidate any cache. 
w_type.dict_w[key] = w_value @@ -86,8 +87,14 @@ for (key, w_value) in self.unerase(w_dict.dstorage).dict_w.iteritems()] def clear(self, w_dict): - self.unerase(w_dict.dstorage).dict_w.clear() - self.unerase(w_dict.dstorage).mutated(None) + space = self.space + w_type = self.unerase(w_dict.dstorage) + if (not space.config.objspace.std.mutable_builtintypes + and not w_type.is_heaptype()): + msg = "can't clear dictionary of type '%s'" + raise operationerrfmt(space.w_TypeError, msg, w_type.name) + w_type.dict_w.clear() + w_type.mutated(None) class DictProxyIteratorImplementation(IteratorImplementation): def __init__(self, space, strategy, dictimplementation): diff --git a/pypy/objspace/std/test/test_dictproxy.py b/pypy/objspace/std/test/test_dictproxy.py --- a/pypy/objspace/std/test/test_dictproxy.py +++ b/pypy/objspace/std/test/test_dictproxy.py @@ -22,6 +22,9 @@ assert NotEmpty.string == 1 raises(TypeError, 'NotEmpty.__dict__.setdefault(15, 1)') + key, value = NotEmpty.__dict__.popitem() + assert (key == 'a' and value == 1) or (key == 'b' and value == 4) + def test_dictproxyeq(self): class a(object): pass @@ -43,6 +46,11 @@ assert s1 == s2 assert s1.startswith('{') and s1.endswith('}') + def test_immutable_dict_on_builtin_type(self): + raises(TypeError, "int.__dict__['a'] = 1") + raises(TypeError, int.__dict__.popitem) + raises(TypeError, int.__dict__.clear) + class AppTestUserObjectMethodCache(AppTestUserObject): def setup_class(cls): cls.space = gettestobjspace( diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py --- a/pypy/objspace/std/test/test_typeobject.py +++ b/pypy/objspace/std/test/test_typeobject.py @@ -993,7 +993,9 @@ raises(TypeError, setattr, list, 'append', 42) raises(TypeError, setattr, list, 'foobar', 42) raises(TypeError, delattr, dict, 'keys') - + raises(TypeError, 'int.__dict__["a"] = 1') + raises(TypeError, 'int.__dict__.clear()') + def test_nontype_in_mro(self): class OldStyle: pass diff --git 
a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -103,6 +103,7 @@ 'terminator', '_version_tag?', 'name?', + 'mro_w?[*]', ] # for config.objspace.std.getattributeshortcut @@ -345,9 +346,9 @@ return w_self._lookup_where(name) + @unroll_safe def lookup_starting_at(w_self, w_starttype, name): space = w_self.space - # XXX Optimize this with method cache look = False for w_class in w_self.mro_w: if w_class is w_starttype: diff --git a/pypy/rlib/debug.py b/pypy/rlib/debug.py --- a/pypy/rlib/debug.py +++ b/pypy/rlib/debug.py @@ -19,14 +19,24 @@ hop.exception_cannot_occur() hop.genop('debug_assert', vlist) -def fatalerror(msg, traceback=False): +def fatalerror(msg): + # print the RPython traceback and abort with a fatal error from pypy.rpython.lltypesystem import lltype from pypy.rpython.lltypesystem.lloperation import llop - if traceback: - llop.debug_print_traceback(lltype.Void) + llop.debug_print_traceback(lltype.Void) llop.debug_fatalerror(lltype.Void, msg) fatalerror._dont_inline_ = True -fatalerror._annspecialcase_ = 'specialize:arg(1)' +fatalerror._jit_look_inside_ = False +fatalerror._annenforceargs_ = [str] + +def fatalerror_notb(msg): + # a variant of fatalerror() that doesn't print the RPython traceback + from pypy.rpython.lltypesystem import lltype + from pypy.rpython.lltypesystem.lloperation import llop + llop.debug_fatalerror(lltype.Void, msg) +fatalerror_notb._dont_inline_ = True +fatalerror_notb._jit_look_inside_ = False +fatalerror_notb._annenforceargs_ = [str] class DebugLog(list): diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -392,6 +392,9 @@ class JitHintError(Exception): """Inconsistency in the JIT hints.""" +ENABLE_ALL_OPTS = ( + 'intbounds:rewrite:virtualize:string:earlyforce:pure:heap:ffi:unroll') + PARAMETER_DOCS = { 'threshold': 'number of times a loop has to run for it to become hot', 
'function_threshold': 'number of times a function must run for it to become traced from start', @@ -402,7 +405,8 @@ 'retrace_limit': 'how many times we can try retracing before giving up', 'max_retrace_guards': 'number of extra guards a retrace can cause', 'max_unroll_loops': 'number of extra unrollings a loop can cause', - 'enable_opts': 'optimizations to enable or all, INTERNAL USE ONLY' + 'enable_opts': 'INTERNAL USE ONLY: optimizations to enable, or all = %s' % + ENABLE_ALL_OPTS, } PARAMETERS = {'threshold': 1039, # just above 1024, prime @@ -450,6 +454,7 @@ assert v in self.reds self._alllivevars = dict.fromkeys( [name for name in self.greens + self.reds if '.' not in name]) + self._heuristic_order = {} # check if 'reds' and 'greens' are ordered self._make_extregistryentries() self.get_jitcell_at = get_jitcell_at self.set_jitcell_at = set_jitcell_at @@ -461,13 +466,61 @@ def _freeze_(self): return True + def _check_arguments(self, livevars): + assert dict.fromkeys(livevars) == self._alllivevars + # check heuristically that 'reds' and 'greens' are ordered as + # the JIT will need them to be: first INTs, then REFs, then + # FLOATs. 
+ if len(self._heuristic_order) < len(livevars): + from pypy.rlib.rarithmetic import (r_singlefloat, r_longlong, + r_ulonglong, r_uint) + added = False + for var, value in livevars.items(): + if var not in self._heuristic_order: + if (r_ulonglong is not r_uint and + isinstance(value, (r_longlong, r_ulonglong))): + assert 0, ("should not pass a r_longlong argument for " + "now, because on 32-bit machines it needs " + "to be ordered as a FLOAT but on 64-bit " + "machines as an INT") + elif isinstance(value, (int, long, r_singlefloat)): + kind = '1:INT' + elif isinstance(value, float): + kind = '3:FLOAT' + elif isinstance(value, (str, unicode)) and len(value) != 1: + kind = '2:REF' + elif isinstance(value, (list, dict)): + kind = '2:REF' + elif (hasattr(value, '__class__') + and value.__class__.__module__ != '__builtin__'): + if hasattr(value, '_freeze_'): + continue # value._freeze_() is better not called + elif getattr(value, '_alloc_flavor_', 'gc') == 'gc': + kind = '2:REF' + else: + kind = '1:INT' + else: + continue + self._heuristic_order[var] = kind + added = True + if added: + for color in ('reds', 'greens'): + lst = getattr(self, color) + allkinds = [self._heuristic_order.get(name, '?') + for name in lst] + kinds = [k for k in allkinds if k != '?'] + assert kinds == sorted(kinds), ( + "bad order of %s variables in the jitdriver: " + "must be INTs, REFs, FLOATs; got %r" % + (color, allkinds)) + def jit_merge_point(_self, **livevars): # special-cased by ExtRegistryEntry - assert dict.fromkeys(livevars) == _self._alllivevars + _self._check_arguments(livevars) def can_enter_jit(_self, **livevars): # special-cased by ExtRegistryEntry - assert dict.fromkeys(livevars) == _self._alllivevars + _self._check_arguments(livevars) def loop_header(self): # special-cased by ExtRegistryEntry diff --git a/pypy/rlib/objectmodel.py b/pypy/rlib/objectmodel.py --- a/pypy/rlib/objectmodel.py +++ b/pypy/rlib/objectmodel.py @@ -23,9 +23,11 @@ class _Specialize(object): def memo(self): 
- """ Specialize functions based on argument values. All arguments has - to be constant at the compile time. The whole function call is replaced - by a call result then. + """ Specialize the function based on argument values. All arguments + have to be either constants or PBCs (i.e. instances of classes with a + _freeze_ method returning True). The function call is replaced by + just its result, or in case several PBCs are used, by some fast + look-up of the result. """ def decorated_func(func): func._annspecialcase_ = 'specialize:memo' @@ -33,8 +35,8 @@ return decorated_func def arg(self, *args): - """ Specialize function based on values of given positions of arguments. - They must be compile-time constants in order to work. + """ Specialize the function based on the values of given positions + of arguments. They must be compile-time constants in order to work. There will be a copy of provided function for each combination of given arguments on positions in args (that can lead to @@ -82,8 +84,7 @@ return decorated_func def ll_and_arg(self, *args): - """ This is like ll(), but instead of specializing on all arguments, - specializes on only the arguments at the given positions + """ This is like ll(), and additionally like arg(...). 
""" def decorated_func(func): func._annspecialcase_ = 'specialize:ll_and_arg' + self._wrap(args) diff --git a/pypy/rlib/test/test_jit.py b/pypy/rlib/test/test_jit.py --- a/pypy/rlib/test/test_jit.py +++ b/pypy/rlib/test/test_jit.py @@ -2,6 +2,7 @@ from pypy.conftest import option from pypy.rlib.jit import hint, we_are_jitted, JitDriver, elidable_promote from pypy.rlib.jit import JitHintError, oopspec, isconstant +from pypy.rlib.rarithmetic import r_uint from pypy.translator.translator import TranslationContext, graphof from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin from pypy.rpython.lltypesystem import lltype @@ -146,6 +147,43 @@ res = self.interpret(f, [-234]) assert res == 1 + def test_argument_order_ok(self): + myjitdriver = JitDriver(greens=['i1', 'r1', 'f1'], reds=[]) + class A(object): + pass + myjitdriver.jit_merge_point(i1=42, r1=A(), f1=3.5) + # assert did not raise + + def test_argument_order_wrong(self): + myjitdriver = JitDriver(greens=['r1', 'i1', 'f1'], reds=[]) + class A(object): + pass + e = raises(AssertionError, + myjitdriver.jit_merge_point, i1=42, r1=A(), f1=3.5) + + def test_argument_order_more_precision_later(self): + myjitdriver = JitDriver(greens=['r1', 'i1', 'r2', 'f1'], reds=[]) + class A(object): + pass + myjitdriver.jit_merge_point(i1=42, r1=None, r2=None, f1=3.5) + e = raises(AssertionError, + myjitdriver.jit_merge_point, i1=42, r1=A(), r2=None, f1=3.5) + assert "got ['2:REF', '1:INT', '?', '3:FLOAT']" in repr(e.value) + + def test_argument_order_more_precision_later_2(self): + myjitdriver = JitDriver(greens=['r1', 'i1', 'r2', 'f1'], reds=[]) + class A(object): + pass + myjitdriver.jit_merge_point(i1=42, r1=None, r2=A(), f1=3.5) + e = raises(AssertionError, + myjitdriver.jit_merge_point, i1=42, r1=A(), r2=None, f1=3.5) + assert "got ['2:REF', '1:INT', '2:REF', '3:FLOAT']" in repr(e.value) + + def test_argument_order_accept_r_uint(self): + # this used to fail on 64-bit, because r_uint == r_ulonglong + 
myjitdriver = JitDriver(greens=['i1'], reds=[]) + myjitdriver.jit_merge_point(i1=r_uint(42)) + class TestJITLLtype(BaseTestJIT, LLRtypeMixin): pass diff --git a/pypy/rpython/lltypesystem/rlist.py b/pypy/rpython/lltypesystem/rlist.py --- a/pypy/rpython/lltypesystem/rlist.py +++ b/pypy/rpython/lltypesystem/rlist.py @@ -392,7 +392,11 @@ ('list', r_list.lowleveltype), ('index', Signed))) self.ll_listiter = ll_listiter - self.ll_listnext = ll_listnext + if (isinstance(r_list, FixedSizeListRepr) + and not r_list.listitem.mutated): + self.ll_listnext = ll_listnext_foldable + else: + self.ll_listnext = ll_listnext self.ll_getnextindex = ll_getnextindex def ll_listiter(ITERPTR, lst): @@ -409,5 +413,14 @@ iter.index = index + 1 # cannot overflow because index < l.length return l.ll_getitem_fast(index) +def ll_listnext_foldable(iter): + from pypy.rpython.rlist import ll_getitem_foldable_nonneg + l = iter.list + index = iter.index + if index >= l.ll_length(): + raise StopIteration + iter.index = index + 1 # cannot overflow because index < l.length + return ll_getitem_foldable_nonneg(l, index) + def ll_getnextindex(iter): return iter.index diff --git a/pypy/rpython/memory/gc/generation.py b/pypy/rpython/memory/gc/generation.py --- a/pypy/rpython/memory/gc/generation.py +++ b/pypy/rpython/memory/gc/generation.py @@ -41,8 +41,8 @@ # the following values override the default arguments of __init__ when # translating to a real backend. - TRANSLATION_PARAMS = {'space_size': 8*1024*1024, # XXX adjust - 'nursery_size': 896*1024, + TRANSLATION_PARAMS = {'space_size': 8*1024*1024, # 8 MB + 'nursery_size': 3*1024*1024, # 3 MB 'min_nursery_size': 48*1024, 'auto_nursery_size': True} @@ -92,8 +92,9 @@ # the GC is fully setup now. The rest can make use of it. if self.auto_nursery_size: newsize = nursery_size_from_env() - if newsize <= 0: - newsize = env.estimate_best_nursery_size() + #if newsize <= 0: + # ---disabled--- just use the default value. 
+ # newsize = env.estimate_best_nursery_size() if newsize > 0: self.set_nursery_size(newsize) diff --git a/pypy/rpython/memory/gc/minimark.py b/pypy/rpython/memory/gc/minimark.py --- a/pypy/rpython/memory/gc/minimark.py +++ b/pypy/rpython/memory/gc/minimark.py @@ -608,6 +608,11 @@ specified as 0 if the object is not varsized. The returned object is fully initialized and zero-filled.""" # + # Here we really need a valid 'typeid', not 0 (as the JIT might + # try to send us if there is still a bug). + ll_assert(bool(self.combine(typeid, 0)), + "external_malloc: typeid == 0") + # # Compute the total size, carefully checking for overflows. size_gc_header = self.gcheaderbuilder.size_gc_header nonvarsize = size_gc_header + self.fixed_size(typeid) diff --git a/pypy/rpython/memory/gctransform/asmgcroot.py b/pypy/rpython/memory/gctransform/asmgcroot.py --- a/pypy/rpython/memory/gctransform/asmgcroot.py +++ b/pypy/rpython/memory/gctransform/asmgcroot.py @@ -442,6 +442,8 @@ ll_assert(location >= 0, "negative location") kind = location & LOC_MASK offset = location & ~ LOC_MASK + if IS_64_BITS: + offset <<= 1 if kind == LOC_REG: # register if location == LOC_NOWHERE: return llmemory.NULL diff --git a/pypy/rpython/test/test_rlist.py b/pypy/rpython/test/test_rlist.py --- a/pypy/rpython/test/test_rlist.py +++ b/pypy/rpython/test/test_rlist.py @@ -8,6 +8,7 @@ from pypy.rpython.rlist import * from pypy.rpython.lltypesystem.rlist import ListRepr, FixedSizeListRepr, ll_newlist, ll_fixed_newlist from pypy.rpython.lltypesystem import rlist as ll_rlist +from pypy.rpython.llinterp import LLException from pypy.rpython.ootypesystem import rlist as oo_rlist from pypy.rpython.rint import signed_repr from pypy.objspace.flow.model import Constant, Variable @@ -1477,6 +1478,80 @@ assert func1.oopspec == 'list.getitem_foldable(l, index)' assert not hasattr(func2, 'oopspec') + def test_iterate_over_immutable_list(self): + from pypy.rpython import rlist + class MyException(Exception): + pass + lst = 
list('abcdef') + def dummyfn(): + total = 0 + for c in lst: + total += ord(c) + return total + # + prev = rlist.ll_getitem_foldable_nonneg + try: + def seen_ok(l, index): + if index == 5: + raise KeyError # expected case + return prev(l, index) + rlist.ll_getitem_foldable_nonneg = seen_ok + e = raises(LLException, self.interpret, dummyfn, []) + assert 'KeyError' in str(e.value) + finally: + rlist.ll_getitem_foldable_nonneg = prev + + def test_iterate_over_immutable_list_quasiimmut_attr(self): + from pypy.rpython import rlist + class MyException(Exception): + pass + class Foo: + _immutable_fields_ = ['lst?[*]'] + lst = list('abcdef') + foo = Foo() + def dummyfn(): + total = 0 + for c in foo.lst: + total += ord(c) + return total + # + prev = rlist.ll_getitem_foldable_nonneg + try: + def seen_ok(l, index): + if index == 5: + raise KeyError # expected case + return prev(l, index) + rlist.ll_getitem_foldable_nonneg = seen_ok + e = raises(LLException, self.interpret, dummyfn, []) + assert 'KeyError' in str(e.value) + finally: + rlist.ll_getitem_foldable_nonneg = prev + + def test_iterate_over_mutable_list(self): + from pypy.rpython import rlist + class MyException(Exception): + pass + lst = list('abcdef') + def dummyfn(): + total = 0 + for c in lst: + total += ord(c) + lst[0] = 'x' + return total + # + prev = rlist.ll_getitem_foldable_nonneg + try: + def seen_ok(l, index): + if index == 5: + raise KeyError # expected case + return prev(l, index) + rlist.ll_getitem_foldable_nonneg = seen_ok + res = self.interpret(dummyfn, []) + assert res == sum(map(ord, 'abcdef')) + finally: + rlist.ll_getitem_foldable_nonneg = prev + + class TestOOtype(BaseTestRlist, OORtypeMixin): rlist = oo_rlist type_system = 'ootype' diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -387,7 +387,7 @@ m = re.search('guard \d+', comm) name = m.group(0) else: - name = comm[2:comm.find(':')-1] 
+ name = " ".join(comm[2:].split(" ", 2)[:2]) if name in dumps: bname, start_ofs, dump = dumps[name] loop.force_asm = (lambda dump=dump, start_ofs=start_ofs, diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -82,6 +82,9 @@ for file in ['LICENSE', 'README']: shutil.copy(str(basedir.join(file)), str(pypydir)) pypydir.ensure('include', dir=True) + if sys.platform == 'win32': + shutil.copyfile(str(pypy_c.dirpath().join("libpypy-c.lib")), + str(pypydir.join('include/python27.lib'))) # we want to put there all *.h and *.inl from trunk/include # and from pypy/_interfaces includedir = basedir.join('include') diff --git a/pypy/translator/c/database.py b/pypy/translator/c/database.py --- a/pypy/translator/c/database.py +++ b/pypy/translator/c/database.py @@ -28,11 +28,13 @@ gctransformer = None def __init__(self, translator=None, standalone=False, + cpython_extension=False, gcpolicyclass=None, thread_enabled=False, sandbox=False): self.translator = translator self.standalone = standalone + self.cpython_extension = cpython_extension self.sandbox = sandbox if gcpolicyclass is None: gcpolicyclass = gc.RefcountingGcPolicy diff --git a/pypy/translator/c/dlltool.py b/pypy/translator/c/dlltool.py --- a/pypy/translator/c/dlltool.py +++ b/pypy/translator/c/dlltool.py @@ -14,11 +14,14 @@ CBuilder.__init__(self, *args, **kwds) def getentrypointptr(self): + entrypoints = [] bk = self.translator.annotator.bookkeeper - graphs = [bk.getdesc(f).cachedgraph(None) for f, _ in self.functions] - return [getfunctionptr(graph) for graph in graphs] + for f, _ in self.functions: + graph = bk.getdesc(f).getuniquegraph() + entrypoints.append(getfunctionptr(graph)) + return entrypoints - def gen_makefile(self, targetdir): + def gen_makefile(self, targetdir, exe_name=None): pass # XXX finish def compile(self): diff --git a/pypy/translator/c/extfunc.py b/pypy/translator/c/extfunc.py --- 
a/pypy/translator/c/extfunc.py +++ b/pypy/translator/c/extfunc.py @@ -106,7 +106,7 @@ yield ('RPYTHON_EXCEPTION_MATCH', exceptiondata.fn_exception_match) yield ('RPYTHON_TYPE_OF_EXC_INST', exceptiondata.fn_type_of_exc_inst) yield ('RPYTHON_RAISE_OSERROR', exceptiondata.fn_raise_OSError) - if not db.standalone: + if db.cpython_extension: yield ('RPYTHON_PYEXCCLASS2EXC', exceptiondata.fn_pyexcclass2exc) yield ('RPyExceptionOccurred1', exctransformer.rpyexc_occured_ptr.value) diff --git a/pypy/translator/c/gcc/instruction.py b/pypy/translator/c/gcc/instruction.py --- a/pypy/translator/c/gcc/instruction.py +++ b/pypy/translator/c/gcc/instruction.py @@ -13,13 +13,17 @@ ARGUMENT_REGISTERS_64 = ('%rdi', '%rsi', '%rdx', '%rcx', '%r8', '%r9') -def frameloc_esp(offset): +def frameloc_esp(offset, wordsize): assert offset >= 0 - assert offset % 4 == 0 + assert offset % wordsize == 0 + if wordsize == 8: # in this case, there are 3 null bits, but we + offset >>= 1 # only need 2 of them return LOC_ESP_PLUS | offset -def frameloc_ebp(offset): - assert offset % 4 == 0 +def frameloc_ebp(offset, wordsize): + assert offset % wordsize == 0 + if wordsize == 8: # in this case, there are 3 null bits, but we + offset >>= 1 # only need 2 of them if offset >= 0: return LOC_EBP_PLUS | offset else: @@ -57,12 +61,12 @@ # try to use esp-relative addressing ofs_from_esp = framesize + self.ofs_from_frame_end if ofs_from_esp % 2 == 0: - return frameloc_esp(ofs_from_esp) + return frameloc_esp(ofs_from_esp, wordsize) # we can get an odd value if the framesize is marked as bogus # by visit_andl() assert uses_frame_pointer ofs_from_ebp = self.ofs_from_frame_end + wordsize - return frameloc_ebp(ofs_from_ebp) + return frameloc_ebp(ofs_from_ebp, wordsize) class Insn(object): diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -78,9 +78,9 @@ if self.is_stack_bottom: retaddr = 
LOC_NOWHERE # end marker for asmgcroot.py elif self.uses_frame_pointer: - retaddr = frameloc_ebp(self.WORD) + retaddr = frameloc_ebp(self.WORD, self.WORD) else: - retaddr = frameloc_esp(insn.framesize) + retaddr = frameloc_esp(insn.framesize, self.WORD) shape = [retaddr] # the first gcroots are always the ones corresponding to # the callee-saved registers @@ -471,8 +471,8 @@ return [] IGNORE_OPS_WITH_PREFIXES = dict.fromkeys([ - 'cmp', 'test', 'set', 'sahf', 'lahf', 'cltd', 'cld', 'std', - 'rep', 'movs', 'lods', 'stos', 'scas', 'cwtl', 'cwde', 'prefetch', + 'cmp', 'test', 'set', 'sahf', 'lahf', 'cld', 'std', + 'rep', 'movs', 'movhp', 'lods', 'stos', 'scas', 'cwde', 'prefetch', # floating-point operations cannot produce GC pointers 'f', 'cvt', 'ucomi', 'comi', 'subs', 'subp' , 'adds', 'addp', 'xorp', @@ -484,7 +484,9 @@ 'shl', 'shr', 'sal', 'sar', 'rol', 'ror', 'mul', 'imul', 'div', 'idiv', 'bswap', 'bt', 'rdtsc', 'punpck', 'pshufd', 'pcmp', 'pand', 'psllw', 'pslld', 'psllq', - 'paddq', 'pinsr', + 'paddq', 'pinsr', 'pmul', 'psrl', + # sign-extending moves should not produce GC pointers + 'cbtw', 'cwtl', 'cwtd', 'cltd', 'cltq', 'cqto', # zero-extending moves should not produce GC pointers 'movz', # locked operations should not move GC pointers, at least so far @@ -892,6 +894,8 @@ return '%' + cls.CALLEE_SAVE_REGISTERS[reg].replace("%", "") else: offset = loc & ~ LOC_MASK + if cls.WORD == 8: + offset <<= 1 if kind == LOC_EBP_PLUS: result = '(%' + cls.EBP.replace("%", "") + ')' elif kind == LOC_EBP_MINUS: @@ -1695,6 +1699,8 @@ } """ elif self.format in ('elf64', 'darwin64'): + if self.format == 'elf64': # gentoo patch: hardened systems + print >> output, "\t.section .note.GNU-stack,\"\",%progbits" print >> output, "\t.text" print >> output, "\t.globl %s" % _globalname('pypy_asm_stackwalk') _variant(elf64='.type pypy_asm_stackwalk, @function', diff --git a/pypy/translator/c/genc.py b/pypy/translator/c/genc.py --- a/pypy/translator/c/genc.py +++ 
b/pypy/translator/c/genc.py @@ -111,6 +111,7 @@ _compiled = False modulename = None split = False + cpython_extension = False def __init__(self, translator, entrypoint, config, gcpolicy=None, secondary_entrypoints=()): @@ -138,6 +139,7 @@ raise NotImplementedError("--gcrootfinder=asmgcc requires standalone") db = LowLevelDatabase(translator, standalone=self.standalone, + cpython_extension=self.cpython_extension, gcpolicyclass=gcpolicyclass, thread_enabled=self.config.translation.thread, sandbox=self.config.translation.sandbox) @@ -236,6 +238,8 @@ CBuilder.have___thread = self.translator.platform.check___thread() if not self.standalone: assert not self.config.translation.instrument + if self.cpython_extension: + defines['PYPY_CPYTHON_EXTENSION'] = 1 else: defines['PYPY_STANDALONE'] = db.get(pf) if self.config.translation.instrument: @@ -307,13 +311,18 @@ class CExtModuleBuilder(CBuilder): standalone = False + cpython_extension = True _module = None _wrapper = None def get_eci(self): from distutils import sysconfig python_inc = sysconfig.get_python_inc() - eci = ExternalCompilationInfo(include_dirs=[python_inc]) + eci = ExternalCompilationInfo( + include_dirs=[python_inc], + includes=["Python.h", + ], + ) return eci.merge(CBuilder.get_eci(self)) def getentrypointptr(self): # xxx diff --git a/pypy/translator/c/src/asm_gcc_x86.h b/pypy/translator/c/src/asm_gcc_x86.h --- a/pypy/translator/c/src/asm_gcc_x86.h +++ b/pypy/translator/c/src/asm_gcc_x86.h @@ -102,6 +102,12 @@ #endif /* !PYPY_CPU_HAS_STANDARD_PRECISION */ +#ifdef PYPY_X86_CHECK_SSE2 +#define PYPY_X86_CHECK_SSE2_DEFINED +extern void pypy_x86_check_sse2(void); +#endif + + /* implementations */ #ifndef PYPY_NOT_MAIN_FILE @@ -113,4 +119,25 @@ } # endif +# ifdef PYPY_X86_CHECK_SSE2 +void pypy_x86_check_sse2(void) +{ + //Read the CPU features. 
+ int features; + asm("mov $1, %%eax\n" + "cpuid\n" + "mov %%edx, %0" + : "=g"(features) : : "eax", "ebx", "edx", "ecx"); + + //Check bits 25 and 26, this indicates SSE2 support + if (((features & (1 << 25)) == 0) || ((features & (1 << 26)) == 0)) + { + fprintf(stderr, "Old CPU with no SSE2 support, cannot continue.\n" + "You need to re-translate with " + "'--jit-backend=x86-without-sse2'\n"); + abort(); + } +} +# endif + #endif diff --git a/pypy/translator/c/src/debug_print.c b/pypy/translator/c/src/debug_print.c --- a/pypy/translator/c/src/debug_print.c +++ b/pypy/translator/c/src/debug_print.c @@ -1,3 +1,4 @@ +#define PYPY_NOT_MAIN_FILE #include #include diff --git a/pypy/translator/c/src/dtoa.c b/pypy/translator/c/src/dtoa.c --- a/pypy/translator/c/src/dtoa.c +++ b/pypy/translator/c/src/dtoa.c @@ -46,13 +46,13 @@ * of return type *Bigint all return NULL to indicate a malloc failure. * Similarly, rv_alloc and nrv_alloc (return type char *) return NULL on * failure. bigcomp now has return type int (it used to be void) and - * returns -1 on failure and 0 otherwise. _Py_dg_dtoa returns NULL - * on failure. _Py_dg_strtod indicates failure due to malloc failure + * returns -1 on failure and 0 otherwise. __Py_dg_dtoa returns NULL + * on failure. __Py_dg_strtod indicates failure due to malloc failure * by returning -1.0, setting errno=ENOMEM and *se to s00. * * 4. The static variable dtoa_result has been removed. Callers of - * _Py_dg_dtoa are expected to call _Py_dg_freedtoa to free - * the memory allocated by _Py_dg_dtoa. + * __Py_dg_dtoa are expected to call __Py_dg_freedtoa to free + * the memory allocated by __Py_dg_dtoa. * * 5. The code has been reformatted to better fit with Python's * C style guide (PEP 7). @@ -61,7 +61,7 @@ * that hasn't been MALLOC'ed, private_mem should only be used when k <= * Kmax. * - * 7. _Py_dg_strtod has been modified so that it doesn't accept strings with + * 7. 
__Py_dg_strtod has been modified so that it doesn't accept strings with * leading whitespace. * ***************************************************************/ @@ -283,7 +283,7 @@ #define Big0 (Frac_mask1 | Exp_msk1*(DBL_MAX_EXP+Bias-1)) #define Big1 0xffffffff -/* struct BCinfo is used to pass information from _Py_dg_strtod to bigcomp */ +/* struct BCinfo is used to pass information from __Py_dg_strtod to bigcomp */ typedef struct BCinfo BCinfo; struct @@ -494,7 +494,7 @@ /* convert a string s containing nd decimal digits (possibly containing a decimal separator at position nd0, which is ignored) to a Bigint. This - function carries on where the parsing code in _Py_dg_strtod leaves off: on + function carries on where the parsing code in __Py_dg_strtod leaves off: on entry, y9 contains the result of converting the first 9 digits. Returns NULL on failure. */ @@ -1050,7 +1050,7 @@ } /* Convert a scaled double to a Bigint plus an exponent. Similar to d2b, - except that it accepts the scale parameter used in _Py_dg_strtod (which + except that it accepts the scale parameter used in __Py_dg_strtod (which should be either 0 or 2*P), and the normalization for the return value is different (see below). On input, d should be finite and nonnegative, and d / 2**scale should be exactly representable as an IEEE 754 double. @@ -1351,9 +1351,9 @@ /* The bigcomp function handles some hard cases for strtod, for inputs with more than STRTOD_DIGLIM digits. It's called once an initial estimate for the double corresponding to the input string has - already been obtained by the code in _Py_dg_strtod. + already been obtained by the code in __Py_dg_strtod. - The bigcomp function is only called after _Py_dg_strtod has found a + The bigcomp function is only called after __Py_dg_strtod has found a double value rv such that either rv or rv + 1ulp represents the correctly rounded value corresponding to the original string. 
It determines which of these two values is the correct one by @@ -1368,12 +1368,12 @@ s0 points to the first significant digit of the input string. rv is a (possibly scaled) estimate for the closest double value to the - value represented by the original input to _Py_dg_strtod. If + value represented by the original input to __Py_dg_strtod. If bc->scale is nonzero, then rv/2^(bc->scale) is the approximation to the input value. bc is a struct containing information gathered during the parsing and - estimation steps of _Py_dg_strtod. Description of fields follows: + estimation steps of __Py_dg_strtod. Description of fields follows: bc->e0 gives the exponent of the input value, such that dv = (integer given by the bd->nd digits of s0) * 10**e0 @@ -1505,7 +1505,7 @@ } static double -_Py_dg_strtod(const char *s00, char **se) +__Py_dg_strtod(const char *s00, char **se) { int bb2, bb5, bbe, bd2, bd5, bs2, c, dsign, e, e1, error; int esign, i, j, k, lz, nd, nd0, odd, sign; @@ -1849,7 +1849,7 @@ for(;;) { - /* This is the main correction loop for _Py_dg_strtod. + /* This is the main correction loop for __Py_dg_strtod. We've got a decimal value tdv, and a floating-point approximation srv=rv/2^bc.scale to tdv. The aim is to determine whether srv is @@ -2283,7 +2283,7 @@ */ static void -_Py_dg_freedtoa(char *s) +__Py_dg_freedtoa(char *s) { Bigint *b = (Bigint *)((int *)s - 1); b->maxwds = 1 << (b->k = *(int*)b); @@ -2325,11 +2325,11 @@ */ /* Additional notes (METD): (1) returns NULL on failure. (2) to avoid memory - leakage, a successful call to _Py_dg_dtoa should always be matched by a - call to _Py_dg_freedtoa. */ + leakage, a successful call to __Py_dg_dtoa should always be matched by a + call to __Py_dg_freedtoa. 
*/ static char * -_Py_dg_dtoa(double dd, int mode, int ndigits, +__Py_dg_dtoa(double dd, int mode, int ndigits, int *decpt, int *sign, char **rve) { /* Arguments ndigits, decpt, sign are similar to those @@ -2926,7 +2926,7 @@ if (b) Bfree(b); if (s0) - _Py_dg_freedtoa(s0); + __Py_dg_freedtoa(s0); return NULL; } @@ -2947,7 +2947,7 @@ _PyPy_SET_53BIT_PRECISION_HEADER; _PyPy_SET_53BIT_PRECISION_START; - result = _Py_dg_strtod(s00, se); + result = __Py_dg_strtod(s00, se); _PyPy_SET_53BIT_PRECISION_END; return result; } @@ -2959,14 +2959,14 @@ _PyPy_SET_53BIT_PRECISION_HEADER; _PyPy_SET_53BIT_PRECISION_START; - result = _Py_dg_dtoa(dd, mode, ndigits, decpt, sign, rve); + result = __Py_dg_dtoa(dd, mode, ndigits, decpt, sign, rve); _PyPy_SET_53BIT_PRECISION_END; return result; } void _PyPy_dg_freedtoa(char *s) { - _Py_dg_freedtoa(s); + __Py_dg_freedtoa(s); } /* End PYPY hacks */ diff --git a/pypy/translator/c/src/exception.h b/pypy/translator/c/src/exception.h --- a/pypy/translator/c/src/exception.h +++ b/pypy/translator/c/src/exception.h @@ -2,7 +2,7 @@ /************************************************************/ /*** C header subsection: exceptions ***/ -#if !defined(PYPY_STANDALONE) && !defined(PYPY_NOT_MAIN_FILE) +#if defined(PYPY_CPYTHON_EXTENSION) && !defined(PYPY_NOT_MAIN_FILE) PyObject *RPythonError; #endif @@ -74,7 +74,7 @@ RPyRaiseException(RPYTHON_TYPE_OF_EXC_INST(rexc), rexc); } -#ifndef PYPY_STANDALONE +#ifdef PYPY_CPYTHON_EXTENSION void RPyConvertExceptionFromCPython(void) { /* convert the CPython exception to an RPython one */ diff --git a/pypy/translator/c/src/g_include.h b/pypy/translator/c/src/g_include.h --- a/pypy/translator/c/src/g_include.h +++ b/pypy/translator/c/src/g_include.h @@ -2,7 +2,7 @@ /************************************************************/ /*** C header file for code produced by genc.py ***/ -#ifndef PYPY_STANDALONE +#ifdef PYPY_CPYTHON_EXTENSION # include "Python.h" # include "compile.h" # include "frameobject.h" diff --git 
a/pypy/translator/c/src/g_prerequisite.h b/pypy/translator/c/src/g_prerequisite.h --- a/pypy/translator/c/src/g_prerequisite.h +++ b/pypy/translator/c/src/g_prerequisite.h @@ -5,8 +5,6 @@ #ifdef PYPY_STANDALONE # include "src/commondefs.h" -#else -# include "Python.h" #endif #ifdef _WIN32 diff --git a/pypy/translator/c/src/main.h b/pypy/translator/c/src/main.h --- a/pypy/translator/c/src/main.h +++ b/pypy/translator/c/src/main.h @@ -36,6 +36,9 @@ RPyListOfString *list; pypy_asm_stack_bottom(); +#ifdef PYPY_X86_CHECK_SSE2_DEFINED + pypy_x86_check_sse2(); +#endif instrument_setup(); if (sizeof(void*) != SIZEOF_LONG) { diff --git a/pypy/translator/c/src/pyobj.h b/pypy/translator/c/src/pyobj.h --- a/pypy/translator/c/src/pyobj.h +++ b/pypy/translator/c/src/pyobj.h @@ -2,7 +2,7 @@ /************************************************************/ /*** C header subsection: untyped operations ***/ /*** as OP_XXX() macros calling the CPython API ***/ - +#ifdef PYPY_CPYTHON_EXTENSION #define op_bool(r,what) { \ int _retval = what; \ @@ -261,3 +261,5 @@ } #endif + +#endif /* PYPY_CPYTHON_EXTENSION */ diff --git a/pypy/translator/c/src/support.h b/pypy/translator/c/src/support.h --- a/pypy/translator/c/src/support.h +++ b/pypy/translator/c/src/support.h @@ -104,7 +104,7 @@ # define RPyBareItem(array, index) ((array)[index]) #endif -#ifndef PYPY_STANDALONE +#ifdef PYPY_CPYTHON_EXTENSION /* prototypes */ diff --git a/pypy/translator/c/test/test_dlltool.py b/pypy/translator/c/test/test_dlltool.py --- a/pypy/translator/c/test/test_dlltool.py +++ b/pypy/translator/c/test/test_dlltool.py @@ -2,7 +2,6 @@ from pypy.translator.c.dlltool import DLLDef from ctypes import CDLL import py -py.test.skip("fix this if needed") class TestDLLTool(object): def test_basic(self): @@ -16,8 +15,8 @@ d = DLLDef('lib', [(f, [int]), (b, [int])]) so = d.compile() dll = CDLL(str(so)) - assert dll.f(3) == 3 - assert dll.b(10) == 12 + assert dll.pypy_g_f(3) == 3 + assert dll.pypy_g_b(10) == 12 def 
test_split_criteria(self): def f(x): @@ -28,4 +27,5 @@ d = DLLDef('lib', [(f, [int]), (b, [int])]) so = d.compile() - assert py.path.local(so).dirpath().join('implement.c').check() + dirpath = py.path.local(so).dirpath() + assert dirpath.join('translator_c_test_test_dlltool.c').check() diff --git a/pypy/translator/driver.py b/pypy/translator/driver.py --- a/pypy/translator/driver.py +++ b/pypy/translator/driver.py @@ -331,6 +331,7 @@ raise Exception("stand-alone program entry point must return an " "int (and not, e.g., None or always raise an " "exception).") + annotator.complete() annotator.simplify() return s @@ -558,6 +559,9 @@ newsoname = newexename.new(basename=soname.basename) shutil.copy(str(soname), str(newsoname)) self.log.info("copied: %s" % (newsoname,)) + if sys.platform == 'win32': + shutil.copyfile(str(soname.new(ext='lib')), + str(newsoname.new(ext='lib'))) self.c_entryp = newexename self.log.info('usession directory: %s' % (udir,)) self.log.info("created: %s" % (self.c_entryp,)) diff --git a/pypy/translator/goal/app_main.py b/pypy/translator/goal/app_main.py --- a/pypy/translator/goal/app_main.py +++ b/pypy/translator/goal/app_main.py @@ -130,30 +130,46 @@ sys.executable,) print __doc__.rstrip() if 'pypyjit' in sys.builtin_module_names: - _print_jit_help() + print " --jit OPTIONS advanced JIT options: try 'off' or 'help'" print raise SystemExit def _print_jit_help(): - import pypyjit + try: + import pypyjit + except ImportError: + print >> sys.stderr, "No jit support in %s" % (sys.executable,) + return items = pypyjit.defaults.items() items.sort() + print 'Advanced JIT options: a comma-separated list of OPTION=VALUE:' for key, value in items: - prefix = ' --jit %s=N %s' % (key, ' '*(18-len(key))) + print + print ' %s=N' % (key,) doc = '%s (default %s)' % (pypyjit.PARAMETER_DOCS[key], value) - while len(doc) > 51: - i = doc[:51].rfind(' ') - print prefix + doc[:i] + while len(doc) > 72: + i = doc[:74].rfind(' ') + if i < 0: + i = doc.find(' ') + if i 
< 0: + i = len(doc) + print ' ' + doc[:i] doc = doc[i+1:] - prefix = ' '*len(prefix) - print prefix + doc - print ' --jit off turn off the JIT' + print ' ' + doc + print + print ' off' + print ' turn off the JIT' + print ' help' + print ' print this page' def print_version(*args): print >> sys.stderr, "Python", sys.version raise SystemExit def set_jit_option(options, jitparam, *args): + if jitparam == 'help': + _print_jit_help() + raise SystemExit if 'pypyjit' not in sys.builtin_module_names: print >> sys.stderr, ("Warning: No jit support in %s" % (sys.executable,)) diff --git a/pypy/translator/sandbox/test/test_sandbox.py b/pypy/translator/sandbox/test/test_sandbox.py --- a/pypy/translator/sandbox/test/test_sandbox.py +++ b/pypy/translator/sandbox/test/test_sandbox.py @@ -145,9 +145,9 @@ g = pipe.stdin f = pipe.stdout expect(f, g, "ll_os.ll_os_getenv", ("PYPY_GENERATIONGC_NURSERY",), None) - if sys.platform.startswith('linux'): # on Mac, uses another (sandboxsafe) approach - expect(f, g, "ll_os.ll_os_open", ("/proc/cpuinfo", 0, 420), - OSError(5232, "xyz")) + #if sys.platform.startswith('linux'): + # expect(f, g, "ll_os.ll_os_open", ("/proc/cpuinfo", 0, 420), + # OSError(5232, "xyz")) expect(f, g, "ll_os.ll_os_getenv", ("PYPY_GC_DEBUG",), None) g.close() tail = f.read() From noreply at buildbot.pypy.org Sat Mar 3 07:27:41 2012 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 3 Mar 2012 07:27:41 +0100 (CET) Subject: [pypy-commit] pypy numpy-record-dtypes: make string and unicode boxes instantiatable, but completely unusable Message-ID: <20120303062741.6BB698204C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: numpy-record-dtypes Changeset: r53133:9b0ff4af758c Date: 2012-03-02 22:27 -0800 http://bitbucket.org/pypy/pypy/changeset/9b0ff4af758c/ Log: make string and unicode boxes instantiatable, but completely unusable diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ 
b/pypy/interpreter/baseobjspace.py @@ -1335,7 +1335,7 @@ if not self.is_true(self.isinstance(w_obj, self.w_str)): raise OperationError(self.w_TypeError, self.wrap('argument must be a string')) - return self.str_w(w_obj) + return self.str_w(w_obj) def unicode_w(self, w_obj): return w_obj.unicode_w(self) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -4,7 +4,7 @@ from pypy.interpreter.typedef import TypeDef from pypy.objspace.std.floattype import float_typedef from pypy.objspace.std.stringtype import str_typedef -from pypy.objspace.std.unicodetype import unicode_typedef +from pypy.objspace.std.unicodetype import unicode_typedef, unicode_from_object from pypy.objspace.std.inttype import int_typedef from pypy.rlib.rarithmetic import LONG_BIT from pypy.tool.sourcetools import func_with_new_name @@ -242,15 +242,17 @@ arr.storage[i] = arg[i] return W_StringBox(arr, 0) + class W_UnicodeBox(W_CharacterBox): def descr__new__(space, w_subtype, w_arg): from pypy.module.micronumpy.interp_numarray import W_NDimArray from pypy.module.micronumpy.interp_dtype import new_unicode_dtype - arg = space.unicode_w(space.unicode(w_arg)) + arg = space.unicode_w(unicode_from_object(space, w_arg)) arr = W_NDimArray([1], new_unicode_dtype(space, len(arg))) - for i in range(len(arg)): - arr.setitem(i, arg[i]) + # XXX not this way, we need store + #for i in range(len(arg)): + # arr.storage[i] = arg[i] return W_UnicodeBox(arr, 0) W_GenericBox.typedef = TypeDef("generic", @@ -424,6 +426,8 @@ W_StringBox.typedef = TypeDef("string_", (str_typedef, W_CharacterBox.typedef), __module__ = "numpypy", __new__ = interp2app(W_StringBox.descr__new__.im_func), + __eq__ = interp2app(W_StringBox.descr_eq), + __ne__ = interp2app(W_StringBox.descr_ne), ) W_UnicodeBox.typedef = TypeDef("unicode_", (unicode_typedef, W_CharacterBox.typedef), diff --git 
a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -525,11 +525,11 @@ def test_string_boxes(self): from _numpypy import str_ - assert str_(3) == '3' + assert isinstance(str_(3), str_) def test_unicode_boxes(self): - from _numpypy import str_ - assert str_(3) == '3' + from _numpypy import unicode_ + assert isinstance(unicode_(3), unicode) class AppTestRecordDtypes(BaseNumpyAppTest): def test_create(self): From noreply at buildbot.pypy.org Sat Mar 3 07:55:21 2012 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 3 Mar 2012 07:55:21 +0100 (CET) Subject: [pypy-commit] pypy numpy-record-dtypes: fix the merge Message-ID: <20120303065521.C8B808204C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: numpy-record-dtypes Changeset: r53134:6dcf1d779df6 Date: 2012-03-02 22:54 -0800 http://bitbucket.org/pypy/pypy/changeset/6dcf1d779df6/ Log: fix the merge diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -74,21 +74,24 @@ raise OperationError(space.w_ValueError, space.wrap( "string is smaller than requested size")) - a = W_NDimArray(count, [count], dtype=dtype) - fromstring_loop(a, count, dtype, itemsize, s) + a = W_NDimArray([count], dtype=dtype) + fromstring_loop(a, dtype, itemsize, s) return space.wrap(a) -fromstring_driver = jit.JitDriver(greens=[], reds=['count', 'i', 'itemsize', - 'dtype', 's', 'a']) +fromstring_driver = jit.JitDriver(greens=[], reds=['i', 'itemsize', + 'dtype', 'ai', 's', 'a']) -def fromstring_loop(a, count, dtype, itemsize, s): +def fromstring_loop(a, dtype, itemsize, s): i = 0 - while i < count: - fromstring_driver.jit_merge_point(a=a, count=count, dtype=dtype, - itemsize=itemsize, s=s, i=i) + ai = a.create_iter() + while not ai.done(): + 
fromstring_driver.jit_merge_point(a=a, dtype=dtype, + itemsize=itemsize, s=s, i=i, + ai=ai) val = dtype.itemtype.runpack_str(s[i*itemsize:i*itemsize + itemsize]) - a.dtype.setitem(a.storage, i, val) - i += itemsize + a.dtype.setitem(a, ai.offset, val) + ai = ai.next(1) + i += 1 @unwrap_spec(s=str, count=int, sep=str) def fromstring(space, s, w_dtype=None, count=-1, sep=''): From noreply at buildbot.pypy.org Sat Mar 3 08:05:36 2012 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 3 Mar 2012 08:05:36 +0100 (CET) Subject: [pypy-commit] pypy numpy-record-dtypes: implement more stuff on fake objspace Message-ID: <20120303070536.A59838204C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: numpy-record-dtypes Changeset: r53135:9b1cb0354751 Date: 2012-03-02 23:05 -0800 http://bitbucket.org/pypy/pypy/changeset/9b1cb0354751/ Log: implement more stuff on fake objspace diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -207,6 +207,11 @@ is_arguments(args) return w_some_obj() + def get_and_call_function(space, w_descr, w_obj, *args_w): + args = argument.Arguments(space, list(args_w)) + w_impl = space.get(w_descr, w_obj) + return space.call_args(w_impl, args) + def gettypefor(self, cls): return self.gettypeobject(cls.typedef) From noreply at buildbot.pypy.org Sat Mar 3 08:46:02 2012 From: noreply at buildbot.pypy.org (wlav) Date: Sat, 3 Mar 2012 08:46:02 +0100 (CET) Subject: [pypy-commit] pypy reflex-support: another stab at getting the bootstrapping right ... Message-ID: <20120303074602.7CCED8204C@wyvern.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r53136:fad46404508b Date: 2012-03-02 23:00 -0800 http://bitbucket.org/pypy/pypy/changeset/fad46404508b/ Log: another stab at getting the bootstrapping right ... 
diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -51,12 +51,6 @@ r_cppscope = W_CPPType(space, final_name, cppscope) state.r_cppscope_cache[name] = r_cppscope - # prevent getting reflection info that may be linked in through the - # back-end libs or that may be available through an auto-loader, during - # translation time (else it will get translated, too) - if space.config.translating and not objectmodel.we_are_translated(): - return r_cppscope - r_cppscope._find_methods() r_cppscope._find_data_members() return r_cppscope @@ -432,8 +426,6 @@ self.data_members[data_member_name] = data_member def update(self): - if self.space.config.translating and not objectmodel.we_are_translated(): - return cpptype self._find_methods() self._find_data_members() diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py --- a/pypy/module/cppyy/pythonify.py +++ b/pypy/module/cppyy/pythonify.py @@ -92,11 +92,22 @@ def make_cppnamespace(namespace_name, cppns, build_in_full=True): - nsdct = {"_cpp_proxy" : cppns } + # build up a representation of a C++ namespace (namespaces are classes) # create a meta class to allow properties (for static data write access) metans = type(CppyyNamespaceMeta)(namespace_name+'_meta', (CppyyNamespaceMeta,), {}) + if cppns: + nsdct = {"_cpp_proxy" : cppns } + else: + nsdct = dict() + def cpp_proxy_loader(cls): + cpp_proxy = cppyy._type_byname(cls.__name__ != '::' and cls.__name__ or '') + del cls.__class__._cpp_proxy + cls._cpp_proxy = cpp_proxy + return cpp_proxy + metans._cpp_proxy = property(cpp_proxy_loader) + if build_in_full: # if False, rely on lazy build-up # insert static methods into the "namespace" dictionary for func_name in cppns.get_method_names(): @@ -207,8 +218,6 @@ if isinstance(scope, CppyyNamespaceMeta): global _loaded_dictionaries_isdirty if _loaded_dictionaries_isdirty: # TODO: this should be per 
namespace - if not scope._cpp_proxy: - scope._cpp_proxy = cppyy._type_byname(scope.__name__) scope._cpp_proxy.update() # TODO: this is currently quadratic _loaded_dictionaries_isdirty = False @@ -295,7 +304,7 @@ _loaded_dictionaries = {} -_loaded_dictionaries_isdirty = False # should be per namespace +_loaded_dictionaries_isdirty = True # should be per namespace def load_reflection_info(name): try: return _loaded_dictionaries[name] @@ -307,10 +316,10 @@ return dct -# user interface objects (note the two-step: creation of global functions may -# cause the creation of classes in the global namespace, so gbl must exist at -# that point to cache them) -gbl = make_cppnamespace("::", cppyy._type_byname(""), False) # global C++ namespace +# user interface objects (note the two-step of not calling type_byname here: +# creation of global functions may cause the creation of classes in the global +# namespace, so gbl must exist at that point to cache them) +gbl = make_cppnamespace("::", None, False) # global C++ namespace -# mostly for the benefit of CINT, which treats std as special -gbl.std = make_cppnamespace("std", cppyy._type_byname("std"), False) +# mostly for the benefit of the CINT backend, which treats std as special +gbl.std = make_cppnamespace("std", None, False) From noreply at buildbot.pypy.org Sat Mar 3 08:46:03 2012 From: noreply at buildbot.pypy.org (wlav) Date: Sat, 3 Mar 2012 08:46:03 +0100 (CET) Subject: [pypy-commit] pypy reflex-support: merge default into branch Message-ID: <20120303074603.DACEA8204C@wyvern.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r53137:3759abe3a35d Date: 2012-03-02 23:00 -0800 http://bitbucket.org/pypy/pypy/changeset/3759abe3a35d/ Log: merge default into branch diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py --- a/pypy/interpreter/test/test_typedef.py +++ b/pypy/interpreter/test/test_typedef.py @@ -304,6 +304,42 @@ assert_method(w_o1, "c", True) 
assert_method(w_o2, "c", False) + def test_total_ordering(self): + class W_SomeType(Wrappable): + def __init__(self, space, x): + self.space = space + self.x = x + + def descr__lt(self, w_other): + assert isinstance(w_other, W_SomeType) + return self.space.wrap(self.x < w_other.x) + + def descr__eq(self, w_other): + assert isinstance(w_other, W_SomeType) + return self.space.wrap(self.x == w_other.x) + + W_SomeType.typedef = typedef.TypeDef( + 'some_type', + __total_ordering__ = 'auto', + __lt__ = interp2app(W_SomeType.descr__lt), + __eq__ = interp2app(W_SomeType.descr__eq), + ) + space = self.space + w_b = space.wrap(W_SomeType(space, 2)) + w_c = space.wrap(W_SomeType(space, 2)) + w_a = space.wrap(W_SomeType(space, 1)) + # explicitly defined + assert space.is_true(space.lt(w_a, w_b)) + assert not space.is_true(space.eq(w_a, w_b)) + assert space.is_true(space.eq(w_b, w_c)) + # automatically defined + assert space.is_true(space.le(w_a, w_b)) + assert space.is_true(space.le(w_b, w_c)) + assert space.is_true(space.gt(w_b, w_a)) + assert space.is_true(space.ge(w_b, w_a)) + assert space.is_true(space.ge(w_b, w_c)) + assert space.is_true(space.ne(w_a, w_b)) + assert not space.is_true(space.ne(w_b, w_c)) class AppTestTypeDef: diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -12,7 +12,7 @@ from pypy.rlib.jit import promote class TypeDef: - def __init__(self, __name, __base=None, **rawdict): + def __init__(self, __name, __base=None, __total_ordering__=None, **rawdict): "NOT_RPYTHON: initialization-time only" self.name = __name if __base is None: @@ -34,6 +34,9 @@ # xxx used by faking self.fakedcpytype = None self.add_entries(**rawdict) + assert __total_ordering__ in (None, 'auto'), "Unknown value for __total_ordering" + if __total_ordering__ == 'auto': + self.auto_total_ordering() def add_entries(self, **rawdict): # xxx fix the names of the methods to match what app-level expects @@ 
-41,7 +44,15 @@ if isinstance(value, (interp2app, GetSetProperty)): value.name = key self.rawdict.update(rawdict) - + + def auto_total_ordering(self): + assert '__lt__' in self.rawdict, "__total_ordering='auto' requires __lt__" + assert '__eq__' in self.rawdict, "__total_ordering='auto' requires __eq__" + self.add_entries(__le__ = auto__le__, + __gt__ = auto__gt__, + __ge__ = auto__ge__, + __ne__ = auto__ne__) + def _freeze_(self): # hint for the annotator: track individual constant instances of TypeDef return True @@ -50,6 +61,26 @@ return "<%s name=%r>" % (self.__class__.__name__, self.name) +# generic special cmp methods defined on top of __lt__ and __eq__, used by +# automatic total ordering + + at interp2app +def auto__le__(space, w_self, w_other): + return space.not_(space.lt(w_other, w_self)) + + at interp2app +def auto__gt__(space, w_self, w_other): + return space.lt(w_other, w_self) + + at interp2app +def auto__ge__(space, w_self, w_other): + return space.not_(space.lt(w_self, w_other)) + + at interp2app +def auto__ne__(space, w_self, w_other): + return space.not_(space.eq(w_self, w_other)) + + # ____________________________________________________________ # Hash support diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -208,6 +208,7 @@ This is the class supporting --gcrootfinder=asmgcc. """ is_shadow_stack = False + is_64_bit = (WORD == 8) LOC_REG = 0 LOC_ESP_PLUS = 1 @@ -336,17 +337,17 @@ self._gcmap_deadentries += 1 item += asmgcroot.arrayitemsize - def get_basic_shape(self, is_64_bit=False): + def get_basic_shape(self): # XXX: Should this code even really know about stack frame layout of # the JIT? 
- if is_64_bit: - return [chr(self.LOC_EBP_PLUS | 8), - chr(self.LOC_EBP_MINUS | 8), - chr(self.LOC_EBP_MINUS | 16), - chr(self.LOC_EBP_MINUS | 24), - chr(self.LOC_EBP_MINUS | 32), - chr(self.LOC_EBP_MINUS | 40), - chr(self.LOC_EBP_PLUS | 0), + if self.is_64_bit: + return [chr(self.LOC_EBP_PLUS | 4), # return addr: at 8(%rbp) + chr(self.LOC_EBP_MINUS | 4), # saved %rbx: at -8(%rbp) + chr(self.LOC_EBP_MINUS | 8), # saved %r12: at -16(%rbp) + chr(self.LOC_EBP_MINUS | 12), # saved %r13: at -24(%rbp) + chr(self.LOC_EBP_MINUS | 16), # saved %r14: at -32(%rbp) + chr(self.LOC_EBP_MINUS | 20), # saved %r15: at -40(%rbp) + chr(self.LOC_EBP_PLUS | 0), # saved %rbp: at (%rbp) chr(0)] else: return [chr(self.LOC_EBP_PLUS | 4), # return addr: at 4(%ebp) @@ -366,7 +367,11 @@ shape.append(chr(number | flag)) def add_frame_offset(self, shape, offset): - assert (offset & 3) == 0 + if self.is_64_bit: + assert (offset & 7) == 0 + offset >>= 1 + else: + assert (offset & 3) == 0 if offset >= 0: num = self.LOC_EBP_PLUS | offset else: @@ -518,7 +523,7 @@ def initialize(self): pass - def get_basic_shape(self, is_64_bit=False): + def get_basic_shape(self): return [] def add_frame_offset(self, shape, offset): diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -57,6 +57,7 @@ def frame_pos(n): return -4*(4+n) gcrootmap = GcRootMap_asmgcc() + gcrootmap.is_64_bit = False num1 = frame_pos(-5) num1a = num1|2 num2 = frame_pos(55) diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -266,6 +266,38 @@ res = self.cpu.get_latest_value_int(0) assert res == 20 + def test_compile_big_bridge_out_of_small_loop(self): + i0 = BoxInt() + faildescr1 = BasicFailDescr(1) + looptoken = JitCellToken() + operations = [ + 
ResOperation(rop.GUARD_FALSE, [i0], None, descr=faildescr1), + ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(2)), + ] + inputargs = [i0] + operations[0].setfailargs([i0]) + self.cpu.compile_loop(inputargs, operations, looptoken) + + i1list = [BoxInt() for i in range(1000)] + bridge = [] + iprev = i0 + for i1 in i1list: + bridge.append(ResOperation(rop.INT_ADD, [iprev, ConstInt(1)], i1)) + iprev = i1 + bridge.append(ResOperation(rop.GUARD_FALSE, [i0], None, + descr=BasicFailDescr(3))) + bridge.append(ResOperation(rop.FINISH, [], None, + descr=BasicFailDescr(4))) + bridge[-2].setfailargs(i1list) + + self.cpu.compile_bridge(faildescr1, [i0], bridge, looptoken) + + fail = self.cpu.execute_token(looptoken, 1) + assert fail.identifier == 3 + for i in range(1000): + res = self.cpu.get_latest_value_int(i) + assert res == 2 + i + def test_get_latest_value_count(self): i0 = BoxInt() i1 = BoxInt() diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -1393,7 +1393,7 @@ self.force_spill_var(op.getarg(0)) def get_mark_gc_roots(self, gcrootmap, use_copy_area=False): - shape = gcrootmap.get_basic_shape(IS_X86_64) + shape = gcrootmap.get_basic_shape() for v, val in self.fm.bindings.items(): if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): assert isinstance(val, StackLoc) diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -1379,7 +1379,8 @@ elif opnum == rop.GUARD_NO_OVERFLOW: # Produced by int_xxx_ovf(). The pc is just after the opcode. # We get here because it did not used to overflow, but now it does. - return get_llexception(self.cpu, OverflowError()) + if not dont_change_position: + return get_llexception(self.cpu, OverflowError()) # elif opnum == rop.GUARD_OVERFLOW: # Produced by int_xxx_ovf(). The pc is just after the opcode. 
diff --git a/pypy/jit/metainterp/optimizeopt/__init__.py b/pypy/jit/metainterp/optimizeopt/__init__.py --- a/pypy/jit/metainterp/optimizeopt/__init__.py +++ b/pypy/jit/metainterp/optimizeopt/__init__.py @@ -9,7 +9,7 @@ from pypy.jit.metainterp.optimizeopt.simplify import OptSimplify from pypy.jit.metainterp.optimizeopt.pure import OptPure from pypy.jit.metainterp.optimizeopt.earlyforce import OptEarlyForce -from pypy.rlib.jit import PARAMETERS +from pypy.rlib.jit import PARAMETERS, ENABLE_ALL_OPTS from pypy.rlib.unroll import unrolling_iterable from pypy.rlib.debug import debug_start, debug_stop, debug_print @@ -30,6 +30,9 @@ ALL_OPTS_LIST = [name for name, _ in ALL_OPTS] ALL_OPTS_NAMES = ':'.join([name for name, _ in ALL_OPTS]) +assert ENABLE_ALL_OPTS == ALL_OPTS_NAMES, ( + 'please fix rlib/jit.py to say ENABLE_ALL_OPTS = %r' % (ALL_OPTS_NAMES,)) + def build_opt_chain(metainterp_sd, enable_opts): config = metainterp_sd.config optimizations = [] diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -9,7 +9,6 @@ from pypy.jit.metainterp.inliner import Inliner from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.metainterp.resume import Snapshot -from pypy.rlib.debug import debug_print import sys, os # FIXME: Introduce some VirtualOptimizer super class instead @@ -121,9 +120,9 @@ limit = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.retrace_limit if cell_token.retraced_count < limit: cell_token.retraced_count += 1 - debug_print('Retracing (%d/%d)' % (cell_token.retraced_count, limit)) + #debug_print('Retracing (%d/%d)' % (cell_token.retraced_count, limit)) else: - debug_print("Retrace count reached, jumping to preamble") + #debug_print("Retrace count reached, jumping to preamble") assert cell_token.target_tokens[0].virtual_state is None jumpop.setdescr(cell_token.target_tokens[0]) 
self.optimizer.send_extra_operation(jumpop) @@ -273,9 +272,9 @@ not newvalue.is_constant(): op = ResOperation(rop.SAME_AS, [op.result], newresult) self.optimizer._newoperations.append(op) - if self.optimizer.loop.logops: - debug_print(' Falling back to add extra: ' + - self.optimizer.loop.logops.repr_of_resop(op)) + #if self.optimizer.loop.logops: + # debug_print(' Falling back to add extra: ' + + # self.optimizer.loop.logops.repr_of_resop(op)) self.optimizer.flush() self.optimizer.emitting_dissabled = False @@ -341,8 +340,8 @@ if i == len(newoperations): while j < len(jumpargs): a = jumpargs[j] - if self.optimizer.loop.logops: - debug_print('J: ' + self.optimizer.loop.logops.repr_of_arg(a)) + #if self.optimizer.loop.logops: + # debug_print('J: ' + self.optimizer.loop.logops.repr_of_arg(a)) self.import_box(a, inputargs, short_jumpargs, jumpargs) j += 1 else: @@ -353,11 +352,11 @@ if op.is_guard(): args = args + op.getfailargs() - if self.optimizer.loop.logops: - debug_print('OP: ' + self.optimizer.loop.logops.repr_of_resop(op)) + #if self.optimizer.loop.logops: + # debug_print('OP: ' + self.optimizer.loop.logops.repr_of_resop(op)) for a in args: - if self.optimizer.loop.logops: - debug_print('A: ' + self.optimizer.loop.logops.repr_of_arg(a)) + #if self.optimizer.loop.logops: + # debug_print('A: ' + self.optimizer.loop.logops.repr_of_arg(a)) self.import_box(a, inputargs, short_jumpargs, jumpargs) i += 1 newoperations = self.optimizer.get_newoperations() @@ -370,18 +369,18 @@ # that is compatible with the virtual state at the start of the loop modifier = VirtualStateAdder(self.optimizer) final_virtual_state = modifier.get_virtual_state(original_jumpargs) - debug_start('jit-log-virtualstate') - virtual_state.debug_print('Closed loop with ') + #debug_start('jit-log-virtualstate') + #virtual_state.debug_print('Closed loop with ') bad = {} if not virtual_state.generalization_of(final_virtual_state, bad): # We ended up with a virtual state that is not compatible # and we 
are thus unable to jump to the start of the loop - final_virtual_state.debug_print("Bad virtual state at end of loop, ", - bad) - debug_stop('jit-log-virtualstate') + #final_virtual_state.debug_print("Bad virtual state at end of loop, ", + # bad) + #debug_stop('jit-log-virtualstate') raise InvalidLoop - debug_stop('jit-log-virtualstate') + #debug_stop('jit-log-virtualstate') maxguards = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.max_retrace_guards if self.optimizer.emitted_guards > maxguards: @@ -444,9 +443,9 @@ self.ensure_short_op_emitted(self.short_boxes.producer(a), optimizer, seen) - if self.optimizer.loop.logops: - debug_print(' Emitting short op: ' + - self.optimizer.loop.logops.repr_of_resop(op)) + #if self.optimizer.loop.logops: + # debug_print(' Emitting short op: ' + + # self.optimizer.loop.logops.repr_of_resop(op)) optimizer.send_extra_operation(op) seen[op.result] = True @@ -527,8 +526,8 @@ args = jumpop.getarglist() modifier = VirtualStateAdder(self.optimizer) virtual_state = modifier.get_virtual_state(args) - debug_start('jit-log-virtualstate') - virtual_state.debug_print("Looking for ") + #debug_start('jit-log-virtualstate') + #virtual_state.debug_print("Looking for ") for target in cell_token.target_tokens: if not target.virtual_state: @@ -537,10 +536,10 @@ extra_guards = [] bad = {} - debugmsg = 'Did not match ' + #debugmsg = 'Did not match ' if target.virtual_state.generalization_of(virtual_state, bad): ok = True - debugmsg = 'Matched ' + #debugmsg = 'Matched ' else: try: cpu = self.optimizer.cpu @@ -549,13 +548,13 @@ extra_guards) ok = True - debugmsg = 'Guarded to match ' + #debugmsg = 'Guarded to match ' except InvalidLoop: pass - target.virtual_state.debug_print(debugmsg, bad) + #target.virtual_state.debug_print(debugmsg, bad) if ok: - debug_stop('jit-log-virtualstate') + #debug_stop('jit-log-virtualstate') values = [self.getvalue(arg) for arg in jumpop.getarglist()] @@ -576,13 +575,13 @@ newop = inliner.inline_op(shop) 
self.optimizer.send_extra_operation(newop) except InvalidLoop: - debug_print("Inlining failed unexpectedly", - "jumping to preamble instead") + #debug_print("Inlining failed unexpectedly", + # "jumping to preamble instead") assert cell_token.target_tokens[0].virtual_state is None jumpop.setdescr(cell_token.target_tokens[0]) self.optimizer.send_extra_operation(jumpop) return True - debug_stop('jit-log-virtualstate') + #debug_stop('jit-log-virtualstate') return False class ValueImporter(object): diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py --- a/pypy/jit/metainterp/optimizeopt/virtualstate.py +++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py @@ -681,13 +681,14 @@ self.synthetic[op] = True def debug_print(self, logops): - debug_start('jit-short-boxes') - for box, op in self.short_boxes.items(): - if op: - debug_print(logops.repr_of_arg(box) + ': ' + logops.repr_of_resop(op)) - else: - debug_print(logops.repr_of_arg(box) + ': None') - debug_stop('jit-short-boxes') + if 0: + debug_start('jit-short-boxes') + for box, op in self.short_boxes.items(): + if op: + debug_print(logops.repr_of_arg(box) + ': ' + logops.repr_of_resop(op)) + else: + debug_print(logops.repr_of_arg(box) + ': None') + debug_stop('jit-short-boxes') def operations(self): if not we_are_translated(): # For tests diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -2064,11 +2064,12 @@ pass # XXX we want to do something special in resume descr, # but not now elif opnum == rop.GUARD_NO_OVERFLOW: # an overflow now detected - self.execute_raised(OverflowError(), constant=True) - try: - self.finishframe_exception() - except ChangeFrame: - pass + if not dont_change_position: + self.execute_raised(OverflowError(), constant=True) + try: + self.finishframe_exception() + except ChangeFrame: + pass elif opnum == rop.GUARD_OVERFLOW: # no longer 
overflowing self.clear_exception() else: diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -288,10 +288,10 @@ if y&4 == 0: x1, x2 = x2, x1 return res + res = self.meta_interp(f, [6, sys.maxint, 32, 48]) + assert res == f(6, sys.maxint, 32, 48) res = self.meta_interp(f, [sys.maxint, 6, 32, 48]) assert res == f(sys.maxint, 6, 32, 48) - res = self.meta_interp(f, [6, sys.maxint, 32, 48]) - assert res == f(6, sys.maxint, 32, 48) def test_loop_invariant_intbox(self): diff --git a/pypy/jit/metainterp/test/test_compile.py b/pypy/jit/metainterp/test/test_compile.py --- a/pypy/jit/metainterp/test/test_compile.py +++ b/pypy/jit/metainterp/test/test_compile.py @@ -14,7 +14,7 @@ ts = typesystem.llhelper def __init__(self): self.seen = [] - def compile_loop(self, inputargs, operations, token, name=''): + def compile_loop(self, inputargs, operations, token, log=True, name=''): self.seen.append((inputargs, operations, token)) class FakeLogger(object): diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -100,7 +100,7 @@ if not kwds.get('translate_support_code', False): warmrunnerdesc.metainterp_sd.profiler.finish() warmrunnerdesc.metainterp_sd.cpu.finish_once() - print '~~~ return value:', res + print '~~~ return value:', repr(res) while repeat > 1: print '~' * 79 res1 = interp.eval_graph(graph, args) diff --git a/pypy/module/_md5/test/test_md5.py b/pypy/module/_md5/test/test_md5.py --- a/pypy/module/_md5/test/test_md5.py +++ b/pypy/module/_md5/test/test_md5.py @@ -28,7 +28,7 @@ assert self.md5.digest_size == 16 #assert self.md5.digestsize == 16 -- not on CPython assert self.md5.md5().digest_size == 16 - if sys.version >= (2, 5): + if sys.version_info >= (2, 5): assert self.md5.blocksize == 1 assert self.md5.md5().digestsize == 16 diff --git 
a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -11,6 +11,7 @@ from pypy.objspace.std.register_all import register_all from pypy.rlib.rarithmetic import ovfcheck from pypy.rlib.unroll import unrolling_iterable +from pypy.rlib.objectmodel import specialize from pypy.rpython.lltypesystem import lltype, rffi @@ -162,13 +163,15 @@ def make_array(mytype): + W_ArrayBase = globals()['W_ArrayBase'] + class W_Array(W_ArrayBase): itemsize = mytype.bytes typecode = mytype.typecode @staticmethod def register(typeorder): - typeorder[W_Array] = [] + typeorder[W_Array] = [(W_ArrayBase, None)] def __init__(self, space): self.space = space @@ -586,13 +589,29 @@ raise OperationError(space.w_ValueError, space.wrap(msg)) # Compare methods - def cmp__Array_ANY(space, self, other): - if isinstance(other, W_ArrayBase): - w_lst1 = array_tolist__Array(space, self) - w_lst2 = space.call_method(other, 'tolist') - return space.cmp(w_lst1, w_lst2) - else: - return space.w_NotImplemented + @specialize.arg(3) + def _cmp_impl(space, self, other, space_fn): + w_lst1 = array_tolist__Array(space, self) + w_lst2 = space.call_method(other, 'tolist') + return space_fn(w_lst1, w_lst2) + + def eq__Array_ArrayBase(space, self, other): + return _cmp_impl(space, self, other, space.eq) + + def ne__Array_ArrayBase(space, self, other): + return _cmp_impl(space, self, other, space.ne) + + def lt__Array_ArrayBase(space, self, other): + return _cmp_impl(space, self, other, space.lt) + + def le__Array_ArrayBase(space, self, other): + return _cmp_impl(space, self, other, space.le) + + def gt__Array_ArrayBase(space, self, other): + return _cmp_impl(space, self, other, space.gt) + + def ge__Array_ArrayBase(space, self, other): + return _cmp_impl(space, self, other, space.ge) # Misc methods diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- 
a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -536,12 +536,6 @@ assert (a >= c) is False assert (c >= a) is True - assert cmp(a, a) == 0 - assert cmp(a, b) == 0 - assert cmp(a, c) < 0 - assert cmp(b, a) == 0 - assert cmp(c, a) > 0 - def test_reduce(self): import pickle a = self.array('i', [1, 2, 3]) @@ -851,8 +845,11 @@ cls.maxint = sys.maxint class AppTestArray(BaseArrayTests): + OPTIONS = {} + def setup_class(cls): - cls.space = gettestobjspace(usemodules=('array', 'struct', '_rawffi')) + cls.space = gettestobjspace(usemodules=('array', 'struct', '_rawffi'), + **cls.OPTIONS) cls.w_array = cls.space.appexec([], """(): import array return array.array @@ -874,3 +871,7 @@ a = self.array('b', range(4)) a[::-1] = a assert a == self.array('b', [3, 2, 1, 0]) + + +class AppTestArrayBuiltinShortcut(AppTestArray): + OPTIONS = {'objspace.std.builtinshortcut': True} diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -392,6 +392,9 @@ class JitHintError(Exception): """Inconsistency in the JIT hints.""" +ENABLE_ALL_OPTS = ( + 'intbounds:rewrite:virtualize:string:earlyforce:pure:heap:ffi:unroll') + PARAMETER_DOCS = { 'threshold': 'number of times a loop has to run for it to become hot', 'function_threshold': 'number of times a function must run for it to become traced from start', @@ -402,7 +405,8 @@ 'retrace_limit': 'how many times we can try retracing before giving up', 'max_retrace_guards': 'number of extra guards a retrace can cause', 'max_unroll_loops': 'number of extra unrollings a loop can cause', - 'enable_opts': 'optimizations to enable or all, INTERNAL USE ONLY' + 'enable_opts': 'INTERNAL USE ONLY: optimizations to enable, or all = %s' % + ENABLE_ALL_OPTS, } PARAMETERS = {'threshold': 1039, # just above 1024, prime diff --git a/pypy/rpython/memory/gctransform/asmgcroot.py b/pypy/rpython/memory/gctransform/asmgcroot.py --- a/pypy/rpython/memory/gctransform/asmgcroot.py +++ 
b/pypy/rpython/memory/gctransform/asmgcroot.py @@ -442,6 +442,8 @@ ll_assert(location >= 0, "negative location") kind = location & LOC_MASK offset = location & ~ LOC_MASK + if IS_64_BITS: + offset <<= 1 if kind == LOC_REG: # register if location == LOC_NOWHERE: return llmemory.NULL diff --git a/pypy/translator/c/gcc/instruction.py b/pypy/translator/c/gcc/instruction.py --- a/pypy/translator/c/gcc/instruction.py +++ b/pypy/translator/c/gcc/instruction.py @@ -13,13 +13,17 @@ ARGUMENT_REGISTERS_64 = ('%rdi', '%rsi', '%rdx', '%rcx', '%r8', '%r9') -def frameloc_esp(offset): +def frameloc_esp(offset, wordsize): assert offset >= 0 - assert offset % 4 == 0 + assert offset % wordsize == 0 + if wordsize == 8: # in this case, there are 3 null bits, but we + offset >>= 1 # only need 2 of them return LOC_ESP_PLUS | offset -def frameloc_ebp(offset): - assert offset % 4 == 0 +def frameloc_ebp(offset, wordsize): + assert offset % wordsize == 0 + if wordsize == 8: # in this case, there are 3 null bits, but we + offset >>= 1 # only need 2 of them if offset >= 0: return LOC_EBP_PLUS | offset else: @@ -57,12 +61,12 @@ # try to use esp-relative addressing ofs_from_esp = framesize + self.ofs_from_frame_end if ofs_from_esp % 2 == 0: - return frameloc_esp(ofs_from_esp) + return frameloc_esp(ofs_from_esp, wordsize) # we can get an odd value if the framesize is marked as bogus # by visit_andl() assert uses_frame_pointer ofs_from_ebp = self.ofs_from_frame_end + wordsize - return frameloc_ebp(ofs_from_ebp) + return frameloc_ebp(ofs_from_ebp, wordsize) class Insn(object): diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -78,9 +78,9 @@ if self.is_stack_bottom: retaddr = LOC_NOWHERE # end marker for asmgcroot.py elif self.uses_frame_pointer: - retaddr = frameloc_ebp(self.WORD) + retaddr = frameloc_ebp(self.WORD, self.WORD) else: - retaddr = frameloc_esp(insn.framesize) 
+ retaddr = frameloc_esp(insn.framesize, self.WORD) shape = [retaddr] # the first gcroots are always the ones corresponding to # the callee-saved registers @@ -894,6 +894,8 @@ return '%' + cls.CALLEE_SAVE_REGISTERS[reg].replace("%", "") else: offset = loc & ~ LOC_MASK + if cls.WORD == 8: + offset <<= 1 if kind == LOC_EBP_PLUS: result = '(%' + cls.EBP.replace("%", "") + ')' elif kind == LOC_EBP_MINUS: diff --git a/pypy/translator/goal/app_main.py b/pypy/translator/goal/app_main.py --- a/pypy/translator/goal/app_main.py +++ b/pypy/translator/goal/app_main.py @@ -130,30 +130,46 @@ sys.executable,) print __doc__.rstrip() if 'pypyjit' in sys.builtin_module_names: - _print_jit_help() + print " --jit OPTIONS advanced JIT options: try 'off' or 'help'" print raise SystemExit def _print_jit_help(): - import pypyjit + try: + import pypyjit + except ImportError: + print >> sys.stderr, "No jit support in %s" % (sys.executable,) + return items = pypyjit.defaults.items() items.sort() + print 'Advanced JIT options: a comma-separated list of OPTION=VALUE:' for key, value in items: - prefix = ' --jit %s=N %s' % (key, ' '*(18-len(key))) + print + print ' %s=N' % (key,) doc = '%s (default %s)' % (pypyjit.PARAMETER_DOCS[key], value) - while len(doc) > 51: - i = doc[:51].rfind(' ') - print prefix + doc[:i] + while len(doc) > 72: + i = doc[:74].rfind(' ') + if i < 0: + i = doc.find(' ') + if i < 0: + i = len(doc) + print ' ' + doc[:i] doc = doc[i+1:] - prefix = ' '*len(prefix) - print prefix + doc - print ' --jit off turn off the JIT' + print ' ' + doc + print + print ' off' + print ' turn off the JIT' + print ' help' + print ' print this page' def print_version(*args): print >> sys.stderr, "Python", sys.version raise SystemExit def set_jit_option(options, jitparam, *args): + if jitparam == 'help': + _print_jit_help() + raise SystemExit if 'pypyjit' not in sys.builtin_module_names: print >> sys.stderr, ("Warning: No jit support in %s" % (sys.executable,)) From noreply at 
buildbot.pypy.org Sat Mar 3 12:39:28 2012 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Mar 2012 12:39:28 +0100 (CET) Subject: [pypy-commit] pypy continulet-jit-2: Bug, test and fix. Message-ID: <20120303113928.5E7B68204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-jit-2 Changeset: r53138:7fd428833b06 Date: 2012-03-03 12:35 +0100 http://bitbucket.org/pypy/pypy/changeset/7fd428833b06/ Log: Bug, test and fix. diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -337,13 +337,15 @@ self._gcmap_deadentries += 1 item += asmgcroot.arrayitemsize - def get_basic_shape(self, return_addr_words_from_esp=0): + def get_basic_shape(self, return_addr_from_esp=0): # XXX: Should this code even really know about stack frame layout of # the JIT? - if return_addr_words_from_esp == 0: + if return_addr_from_esp == 0: retaddr = chr(self.LOC_EBP_PLUS | 4) # return addr: at WORD(%rbp) else: - x = return_addr_words_from_esp * 4 + x = return_addr_from_esp + if self.is_64_bit: + x >>= 1 assert 0 < x < 128 retaddr = chr(self.LOC_ESP_PLUS | x) # @@ -530,7 +532,7 @@ def initialize(self): pass - def get_basic_shape(self, return_addr_words_from_esp=0): + def get_basic_shape(self, return_addr_from_esp=0): return [] def add_frame_offset(self, shape, offset): diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -1140,9 +1140,10 @@ for i in range(start, n): loc = arglocs[i] p += loc.get_width() - extra_esp = p//WORD - OFFSTACK_REAL_FRAME - if extra_esp > 0: - extra_esp = align_stack_words(extra_esp) * WORD + extra_esp = 0 + if p > OFFSTACK_REAL_FRAME * WORD: + extra_esp = WORD * align_stack_words(p // WORD - + OFFSTACK_REAL_FRAME) self.mc.SUB_ri(esp.value, extra_esp) p = 0 @@ -1169,7 +1170,7 @@ #self._regalloc.reserve_param(p//WORD) # x is a location 
self.mc.CALL(x) - self.mark_gc_roots(force_index) + self.mark_gc_roots(force_index, extra_esp=extra_esp) # if callconv != FFI_DEFAULT_ABI: self._fix_stdcall(callconv, p) @@ -1266,7 +1267,7 @@ #self._regalloc.reserve_param(len(pass_on_stack)) self.mc.CALL(x) - self.mark_gc_roots(force_index) + self.mark_gc_roots(force_index, extra_esp=extra_esp) if extra_esp > 0: self.mc.ADD_ri(esp.value, extra_esp) @@ -2586,12 +2587,13 @@ not_implemented("not implemented operation (guard): %s" % op.getopname()) - def mark_gc_roots(self, force_index, use_copy_area=False): + def mark_gc_roots(self, force_index, use_copy_area=False, extra_esp=0): if force_index < 0: return # not needed gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap: - mark = self._regalloc.get_mark_gc_roots(gcrootmap, use_copy_area) + mark = self._regalloc.get_mark_gc_roots(gcrootmap, use_copy_area, + orf = OFFSTACK_REAL_FRAME * WORD + extra_esp) if gcrootmap.is_shadow_stack: gcrootmap.write_callshape(mark, force_index) else: diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -207,7 +207,8 @@ self.min_bytes_before_label = max(self.min_bytes_before_label, at_least_position) - def reserve_param(self, n): + @staticmethod + def reserve_param(n): assert n <= OFFSTACK_REAL_FRAME #self.param_depth = max(self.param_depth, n) @@ -1417,9 +1418,9 @@ # This operation is used only for testing self.force_spill_var(op.getarg(0)) - def get_mark_gc_roots(self, gcrootmap, use_copy_area=False): - orf = OFFSTACK_REAL_FRAME - shape = gcrootmap.get_basic_shape(return_addr_words_from_esp=orf) + def get_mark_gc_roots(self, gcrootmap, use_copy_area=False, + orf=OFFSTACK_REAL_FRAME*WORD): + shape = gcrootmap.get_basic_shape(return_addr_from_esp=orf) for v, val in self.fm.bindings.items(): if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): assert isinstance(val, StackLoc) diff --git 
a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -789,6 +789,49 @@ def test_compile_framework_minimal_size_in_nursery(self): self.run('compile_framework_minimal_size_in_nursery') + def define_compile_framework_big_call(self): + class A: + pass + @dont_look_inside + def bigcall(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, + b0, b1, b2, b3, b4, b5, b6, b7, b8, b9): + check(a0 == 100) + check(a1 == 110) + check(a2 == 120) + check(a3 == 130) + check(a4 == 140) + check(a5 == 150) + check(a6 == 160) + check(a7 == 170) + check(a8 == 180) + check(a9 == 190) + check(b0 == -60) + check(b1 == -61) + check(b2 == -62) + check(b3 == -63) + check(b4 == -64) + check(b5 == -65) + check(b6 == -66) + check(b7 == -67) + check(b8 == -68) + check(b9 == -69) + return [A(), A(), A()] + @unroll_safe + def f42(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s): + lst = [] + i = 0 + while i < 42: + lst = bigcall(100, 110, 120, 130, 140, 150, 160, 170, 180, 190, + -60, -61, -62, -63, -64, -65, -66, -67, -68, -69) + i += 1 + check(len(lst) == 3) + n -= 1 + return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s + return None, f42, None + + def test_compile_framework_big_call(self): + self.run('compile_framework_big_call') + class TestShadowStack(CompileFrameworkTests): gcrootfinder = "shadowstack" From noreply at buildbot.pypy.org Sat Mar 3 17:31:40 2012 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 3 Mar 2012 17:31:40 +0100 (CET) Subject: [pypy-commit] pypy numpy-record-dtypes: oops Message-ID: <20120303163140.6D9178204C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: numpy-record-dtypes Changeset: r53139:3e8e02abd398 Date: 2012-03-03 08:31 -0800 http://bitbucket.org/pypy/pypy/changeset/3e8e02abd398/ Log: oops diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ 
b/pypy/module/micronumpy/interp_boxes.py @@ -426,8 +426,6 @@ W_StringBox.typedef = TypeDef("string_", (str_typedef, W_CharacterBox.typedef), __module__ = "numpypy", __new__ = interp2app(W_StringBox.descr__new__.im_func), - __eq__ = interp2app(W_StringBox.descr_eq), - __ne__ = interp2app(W_StringBox.descr_ne), ) W_UnicodeBox.typedef = TypeDef("unicode_", (unicode_typedef, W_CharacterBox.typedef), From noreply at buildbot.pypy.org Sat Mar 3 17:32:46 2012 From: noreply at buildbot.pypy.org (justinpeel) Date: Sat, 3 Mar 2012 17:32:46 +0100 (CET) Subject: [pypy-commit] pypy default: Speed up cPickle in general and in particular with datetime objects and longs in protocol 2. The general changes include making the dispatch dict key on integers rather than strings and rewriting the load() loop to be more JIT friendly. Message-ID: <20120303163246.853FB8204C@wyvern.cs.uni-duesseldorf.de> Author: Justin Peel Branch: Changeset: r53140:8b5f9add3f20 Date: 2012-03-03 09:32 -0700 http://bitbucket.org/pypy/pypy/changeset/8b5f9add3f20/ Log: Speed up cPickle in general and in particular with datetime objects and longs in protocol 2. The general changes include making the dispatch dict key on integers rather than strings and rewriting the load() loop to be more JIT friendly. diff --git a/lib_pypy/cPickle.py b/lib_pypy/cPickle.py --- a/lib_pypy/cPickle.py +++ b/lib_pypy/cPickle.py @@ -2,16 +2,95 @@ # One-liner implementation of cPickle # -from pickle import * +from pickle import Pickler, dump, dumps, PickleError, PicklingError, UnpicklingError, _EmptyClass from pickle import __doc__, __version__, format_version, compatible_formats +from types import * +from copy_reg import dispatch_table +from copy_reg import _extension_registry, _inverted_registry, _extension_cache +import marshal, struct, sys try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f +# These are purely informational; no code uses these. 
+format_version = "2.0" # File format version we write +compatible_formats = ["1.0", # Original protocol 0 + "1.1", # Protocol 0 with INST added + "1.2", # Original protocol 1 + "1.3", # Protocol 1 with BINFLOAT added + "2.0", # Protocol 2 + ] # Old format versions we can read + +# Keep in synch with cPickle. This is the highest protocol number we +# know how to read. +HIGHEST_PROTOCOL = 2 BadPickleGet = KeyError UnpickleableError = PicklingError +MARK = ord('(') # push special markobject on stack +STOP = ord('.') # every pickle ends with STOP +POP = ord('0') # discard topmost stack item +POP_MARK = ord('1') # discard stack top through topmost markobject +DUP = ord('2') # duplicate top stack item +FLOAT = ord('F') # push float object; decimal string argument +INT = ord('I') # push integer or bool; decimal string argument +BININT = ord('J') # push four-byte signed int +BININT1 = ord('K') # push 1-byte unsigned int +LONG = ord('L') # push long; decimal string argument +BININT2 = ord('M') # push 2-byte unsigned int +NONE = ord('N') # push None +PERSID = ord('P') # push persistent object; id is taken from string arg +BINPERSID = ord('Q') # " " " ; " " " " stack +REDUCE = ord('R') # apply callable to argtuple, both on stack +STRING = ord('S') # push string; NL-terminated string argument +BINSTRING = ord('T') # push string; counted binary string argument +SHORT_BINSTRING = ord('U') # " " ; " " " " < 256 bytes +UNICODE = ord('V') # push Unicode string; raw-unicode-escaped'd argument +BINUNICODE = ord('X') # " " " ; counted UTF-8 string argument +APPEND = ord('a') # append stack top to list below it +BUILD = ord('b') # call __setstate__ or __dict__.update() +GLOBAL = ord('c') # push self.find_class(modname, name); 2 string args +DICT = ord('d') # build a dict from stack items +EMPTY_DICT = ord('}') # push empty dict +APPENDS = ord('e') # extend list on stack by topmost stack slice +GET = ord('g') # push item from memo on stack; index is string arg +BINGET = ord('h') # " " 
" " " " ; " " 1-byte arg +INST = ord('i') # build & push class instance +LONG_BINGET = ord('j') # push item from memo on stack; index is 4-byte arg +LIST = ord('l') # build list from topmost stack items +EMPTY_LIST = ord(']') # push empty list +OBJ = ord('o') # build & push class instance +PUT = ord('p') # store stack top in memo; index is string arg +BINPUT = ord('q') # " " " " " ; " " 1-byte arg +LONG_BINPUT = ord('r') # " " " " " ; " " 4-byte arg +SETITEM = ord('s') # add key+value pair to dict +TUPLE = ord('t') # build tuple from topmost stack items +EMPTY_TUPLE = ord(')') # push empty tuple +SETITEMS = ord('u') # modify dict by adding topmost key+value pairs +BINFLOAT = ord('G') # push float; arg is 8-byte float encoding + +TRUE = 'I01\n' # not an opcode; see INT docs in pickletools.py +FALSE = 'I00\n' # not an opcode; see INT docs in pickletools.py + +# Protocol 2 + +PROTO = ord('\x80') # identify pickle protocol +NEWOBJ = ord('\x81') # build object by applying cls.__new__ to argtuple +EXT1 = ord('\x82') # push object from extension registry; 1-byte index +EXT2 = ord('\x83') # ditto, but 2-byte index +EXT4 = ord('\x84') # ditto, but 4-byte index +TUPLE1 = ord('\x85') # build 1-tuple from stack top +TUPLE2 = ord('\x86') # build 2-tuple from two topmost stack items +TUPLE3 = ord('\x87') # build 3-tuple from three topmost stack items +NEWTRUE = ord('\x88') # push True +NEWFALSE = ord('\x89') # push False +LONG1 = ord('\x8a') # push long from < 256 bytes +LONG4 = ord('\x8b') # push really big long + +_tuplesize2code = [EMPTY_TUPLE, TUPLE1, TUPLE2, TUPLE3] + + # ____________________________________________________________ # XXX some temporary dark magic to produce pickled dumps that are # closer to the ones produced by cPickle in CPython @@ -44,3 +123,474 @@ file = StringIO() Pickler(file, protocol).dump(obj) return file.getvalue() + +# Why use struct.pack() for pickling but marshal.loads() for +# unpickling? 
struct.pack() is 40% faster than marshal.dumps(), but +# marshal.loads() is twice as fast as struct.unpack()! +mloads = marshal.loads + +# Unpickling machinery + +class Unpickler(object): + + def __init__(self, file): + """This takes a file-like object for reading a pickle data stream. + + The protocol version of the pickle is detected automatically, so no + proto argument is needed. + + The file-like object must have two methods, a read() method that + takes an integer argument, and a readline() method that requires no + arguments. Both methods should return a string. Thus file-like + object can be a file object opened for reading, a StringIO object, + or any other custom object that meets this interface. + """ + self.readline = file.readline + self.read = file.read + self.memo = {} + + def load(self): + """Read a pickled object representation from the open file. + + Return the reconstituted object hierarchy specified in the file. + """ + self.mark = object() # any new unique object + self.stack = [] + self.append = self.stack.append + try: + key = ord(self.read(1)) + while key != STOP: + self.dispatch[key](self) + key = ord(self.read(1)) + except TypeError: + if self.read(1) == '': + raise EOFError + raise + return self.stack.pop() + + # Return largest index k such that self.stack[k] is self.mark. + # If the stack doesn't contain a mark, eventually raises IndexError. + # This could be sped by maintaining another stack, of indices at which + # the mark appears. For that matter, the latter stack would suffice, + # and we wouldn't need to push mark objects on self.stack at all. + # Doing so is probably a good thing, though, since if the pickle is + # corrupt (or hostile) we may get a clue from finding self.mark embedded + # in unpickled objects. 
+ def marker(self): + k = len(self.stack)-1 + while self.stack[k] is not self.mark: k -= 1 + return k + + dispatch = {} + + def load_proto(self): + proto = ord(self.read(1)) + if not 0 <= proto <= 2: + raise ValueError, "unsupported pickle protocol: %d" % proto + dispatch[PROTO] = load_proto + + def load_persid(self): + pid = self.readline()[:-1] + self.append(self.persistent_load(pid)) + dispatch[PERSID] = load_persid + + def load_binpersid(self): + pid = self.stack.pop() + self.append(self.persistent_load(pid)) + dispatch[BINPERSID] = load_binpersid + + def load_none(self): + self.append(None) + dispatch[NONE] = load_none + + def load_false(self): + self.append(False) + dispatch[NEWFALSE] = load_false + + def load_true(self): + self.append(True) + dispatch[NEWTRUE] = load_true + + def load_int(self): + data = self.readline() + if data == FALSE[1:]: + val = False + elif data == TRUE[1:]: + val = True + else: + try: + val = int(data) + except ValueError: + val = long(data) + self.append(val) + dispatch[INT] = load_int + + def load_binint(self): + self.append(mloads('i' + self.read(4))) + dispatch[BININT] = load_binint + + def load_binint1(self): + self.append(ord(self.read(1))) + dispatch[BININT1] = load_binint1 + + def load_binint2(self): + self.append(mloads('i' + self.read(2) + '\000\000')) + dispatch[BININT2] = load_binint2 + + def load_long(self): + self.append(long(self.readline()[:-1], 0)) + dispatch[LONG] = load_long + + def load_long1(self): + n = ord(self.read(1)) + bytes = self.read(n) + self.append(decode_long(bytes)) + dispatch[LONG1] = load_long1 + + def load_long4(self): + n = mloads('i' + self.read(4)) + bytes = self.read(n) + self.append(decode_long(bytes)) + dispatch[LONG4] = load_long4 + + def load_float(self): + self.append(float(self.readline()[:-1])) + dispatch[FLOAT] = load_float + + def load_binfloat(self, unpack=struct.unpack): + self.append(unpack('>d', self.read(8))[0]) + dispatch[BINFLOAT] = load_binfloat + + def load_string(self): + rep 
= self.readline() + if len(rep) < 3: + raise ValueError, "insecure string pickle" + if rep[0] == "'" == rep[-2]: + rep = rep[1:-2] + elif rep[0] == '"' == rep[-2]: + rep = rep[1:-2] + else: + raise ValueError, "insecure string pickle" + self.append(rep.decode("string-escape")) + dispatch[STRING] = load_string + + def load_binstring(self): + L = mloads('i' + self.read(4)) + self.append(self.read(L)) + dispatch[BINSTRING] = load_binstring + + def load_unicode(self): + self.append(unicode(self.readline()[:-1],'raw-unicode-escape')) + dispatch[UNICODE] = load_unicode + + def load_binunicode(self): + L = mloads('i' + self.read(4)) + self.append(unicode(self.read(L),'utf-8')) + dispatch[BINUNICODE] = load_binunicode + + def load_short_binstring(self): + L = ord(self.read(1)) + self.append(self.read(L)) + dispatch[SHORT_BINSTRING] = load_short_binstring + + def load_tuple(self): + k = self.marker() + self.stack[k:] = [tuple(self.stack[k+1:])] + dispatch[TUPLE] = load_tuple + + def load_empty_tuple(self): + self.stack.append(()) + dispatch[EMPTY_TUPLE] = load_empty_tuple + + def load_tuple1(self): + self.stack[-1] = (self.stack[-1],) + dispatch[TUPLE1] = load_tuple1 + + def load_tuple2(self): + self.stack[-2:] = [(self.stack[-2], self.stack[-1])] + dispatch[TUPLE2] = load_tuple2 + + def load_tuple3(self): + self.stack[-3:] = [(self.stack[-3], self.stack[-2], self.stack[-1])] + dispatch[TUPLE3] = load_tuple3 + + def load_empty_list(self): + self.stack.append([]) + dispatch[EMPTY_LIST] = load_empty_list + + def load_empty_dictionary(self): + self.stack.append({}) + dispatch[EMPTY_DICT] = load_empty_dictionary + + def load_list(self): + k = self.marker() + self.stack[k:] = [self.stack[k+1:]] + dispatch[LIST] = load_list + + def load_dict(self): + k = self.marker() + d = {} + items = self.stack[k+1:] + for i in range(0, len(items), 2): + key = items[i] + value = items[i+1] + d[key] = value + self.stack[k:] = [d] + dispatch[DICT] = load_dict + + # INST and OBJ differ only in 
how they get a class object. It's not + # only sensible to do the rest in a common routine, the two routines + # previously diverged and grew different bugs. + # klass is the class to instantiate, and k points to the topmost mark + # object, following which are the arguments for klass.__init__. + def _instantiate(self, klass, k): + args = tuple(self.stack[k+1:]) + del self.stack[k:] + instantiated = 0 + if (not args and + type(klass) is ClassType and + not hasattr(klass, "__getinitargs__")): + try: + value = _EmptyClass() + value.__class__ = klass + instantiated = 1 + except RuntimeError: + # In restricted execution, assignment to inst.__class__ is + # prohibited + pass + if not instantiated: + try: + value = klass(*args) + except TypeError, err: + raise TypeError, "in constructor for %s: %s" % ( + klass.__name__, str(err)), sys.exc_info()[2] + self.append(value) + + def load_inst(self): + module = self.readline()[:-1] + name = self.readline()[:-1] + klass = self.find_class(module, name) + self._instantiate(klass, self.marker()) + dispatch[INST] = load_inst + + def load_obj(self): + # Stack is ... markobject classobject arg1 arg2 ... 
+ k = self.marker() + klass = self.stack.pop(k+1) + self._instantiate(klass, k) + dispatch[OBJ] = load_obj + + def load_newobj(self): + args = self.stack.pop() + cls = self.stack[-1] + obj = cls.__new__(cls, *args) + self.stack[-1] = obj + dispatch[NEWOBJ] = load_newobj + + def load_global(self): + module = self.readline()[:-1] + name = self.readline()[:-1] + klass = self.find_class(module, name) + self.append(klass) + dispatch[GLOBAL] = load_global + + def load_ext1(self): + code = ord(self.read(1)) + self.get_extension(code) + dispatch[EXT1] = load_ext1 + + def load_ext2(self): + code = mloads('i' + self.read(2) + '\000\000') + self.get_extension(code) + dispatch[EXT2] = load_ext2 + + def load_ext4(self): + code = mloads('i' + self.read(4)) + self.get_extension(code) + dispatch[EXT4] = load_ext4 + + def get_extension(self, code): + nil = [] + obj = _extension_cache.get(code, nil) + if obj is not nil: + self.append(obj) + return + key = _inverted_registry.get(code) + if not key: + raise ValueError("unregistered extension code %d" % code) + obj = self.find_class(*key) + _extension_cache[code] = obj + self.append(obj) + + def find_class(self, module, name): + # Subclasses may override this + __import__(module) + mod = sys.modules[module] + klass = getattr(mod, name) + return klass + + def load_reduce(self): + args = self.stack.pop() + func = self.stack[-1] + value = self.stack[-1](*args) + self.stack[-1] = value + dispatch[REDUCE] = load_reduce + + def load_pop(self): + del self.stack[-1] + dispatch[POP] = load_pop + + def load_pop_mark(self): + k = self.marker() + del self.stack[k:] + dispatch[POP_MARK] = load_pop_mark + + def load_dup(self): + self.append(self.stack[-1]) + dispatch[DUP] = load_dup + + def load_get(self): + self.append(self.memo[self.readline()[:-1]]) + dispatch[GET] = load_get + + def load_binget(self): + i = ord(self.read(1)) + self.append(self.memo[repr(i)]) + dispatch[BINGET] = load_binget + + def load_long_binget(self): + i = mloads('i' + 
self.read(4)) + self.append(self.memo[repr(i)]) + dispatch[LONG_BINGET] = load_long_binget + + def load_put(self): + self.memo[self.readline()[:-1]] = self.stack[-1] + dispatch[PUT] = load_put + + def load_binput(self): + i = ord(self.read(1)) + self.memo[repr(i)] = self.stack[-1] + dispatch[BINPUT] = load_binput + + def load_long_binput(self): + i = mloads('i' + self.read(4)) + self.memo[repr(i)] = self.stack[-1] + dispatch[LONG_BINPUT] = load_long_binput + + def load_append(self): + value = self.stack.pop() + self.stack[-1].append(value) + dispatch[APPEND] = load_append + + def load_appends(self): + stack = self.stack + mark = self.marker() + lst = stack[mark - 1] + lst.extend(stack[mark + 1:]) + del stack[mark:] + dispatch[APPENDS] = load_appends + + def load_setitem(self): + stack = self.stack + value = stack.pop() + key = stack.pop() + dict = stack[-1] + dict[key] = value + dispatch[SETITEM] = load_setitem + + def load_setitems(self): + stack = self.stack + mark = self.marker() + dict = stack[mark - 1] + for i in range(mark + 1, len(stack), 2): + dict[stack[i]] = stack[i + 1] + + del stack[mark:] + dispatch[SETITEMS] = load_setitems + + def load_build(self): + stack = self.stack + state = stack.pop() + inst = stack[-1] + setstate = getattr(inst, "__setstate__", None) + if setstate: + setstate(state) + return + slotstate = None + if isinstance(state, tuple) and len(state) == 2: + state, slotstate = state + if state: + try: + d = inst.__dict__ + try: + for k, v in state.iteritems(): + d[intern(k)] = v + # keys in state don't have to be strings + # don't blow up, but don't go out of our way + except TypeError: + d.update(state) + + except RuntimeError: + # XXX In restricted execution, the instance's __dict__ + # is not accessible. Use the old way of unpickling + # the instance variables. This is a semantic + # difference when unpickling in restricted + # vs. unrestricted modes. 
+ # Note, however, that cPickle has never tried to do the + # .update() business, and always uses + # PyObject_SetItem(inst.__dict__, key, value) in a + # loop over state.items(). + for k, v in state.items(): + setattr(inst, k, v) + if slotstate: + for k, v in slotstate.items(): + setattr(inst, k, v) + dispatch[BUILD] = load_build + + def load_mark(self): + self.append(self.mark) + dispatch[MARK] = load_mark + +#from pickle import decode_long + +def decode_long(data): + r"""Decode a long from a two's complement little-endian binary string. + + >>> decode_long('') + 0L + >>> decode_long("\xff\x00") + 255L + >>> decode_long("\xff\x7f") + 32767L + >>> decode_long("\x00\xff") + -256L + >>> decode_long("\x00\x80") + -32768L + >>> decode_long("\x80") + -128L + >>> decode_long("\x7f") + 127L + """ + + nbytes = len(data) + if nbytes == 0: + return 0L + ind = nbytes - 1 + while ind and ord(data[ind]) == 0: + ind -= 1 + n = ord(data[ind]) + while ind: + n <<= 8 + ind -= 1 + if ord(data[ind]): + n += ord(data[ind]) + if ord(data[nbytes - 1]) >= 128: + n -= 1L << (nbytes << 3) + return n + +def load(f): + return Unpickler(f).load() + +def loads(str): + f = StringIO(str) + return Unpickler(f).load() diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py --- a/lib_pypy/datetime.py +++ b/lib_pypy/datetime.py @@ -1032,8 +1032,8 @@ def __setstate(self, string): if len(string) != 4 or not (1 <= ord(string[2]) <= 12): raise TypeError("not enough arguments") - yhi, ylo, self._month, self._day = map(ord, string) - self._year = yhi * 256 + ylo + self._month, self._day = ord(string[2]), ord(string[3]) + self._year = ord(string[0]) * 256 + ord(string[1]) def __reduce__(self): return (self.__class__, self._getstate()) From noreply at buildbot.pypy.org Sat Mar 3 18:03:07 2012 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 3 Mar 2012 18:03:07 +0100 (CET) Subject: [pypy-commit] pypy numpy-record-dtypes: some renaming attempt Message-ID: 
<20120303170307.78CDE8204C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: numpy-record-dtypes Changeset: r53141:06b6c2676f47 Date: 2012-03-03 09:02 -0800 http://bitbucket.org/pypy/pypy/changeset/06b6c2676f47/ Log: some renaming attempt diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -232,7 +232,7 @@ pass class W_StringBox(W_CharacterBox): - def descr__new__(space, w_subtype, w_arg): + def descr__new__string_box(space, w_subtype, w_arg): from pypy.module.micronumpy.interp_numarray import W_NDimArray from pypy.module.micronumpy.interp_dtype import new_string_dtype @@ -244,7 +244,7 @@ class W_UnicodeBox(W_CharacterBox): - def descr__new__(space, w_subtype, w_arg): + def descr__new__unicode_box(space, w_subtype, w_arg): from pypy.module.micronumpy.interp_numarray import W_NDimArray from pypy.module.micronumpy.interp_dtype import new_unicode_dtype @@ -425,11 +425,11 @@ W_StringBox.typedef = TypeDef("string_", (str_typedef, W_CharacterBox.typedef), __module__ = "numpypy", - __new__ = interp2app(W_StringBox.descr__new__.im_func), + __new__ = interp2app(W_StringBox.descr__new__string_box.im_func), ) W_UnicodeBox.typedef = TypeDef("unicode_", (unicode_typedef, W_CharacterBox.typedef), __module__ = "numpypy", - __new__ = interp2app(W_UnicodeBox.descr__new__.im_func), + __new__ = interp2app(W_UnicodeBox.descr__new__unicode_box.im_func), ) From noreply at buildbot.pypy.org Sat Mar 3 18:43:10 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 3 Mar 2012 18:43:10 +0100 (CET) Subject: [pypy-commit] pypy struct-double: a branch to speedup struct.pack('d', float). currently not working. 
Message-ID: <20120303174310.0F83F8204C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: struct-double Changeset: r53142:c864362c97b7 Date: 2012-03-03 12:42 -0500 http://bitbucket.org/pypy/pypy/changeset/c864362c97b7/ Log: a branch to speedup struct.pack('d', float). currently not working. diff --git a/pypy/rlib/rstring.py b/pypy/rlib/rstring.py --- a/pypy/rlib/rstring.py +++ b/pypy/rlib/rstring.py @@ -2,10 +2,12 @@ """ from pypy.annotation.model import (SomeObject, SomeString, s_None, SomeChar, - SomeInteger, SomeUnicodeCodePoint, SomeUnicodeString, SomePtr, SomePBC) + SomeInteger, SomeUnicodeCodePoint, SomeUnicodeString, SomePtr, SomePBC, + SomeFloat) from pypy.rlib.rarithmetic import ovfcheck +from pypy.rpython.extregistry import ExtRegistryEntry +from pypy.rpython.lltypesystem import lltype from pypy.tool.pairtype import pair, pairtype -from pypy.rpython.extregistry import ExtRegistryEntry # -------------- public API for string functions ----------------------- @@ -94,6 +96,24 @@ class StringBuilder(AbstractStringBuilder): tp = str + def append_float(self, f): + import struct + from pypy.rpython.lltypesystem import rffi + + T = lltype.typeOf(f) + if T == lltype.Float: + fmt = "d" + elif T == lltype.SingleFloat: + fmt = "f" + else: + raise TypeError("this takes only float and r_singlefloat") + + size = rffi.sizeof(T) + self._grow(size) + + self.l.append(struct.pack(fmt, f)) + + class UnicodeBuilder(AbstractStringBuilder): tp = unicode @@ -125,6 +145,10 @@ assert isinstance(s_size, SomeInteger) return s_None + def method_append_float(self, f): + assert isinstance(f, SomeFloat) + return s_None + def method_getlength(self): return SomeInteger(nonneg=True) diff --git a/pypy/rlib/test/test_rstring.py b/pypy/rlib/test/test_rstring.py --- a/pypy/rlib/test/test_rstring.py +++ b/pypy/rlib/test/test_rstring.py @@ -1,7 +1,9 @@ import sys +from pypy.rlib.rarithmetic import r_singlefloat from pypy.rlib.rstring import StringBuilder, UnicodeBuilder, split, rsplit + def 
test_split(): assert split("", 'x') == [''] assert split("a", "a", 1) == ['', ''] @@ -33,6 +35,20 @@ s.append_multiple_char('d', 4) assert s.build() == "aabcabdddd" + s = StringBuilder() + s.append("a") + s.append_float(3.0) + s.append("a") + assert s.getlength() == 10 + assert s.build() == "a\x00\x00\x00\x00\x00\x00\x08 at a" + + s = StringBuilder() + s.append("c") + s.append_float(r_singlefloat(2.0)) + s.append("c") + assert s.getlength() == 6 + assert s.build() == "c\x00\x00\x00 at c" + def test_unicode_builder(): s = UnicodeBuilder() s.append(u'a') @@ -42,4 +58,4 @@ s.append_multiple_char(u'd', 4) assert s.build() == 'aabcbdddd' assert isinstance(s.build(), unicode) - + diff --git a/pypy/rpython/lltypesystem/rbuilder.py b/pypy/rpython/lltypesystem/rbuilder.py --- a/pypy/rpython/lltypesystem/rbuilder.py +++ b/pypy/rpython/lltypesystem/rbuilder.py @@ -3,7 +3,7 @@ from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.annlowlevel import llstr from pypy.rpython.rptr import PtrRepr -from pypy.rpython.lltypesystem import lltype, rstr +from pypy.rpython.lltypesystem import lltype, rffi, rstr from pypy.rpython.lltypesystem.lltype import staticAdtMethod, nullptr from pypy.rpython.lltypesystem.rstr import (STR, UNICODE, char_repr, string_repr, unichar_repr, unicode_repr) @@ -51,6 +51,8 @@ ('buf', lltype.Ptr(UNICODE)), adtmeths={'grow':staticAdtMethod(unicodebuilder_grow)}) +FLOAT_ARRAY = lltype.Ptr(lltype.Array(lltype.Float)) + MAX = 16*1024*1024 class BaseStringBuilderRepr(AbstractStringBuilderRepr): @@ -116,6 +118,17 @@ ll_builder.used = used @staticmethod + def ll_append_float(ll_builder, f): + used = ll_builder.used + T = lltype.typeOf(f) + size = rffi.sizeof(T) + if used + size > ll_builder.allocated: + ll_builder.grow(ll_builder, size) + + rffi.cast(FLOAT_ARRAY, lltype.direct_ptradd(ll_builder.buf.chars, used))[0] = f + ll_builder.used += size + + @staticmethod def ll_getlength(ll_builder): return ll_builder.used diff --git a/pypy/rpython/rbuilder.py 
b/pypy/rpython/rbuilder.py --- a/pypy/rpython/rbuilder.py +++ b/pypy/rpython/rbuilder.py @@ -1,8 +1,8 @@ +from pypy.annotation.model import SomeChar, SomeUnicodeCodePoint +from pypy.rlib.rstring import INIT_SIZE +from pypy.rpython.lltypesystem import lltype +from pypy.rpython.rmodel import Repr -from pypy.rpython.rmodel import Repr -from pypy.rpython.lltypesystem import lltype -from pypy.rlib.rstring import INIT_SIZE -from pypy.annotation.model import SomeChar, SomeUnicodeCodePoint class AbstractStringBuilderRepr(Repr): def rtyper_new(self, hop): @@ -39,6 +39,11 @@ hop.exception_cannot_occur() return hop.gendirectcall(self.ll_append_charpsize, *vlist) + def rtype_method_append_float(self, hop): + vlist = hop.inputargs(self, lltype.Float) + hop.exception_cannot_occur() + return hop.gendirectcall(self.ll_append_float, *vlist) + def rtype_method_getlength(self, hop): vlist = hop.inputargs(self) hop.exception_cannot_occur() @@ -53,7 +58,7 @@ vlist = hop.inputargs(self) hop.exception_cannot_occur() return hop.gendirectcall(self.ll_is_true, *vlist) - + def convert_const(self, value): if not value is None: raise TypeError("Prebuilt builedrs that are not none unsupported") diff --git a/pypy/rpython/test/test_rbuilder.py b/pypy/rpython/test/test_rbuilder.py --- a/pypy/rpython/test/test_rbuilder.py +++ b/pypy/rpython/test/test_rbuilder.py @@ -90,7 +90,7 @@ if s: s.append("3") return bool(s) - + def func(i): if i: s = StringBuilder() @@ -107,7 +107,7 @@ if s: s.append(u"3") return bool(s) - + def func(i): if i: s = UnicodeBuilder() @@ -119,6 +119,19 @@ res = self.interpret(func, [1]) assert res + def test_append_float(self): + def func(d): + s = StringBuilder() + s.append("abc") + s.append_float(d) + s.append("abc") + return s.build() + + res = self.ll_to_string(self.interpret(func, [3.0])) + assert res == "abc\x00\x00\x00\x00\x00\x00\x08 at abc" + + res = self.ll_to_string(self.interpret(func, [r_singlefloat(2.0)])) + assert res == "abc\x00\x00\x00 at abc" class 
TestLLtype(BaseTestStringBuilder, LLRtypeMixin): pass From noreply at buildbot.pypy.org Sat Mar 3 18:51:13 2012 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 3 Mar 2012 18:51:13 +0100 (CET) Subject: [pypy-commit] pypy numpy-record-dtypes: maybe fix translation Message-ID: <20120303175113.E7D898204C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: numpy-record-dtypes Changeset: r53143:18a59fca2166 Date: 2012-03-03 09:50 -0800 http://bitbucket.org/pypy/pypy/changeset/18a59fca2166/ Log: maybe fix translation diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -821,8 +821,8 @@ class ToStringArray(Call1): def __init__(self, child): dtype = child.find_dtype() - self.itemsize = dtype.itemtype.get_element_size() - self.s = StringBuilder(child.size * self.itemsize) + self.item_size = dtype.itemtype.get_element_size() + self.s = StringBuilder(child.size * self.item_size) Call1.__init__(self, None, 'tostring', child.shape, dtype, dtype, child) self.res = W_NDimArray([1], dtype, 'C') diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -4,6 +4,7 @@ ViewTransform, BroadcastTransform from pypy.tool.pairtype import extendabletype from pypy.module.micronumpy.loop import ComputationDone +from pypy.rlib import jit """ Signature specifies both the numpy expression that has been constructed and the assembler to be compiled. 
This is a very important observation - @@ -321,13 +322,14 @@ def __init__(self, dtype, child): Call1.__init__(self, None, 'tostring', dtype, child) + @jit.unroll_safe def eval(self, frame, arr): from pypy.module.micronumpy.interp_numarray import ToStringArray assert isinstance(arr, ToStringArray) arr.res.setitem(0, self.child.eval(frame, arr.values).convert_to( self.dtype)) - for i in range(arr.itemsize): + for i in range(arr.item_size): arr.s.append(arr.res_casted[i]) class BroadcastLeft(Call2): From noreply at buildbot.pypy.org Sat Mar 3 18:54:59 2012 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 3 Mar 2012 18:54:59 +0100 (CET) Subject: [pypy-commit] pypy default: add translation test for select Message-ID: <20120303175459.EE4968204C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r53144:8dbdd0b3cf40 Date: 2012-03-03 09:54 -0800 http://bitbucket.org/pypy/pypy/changeset/8dbdd0b3cf40/ Log: add translation test for select diff --git a/pypy/module/select/test/test_ztranslation.py b/pypy/module/select/test/test_ztranslation.py new file mode 100644 --- /dev/null +++ b/pypy/module/select/test/test_ztranslation.py @@ -0,0 +1,5 @@ + +from pypy.objspace.fake.checkmodule import checkmodule + +def test_select_translates(): + checkmodule('select') From noreply at buildbot.pypy.org Sat Mar 3 18:57:38 2012 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 3 Mar 2012 18:57:38 +0100 (CET) Subject: [pypy-commit] pypy struct-double: make the test run Message-ID: <20120303175738.117398204C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: struct-double Changeset: r53145:c1a5fe999a88 Date: 2012-03-03 09:57 -0800 http://bitbucket.org/pypy/pypy/changeset/c1a5fe999a88/ Log: make the test run diff --git a/pypy/rpython/lltypesystem/rbuilder.py b/pypy/rpython/lltypesystem/rbuilder.py --- a/pypy/rpython/lltypesystem/rbuilder.py +++ b/pypy/rpython/lltypesystem/rbuilder.py @@ -125,7 +125,7 @@ if used + size > ll_builder.allocated: 
ll_builder.grow(ll_builder, size) - rffi.cast(FLOAT_ARRAY, lltype.direct_ptradd(ll_builder.buf.chars, used))[0] = f + rffi.cast(FLOAT_ARRAY, rffi.ptradd(rffi.cast(rffi.VOIDP, ll_builder.buf.chars), used))[0] = f ll_builder.used += size @staticmethod From noreply at buildbot.pypy.org Sat Mar 3 19:02:00 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 3 Mar 2012 19:02:00 +0100 (CET) Subject: [pypy-commit] pypy struct-double: closer to passing? Message-ID: <20120303180200.700908204C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: struct-double Changeset: r53146:672e875c6730 Date: 2012-03-03 13:01 -0500 http://bitbucket.org/pypy/pypy/changeset/672e875c6730/ Log: closer to passing? diff --git a/pypy/rpython/lltypesystem/rbuilder.py b/pypy/rpython/lltypesystem/rbuilder.py --- a/pypy/rpython/lltypesystem/rbuilder.py +++ b/pypy/rpython/lltypesystem/rbuilder.py @@ -51,8 +51,6 @@ ('buf', lltype.Ptr(UNICODE)), adtmeths={'grow':staticAdtMethod(unicodebuilder_grow)}) -FLOAT_ARRAY = lltype.Ptr(lltype.Array(lltype.Float)) - MAX = 16*1024*1024 class BaseStringBuilderRepr(AbstractStringBuilderRepr): @@ -125,7 +123,7 @@ if used + size > ll_builder.allocated: ll_builder.grow(ll_builder, size) - rffi.cast(FLOAT_ARRAY, rffi.ptradd(rffi.cast(rffi.VOIDP, ll_builder.buf.chars), used))[0] = f + rffi.cast(rffi.CArrayPtr(T), rffi.ptradd(rffi.cast(rffi.VOIDP, ll_builder.buf.chars), used))[0] = f ll_builder.used += size @staticmethod From noreply at buildbot.pypy.org Sat Mar 3 19:04:47 2012 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 3 Mar 2012 19:04:47 +0100 (CET) Subject: [pypy-commit] pypy struct-double: run directly as well Message-ID: <20120303180447.CF9068204C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: struct-double Changeset: r53147:779f58b280ee Date: 2012-03-03 10:04 -0800 http://bitbucket.org/pypy/pypy/changeset/779f58b280ee/ Log: run directly as well diff --git a/pypy/rpython/test/test_rbuilder.py 
b/pypy/rpython/test/test_rbuilder.py --- a/pypy/rpython/test/test_rbuilder.py +++ b/pypy/rpython/test/test_rbuilder.py @@ -127,8 +127,10 @@ s.append("abc") return s.build() + expected = "abc\x00\x00\x00\x00\x00\x00\x08 at abc" + assert func(3.0) == expected res = self.ll_to_string(self.interpret(func, [3.0])) - assert res == "abc\x00\x00\x00\x00\x00\x00\x08 at abc" + assert res == expected res = self.ll_to_string(self.interpret(func, [r_singlefloat(2.0)])) assert res == "abc\x00\x00\x00 at abc" From noreply at buildbot.pypy.org Sat Mar 3 19:22:37 2012 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Mar 2012 19:22:37 +0100 (CET) Subject: [pypy-commit] pypy continulet-jit-2: Hack hack hack. Now realloc() is only called if needed. Message-ID: <20120303182237.22DC38204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-jit-2 Changeset: r53148:4c28945b23e6 Date: 2012-03-03 19:22 +0100 http://bitbucket.org/pypy/pypy/changeset/4c28945b23e6/ Log: Hack hack hack. Now realloc() is only called if needed. Should improve things a lot. On the negative side it seems that we need to save and restore the XMM registers too. diff --git a/pypy/jit/backend/x86/arch.py b/pypy/jit/backend/x86/arch.py --- a/pypy/jit/backend/x86/arch.py +++ b/pypy/jit/backend/x86/arch.py @@ -39,6 +39,9 @@ # has them stored in (ebp+8), (ebp+12), etc. OFFSTACK_START_AT_WORD = 2 # +# (ebp+4) has the size allocated so far +OFFSTACK_SIZE_ALLOCATED = 1 +# # In stacklet mode, the real frame contains always just OFFSTACK_REAL_FRAME # words reserved for temporary usage like call arguments. 
To maintain # alignment on 32-bit, OFFSTACK_REAL_FRAME % 4 == 3, and it is at least 17 diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -15,7 +15,8 @@ from pypy.jit.backend.x86.arch import (FRAME_FIXED_SIZE, FORCE_INDEX_OFS, WORD, IS_X86_32, IS_X86_64, OFFSTACK_REAL_FRAME, - OFFSTACK_START_AT_WORD) + OFFSTACK_START_AT_WORD, + OFFSTACK_SIZE_ALLOCATED) from pypy.jit.backend.x86.regloc import (eax, ecx, edx, ebx, esp, ebp, esi, edi, @@ -125,6 +126,7 @@ support.ensure_sse2_floats() self._build_float_constants() self._build_propagate_exception_path() + self._build_realloc_bridge_slowpath() if gc_ll_descr.get_malloc_slowpath_addr is not None: self._build_malloc_slowpath() self._build_stack_check_slowpath() @@ -281,6 +283,93 @@ self.propagate_exception_path = rawstart self.mc = None + def _build_realloc_bridge_slowpath(self): + from pypy.jit.backend.x86.regalloc import gpr_reg_mgr_cls + # This defines a function called at the start of a bridge to + # increase the size of the off-stack frame. It must preserve + # all registers. + # + # XXX optimize more: should also patch the original malloc() + # call to directly allocate enough + # + # see _enter_bridge_code() for the following constant: the + # new size is not passed explicitly, but needs to be fished + # from the code at (retaddr - WORD * realloc_bridge_ofs). + # This is commented as "fish fish fish" below. + if IS_X86_32: + self.realloc_bridge_ofs = 11 + elif IS_X86_64: + self.realloc_bridge_ofs = 19 + # + self.mc = codebuf.MachineCodeBlockWrapper() + # + # First, save all registers that this code might modify. + # Assume that the xmm registers are safe. Note that this code + # will save some registers in the caller's frame, in the + # temporary OFFSTACK_REAL_FRAME words. 
+ save_regs = gpr_reg_mgr_cls.save_around_call_regs + if IS_X86_32: + assert OFFSTACK_REAL_FRAME >= 2 + assert len(save_regs) == 3 + # there are 3 PUSHes in total here. With the retaddr, the + # stack remains aligned. + self.mc.MOV_sr(1*WORD, save_regs[0].value) + self.mc.MOV_sr(2*WORD, save_regs[1].value) + self.mc.PUSH_r(save_regs[2].value) + # + # fish fish fish (see above) + self.mc.MOV_rs(eax.value, WORD) # load the retaddr + self.mc.PUSH_m((eax.value, -self.realloc_bridge_ofs)) + # + self.mc.LEA_rb(eax.value, -WORD * (FRAME_FIXED_SIZE-1)) + self.mc.PUSH_r(eax.value) + # + elif IS_X86_64: + assert OFFSTACK_REAL_FRAME >= len(save_regs) - 1 + # there is only 1 PUSH in total here. With the retaddr, the + # stack remains aligned. + for j in range(len(save_regs)-1, 0, -1): + self.mc.MOV_sr(j*WORD, save_regs[j].value) + self.mc.PUSH_r(save_regs[0].value) + # + # fish fish fish (see above) + self.mc.MOV_rs(esi.value, WORD) # load the retaddr + self.mc.MOV32_rm(esi.value, (esi.value, + -self.realloc_bridge_ofs)) + # + self.mc.LEA_rb(edi.value, -WORD * (FRAME_FIXED_SIZE-1)) + # + self.mc.CALL(imm(self.offstack_realloc_addr)) + # + # load the updated ebp + self.mc.LEA_rm(ebp.value, (eax.value, WORD * (FRAME_FIXED_SIZE-1))) + # + # fix the OFFSTACK_SIZE_ALLOCATED in the updated memory location + if IS_X86_32: + self.mc.ADD_ri(esp.value, 2*WORD) + self.mc.MOV_rs(eax.value, WORD) # load the retaddr again + self.mc.MOV32_rm(eax.value, (eax.value, -self.realloc_bridge_ofs)) + self.mc.MOV_br(WORD * OFFSTACK_SIZE_ALLOCATED, eax.value) + # + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + if gcrootmap is not None and gcrootmap.is_shadow_stack: + self._fixup_shadowstack_location(gcrootmap) + # + # restore all registers and return + if IS_X86_32: + self.mc.POP_r(save_regs[2].value) + self.mc.MOV_rs(save_regs[1].value, 2*WORD) + self.mc.MOV_rs(save_regs[0].value, 1*WORD) + elif IS_X86_64: + self.mc.POP_r(save_regs[0].value) + for j in range(len(save_regs)-1, 0, -1): + 
self.mc.MOV_rs(save_regs[j].value, j*WORD) + self.mc.RET() + # + rawstart = self.mc.materialize(self.cpu.asmmemmgr, []) + self.realloc_bridge_addr = rawstart + self.mc = None + def _build_stack_check_slowpath(self): _, _, slowpathaddr = self.cpu.insert_stack_check() if slowpathaddr == 0 or self.cpu.propagate_exception_v < 0: @@ -727,56 +816,20 @@ return self.mc.get_relative_pos() - 4 def _enter_bridge_code(self, regalloc): - # XXX XXX far too heavy saving and restoring - j = 0 - if self.cpu.supports_floats: - for reg in regalloc.xrm.save_around_call_regs: - self.mc.MOVSD_sx(j, reg.value) - j += 8 - # - save_regs = regalloc.rm.save_around_call_regs - if IS_X86_32: - assert len(save_regs) == 3 - self.mc.MOV_sr(j, save_regs[0].value) - self.mc.PUSH_r(save_regs[1].value) - self.mc.PUSH_r(save_regs[2].value) - # 4 PUSHes in total, stack remains aligned - self.mc.PUSH_i32(0x77777777) # patched later - result = self.mc.get_relative_pos() - 4 - self.mc.LEA_rb(eax.value, -WORD * (FRAME_FIXED_SIZE-1)) - self.mc.PUSH_r(eax.value) - elif IS_X86_64: - # an even number of PUSHes, stack remains aligned - assert len(save_regs) & 1 == 0 - for reg in save_regs: - self.mc.PUSH_r(reg.value) - self.mc.LEA_rb(edi.value, -WORD * (FRAME_FIXED_SIZE-1)) - self.mc.MOV_riu32(esi.value, 0x77777777) # patched later - result = self.mc.get_relative_pos() - 4 - # - self.mc.CALL(imm(self.offstack_realloc_addr)) - # - self.mc.LEA_rm(ebp.value, (eax.value, WORD * (FRAME_FIXED_SIZE-1))) - # - gcrootmap = self.cpu.gc_ll_descr.gcrootmap - if gcrootmap is not None and gcrootmap.is_shadow_stack: - self._fixup_shadowstack_location(gcrootmap) - # - if IS_X86_32: - self.mc.ADD_ri(esp.value, 2*WORD) - self.mc.POP_r(save_regs[2].value) - self.mc.POP_r(save_regs[1].value) - self.mc.MOV_rs(save_regs[0].value, j) - elif IS_X86_64: - for i in range(len(save_regs)-1, -1, -1): - self.mc.POP_r(save_regs[i].value) - # - if self.cpu.supports_floats: - j = 0 - for reg in regalloc.xrm.save_around_call_regs: - 
self.mc.MOVSD_xs(reg.value, j) - j += 8 - # + self.mc.CMP_bi(WORD * OFFSTACK_SIZE_ALLOCATED, 0x77777777) + result = self.mc.get_relative_pos() - 4 + self.mc.J_il8(rx86.Conditions['NB'], 0) # JNB .skip + jnb_location = self.mc.get_relative_pos() + if WORD == 4: + self.mc.CALL(imm(self.realloc_bridge_addr)) + else: + # must always use the long, 13-bytes encoding here + self.mc.MOV_ri64(r11.value, self.realloc_bridge_addr) + self.mc.CALL_r(r11.value) + assert self.mc.get_relative_pos() - result == self.realloc_bridge_ofs + offset = self.mc.get_relative_pos() - jnb_location + assert 0 <= offset <= 127 + self.mc.overwrite(jnb_location-1, chr(offset)) return result def _patch_stackadjust(self, adr_to_fix, allocated_depth): @@ -802,38 +855,49 @@ return -WORD * aligned_words def _call_header(self): - # NB. the shape of the frame is hard-coded in get_basic_shape() too. - # Also, make sure this is consistent with FRAME_FIXED_SIZE. + # the frame has always a fixed size of OFFSTACK_REAL_FRAME words. 
+ self.mc.SUB_ri(esp.value, WORD * OFFSTACK_REAL_FRAME) + # if IS_X86_32: - self.mc.SUB_ri(esp.value, WORD * (OFFSTACK_REAL_FRAME-1)) - self.mc.PUSH_i32(0x77777777) # temporary + # save (and later restore) the value of edi + self.mc.MOV_sr(WORD, edi.value) + self.mc.MOV_ri(edi.value, 0x77777777) # temporary elif IS_X86_64: - # XXX very heavily save and restore all possible argument registers + # XXX need to save and restore all possible argument registers save_regs = [r9, r8, ecx, edx, esi, edi] - save_xmm_regs = [xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0] - assert OFFSTACK_REAL_FRAME >= len(save_regs) + len(save_xmm_regs) - self.mc.SUB_ri(esp.value, WORD * OFFSTACK_REAL_FRAME) + assert OFFSTACK_REAL_FRAME > len(save_regs) for i in range(len(save_regs)): - self.mc.MOV_sr(WORD * i, save_regs[i].value) - base = len(save_regs) - for i in range(len(save_xmm_regs)): - self.mc.MOVSD_sx(WORD * (base + i), save_xmm_regs[i].value) - # + self.mc.MOV_sr(WORD * (1 + i), save_regs[i].value) + # assume that the XMM registers are safe. 
self.mc.MOV_riu32(edi.value, 0x77777777) # temporary frame_size_pos = self.mc.get_relative_pos() - 4 # + self.mc.MOV_sr(0, edi.value) self.mc.CALL(imm(self.offstack_malloc_addr)) # - if IS_X86_64: - for i in range(len(save_regs)): - self.mc.MOV_rs(save_regs[i].value, WORD * i) - base = len(save_regs) - for i in range(len(save_xmm_regs)): - self.mc.MOVSD_xs(save_xmm_regs[i].value, WORD * (base + i)) - # + # save in the freshly malloc'ed block the original value of ebp self.mc.MOV_mr((eax.value, WORD * (FRAME_FIXED_SIZE-1)), ebp.value) # (new ebp) <- ebp self.mc.LEA_rm(ebp.value, (eax.value, WORD * (FRAME_FIXED_SIZE-1))) + # + # save in OFFSTACK_SIZE_ALLOCATED the allocated size + if IS_X86_32: + # edi is preserved by the CALL above + self.mc.MOV_br(WORD * OFFSTACK_SIZE_ALLOCATED, edi.value) + # now restore to original value of edi + self.mc.MOV_rs(edi.value, WORD) + # + elif IS_X86_64: + # reload edi from the stack and save it in the freshly + # malloc'ed block + self.mc.MOV_rs(edi.value, 0) + self.mc.MOV_br(WORD * OFFSTACK_SIZE_ALLOCATED, edi.value) + # reload the original value of the save_regs (including edi) + for i in range(len(save_regs)): + self.mc.MOV_rs(save_regs[i].value, WORD * (1 + i)) + # + # save in the freshly malloc'ed block the original value of + # all other callee-saved registers for i in range(len(self.cpu.CALLEE_SAVE_REGISTERS)): loc = self.cpu.CALLEE_SAVE_REGISTERS[i] self.mc.MOV_br(WORD*(-1-i), loc.value) # (ebp-4-4*i) <- reg diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py --- a/pypy/jit/backend/x86/rx86.py +++ b/pypy/jit/backend/x86/rx86.py @@ -531,6 +531,7 @@ PUSH_r = insn(rex_nw, register(1), '\x50') PUSH_b = insn(rex_nw, '\xFF', orbyte(6<<3), stack_bp(1)) + PUSH_m = insn(rex_nw, '\xFF', orbyte(6<<3), mem_reg_plus_const(1)) PUSH_i32 = insn('\x68', immediate(1, 'i')) POP_r = insn(rex_nw, register(1), '\x58') From noreply at buildbot.pypy.org Sat Mar 3 19:38:14 2012 From: noreply at buildbot.pypy.org (arigo) Date: 
Sat, 3 Mar 2012 19:38:14 +0100 (CET) Subject: [pypy-commit] pypy default: Comment. Message-ID: <20120303183814.124A48204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r53149:7c1747e11e4a Date: 2012-03-03 19:37 +0100 http://bitbucket.org/pypy/pypy/changeset/7c1747e11e4a/ Log: Comment. diff --git a/pypy/rpython/lltypesystem/rstr.py b/pypy/rpython/lltypesystem/rstr.py --- a/pypy/rpython/lltypesystem/rstr.py +++ b/pypy/rpython/lltypesystem/rstr.py @@ -62,6 +62,14 @@ @jit.oopspec('stroruni.copy_contents(src, dst, srcstart, dststart, length)') @enforceargs(None, None, int, int, int) def copy_string_contents(src, dst, srcstart, dststart, length): + """Copies 'length' characters from the 'src' string to the 'dst' + string, starting at position 'srcstart' and 'dststart'.""" + # xxx Warning: don't try to do this at home. It relies on a lot + # of details to be sure that it works correctly in all cases. + # Notably: no GC operation at all from the first cast_ptr_to_adr() + # because it might move the strings. The keepalive_until_here() + # are obscurely essential to make sure that the strings stay alive + # longer than the raw_memcopy(). assert srcstart >= 0 assert dststart >= 0 assert length >= 0 From noreply at buildbot.pypy.org Sat Mar 3 19:41:35 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 3 Mar 2012 19:41:35 +0100 (CET) Subject: [pypy-commit] pypy struct-double: progress (thanks armin!) Message-ID: <20120303184135.BCF588204C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: struct-double Changeset: r53150:dfabc8453658 Date: 2012-03-03 13:40 -0500 http://bitbucket.org/pypy/pypy/changeset/dfabc8453658/ Log: progress (thanks armin!) 
diff --git a/pypy/rpython/lltypesystem/rbuilder.py b/pypy/rpython/lltypesystem/rbuilder.py --- a/pypy/rpython/lltypesystem/rbuilder.py +++ b/pypy/rpython/lltypesystem/rbuilder.py @@ -3,7 +3,7 @@ from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.annlowlevel import llstr from pypy.rpython.rptr import PtrRepr -from pypy.rpython.lltypesystem import lltype, rffi, rstr +from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rstr from pypy.rpython.lltypesystem.lltype import staticAdtMethod, nullptr from pypy.rpython.lltypesystem.rstr import (STR, UNICODE, char_repr, string_repr, unichar_repr, unicode_repr) @@ -117,13 +117,18 @@ @staticmethod def ll_append_float(ll_builder, f): + T = lltype.typeOf(f) + BUF_T = lltype.typeOf(ll_builder.buf).TO + used = ll_builder.used - T = lltype.typeOf(f) size = rffi.sizeof(T) if used + size > ll_builder.allocated: ll_builder.grow(ll_builder, size) - rffi.cast(rffi.CArrayPtr(T), rffi.ptradd(rffi.cast(rffi.VOIDP, ll_builder.buf.chars), used))[0] = f + chars_offset = llmemory.offsetof(BUF_T, 'chars') + llmemory.itemoffsetof(BUF_T.chars, 0) + array = llmemory.cast_ptr_to_adr(ll_builder.buf) + chars_offset + ptr = llmemory.cast_adr_to_int(array) + used + rffi.cast(rffi.CArrayPtr(T), ptr)[0] ll_builder.used += size @staticmethod From noreply at buildbot.pypy.org Sat Mar 3 19:41:36 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 3 Mar 2012 19:41:36 +0100 (CET) Subject: [pypy-commit] pypy struct-double: merged upstream Message-ID: <20120303184136.E9F3C8204C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: struct-double Changeset: r53151:190244e3bdcf Date: 2012-03-03 13:41 -0500 http://bitbucket.org/pypy/pypy/changeset/190244e3bdcf/ Log: merged upstream diff --git a/pypy/rpython/test/test_rbuilder.py b/pypy/rpython/test/test_rbuilder.py --- a/pypy/rpython/test/test_rbuilder.py +++ b/pypy/rpython/test/test_rbuilder.py @@ -127,8 +127,10 @@ s.append("abc") return s.build() + expected = 
"abc\x00\x00\x00\x00\x00\x00\x08 at abc" + assert func(3.0) == expected res = self.ll_to_string(self.interpret(func, [3.0])) - assert res == "abc\x00\x00\x00\x00\x00\x00\x08 at abc" + assert res == expected res = self.ll_to_string(self.interpret(func, [r_singlefloat(2.0)])) assert res == "abc\x00\x00\x00 at abc" From noreply at buildbot.pypy.org Sat Mar 3 19:44:34 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 3 Mar 2012 19:44:34 +0100 (CET) Subject: [pypy-commit] pypy struct-double: progress? Message-ID: <20120303184434.DA6258204C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: struct-double Changeset: r53152:52ca642c459a Date: 2012-03-03 13:44 -0500 http://bitbucket.org/pypy/pypy/changeset/52ca642c459a/ Log: progress? diff --git a/pypy/rpython/lltypesystem/rbuilder.py b/pypy/rpython/lltypesystem/rbuilder.py --- a/pypy/rpython/lltypesystem/rbuilder.py +++ b/pypy/rpython/lltypesystem/rbuilder.py @@ -126,9 +126,8 @@ ll_builder.grow(ll_builder, size) chars_offset = llmemory.offsetof(BUF_T, 'chars') + llmemory.itemoffsetof(BUF_T.chars, 0) - array = llmemory.cast_ptr_to_adr(ll_builder.buf) + chars_offset - ptr = llmemory.cast_adr_to_int(array) + used - rffi.cast(rffi.CArrayPtr(T), ptr)[0] + array = llmemory.cast_ptr_to_adr(ll_builder.buf) + chars_offset + llmemory.sizeof(BUF_T.chars.OF) * used + rffi.cast(rffi.CArrayPtr(T), array)[0] ll_builder.used += size @staticmethod From noreply at buildbot.pypy.org Sat Mar 3 19:55:47 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 3 Mar 2012 19:55:47 +0100 (CET) Subject: [pypy-commit] pypy struct-double: missing keepaliva Message-ID: <20120303185547.9B8098204C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: struct-double Changeset: r53153:058cb3a9294f Date: 2012-03-03 13:55 -0500 http://bitbucket.org/pypy/pypy/changeset/058cb3a9294f/ Log: missing keepaliva diff --git a/pypy/rpython/lltypesystem/rbuilder.py b/pypy/rpython/lltypesystem/rbuilder.py --- 
a/pypy/rpython/lltypesystem/rbuilder.py +++ b/pypy/rpython/lltypesystem/rbuilder.py @@ -1,5 +1,5 @@ from pypy.rlib import rgc, jit -from pypy.rlib.objectmodel import enforceargs +from pypy.rlib.objectmodel import enforceargs, keepalive_until_here from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.annlowlevel import llstr from pypy.rpython.rptr import PtrRepr @@ -128,6 +128,7 @@ chars_offset = llmemory.offsetof(BUF_T, 'chars') + llmemory.itemoffsetof(BUF_T.chars, 0) array = llmemory.cast_ptr_to_adr(ll_builder.buf) + chars_offset + llmemory.sizeof(BUF_T.chars.OF) * used rffi.cast(rffi.CArrayPtr(T), array)[0] + keepalive_until_here(ll_builder.buf) ll_builder.used += size @staticmethod From noreply at buildbot.pypy.org Sat Mar 3 22:12:09 2012 From: noreply at buildbot.pypy.org (justinpeel) Date: Sat, 3 Mar 2012 22:12:09 +0100 (CET) Subject: [pypy-commit] pypy default: speed up unpickling of datetime.time objects Message-ID: <20120303211209.5D2AE8204C@wyvern.cs.uni-duesseldorf.de> Author: Justin Peel Branch: Changeset: r53154:c3c5061a39af Date: 2012-03-03 14:11 -0700 http://bitbucket.org/pypy/pypy/changeset/c3c5061a39af/ Log: speed up unpickling of datetime.time objects diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py --- a/lib_pypy/datetime.py +++ b/lib_pypy/datetime.py @@ -1421,9 +1421,10 @@ def __setstate(self, string, tzinfo): if len(string) != 6 or ord(string[0]) >= 24: raise TypeError("an integer is required") - self._hour, self._minute, self._second, us1, us2, us3 = \ - map(ord, string) - self._microsecond = (((us1 << 8) | us2) << 8) | us3 + self._hour, self._minute, self._second = ord(string[0]), \ + ord(string[1]), ord(string[2]) + self._microsecond = (((ord(string[3]) << 8) | \ + ord(string[4])) << 8) | ord(string[5]) self._tzinfo = tzinfo def __reduce__(self): From noreply at buildbot.pypy.org Sat Mar 3 22:28:07 2012 From: noreply at buildbot.pypy.org (justinpeel) Date: Sat, 3 Mar 2012 22:28:07 +0100 (CET) Subject: [pypy-commit] pypy 
default: speed up unpickling of datetime.datetime objects Message-ID: <20120303212807.388C78204C@wyvern.cs.uni-duesseldorf.de> Author: Justin Peel Branch: Changeset: r53155:3539e2d663f4 Date: 2012-03-03 14:27 -0700 http://bitbucket.org/pypy/pypy/changeset/3539e2d663f4/ Log: speed up unpickling of datetime.datetime objects diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py --- a/lib_pypy/datetime.py +++ b/lib_pypy/datetime.py @@ -1904,10 +1904,11 @@ return (basestate, self._tzinfo) def __setstate(self, string, tzinfo): - (yhi, ylo, self._month, self._day, self._hour, - self._minute, self._second, us1, us2, us3) = map(ord, string) - self._year = yhi * 256 + ylo - self._microsecond = (((us1 << 8) | us2) << 8) | us3 + (self._month, self._day, self._hour, self._minute, + self._second) = (ord(string[2]), ord(string[3]), ord(string[4]), + ord(string[5]), ord(string[6])) + self._year = ord(string[0]) * 256 + ord(string[1]) + self._microsecond = (((ord(string[7]) << 8) | ord(string[8])) << 8) | ord(string[9]) self._tzinfo = tzinfo def __reduce__(self): From pullrequests-noreply at bitbucket.org Sat Mar 3 23:17:54 2012 From: pullrequests-noreply at bitbucket.org (Bitbucket) Date: Sat, 03 Mar 2012 22:17:54 -0000 Subject: [pypy-commit] [OPEN] Pull request #29 for pypy/pypy: Finish kqueue support Message-ID: A new pull request has been opened by Tobias Oberstein. oberstet/pypy has changes to be pulled into pypy/pypy. https://bitbucket.org/pypy/pypy/pull-request/29/finish-kqueue-support Title: Finish kqueue support Working kqueue support. Translator test, all unit tests succeed, PyPy builds, Twisted trunk runs, Autobahn WebSocket testsuite runs. Changes to be pulled: 696dedf6f58f by Tobias Oberstein: "Merge upstream." a701fa0e939b by Tobias Oberstein: "Make all unit tests pass." fa4020666f4c by Tobias Oberstein: "Various fixes, implement kevent comparison." b11d2bf7c116 by Tobias Oberstein: "Merge upstream." 3d6ef205bbaa by Tobias Oberstein: "Some build fixes." 
1996736bd620 by Tobias Oberstein: "Implement timeout forwarding." 39358fbeb957 by Tobias Oberstein: "Implement kqueue control." ff829d29d644 by Tobias Oberstein: "Complete symbols, streamline code, fix include, whitespace." e46a58cf8ee6 by Tobias Oberstein: "Fix module init, test init." b12df2b6eaa8 by Tobias Oberstein: "Merging trunk and resolving conflicts." cc37b011ea0f by Alex Gaynor: "a little work" 0b624568902e by Alex Gaynor: "merged upstream" 38bc1b0542fa by Alex Gaynor: "resolved merge conflicts" 32456c2f21a2 by Alex Gaynor: "Implemented kevent and started on kqueue." 1a5f26bde444 by Alex Gaynor: "Allow submodules for MixedModules, imported from my psycopg2 fork." 0a34a5b002d1 by Alex Gaynor: "Final typo fix." 4e7c33ca4b88 by Alex Gaynor: "One more typo fix." 089f3314fd8b by Alex Gaynor: "typo fix." 0b8353f1a1b4 by Alex Gaynor: "Missing import." 1ef20b0124c0 by Alex Gaynor: "Remove this test, it doesn't seem to pass under CPython." dae97b70bdfe by Alex Gaynor: "Fix a typo in the tests." 682046107ffe by Alex Gaynor: "Added tests and skeleton." -- This is an issue notification from bitbucket.org. You are receiving this either because you are participating in a pull request, or you are following it. From notifications-noreply at bitbucket.org Sat Mar 3 23:31:24 2012 From: notifications-noreply at bitbucket.org (Bitbucket) Date: Sat, 03 Mar 2012 22:31:24 -0000 Subject: [pypy-commit] [COMMENT] Pull request #29 for pypy/pypy: Finish kqueue support In-Reply-To: References: Message-ID: <20120303223124.26639.73483@bitbucket05.managed.contegix.com> New comment on pull request: https://bitbucket.org/pypy/pypy/pull-request/29/finish-kqueue-support#comment-3541 Alex Gaynor (alex_gaynor) said: Overall: fantastic work for a first patch! A few small things (I'm anal about this): # `for symbol in symbol_map.keys():` -> `for symbol in symbol_map`, there's no need for the `keys()` call. # Can you remove the comments with the C declarations?
They're distracting, and don't add much since the llexternal call has the same API (of course) # Line 208, w_elist, that should be elist_w. _w suffix means that it's an RPython list of interpreter objects. # Right now kevents can only be compared by ==, <, and >=. This probably needs to add <=, >, and !=, right? Great work! -- This is a pull request comment notification from bitbucket.org. You are receiving this either because you are participating in a pull request, or you are following it. From noreply at buildbot.pypy.org Sun Mar 4 01:21:28 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 4 Mar 2012 01:21:28 +0100 (CET) Subject: [pypy-commit] pypy struct-double: all the tests pass now Message-ID: <20120304002128.0D1E98204C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: struct-double Changeset: r53156:65705b303582 Date: 2012-03-03 19:21 -0500 http://bitbucket.org/pypy/pypy/changeset/65705b303582/ Log: all the tests pass now diff --git a/pypy/rlib/rstring.py b/pypy/rlib/rstring.py --- a/pypy/rlib/rstring.py +++ b/pypy/rlib/rstring.py @@ -3,7 +3,7 @@ from pypy.annotation.model import (SomeObject, SomeString, s_None, SomeChar, SomeInteger, SomeUnicodeCodePoint, SomeUnicodeString, SomePtr, SomePBC, - SomeFloat) + SomeFloat, SomeSingleFloat) from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.extregistry import ExtRegistryEntry from pypy.rpython.lltypesystem import lltype @@ -146,7 +146,7 @@ return s_None def method_append_float(self, f): - assert isinstance(f, SomeFloat) + assert isinstance(f, SomeFloat) or isinstance(f, SomeSingleFloat) return s_None def method_getlength(self): diff --git a/pypy/rpython/lltypesystem/rbuilder.py b/pypy/rpython/lltypesystem/rbuilder.py --- a/pypy/rpython/lltypesystem/rbuilder.py +++ b/pypy/rpython/lltypesystem/rbuilder.py @@ -1,6 +1,6 @@ from pypy.rlib import rgc, jit -from pypy.rlib.objectmodel import enforceargs, keepalive_until_here from pypy.rlib.rarithmetic import ovfcheck +from 
pypy.rlib.objectmodel import enforceargs, keepalive_until_here, specialize from pypy.rpython.annlowlevel import llstr from pypy.rpython.rptr import PtrRepr from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rstr @@ -9,6 +9,7 @@ string_repr, unichar_repr, unicode_repr) from pypy.rpython.rbuilder import AbstractStringBuilderRepr from pypy.tool.sourcetools import func_with_new_name +from pypy.translator.tool.cbuild import ExternalCompilationInfo # Think about heuristics below, maybe we can come up with something # better or at least compare it with list heuristics @@ -117,6 +118,15 @@ @staticmethod def ll_append_float(ll_builder, f): + StringBuilderRepr._append_float(ll_builder, f, float2memory) + + @staticmethod + def ll_append_single_float(ll_builder, f): + StringBuilderRepr._append_float(ll_builder, f, singlefloat2memory) + + @staticmethod + @specialize.argtype(1) + def _append_float(ll_builder, f, memory_func): T = lltype.typeOf(f) BUF_T = lltype.typeOf(ll_builder.buf).TO @@ -127,7 +137,7 @@ chars_offset = llmemory.offsetof(BUF_T, 'chars') + llmemory.itemoffsetof(BUF_T.chars, 0) array = llmemory.cast_ptr_to_adr(ll_builder.buf) + chars_offset + llmemory.sizeof(BUF_T.chars.OF) * used - rffi.cast(rffi.CArrayPtr(T), array)[0] + memory_func(f, rffi.cast(rffi.CCHARP, array)) keepalive_until_here(ll_builder.buf) ll_builder.used += size @@ -170,3 +180,39 @@ unicodebuilder_repr = UnicodeBuilderRepr() stringbuilder_repr = StringBuilderRepr() + + +eci = ExternalCompilationInfo(includes=['string.h'], + post_include_bits=[""" +void pypy__float2memory(double x, char *p) { + memcpy(p, (char *)&x, sizeof(double)); +} +void pypy__singlefloat2memory(float x, char *p) { + memcpy(p, (char *)&x, sizeof(float)); +} +"""]) + +def float2memory_emulator(f, c_ptr): + with lltype.scoped_alloc(rffi.CArray(lltype.Float), 1) as f_array: + f_array[0] = f + c_array = rffi.cast(rffi.CCHARP, f_array) + for i in range(rffi.sizeof(lltype.Float)): + c_ptr[i] = c_array[i] + +def 
singlefloat2memory_emulator(f, c_ptr): + with lltype.scoped_alloc(rffi.CArray(lltype.SingleFloat), 1) as f_array: + f_array[0] = f + c_array = rffi.cast(rffi.CCHARP, f_array) + for i in range(rffi.sizeof(lltype.SingleFloat)): + c_ptr[i] = c_array[i] + +float2memory = rffi.llexternal( + "pypy__float2memory", [lltype.Float, rffi.CCHARP], lltype.Void, + compilation_info=eci, _nowrapper=True, sandboxsafe=True, + _callable=float2memory_emulator +) +singlefloat2memory = rffi.llexternal( + "pypy__singlefloat2memory", [lltype.SingleFloat, rffi.CCHARP], lltype.Void, + compilation_info=eci, _nowrapper=True, sandboxsafe=True, + _callable=singlefloat2memory_emulator, +) \ No newline at end of file diff --git a/pypy/rpython/ootypesystem/ootype.py b/pypy/rpython/ootypesystem/ootype.py --- a/pypy/rpython/ootypesystem/ootype.py +++ b/pypy/rpython/ootypesystem/ootype.py @@ -1,10 +1,9 @@ import py from py.builtin import set -from pypy.rpython.lltypesystem.lltype import LowLevelType, Signed, Unsigned, Float, Char -from pypy.rpython.lltypesystem.lltype import Bool, Void, UniChar, typeOf, \ - Primitive, isCompatibleType, enforce, saferecursive, SignedLongLong, UnsignedLongLong -from pypy.rpython.lltypesystem.lltype import frozendict -from pypy.rpython.lltypesystem.lltype import identityhash +from pypy.rpython.lltypesystem.lltype import (LowLevelType, Signed, Unsigned, + Float, SingleFloat, Char, Bool, Void, UniChar, typeOf, Primitive, + isCompatibleType, enforce, saferecursive, SignedLongLong, UnsignedLongLong, + frozendict, identityhash) from pypy.rlib.rarithmetic import intmask from pypy.rlib import objectmodel from pypy.tool.uid import uid @@ -75,7 +74,7 @@ def _example(self): return _class(ROOT) - + Class = Class() class Instance(OOType): @@ -111,7 +110,7 @@ def __hash__(self): return object.__hash__(self) - + def _defl(self): return self._null @@ -153,7 +152,7 @@ _, meth = self._lookup(name) if meth is not None: raise TypeError("Cannot add field %r: method already exists" % name) 
- + if self._superclass is not None: if self._superclass._has_field(name): raise TypeError("Field %r exists in superclass" % name) @@ -161,7 +160,7 @@ if type(defn) is not tuple: if isinstance(defn, Meth): raise TypeError("Attempting to store method in field") - + fields[name] = (defn, defn._defl()) else: ootype, default = defn @@ -198,7 +197,7 @@ def _init_instance(self, instance): if self._superclass is not None: self._superclass._init_instance(instance) - + for name, (ootype, default) in self._fields.iteritems(): instance.__dict__[name] = enforce(ootype, default) @@ -512,6 +511,8 @@ "ll_append_char": Meth([CHARTP], Void), "ll_append": Meth([STRINGTP], Void), "ll_build": Meth([], STRINGTP), + "ll_append_float": Meth([Float], Void), + "ll_append_single_float": Meth([SingleFloat], Void), "ll_getlength": Meth([], Signed), }) self._setup_methods({}) @@ -612,7 +613,7 @@ def __hash__(self): if self.ITEM is None: raise TypeError("Can't hash uninitialized List type.") - return BuiltinADTType.__hash__(self) + return BuiltinADTType.__hash__(self) def __str__(self): return '%s(%s)' % (self.__class__.__name__, @@ -624,7 +625,7 @@ def _specialize(self, generic_types): ITEMTYPE = self._specialize_type(self.ITEM, generic_types) return self.__class__(ITEMTYPE) - + def _defl(self): return self._null @@ -643,7 +644,7 @@ # placeholders for types # make sure that each derived class has his own SELFTYPE_T # placeholder, because we want backends to distinguish that. - + SELFTYPE_T = object() ITEMTYPE_T = object() oopspec_name = 'list' @@ -693,7 +694,7 @@ def __hash__(self): if self.ITEM is None: raise TypeError("Can't hash uninitialized List type.") - return BuiltinADTType.__hash__(self) + return BuiltinADTType.__hash__(self) def __str__(self): return '%s(%s)' % (self.__class__.__name__, @@ -789,7 +790,7 @@ return False if not self._is_initialized() or not other._is_initialized(): return False # behave like a ForwardReference, i.e. 
compare by identity - return BuiltinADTType.__eq__(self, other) + return BuiltinADTType.__eq__(self, other) def __ne__(self, other): return not (self == other) @@ -811,7 +812,7 @@ self._KEYTYPE = KEYTYPE self._VALUETYPE = VALUETYPE self._init_methods() - + class CustomDict(Dict): def __init__(self, KEYTYPE=None, VALUETYPE=None): @@ -870,7 +871,7 @@ KEYTYPE = self._specialize_type(self._KEYTYPE, generic_types) VALUETYPE = self._specialize_type(self._VALUETYPE, generic_types) return self.__class__(KEYTYPE, VALUETYPE) - + # ____________________________________________________________ class _object(object): @@ -942,7 +943,7 @@ Class._null = nullruntimeclass class _instance(object): - + def __init__(self, INSTANCE): self.__dict__["_TYPE"] = INSTANCE INSTANCE._init_instance(self) @@ -957,7 +958,7 @@ DEFINST, meth = self._TYPE._lookup(name) if meth is not None: return meth._bound(DEFINST, self) - + self._TYPE._check_field(name) return self.__dict__[name] @@ -997,7 +998,7 @@ return self _enforce = _upcast - + def _downcast(self, INSTANCE): assert instanceof(self, INSTANCE) return self @@ -1022,7 +1023,7 @@ def __getattribute__(self, name): if name.startswith("_"): return object.__getattribute__(self, name) - + raise RuntimeError("Access to field in null object") def __setattr__(self, name, value): @@ -1188,7 +1189,7 @@ def __ne__(self, other): return not (self == other) - + def __hash__(self): return hash(frozendict(self.__dict__)) @@ -1226,7 +1227,7 @@ def __eq__(self, other): return self is other - + def __hash__(self): return id(self) @@ -1250,7 +1251,7 @@ class _meth(_callable): _bound_class = _bound_meth - + def __init__(self, METHOD, **attrs): assert isinstance(METHOD, Meth) _callable.__init__(self, METHOD, **attrs) @@ -1336,7 +1337,7 @@ return True else: return False - + def annotation_to_lltype(cls, ann): from pypy.annotation import model as annmodel return annmodel.annotation_to_lltype(ann) @@ -1541,6 +1542,14 @@ assert isinstance(s, _string) 
self._buf.append(s._str) + def ll_append_float(self, f): + import struct + self._buf.append(struct.pack("d", f)) + + def ll_append_single_float(self, f): + import struct + self._buf.append(struct.pack("f", f)) + def ll_build(self): if self._TYPE is StringBuilder: return make_string(''.join(self._buf)) @@ -1602,7 +1611,7 @@ return len(self._list) def _ll_resize_ge(self, length): - # NOT_RPYTHON + # NOT_RPYTHON if len(self._list) < length: diff = length - len(self._list) self._list += [self._TYPE.ITEM._defl()] * diff @@ -1638,7 +1647,7 @@ class _null_list(_null_mixin(_list), _list): def __init__(self, LIST): - self.__dict__["_TYPE"] = LIST + self.__dict__["_TYPE"] = LIST class _array(_builtin_type): def __init__(self, ARRAY, length): @@ -1671,7 +1680,7 @@ class _null_array(_null_mixin(_array), _array): def __init__(self, ARRAY): - self.__dict__["_TYPE"] = ARRAY + self.__dict__["_TYPE"] = ARRAY class _dict(_builtin_type): def __init__(self, DICT): @@ -1769,7 +1778,7 @@ def ll_go_next(self): # NOT_RPYTHON self._check_stamp() - self._index += 1 + self._index += 1 if self._index >= len(self._items): return False else: @@ -1780,7 +1789,7 @@ self._check_stamp() assert 0 <= self._index < len(self._items) return self._items[self._index][0] - + def ll_current_value(self): # NOT_RPYTHON self._check_stamp() @@ -1840,7 +1849,7 @@ class _null_record(_null_mixin(_record), _record): def __init__(self, RECORD): - self.__dict__["_TYPE"] = RECORD + self.__dict__["_TYPE"] = RECORD def new(TYPE): @@ -1932,7 +1941,7 @@ def ooupcast(INSTANCE, instance): return instance._upcast(INSTANCE) - + def oodowncast(INSTANCE, instance): return instance._downcast(INSTANCE) @@ -1959,7 +1968,7 @@ def oostring(obj, base): """ Convert char, int, float, instances and str to str. - + Base is used only for formatting int: for other types is ignored and should be set to -1. For int only base 8, 10 and 16 are supported. 
diff --git a/pypy/rpython/ootypesystem/rbuilder.py b/pypy/rpython/ootypesystem/rbuilder.py --- a/pypy/rpython/ootypesystem/rbuilder.py +++ b/pypy/rpython/ootypesystem/rbuilder.py @@ -9,7 +9,7 @@ class BaseBuilderRepr(AbstractStringBuilderRepr): def empty(self): return ootype.null(self.lowleveltype) - + @classmethod def ll_new(cls, init_size): if init_size < 0 or init_size > MAX: @@ -40,6 +40,14 @@ builder.ll_append_char(char) @staticmethod + def ll_append_float(builder, f): + builder.ll_append_float(f) + + @staticmethod + def ll_append_single_float(builder, f): + builder.ll_append_single_float(f) + + @staticmethod def ll_build(builder): return builder.ll_build() diff --git a/pypy/rpython/rbuilder.py b/pypy/rpython/rbuilder.py --- a/pypy/rpython/rbuilder.py +++ b/pypy/rpython/rbuilder.py @@ -1,5 +1,6 @@ from pypy.annotation.model import SomeChar, SomeUnicodeCodePoint from pypy.rlib.rstring import INIT_SIZE +from pypy.rpython.error import TyperError from pypy.rpython.lltypesystem import lltype from pypy.rpython.rmodel import Repr @@ -40,9 +41,14 @@ return hop.gendirectcall(self.ll_append_charpsize, *vlist) def rtype_method_append_float(self, hop): - vlist = hop.inputargs(self, lltype.Float) + try: + vlist = hop.inputargs(self, lltype.Float) + target = self.ll_append_float + except TyperError: + vlist = hop.inputargs(self, lltype.SingleFloat) + target = self.ll_append_single_float hop.exception_cannot_occur() - return hop.gendirectcall(self.ll_append_float, *vlist) + return hop.gendirectcall(target, *vlist) def rtype_method_getlength(self, hop): vlist = hop.inputargs(self) diff --git a/pypy/rpython/test/test_rbuilder.py b/pypy/rpython/test/test_rbuilder.py --- a/pypy/rpython/test/test_rbuilder.py +++ b/pypy/rpython/test/test_rbuilder.py @@ -1,6 +1,7 @@ from __future__ import with_statement import py +from pypy.rlib.rarithmetic import r_singlefloat from pypy.rlib.rstring import StringBuilder, UnicodeBuilder from pypy.rpython.annlowlevel import llstr, hlstr from 
pypy.rpython.lltypesystem import rffi From noreply at buildbot.pypy.org Sun Mar 4 02:57:12 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 4 Mar 2012 02:57:12 +0100 (CET) Subject: [pypy-commit] pypy jit-frame-counter: Added call_id to DMPs. Message-ID: <20120304015712.E7E308204C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: jit-frame-counter Changeset: r53157:7dd6dbd0d39a Date: 2012-03-03 20:56 -0500 http://bitbucket.org/pypy/pypy/changeset/7dd6dbd0d39a/ Log: Added call_id to DMPs. diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -171,7 +171,7 @@ 'unicodesetitem' : (('ref', 'int', 'int'), 'int'), 'cast_ptr_to_int' : (('ref',), 'int'), 'cast_int_to_ptr' : (('int',), 'ref'), - 'debug_merge_point': (('ref', 'int'), None), + 'debug_merge_point': (('ref', 'int', 'int'), None), 'force_token' : ((), 'int'), 'call_may_force' : (('int', 'varargs'), 'intorptr'), 'guard_not_forced': ((), None), diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -604,7 +604,7 @@ [funcbox, BoxInt(arg1), BoxInt(arg2)], 'int', descr=calldescr) assert res.getint() == f(arg1, arg2) - + def test_call_stack_alignment(self): # test stack alignment issues, notably for Mac OS/X. # also test the ordering of the arguments. 
@@ -1490,7 +1490,8 @@ def test_noops(self): c_box = self.alloc_string("hi there").constbox() c_nest = ConstInt(0) - self.execute_operation(rop.DEBUG_MERGE_POINT, [c_box, c_nest], 'void') + c_id = ConstInt(0) + self.execute_operation(rop.DEBUG_MERGE_POINT, [c_box, c_nest, c_id], 'void') self.execute_operation(rop.JIT_DEBUG, [c_box, c_nest, c_nest, c_nest, c_nest], 'void') @@ -3061,7 +3062,7 @@ ResOperation(rop.JUMP, [i2], None, descr=targettoken2), ] self.cpu.compile_bridge(faildescr, inputargs, operations, looptoken) - + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 3 res = self.cpu.get_latest_value_int(0) @@ -3106,7 +3107,7 @@ assert len(mc) == len(ops) for i in range(len(mc)): assert mc[i].split("\t")[-1].startswith(ops[i]) - + data = ctypes.string_at(info.asmaddr, info.asmlen) mc = list(machine_code_dump(data, info.asmaddr, cpuname)) lines = [line for line in mc if line.count('\t') == 2] diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -371,7 +371,7 @@ operations = [ ResOperation(rop.LABEL, [i0], None, descr=targettoken), - ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("hello"), 0], None), + ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("hello"), 0, 0], None), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), @@ -390,7 +390,7 @@ bridge = [ ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), - ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("bye"), 0], None), + ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("bye"), 0, 0], None), ResOperation(rop.JUMP, [i1b], None, descr=targettoken), ] bridge[1].setfailargs([i1b]) @@ -531,12 +531,12 @@ loop = """ [i0] label(i0, descr=preambletoken) - debug_merge_point('xyz', 0) + 
debug_merge_point('xyz', 0, 0) i1 = int_add(i0, 1) i2 = int_ge(i1, 10) guard_false(i2) [] label(i1, descr=targettoken) - debug_merge_point('xyz', 0) + debug_merge_point('xyz', 0, 0) i11 = int_add(i1, 1) i12 = int_ge(i11, 10) guard_false(i12) [] @@ -569,7 +569,7 @@ loop = """ [i0] label(i0, descr=targettoken) - debug_merge_point('xyz', 0) + debug_merge_point('xyz', 0, 0) i1 = int_add(i0, 1) i2 = int_ge(i1, 10) guard_false(i2) [] diff --git a/pypy/jit/metainterp/graphpage.py b/pypy/jit/metainterp/graphpage.py --- a/pypy/jit/metainterp/graphpage.py +++ b/pypy/jit/metainterp/graphpage.py @@ -169,9 +169,9 @@ if op.getopnum() == rop.DEBUG_MERGE_POINT: jd_sd = self.metainterp_sd.jitdrivers_sd[op.getarg(0).getint()] if jd_sd._get_printable_location_ptr: - s = jd_sd.warmstate.get_location_str(op.getarglist()[2:]) + s = jd_sd.warmstate.get_location_str(op.getarglist()[3:]) s = s.replace(',', '.') # we use comma for argument splitting - op_repr = "debug_merge_point(%d, '%s')" % (op.getarg(1).getint(), s) + op_repr = "debug_merge_point(%d, %d, '%s')" % (op.getarg(1).getint(), op.getarg(2).getint(), s) lines.append(op_repr) if is_interesting_guard(op): tgt = op.getdescr()._debug_suboperations[0] diff --git a/pypy/jit/metainterp/logger.py b/pypy/jit/metainterp/logger.py --- a/pypy/jit/metainterp/logger.py +++ b/pypy/jit/metainterp/logger.py @@ -110,9 +110,9 @@ def repr_of_resop(self, op, ops_offset=None): if op.getopnum() == rop.DEBUG_MERGE_POINT: jd_sd = self.metainterp_sd.jitdrivers_sd[op.getarg(0).getint()] - s = jd_sd.warmstate.get_location_str(op.getarglist()[2:]) + s = jd_sd.warmstate.get_location_str(op.getarglist()[3:]) s = s.replace(',', '.') # we use comma for argument splitting - return "debug_merge_point(%d, '%s')" % (op.getarg(1).getint(), s) + return "debug_merge_point(%d, %d, '%s')" % (op.getarg(1).getint(), op.getarg(2).getint(), s) if ops_offset is None: offset = -1 else: @@ -149,7 +149,7 @@ if target_token.exported_state: for op in 
target_token.exported_state.inputarg_setup_ops: debug_print(' ' + self.repr_of_resop(op)) - + def _log_operations(self, inputargs, operations, ops_offset): if not have_debug_prints(): return diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -974,9 +974,11 @@ any_operation = len(self.metainterp.history.operations) > 0 jitdriver_sd = self.metainterp.staticdata.jitdrivers_sd[jdindex] self.verify_green_args(jitdriver_sd, greenboxes) - self.debug_merge_point(jitdriver_sd, jdindex, self.metainterp.portal_call_depth, + self.debug_merge_point(jitdriver_sd, jdindex, + self.metainterp.portal_call_depth, + self.metainterp.call_ids[-1], greenboxes) - + if self.metainterp.seen_loop_header_for_jdindex < 0: if not any_operation: return @@ -1028,11 +1030,11 @@ assembler_call=True) raise ChangeFrame - def debug_merge_point(self, jitdriver_sd, jd_index, portal_call_depth, greenkey): + def debug_merge_point(self, jitdriver_sd, jd_index, portal_call_depth, current_call_id, greenkey): # debugging: produce a DEBUG_MERGE_POINT operation loc = jitdriver_sd.warmstate.get_location_str(greenkey) debug_print(loc) - args = [ConstInt(jd_index), ConstInt(portal_call_depth)] + greenkey + args = [ConstInt(jd_index), ConstInt(portal_call_depth), ConstInt(current_call_id)] + greenkey self.metainterp.history.record(rop.DEBUG_MERGE_POINT, args, None) @arguments("box", "label") @@ -1574,11 +1576,14 @@ self.call_pure_results = args_dict_box() self.heapcache = HeapCache() + self.call_ids = [] + self.current_call_id = 0 + def retrace_needed(self, trace): self.partial_trace = trace self.retracing_from = len(self.history.operations) - 1 self.heapcache.reset() - + def perform_call(self, jitcode, boxes, greenkey=None): # causes the metainterp to enter the given subfunction @@ -1592,6 +1597,8 @@ def newframe(self, jitcode, greenkey=None): if jitcode.is_portal: self.portal_call_depth += 1 + 
self.call_ids.append(self.current_call_id) + self.current_call_id += 1 if greenkey is not None and self.is_main_jitcode(jitcode): self.portal_trace_positions.append( (greenkey, len(self.history.operations))) @@ -1608,6 +1615,7 @@ jitcode = frame.jitcode if jitcode.is_portal: self.portal_call_depth -= 1 + self.call_ids.pop() if frame.greenkey is not None and self.is_main_jitcode(jitcode): self.portal_trace_positions.append( (None, len(self.history.operations))) @@ -1976,7 +1984,7 @@ # Found! Compile it as a loop. # raises in case it works -- which is the common case if self.partial_trace: - if start != self.retracing_from: + if start != self.retracing_from: raise SwitchToBlackhole(ABORT_BAD_LOOP) # For now self.compile_loop(original_boxes, live_arg_boxes, start, resumedescr) # creation of the loop was cancelled! @@ -2085,7 +2093,7 @@ if not token.target_tokens: return None return token - + def compile_loop(self, original_boxes, live_arg_boxes, start, resume_at_jump_descr): num_green_args = self.jitdriver_sd.num_green_args greenkey = original_boxes[:num_green_args] diff --git a/pypy/jit/metainterp/test/test_logger.py b/pypy/jit/metainterp/test/test_logger.py --- a/pypy/jit/metainterp/test/test_logger.py +++ b/pypy/jit/metainterp/test/test_logger.py @@ -54,7 +54,7 @@ class FakeJitDriver(object): class warmstate(object): get_location_str = staticmethod(lambda args: "dupa") - + class FakeMetaInterpSd: cpu = AbstractCPU() cpu.ts = self.ts @@ -77,7 +77,7 @@ equaloplists(loop.operations, oloop.operations) assert oloop.inputargs == loop.inputargs return logger, loop, oloop - + def test_simple(self): inp = ''' [i0, i1, i2, p3, p4, p5] @@ -116,12 +116,13 @@ def test_debug_merge_point(self): inp = ''' [] - debug_merge_point(0, 0) + debug_merge_point(0, 0, 0) ''' _, loop, oloop = self.reparse(inp, check_equal=False) assert loop.operations[0].getarg(1).getint() == 0 - assert oloop.operations[0].getarg(1)._get_str() == "dupa" - + assert loop.operations[0].getarg(2).getint() == 0 
+ assert oloop.operations[0].getarg(2)._get_str() == "dupa" + def test_floats(self): inp = ''' [f0] @@ -142,7 +143,7 @@ output = logger.log_loop(loop) assert output.splitlines()[-1] == "jump(i0, descr=)" pure_parse(output) - + def test_guard_descr(self): namespace = {'fdescr': BasicFailDescr()} inp = ''' @@ -154,7 +155,7 @@ output = logger.log_loop(loop) assert output.splitlines()[-1] == "guard_true(i0, descr=) [i0]" pure_parse(output) - + logger = Logger(self.make_metainterp_sd(), guard_number=False) output = logger.log_loop(loop) lastline = output.splitlines()[-1] diff --git a/pypy/jit/metainterp/test/test_warmspot.py b/pypy/jit/metainterp/test/test_warmspot.py --- a/pypy/jit/metainterp/test/test_warmspot.py +++ b/pypy/jit/metainterp/test/test_warmspot.py @@ -13,7 +13,7 @@ class WarmspotTests(object): - + def test_basic(self): mydriver = JitDriver(reds=['a'], greens=['i']) @@ -77,16 +77,16 @@ self.meta_interp(f, [123, 10]) assert len(get_stats().locations) >= 4 for loc in get_stats().locations: - assert loc == (0, 123) + assert loc == (0, 0, 123) def test_set_param_enable_opts(self): from pypy.rpython.annlowlevel import llstr, hlstr - + myjitdriver = JitDriver(greens = [], reds = ['n']) class A(object): def m(self, n): return n-1 - + def g(n): while n > 0: myjitdriver.can_enter_jit(n=n) @@ -332,7 +332,7 @@ ts = llhelper translate_support_code = False stats = "stats" - + def get_fail_descr_number(self, d): return -1 @@ -352,7 +352,7 @@ return "not callable" driver = JitDriver(reds = ['red'], greens = ['green']) - + def f(green): red = 0 while red < 10: diff --git a/pypy/jit/tool/test/test_oparser.py b/pypy/jit/tool/test/test_oparser.py --- a/pypy/jit/tool/test/test_oparser.py +++ b/pypy/jit/tool/test/test_oparser.py @@ -146,15 +146,17 @@ def test_debug_merge_point(self): x = ''' [] - debug_merge_point(0, "info") - debug_merge_point(0, 'info') - debug_merge_point(1, ' info') - debug_merge_point(0, '(stuff) #1') + debug_merge_point(0, 0, "info") + 
debug_merge_point(0, 0, 'info') + debug_merge_point(1, 1, ' info') + debug_merge_point(0, 0, '(stuff) #1') ''' loop = self.parse(x) assert loop.operations[0].getarg(1)._get_str() == 'info' + assert loop.operations[0].getarg(2).getint() == 0 assert loop.operations[1].getarg(1)._get_str() == 'info' assert loop.operations[2].getarg(1)._get_str() == " info" + assert loop.operations[2].getarg(2).getint() == 1 assert loop.operations[3].getarg(1)._get_str() == "(stuff) #1" diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -93,7 +93,7 @@ end_index += 1 op.asm = '\n'.join([asm[i][1] for i in range(asm_index, end_index)]) return loop - + def _asm_disassemble(self, d, origin_addr, tp): from pypy.jit.backend.x86.tool.viewcode import machine_code_dump return list(machine_code_dump(d, tp, origin_addr)) @@ -109,7 +109,7 @@ if not argspec.strip(): return [], None if opname == 'debug_merge_point': - return argspec.split(", ", 1), None + return argspec.split(", ", 2), None else: args = argspec.split(', ') descr = None @@ -159,7 +159,7 @@ for op in operations: if op.name == 'debug_merge_point': self.inline_level = int(op.args[0]) - self.parse_code_data(op.args[1][1:-1]) + self.parse_code_data(op.args[2][1:-1]) break else: self.inline_level = 0 @@ -417,7 +417,7 @@ part.descr = descrs[i] part.comment = trace.comment parts.append(part) - + return parts def parse_log_counts(input, loops): diff --git a/pypy/tool/jitlogparser/test/test_parser.py b/pypy/tool/jitlogparser/test/test_parser.py --- a/pypy/tool/jitlogparser/test/test_parser.py +++ b/pypy/tool/jitlogparser/test/test_parser.py @@ -29,7 +29,7 @@ def test_parse_non_code(): ops = parse(''' [] - debug_merge_point(0, "SomeRandomStuff") + debug_merge_point(0, 0, "SomeRandomStuff") ''') res = Function.from_operations(ops.operations, LoopStorage()) assert len(res.chunks) == 1 @@ -39,10 +39,10 @@ ops = parse(''' [i0] label() - 
debug_merge_point(0, " #10 ADD") - debug_merge_point(0, " #11 SUB") + debug_merge_point(0, 0, " #10 ADD") + debug_merge_point(0, 0, " #11 SUB") i1 = int_add(i0, 1) - debug_merge_point(0, " #11 SUB") + debug_merge_point(0, 0, " #11 SUB") i2 = int_add(i1, 1) ''') res = Function.from_operations(ops.operations, LoopStorage(), loopname='') @@ -57,12 +57,12 @@ def test_inlined_call(): ops = parse(""" [] - debug_merge_point(0, ' #28 CALL_FUNCTION') + debug_merge_point(0, 0, ' #28 CALL_FUNCTION') i18 = getfield_gc(p0, descr=) - debug_merge_point(1, ' #0 LOAD_FAST') - debug_merge_point(1, ' #3 LOAD_CONST') - debug_merge_point(1, ' #7 RETURN_VALUE') - debug_merge_point(0, ' #31 STORE_FAST') + debug_merge_point(1, 1, ' #0 LOAD_FAST') + debug_merge_point(1, 1, ' #3 LOAD_CONST') + debug_merge_point(1, 1, ' #7 RETURN_VALUE') + debug_merge_point(0, 0, ' #31 STORE_FAST') """) res = Function.from_operations(ops.operations, LoopStorage()) assert len(res.chunks) == 3 # two chunks + inlined call @@ -75,10 +75,10 @@ def test_name(): ops = parse(''' [i0] - debug_merge_point(0, " #10 ADD") - debug_merge_point(0, " #11 SUB") + debug_merge_point(0, 0, " #10 ADD") + debug_merge_point(0, 0, " #11 SUB") i1 = int_add(i0, 1) - debug_merge_point(0, " #11 SUB") + debug_merge_point(0, 0, " #11 SUB") i2 = int_add(i1, 1) ''') res = Function.from_operations(ops.operations, LoopStorage()) @@ -92,10 +92,10 @@ ops = parse(''' [i0] i3 = int_add(i0, 1) - debug_merge_point(0, " #10 ADD") - debug_merge_point(0, " #11 SUB") + debug_merge_point(0, 0, " #10 ADD") + debug_merge_point(0, 0, " #11 SUB") i1 = int_add(i0, 1) - debug_merge_point(0, " #11 SUB") + debug_merge_point(0, 0, " #11 SUB") i2 = int_add(i1, 1) ''') res = Function.from_operations(ops.operations, LoopStorage()) @@ -105,10 +105,10 @@ fname = str(py.path.local(__file__).join('..', 'x.py')) ops = parse(''' [i0, i1] - debug_merge_point(0, " #0 LOAD_FAST") - debug_merge_point(0, " #3 LOAD_FAST") - debug_merge_point(0, " #6 BINARY_ADD") - 
debug_merge_point(0, " #7 RETURN_VALUE") + debug_merge_point(0, 0, " #0 LOAD_FAST") + debug_merge_point(0, 0, " #3 LOAD_FAST") + debug_merge_point(0, 0, " #6 BINARY_ADD") + debug_merge_point(0, 0, " #7 RETURN_VALUE") ''' % locals()) res = Function.from_operations(ops.operations, LoopStorage()) assert res.chunks[1].lineno == 3 @@ -119,11 +119,11 @@ fname = str(py.path.local(__file__).join('..', 'x.py')) ops = parse(''' [i0, i1] - debug_merge_point(0, " #9 LOAD_FAST") - debug_merge_point(0, " #12 LOAD_CONST") - debug_merge_point(0, " #22 LOAD_CONST") - debug_merge_point(0, " #28 LOAD_CONST") - debug_merge_point(0, " #6 SETUP_LOOP") + debug_merge_point(0, 0, " #9 LOAD_FAST") + debug_merge_point(0, 0, " #12 LOAD_CONST") + debug_merge_point(0, 0, " #22 LOAD_CONST") + debug_merge_point(0, 0, " #28 LOAD_CONST") + debug_merge_point(0, 0, " #6 SETUP_LOOP") ''' % locals()) res = Function.from_operations(ops.operations, LoopStorage()) assert res.linerange == (7, 9) @@ -135,7 +135,7 @@ fname = str(py.path.local(__file__).join('..', 'x.py')) ops = parse(""" [p6, p1] - debug_merge_point(0, ' #17 FOR_ITER') + debug_merge_point(0, 0, ' #17 FOR_ITER') guard_class(p6, 144264192, descr=) p12 = getfield_gc(p6, descr=) """ % locals()) @@ -181,7 +181,7 @@ def test_parsing_strliteral(): loop = parse(""" - debug_merge_point(0, 'StrLiteralSearch at 11/51 [17, 8, 3, 1, 1, 1, 1, 51, 0, 19, 51, 1]') + debug_merge_point(0, 0, 'StrLiteralSearch at 11/51 [17, 8, 3, 1, 1, 1, 1, 51, 0, 19, 51, 1]') """) ops = Function.from_operations(loop.operations, LoopStorage()) chunk = ops.chunks[0] @@ -193,12 +193,12 @@ loop = parse(""" # Loop 0 : loop with 19 ops [p0, p1, p2, p3, i4] - debug_merge_point(0, ' #15 COMPARE_OP') + debug_merge_point(0, 0, ' #15 COMPARE_OP') +166: i6 = int_lt(i4, 10000) guard_true(i6, descr=) [p1, p0, p2, p3, i4] - debug_merge_point(0, ' #27 INPLACE_ADD') + debug_merge_point(0, 0, ' #27 INPLACE_ADD') +179: i8 = int_add(i4, 1) - debug_merge_point(0, ' #31 JUMP_ABSOLUTE') + 
debug_merge_point(0, 0, ' #31 JUMP_ABSOLUTE') +183: i10 = getfield_raw(40564608, descr=) +191: i12 = int_sub(i10, 1) +195: setfield_raw(40564608, i12, descr=) @@ -287,8 +287,8 @@ def test_parse_nonpython(): loop = parse(""" [] - debug_merge_point(0, 'random') - debug_merge_point(0, ' #15 COMPARE_OP') + debug_merge_point(0, 0, 'random') + debug_merge_point(0, 0, ' #15 COMPARE_OP') """) f = Function.from_operations(loop.operations, LoopStorage()) assert f.chunks[-1].filename == 'x.py' From noreply at buildbot.pypy.org Sun Mar 4 03:17:35 2012 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 4 Mar 2012 03:17:35 +0100 (CET) Subject: [pypy-commit] pypy numpy-record-dtypes: skip a test that's definitely not a blackbox one Message-ID: <20120304021735.5CF5F8204C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: numpy-record-dtypes Changeset: r53158:d2e736858ae0 Date: 2012-03-03 18:17 -0800 http://bitbucket.org/pypy/pypy/changeset/d2e736858ae0/ Log: skip a test that's definitely not a blackbox one diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -1,3 +1,5 @@ +import py +from pypy.conftest import option from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest from pypy.module.micronumpy.interp_dtype import nonnative_byteorder_prefix from pypy.interpreter.gateway import interp2app @@ -474,13 +476,6 @@ assert dtype('i8') == dtype(' Author: edelsohn Branch: ppc-jit-backend Changeset: r53159:3df13d1a708f Date: 2012-03-03 21:20 -0500 http://bitbucket.org/pypy/pypy/changeset/3df13d1a708f/ Log: Remove extra blank line. 
diff --git a/pypy/jit/backend/ppc/ppc_assembler.py b/pypy/jit/backend/ppc/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppc_assembler.py @@ -340,7 +340,6 @@ mc.addi(r.SP.value, r.SP.value, frame_size) mc.b_abs(self.propagate_exception_path) - mc.prepare_insts_blocks() rawstart = mc.materialize(self.cpu.asmmemmgr, []) if IS_PPC_64: From noreply at buildbot.pypy.org Sun Mar 4 03:21:39 2012 From: noreply at buildbot.pypy.org (edelsohn) Date: Sun, 4 Mar 2012 03:21:39 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: Don't save and restore r3 and r4 in REGLOC_TO_COPY_AREA_OFS. Message-ID: <20120304022139.B3A9C8204C@wyvern.cs.uni-duesseldorf.de> Author: edelsohn Branch: ppc-jit-backend Changeset: r53160:1e9d770b673e Date: 2012-03-03 21:21 -0500 http://bitbucket.org/pypy/pypy/changeset/1e9d770b673e/ Log: Don't save and restore r3 and r4 in REGLOC_TO_COPY_AREA_OFS. diff --git a/pypy/jit/backend/ppc/regalloc.py b/pypy/jit/backend/ppc/regalloc.py --- a/pypy/jit/backend/ppc/regalloc.py +++ b/pypy/jit/backend/ppc/regalloc.py @@ -46,33 +46,31 @@ save_around_call_regs = r.VOLATILES REGLOC_TO_COPY_AREA_OFS = { - r.r3: MY_COPY_OF_REGS + 0 * WORD, - r.r4: MY_COPY_OF_REGS + 1 * WORD, - r.r5: MY_COPY_OF_REGS + 2 * WORD, - r.r6: MY_COPY_OF_REGS + 3 * WORD, - r.r7: MY_COPY_OF_REGS + 4 * WORD, - r.r8: MY_COPY_OF_REGS + 5 * WORD, - r.r9: MY_COPY_OF_REGS + 6 * WORD, - r.r10: MY_COPY_OF_REGS + 7 * WORD, - r.r11: MY_COPY_OF_REGS + 8 * WORD, - r.r12: MY_COPY_OF_REGS + 9 * WORD, - r.r14: MY_COPY_OF_REGS + 10 * WORD, - r.r15: MY_COPY_OF_REGS + 11 * WORD, - r.r16: MY_COPY_OF_REGS + 12 * WORD, - r.r17: MY_COPY_OF_REGS + 13 * WORD, - r.r18: MY_COPY_OF_REGS + 14 * WORD, - r.r19: MY_COPY_OF_REGS + 15 * WORD, - r.r20: MY_COPY_OF_REGS + 16 * WORD, - r.r21: MY_COPY_OF_REGS + 17 * WORD, - r.r22: MY_COPY_OF_REGS + 18 * WORD, - r.r23: MY_COPY_OF_REGS + 19 * WORD, - r.r24: MY_COPY_OF_REGS + 20 * WORD, - r.r25: MY_COPY_OF_REGS + 21 * WORD, - r.r26: 
MY_COPY_OF_REGS + 22 * WORD, - r.r27: MY_COPY_OF_REGS + 23 * WORD, - r.r28: MY_COPY_OF_REGS + 24 * WORD, - r.r29: MY_COPY_OF_REGS + 25 * WORD, - r.r30: MY_COPY_OF_REGS + 26 * WORD, + r.r5: MY_COPY_OF_REGS + 0 * WORD, + r.r6: MY_COPY_OF_REGS + 1 * WORD, + r.r7: MY_COPY_OF_REGS + 2 * WORD, + r.r8: MY_COPY_OF_REGS + 3 * WORD, + r.r9: MY_COPY_OF_REGS + 4 * WORD, + r.r10: MY_COPY_OF_REGS + 5 * WORD, + r.r11: MY_COPY_OF_REGS + 6 * WORD, + r.r12: MY_COPY_OF_REGS + 7 * WORD, + r.r14: MY_COPY_OF_REGS + 8 * WORD, + r.r15: MY_COPY_OF_REGS + 9 * WORD, + r.r16: MY_COPY_OF_REGS + 10 * WORD, + r.r17: MY_COPY_OF_REGS + 11 * WORD, + r.r18: MY_COPY_OF_REGS + 12 * WORD, + r.r19: MY_COPY_OF_REGS + 13 * WORD, + r.r20: MY_COPY_OF_REGS + 14 * WORD, + r.r21: MY_COPY_OF_REGS + 15 * WORD, + r.r22: MY_COPY_OF_REGS + 16 * WORD, + r.r23: MY_COPY_OF_REGS + 17 * WORD, + r.r24: MY_COPY_OF_REGS + 18 * WORD, + r.r25: MY_COPY_OF_REGS + 19 * WORD, + r.r26: MY_COPY_OF_REGS + 20 * WORD, + r.r27: MY_COPY_OF_REGS + 21 * WORD, + r.r28: MY_COPY_OF_REGS + 22 * WORD, + r.r29: MY_COPY_OF_REGS + 23 * WORD, + r.r30: MY_COPY_OF_REGS + 24 * WORD, } def __init__(self, longevity, frame_manager=None, assembler=None): From noreply at buildbot.pypy.org Sun Mar 4 03:26:22 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 4 Mar 2012 03:26:22 +0100 (CET) Subject: [pypy-commit] pypy numpy-record-dtypes: merged upstream Message-ID: <20120304022622.706F48204C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: numpy-record-dtypes Changeset: r53161:070a10dbf7b0 Date: 2012-03-03 21:08 -0500 http://bitbucket.org/pypy/pypy/changeset/070a10dbf7b0/ Log: merged upstream diff --git a/lib_pypy/cPickle.py b/lib_pypy/cPickle.py --- a/lib_pypy/cPickle.py +++ b/lib_pypy/cPickle.py @@ -2,16 +2,95 @@ # One-liner implementation of cPickle # -from pickle import * +from pickle import Pickler, dump, dumps, PickleError, PicklingError, UnpicklingError, _EmptyClass from pickle import __doc__, __version__, format_version, 
compatible_formats +from types import * +from copy_reg import dispatch_table +from copy_reg import _extension_registry, _inverted_registry, _extension_cache +import marshal, struct, sys try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f +# These are purely informational; no code uses these. +format_version = "2.0" # File format version we write +compatible_formats = ["1.0", # Original protocol 0 + "1.1", # Protocol 0 with INST added + "1.2", # Original protocol 1 + "1.3", # Protocol 1 with BINFLOAT added + "2.0", # Protocol 2 + ] # Old format versions we can read + +# Keep in synch with cPickle. This is the highest protocol number we +# know how to read. +HIGHEST_PROTOCOL = 2 BadPickleGet = KeyError UnpickleableError = PicklingError +MARK = ord('(') # push special markobject on stack +STOP = ord('.') # every pickle ends with STOP +POP = ord('0') # discard topmost stack item +POP_MARK = ord('1') # discard stack top through topmost markobject +DUP = ord('2') # duplicate top stack item +FLOAT = ord('F') # push float object; decimal string argument +INT = ord('I') # push integer or bool; decimal string argument +BININT = ord('J') # push four-byte signed int +BININT1 = ord('K') # push 1-byte unsigned int +LONG = ord('L') # push long; decimal string argument +BININT2 = ord('M') # push 2-byte unsigned int +NONE = ord('N') # push None +PERSID = ord('P') # push persistent object; id is taken from string arg +BINPERSID = ord('Q') # " " " ; " " " " stack +REDUCE = ord('R') # apply callable to argtuple, both on stack +STRING = ord('S') # push string; NL-terminated string argument +BINSTRING = ord('T') # push string; counted binary string argument +SHORT_BINSTRING = ord('U') # " " ; " " " " < 256 bytes +UNICODE = ord('V') # push Unicode string; raw-unicode-escaped'd argument +BINUNICODE = ord('X') # " " " ; counted UTF-8 string argument +APPEND = ord('a') # append stack top to list below it +BUILD = ord('b') # call __setstate__ or 
__dict__.update() +GLOBAL = ord('c') # push self.find_class(modname, name); 2 string args +DICT = ord('d') # build a dict from stack items +EMPTY_DICT = ord('}') # push empty dict +APPENDS = ord('e') # extend list on stack by topmost stack slice +GET = ord('g') # push item from memo on stack; index is string arg +BINGET = ord('h') # " " " " " " ; " " 1-byte arg +INST = ord('i') # build & push class instance +LONG_BINGET = ord('j') # push item from memo on stack; index is 4-byte arg +LIST = ord('l') # build list from topmost stack items +EMPTY_LIST = ord(']') # push empty list +OBJ = ord('o') # build & push class instance +PUT = ord('p') # store stack top in memo; index is string arg +BINPUT = ord('q') # " " " " " ; " " 1-byte arg +LONG_BINPUT = ord('r') # " " " " " ; " " 4-byte arg +SETITEM = ord('s') # add key+value pair to dict +TUPLE = ord('t') # build tuple from topmost stack items +EMPTY_TUPLE = ord(')') # push empty tuple +SETITEMS = ord('u') # modify dict by adding topmost key+value pairs +BINFLOAT = ord('G') # push float; arg is 8-byte float encoding + +TRUE = 'I01\n' # not an opcode; see INT docs in pickletools.py +FALSE = 'I00\n' # not an opcode; see INT docs in pickletools.py + +# Protocol 2 + +PROTO = ord('\x80') # identify pickle protocol +NEWOBJ = ord('\x81') # build object by applying cls.__new__ to argtuple +EXT1 = ord('\x82') # push object from extension registry; 1-byte index +EXT2 = ord('\x83') # ditto, but 2-byte index +EXT4 = ord('\x84') # ditto, but 4-byte index +TUPLE1 = ord('\x85') # build 1-tuple from stack top +TUPLE2 = ord('\x86') # build 2-tuple from two topmost stack items +TUPLE3 = ord('\x87') # build 3-tuple from three topmost stack items +NEWTRUE = ord('\x88') # push True +NEWFALSE = ord('\x89') # push False +LONG1 = ord('\x8a') # push long from < 256 bytes +LONG4 = ord('\x8b') # push really big long + +_tuplesize2code = [EMPTY_TUPLE, TUPLE1, TUPLE2, TUPLE3] + + # ____________________________________________________________ # XXX 
some temporary dark magic to produce pickled dumps that are # closer to the ones produced by cPickle in CPython @@ -44,3 +123,474 @@ file = StringIO() Pickler(file, protocol).dump(obj) return file.getvalue() + +# Why use struct.pack() for pickling but marshal.loads() for +# unpickling? struct.pack() is 40% faster than marshal.dumps(), but +# marshal.loads() is twice as fast as struct.unpack()! +mloads = marshal.loads + +# Unpickling machinery + +class Unpickler(object): + + def __init__(self, file): + """This takes a file-like object for reading a pickle data stream. + + The protocol version of the pickle is detected automatically, so no + proto argument is needed. + + The file-like object must have two methods, a read() method that + takes an integer argument, and a readline() method that requires no + arguments. Both methods should return a string. Thus file-like + object can be a file object opened for reading, a StringIO object, + or any other custom object that meets this interface. + """ + self.readline = file.readline + self.read = file.read + self.memo = {} + + def load(self): + """Read a pickled object representation from the open file. + + Return the reconstituted object hierarchy specified in the file. + """ + self.mark = object() # any new unique object + self.stack = [] + self.append = self.stack.append + try: + key = ord(self.read(1)) + while key != STOP: + self.dispatch[key](self) + key = ord(self.read(1)) + except TypeError: + if self.read(1) == '': + raise EOFError + raise + return self.stack.pop() + + # Return largest index k such that self.stack[k] is self.mark. + # If the stack doesn't contain a mark, eventually raises IndexError. + # This could be sped by maintaining another stack, of indices at which + # the mark appears. For that matter, the latter stack would suffice, + # and we wouldn't need to push mark objects on self.stack at all. 
+ # Doing so is probably a good thing, though, since if the pickle is + # corrupt (or hostile) we may get a clue from finding self.mark embedded + # in unpickled objects. + def marker(self): + k = len(self.stack)-1 + while self.stack[k] is not self.mark: k -= 1 + return k + + dispatch = {} + + def load_proto(self): + proto = ord(self.read(1)) + if not 0 <= proto <= 2: + raise ValueError, "unsupported pickle protocol: %d" % proto + dispatch[PROTO] = load_proto + + def load_persid(self): + pid = self.readline()[:-1] + self.append(self.persistent_load(pid)) + dispatch[PERSID] = load_persid + + def load_binpersid(self): + pid = self.stack.pop() + self.append(self.persistent_load(pid)) + dispatch[BINPERSID] = load_binpersid + + def load_none(self): + self.append(None) + dispatch[NONE] = load_none + + def load_false(self): + self.append(False) + dispatch[NEWFALSE] = load_false + + def load_true(self): + self.append(True) + dispatch[NEWTRUE] = load_true + + def load_int(self): + data = self.readline() + if data == FALSE[1:]: + val = False + elif data == TRUE[1:]: + val = True + else: + try: + val = int(data) + except ValueError: + val = long(data) + self.append(val) + dispatch[INT] = load_int + + def load_binint(self): + self.append(mloads('i' + self.read(4))) + dispatch[BININT] = load_binint + + def load_binint1(self): + self.append(ord(self.read(1))) + dispatch[BININT1] = load_binint1 + + def load_binint2(self): + self.append(mloads('i' + self.read(2) + '\000\000')) + dispatch[BININT2] = load_binint2 + + def load_long(self): + self.append(long(self.readline()[:-1], 0)) + dispatch[LONG] = load_long + + def load_long1(self): + n = ord(self.read(1)) + bytes = self.read(n) + self.append(decode_long(bytes)) + dispatch[LONG1] = load_long1 + + def load_long4(self): + n = mloads('i' + self.read(4)) + bytes = self.read(n) + self.append(decode_long(bytes)) + dispatch[LONG4] = load_long4 + + def load_float(self): + self.append(float(self.readline()[:-1])) + dispatch[FLOAT] = 
load_float + + def load_binfloat(self, unpack=struct.unpack): + self.append(unpack('>d', self.read(8))[0]) + dispatch[BINFLOAT] = load_binfloat + + def load_string(self): + rep = self.readline() + if len(rep) < 3: + raise ValueError, "insecure string pickle" + if rep[0] == "'" == rep[-2]: + rep = rep[1:-2] + elif rep[0] == '"' == rep[-2]: + rep = rep[1:-2] + else: + raise ValueError, "insecure string pickle" + self.append(rep.decode("string-escape")) + dispatch[STRING] = load_string + + def load_binstring(self): + L = mloads('i' + self.read(4)) + self.append(self.read(L)) + dispatch[BINSTRING] = load_binstring + + def load_unicode(self): + self.append(unicode(self.readline()[:-1],'raw-unicode-escape')) + dispatch[UNICODE] = load_unicode + + def load_binunicode(self): + L = mloads('i' + self.read(4)) + self.append(unicode(self.read(L),'utf-8')) + dispatch[BINUNICODE] = load_binunicode + + def load_short_binstring(self): + L = ord(self.read(1)) + self.append(self.read(L)) + dispatch[SHORT_BINSTRING] = load_short_binstring + + def load_tuple(self): + k = self.marker() + self.stack[k:] = [tuple(self.stack[k+1:])] + dispatch[TUPLE] = load_tuple + + def load_empty_tuple(self): + self.stack.append(()) + dispatch[EMPTY_TUPLE] = load_empty_tuple + + def load_tuple1(self): + self.stack[-1] = (self.stack[-1],) + dispatch[TUPLE1] = load_tuple1 + + def load_tuple2(self): + self.stack[-2:] = [(self.stack[-2], self.stack[-1])] + dispatch[TUPLE2] = load_tuple2 + + def load_tuple3(self): + self.stack[-3:] = [(self.stack[-3], self.stack[-2], self.stack[-1])] + dispatch[TUPLE3] = load_tuple3 + + def load_empty_list(self): + self.stack.append([]) + dispatch[EMPTY_LIST] = load_empty_list + + def load_empty_dictionary(self): + self.stack.append({}) + dispatch[EMPTY_DICT] = load_empty_dictionary + + def load_list(self): + k = self.marker() + self.stack[k:] = [self.stack[k+1:]] + dispatch[LIST] = load_list + + def load_dict(self): + k = self.marker() + d = {} + items = self.stack[k+1:] + 
for i in range(0, len(items), 2): + key = items[i] + value = items[i+1] + d[key] = value + self.stack[k:] = [d] + dispatch[DICT] = load_dict + + # INST and OBJ differ only in how they get a class object. It's not + # only sensible to do the rest in a common routine, the two routines + # previously diverged and grew different bugs. + # klass is the class to instantiate, and k points to the topmost mark + # object, following which are the arguments for klass.__init__. + def _instantiate(self, klass, k): + args = tuple(self.stack[k+1:]) + del self.stack[k:] + instantiated = 0 + if (not args and + type(klass) is ClassType and + not hasattr(klass, "__getinitargs__")): + try: + value = _EmptyClass() + value.__class__ = klass + instantiated = 1 + except RuntimeError: + # In restricted execution, assignment to inst.__class__ is + # prohibited + pass + if not instantiated: + try: + value = klass(*args) + except TypeError, err: + raise TypeError, "in constructor for %s: %s" % ( + klass.__name__, str(err)), sys.exc_info()[2] + self.append(value) + + def load_inst(self): + module = self.readline()[:-1] + name = self.readline()[:-1] + klass = self.find_class(module, name) + self._instantiate(klass, self.marker()) + dispatch[INST] = load_inst + + def load_obj(self): + # Stack is ... markobject classobject arg1 arg2 ... 
+ k = self.marker() + klass = self.stack.pop(k+1) + self._instantiate(klass, k) + dispatch[OBJ] = load_obj + + def load_newobj(self): + args = self.stack.pop() + cls = self.stack[-1] + obj = cls.__new__(cls, *args) + self.stack[-1] = obj + dispatch[NEWOBJ] = load_newobj + + def load_global(self): + module = self.readline()[:-1] + name = self.readline()[:-1] + klass = self.find_class(module, name) + self.append(klass) + dispatch[GLOBAL] = load_global + + def load_ext1(self): + code = ord(self.read(1)) + self.get_extension(code) + dispatch[EXT1] = load_ext1 + + def load_ext2(self): + code = mloads('i' + self.read(2) + '\000\000') + self.get_extension(code) + dispatch[EXT2] = load_ext2 + + def load_ext4(self): + code = mloads('i' + self.read(4)) + self.get_extension(code) + dispatch[EXT4] = load_ext4 + + def get_extension(self, code): + nil = [] + obj = _extension_cache.get(code, nil) + if obj is not nil: + self.append(obj) + return + key = _inverted_registry.get(code) + if not key: + raise ValueError("unregistered extension code %d" % code) + obj = self.find_class(*key) + _extension_cache[code] = obj + self.append(obj) + + def find_class(self, module, name): + # Subclasses may override this + __import__(module) + mod = sys.modules[module] + klass = getattr(mod, name) + return klass + + def load_reduce(self): + args = self.stack.pop() + func = self.stack[-1] + value = self.stack[-1](*args) + self.stack[-1] = value + dispatch[REDUCE] = load_reduce + + def load_pop(self): + del self.stack[-1] + dispatch[POP] = load_pop + + def load_pop_mark(self): + k = self.marker() + del self.stack[k:] + dispatch[POP_MARK] = load_pop_mark + + def load_dup(self): + self.append(self.stack[-1]) + dispatch[DUP] = load_dup + + def load_get(self): + self.append(self.memo[self.readline()[:-1]]) + dispatch[GET] = load_get + + def load_binget(self): + i = ord(self.read(1)) + self.append(self.memo[repr(i)]) + dispatch[BINGET] = load_binget + + def load_long_binget(self): + i = mloads('i' + 
self.read(4)) + self.append(self.memo[repr(i)]) + dispatch[LONG_BINGET] = load_long_binget + + def load_put(self): + self.memo[self.readline()[:-1]] = self.stack[-1] + dispatch[PUT] = load_put + + def load_binput(self): + i = ord(self.read(1)) + self.memo[repr(i)] = self.stack[-1] + dispatch[BINPUT] = load_binput + + def load_long_binput(self): + i = mloads('i' + self.read(4)) + self.memo[repr(i)] = self.stack[-1] + dispatch[LONG_BINPUT] = load_long_binput + + def load_append(self): + value = self.stack.pop() + self.stack[-1].append(value) + dispatch[APPEND] = load_append + + def load_appends(self): + stack = self.stack + mark = self.marker() + lst = stack[mark - 1] + lst.extend(stack[mark + 1:]) + del stack[mark:] + dispatch[APPENDS] = load_appends + + def load_setitem(self): + stack = self.stack + value = stack.pop() + key = stack.pop() + dict = stack[-1] + dict[key] = value + dispatch[SETITEM] = load_setitem + + def load_setitems(self): + stack = self.stack + mark = self.marker() + dict = stack[mark - 1] + for i in range(mark + 1, len(stack), 2): + dict[stack[i]] = stack[i + 1] + + del stack[mark:] + dispatch[SETITEMS] = load_setitems + + def load_build(self): + stack = self.stack + state = stack.pop() + inst = stack[-1] + setstate = getattr(inst, "__setstate__", None) + if setstate: + setstate(state) + return + slotstate = None + if isinstance(state, tuple) and len(state) == 2: + state, slotstate = state + if state: + try: + d = inst.__dict__ + try: + for k, v in state.iteritems(): + d[intern(k)] = v + # keys in state don't have to be strings + # don't blow up, but don't go out of our way + except TypeError: + d.update(state) + + except RuntimeError: + # XXX In restricted execution, the instance's __dict__ + # is not accessible. Use the old way of unpickling + # the instance variables. This is a semantic + # difference when unpickling in restricted + # vs. unrestricted modes. 
+ # Note, however, that cPickle has never tried to do the + # .update() business, and always uses + # PyObject_SetItem(inst.__dict__, key, value) in a + # loop over state.items(). + for k, v in state.items(): + setattr(inst, k, v) + if slotstate: + for k, v in slotstate.items(): + setattr(inst, k, v) + dispatch[BUILD] = load_build + + def load_mark(self): + self.append(self.mark) + dispatch[MARK] = load_mark + +#from pickle import decode_long + +def decode_long(data): + r"""Decode a long from a two's complement little-endian binary string. + + >>> decode_long('') + 0L + >>> decode_long("\xff\x00") + 255L + >>> decode_long("\xff\x7f") + 32767L + >>> decode_long("\x00\xff") + -256L + >>> decode_long("\x00\x80") + -32768L + >>> decode_long("\x80") + -128L + >>> decode_long("\x7f") + 127L + """ + + nbytes = len(data) + if nbytes == 0: + return 0L + ind = nbytes - 1 + while ind and ord(data[ind]) == 0: + ind -= 1 + n = ord(data[ind]) + while ind: + n <<= 8 + ind -= 1 + if ord(data[ind]): + n += ord(data[ind]) + if ord(data[nbytes - 1]) >= 128: + n -= 1L << (nbytes << 3) + return n + +def load(f): + return Unpickler(f).load() + +def loads(str): + f = StringIO(str) + return Unpickler(f).load() diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py --- a/lib_pypy/datetime.py +++ b/lib_pypy/datetime.py @@ -1032,8 +1032,8 @@ def __setstate(self, string): if len(string) != 4 or not (1 <= ord(string[2]) <= 12): raise TypeError("not enough arguments") - yhi, ylo, self._month, self._day = map(ord, string) - self._year = yhi * 256 + ylo + self._month, self._day = ord(string[2]), ord(string[3]) + self._year = ord(string[0]) * 256 + ord(string[1]) def __reduce__(self): return (self.__class__, self._getstate()) @@ -1421,9 +1421,10 @@ def __setstate(self, string, tzinfo): if len(string) != 6 or ord(string[0]) >= 24: raise TypeError("an integer is required") - self._hour, self._minute, self._second, us1, us2, us3 = \ - map(ord, string) - self._microsecond = (((us1 << 8) | us2) << 8) 
| us3 + self._hour, self._minute, self._second = ord(string[0]), \ + ord(string[1]), ord(string[2]) + self._microsecond = (((ord(string[3]) << 8) | \ + ord(string[4])) << 8) | ord(string[5]) self._tzinfo = tzinfo def __reduce__(self): @@ -1903,10 +1904,11 @@ return (basestate, self._tzinfo) def __setstate(self, string, tzinfo): - (yhi, ylo, self._month, self._day, self._hour, - self._minute, self._second, us1, us2, us3) = map(ord, string) - self._year = yhi * 256 + ylo - self._microsecond = (((us1 << 8) | us2) << 8) | us3 + (self._month, self._day, self._hour, self._minute, + self._second) = (ord(string[2]), ord(string[3]), ord(string[4]), + ord(string[5]), ord(string[6])) + self._year = ord(string[0]) * 256 + ord(string[1]) + self._microsecond = (((ord(string[7]) << 8) | ord(string[8])) << 8) | ord(string[9]) self._tzinfo = tzinfo def __reduce__(self): diff --git a/pypy/module/select/test/test_ztranslation.py b/pypy/module/select/test/test_ztranslation.py new file mode 100644 --- /dev/null +++ b/pypy/module/select/test/test_ztranslation.py @@ -0,0 +1,5 @@ + +from pypy.objspace.fake.checkmodule import checkmodule + +def test_select_translates(): + checkmodule('select') diff --git a/pypy/rpython/lltypesystem/rstr.py b/pypy/rpython/lltypesystem/rstr.py --- a/pypy/rpython/lltypesystem/rstr.py +++ b/pypy/rpython/lltypesystem/rstr.py @@ -62,6 +62,14 @@ @jit.oopspec('stroruni.copy_contents(src, dst, srcstart, dststart, length)') @enforceargs(None, None, int, int, int) def copy_string_contents(src, dst, srcstart, dststart, length): + """Copies 'length' characters from the 'src' string to the 'dst' + string, starting at position 'srcstart' and 'dststart'.""" + # xxx Warning: don't try to do this at home. It relies on a lot + # of details to be sure that it works correctly in all cases. + # Notably: no GC operation at all from the first cast_ptr_to_adr() + # because it might move the strings. 
The keepalive_until_here() + # are obscurely essential to make sure that the strings stay alive + # longer than the raw_memcopy(). assert srcstart >= 0 assert dststart >= 0 assert length >= 0 From noreply at buildbot.pypy.org Sun Mar 4 03:26:24 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 4 Mar 2012 03:26:24 +0100 (CET) Subject: [pypy-commit] pypy numpy-record-dtypes: view notes Message-ID: <20120304022624.02BA38204C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: numpy-record-dtypes Changeset: r53162:7eafa70ab85e Date: 2012-03-03 21:25 -0500 http://bitbucket.org/pypy/pypy/changeset/7eafa70ab85e/ Log: view notes diff --git a/REVIEW.rst b/REVIEW.rst new file mode 100644 --- /dev/null +++ b/REVIEW.rst @@ -0,0 +1,11 @@ +REVIEW +====== + +* Why is width == 1 in W_VoidBox.descr_{get,set}item? That doesn't seem right. +* expose endianess on dtypes +* RecordType.str_format should use Builder +* IntP and UIntP aren't the right size, they should be the same size of rffi.VOIDP, not as Signed/Unsigned +* Instead of setup() can we please have get_alignment on the Type class. +* Need more tests for nested record types, I'm pretty sure they're broken. +* kill all the trailing whitespace ;) +* Fix failing tests. 
From noreply at buildbot.pypy.org Sun Mar 4 03:26:25 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 4 Mar 2012 03:26:25 +0100 (CET) Subject: [pypy-commit] pypy numpy-record-dtypes: merged upstream Message-ID: <20120304022625.2EF128204C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: numpy-record-dtypes Changeset: r53163:88fa13dc105e Date: 2012-03-03 21:26 -0500 http://bitbucket.org/pypy/pypy/changeset/88fa13dc105e/ Log: merged upstream diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -1,3 +1,5 @@ +import py +from pypy.conftest import option from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest from pypy.module.micronumpy.interp_dtype import nonnative_byteorder_prefix from pypy.interpreter.gateway import interp2app @@ -474,13 +476,6 @@ assert dtype('i8') == dtype(' Author: Alex Gaynor Branch: jit-frame-counter Changeset: r53164:73c0184985f5 Date: 2012-03-03 22:07 -0500 http://bitbucket.org/pypy/pypy/changeset/73c0184985f5/ Log: fix for jit hooks diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py --- a/pypy/module/pypyjit/interp_resop.py +++ b/pypy/module/pypyjit/interp_resop.py @@ -72,7 +72,7 @@ Set a compiling hook that will be called each time a loop is optimized, but before assembler compilation. This allows to add additional optimizations on Python level. 
- + The hook will be called with the following signature: hook(jitdriver_name, loop_type, greenkey or guard_number, operations) @@ -121,13 +121,14 @@ ofs = ops_offset.get(op, 0) if op.opnum == rop.DEBUG_MERGE_POINT: jd_sd = jitdrivers_sd[op.getarg(0).getint()] - greenkey = op.getarglist()[2:] + greenkey = op.getarglist()[3:] repr = jd_sd.warmstate.get_location_str(greenkey) w_greenkey = wrap_greenkey(space, jd_sd.jitdriver, greenkey, repr) l_w.append(DebugMergePoint(space, jit_hooks._cast_to_gcref(op), logops.repr_of_resop(op), jd_sd.jitdriver.name, op.getarg(1).getint(), + op.getarg(2).getint(), w_greenkey)) else: l_w.append(WrappedOp(jit_hooks._cast_to_gcref(op), ofs, @@ -164,14 +165,16 @@ llres = res.llbox return WrappedOp(jit_hooks.resop_new(num, args, llres), offset, repr) - at unwrap_spec(repr=str, jd_name=str, call_depth=int) -def descr_new_dmp(space, w_tp, w_args, repr, jd_name, call_depth, w_greenkey): + at unwrap_spec(repr=str, jd_name=str, call_depth=int, call_id=int) +def descr_new_dmp(space, w_tp, w_args, repr, jd_name, call_depth, call_id, + w_greenkey): + args = [space.interp_w(WrappedBox, w_arg).llbox for w_arg in space.listview(w_args)] num = rop.DEBUG_MERGE_POINT return DebugMergePoint(space, jit_hooks.resop_new(num, args, jit_hooks.emptyval()), - repr, jd_name, call_depth, w_greenkey) + repr, jd_name, call_depth, call_id, w_greenkey) class WrappedOp(Wrappable): """ A class representing a single ResOperation, wrapped nicely @@ -206,10 +209,13 @@ jit_hooks.resop_setresult(self.op, box.llbox) class DebugMergePoint(WrappedOp): - def __init__(self, space, op, repr_of_resop, jd_name, call_depth, w_greenkey): + def __init__(self, space, op, repr_of_resop, jd_name, call_depth, call_id, + w_greenkey): + WrappedOp.__init__(self, op, -1, repr_of_resop) self.jd_name = jd_name self.call_depth = call_depth + self.call_id = call_id self.w_greenkey = w_greenkey def get_pycode(self, space): @@ -246,6 +252,7 @@ pycode = GetSetProperty(DebugMergePoint.get_pycode), 
bytecode_no = GetSetProperty(DebugMergePoint.get_bytecode_no), call_depth = interp_attrproperty("call_depth", cls=DebugMergePoint), + call_id = interp_attrproperty("call_id", cls=DebugMergePoint), jitdriver_name = GetSetProperty(DebugMergePoint.get_jitdriver_name), ) DebugMergePoint.acceptable_as_base_class = False diff --git a/pypy/module/pypyjit/test/test_jit_hook.py b/pypy/module/pypyjit/test/test_jit_hook.py --- a/pypy/module/pypyjit/test/test_jit_hook.py +++ b/pypy/module/pypyjit/test/test_jit_hook.py @@ -54,7 +54,7 @@ oplist = parse(""" [i1, i2, p2] i3 = int_add(i1, i2) - debug_merge_point(0, 0, 0, 0, ConstPtr(ptr0)) + debug_merge_point(0, 0, 0, 0, 0, ConstPtr(ptr0)) guard_nonnull(p2) [] guard_true(i3) [] """, namespace={'ptr0': code_gcref}).operations @@ -87,7 +87,7 @@ def interp_on_abort(): pypy_hooks.on_abort(ABORT_TOO_LONG, pypyjitdriver, greenkey, 'blah') - + cls.w_on_compile = space.wrap(interp2app(interp_on_compile)) cls.w_on_compile_bridge = space.wrap(interp2app(interp_on_compile_bridge)) cls.w_on_abort = space.wrap(interp2app(interp_on_abort)) @@ -105,7 +105,7 @@ def hook(name, looptype, tuple_or_guard_no, ops, asmstart, asmlen): all.append((name, looptype, tuple_or_guard_no, ops)) - + self.on_compile() pypyjit.set_compile_hook(hook) assert not all @@ -123,6 +123,7 @@ assert dmp.pycode is self.f.func_code assert dmp.greenkey == (self.f.func_code, 0, False) assert dmp.call_depth == 0 + assert dmp.call_id == 0 assert int_add.name == 'int_add' assert int_add.num == self.int_add_num self.on_compile_bridge() @@ -151,18 +152,18 @@ def test_non_reentrant(self): import pypyjit l = [] - + def hook(*args): l.append(None) self.on_compile() self.on_compile_bridge() - + pypyjit.set_compile_hook(hook) self.on_compile() assert len(l) == 1 # and did not crash self.on_compile_bridge() assert len(l) == 2 # and did not crash - + def test_on_compile_types(self): import pypyjit l = [] @@ -182,7 +183,7 @@ def hook(jitdriver_name, greenkey, reason): 
l.append((jitdriver_name, reason)) - + pypyjit.set_abort_hook(hook) self.on_abort() assert l == [('pypyjit', 'ABORT_TOO_LONG')] @@ -224,13 +225,14 @@ def f(): pass - op = DebugMergePoint([Box(0)], 'repr', 'pypyjit', 2, (f.func_code, 0, 0)) + op = DebugMergePoint([Box(0)], 'repr', 'pypyjit', 2, 3, (f.func_code, 0, 0)) assert op.bytecode_no == 0 assert op.pycode is f.func_code assert repr(op) == 'repr' assert op.jitdriver_name == 'pypyjit' assert op.num == self.dmp_num assert op.call_depth == 2 - op = DebugMergePoint([Box(0)], 'repr', 'notmain', 5, ('str',)) + assert op.call_id == 3 + op = DebugMergePoint([Box(0)], 'repr', 'notmain', 5, 4, ('str',)) raises(AttributeError, 'op.pycode') assert op.call_depth == 5 From noreply at buildbot.pypy.org Sun Mar 4 04:17:33 2012 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 4 Mar 2012 04:17:33 +0100 (CET) Subject: [pypy-commit] pypy numpy-record-dtypes: work in progress Message-ID: <20120304031733.6A33D8204C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: numpy-record-dtypes Changeset: r53165:b07f0c00db53 Date: 2012-03-03 19:00 -0800 http://bitbucket.org/pypy/pypy/changeset/b07f0c00db53/ Log: work in progress diff --git a/REVIEW.rst b/REVIEW.rst --- a/REVIEW.rst +++ b/REVIEW.rst @@ -1,7 +1,6 @@ REVIEW ====== -* Why is width == 1 in W_VoidBox.descr_{get,set}item? That doesn't seem right. 
* expose endianess on dtypes * RecordType.str_format should use Builder * IntP and UIntP aren't the right size, they should be the same size of rffi.VOIDP, not as Signed/Unsigned diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -197,9 +197,10 @@ class W_FlexibleBox(W_GenericBox): - def __init__(self, arr, ofs): + def __init__(self, arr, ofs, dtype): self.arr = arr # we have to keep array alive self.ofs = ofs + self.dtype = dtype def get_dtype(self, space): return self.arr.dtype @@ -212,16 +213,16 @@ @unwrap_spec(item=str) def descr_getitem(self, space, item): try: - ofs, dtype = self.arr.dtype.fields[item] + ofs, dtype = self.dtype.fields[item] except KeyError: raise OperationError(space.w_IndexError, space.wrap("Field %s does not exist" % item)) - return dtype.itemtype.read(self.arr, 1, self.ofs, ofs) + return dtype.itemtype.read(self.arr, 1, self.ofs, ofs, dtype) @unwrap_spec(item=str) def descr_setitem(self, space, item, w_value): try: - ofs, dtype = self.arr.dtype.fields[item] + ofs, dtype = self.dtype.fields[item] except KeyError: raise OperationError(space.w_IndexError, space.wrap("Field %s does not exist" % item)) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1919,3 +1919,16 @@ from _numpypy import array a = array([(1, 2), (3, 4)], dtype=[('x', int), ('y', float)]) assert repr(a[0]) == '(1, 2.0)' + + def test_nested_dtype(self): + from _numpypy import zeros + a = [('x', int), ('y', float)] + b = [('x', int), ('y', a)] + arr = zeros(3, dtype=b) + arr[1]['x'] = 15 + assert arr[1]['x'] == 15 + arr[1]['y']['y'] = 3.5 + assert arr[1]['y']['y'] == 3.5 + assert arr[1]['y']['x'] == 0.0 + assert arr[1]['x'] == 15 + diff --git a/pypy/module/micronumpy/types.py 
b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -116,7 +116,7 @@ else: return libffi.array_getitem_T(self.T, width, storage, i, offset) - def read(self, arr, width, i, offset): + def read(self, arr, width, i, offset, dtype=None): return self.box(self._read(arr.storage, width, i, offset)) def read_bool(self, arr, width, i, offset): @@ -675,8 +675,10 @@ class RecordType(CompositeType): T = lltype.Char - def read(self, arr, width, i, offset): - return interp_boxes.W_VoidBox(arr, i) + def read(self, arr, width, i, offset, dtype=None): + if dtype is None: + dtype = arr.dtype + return interp_boxes.W_VoidBox(arr, i, dtype) @jit.unroll_safe def coerce(self, space, dtype, w_item): From noreply at buildbot.pypy.org Sun Mar 4 04:17:35 2012 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 4 Mar 2012 04:17:35 +0100 (CET) Subject: [pypy-commit] pypy numpy-record-dtypes: alex, you can make stupid jokes Message-ID: <20120304031735.014618204C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: numpy-record-dtypes Changeset: r53166:89c7d9016d6b Date: 2012-03-03 19:17 -0800 http://bitbucket.org/pypy/pypy/changeset/89c7d9016d6b/ Log: alex, you can make stupid jokes diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -678,7 +678,7 @@ def read(self, arr, width, i, offset, dtype=None): if dtype is None: dtype = arr.dtype - return interp_boxes.W_VoidBox(arr, i, dtype) + return interp_boxes.W_VoidBox(arr, i + offset, dtype) @jit.unroll_safe def coerce(self, space, dtype, w_item): From noreply at buildbot.pypy.org Sun Mar 4 04:18:39 2012 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 4 Mar 2012 04:18:39 +0100 (CET) Subject: [pypy-commit] pypy numpy-record-dtypes: one more missing arg Message-ID: <20120304031839.E6B068204C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: 
numpy-record-dtypes Changeset: r53167:e260673aa286 Date: 2012-03-03 19:18 -0800 http://bitbucket.org/pypy/pypy/changeset/e260673aa286/ Log: one more missing arg diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -704,7 +704,7 @@ w_item = items_w[i] w_box = itemtype.coerce(space, subdtype, w_item) itemtype.store(arr, 1, 0, ofs, w_box) - return interp_boxes.W_VoidBox(arr, 0) + return interp_boxes.W_VoidBox(arr, 0, arr.dtype) @jit.unroll_safe def store(self, arr, _, i, ofs, box): From noreply at buildbot.pypy.org Sun Mar 4 04:37:14 2012 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 4 Mar 2012 04:37:14 +0100 (CET) Subject: [pypy-commit] pypy numpy-record-dtypes: fix some tests Message-ID: <20120304033714.944D68204C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: numpy-record-dtypes Changeset: r53168:f7f81a16d30c Date: 2012-03-03 19:36 -0800 http://bitbucket.org/pypy/pypy/changeset/f7f81a16d30c/ Log: fix some tests diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -241,7 +241,7 @@ arr = W_NDimArray([1], new_string_dtype(space, len(arg))) for i in range(len(arg)): arr.storage[i] = arg[i] - return W_StringBox(arr, 0) + return W_StringBox(arr, 0, arr.dtype) class W_UnicodeBox(W_CharacterBox): @@ -254,7 +254,7 @@ # XXX not this way, we need store #for i in range(len(arg)): # arr.storage[i] = arg[i] - return W_UnicodeBox(arr, 0) + return W_UnicodeBox(arr, 0, arr.dtype) W_GenericBox.typedef = TypeDef("generic", __module__ = "numpypy", From noreply at buildbot.pypy.org Sun Mar 4 04:47:57 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 4 Mar 2012 04:47:57 +0100 (CET) Subject: [pypy-commit] pypy numpy-record-dtypes: slightly cleaner Message-ID: 
<20120304034757.7BFFE8204C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: numpy-record-dtypes Changeset: r53169:7e4c821d088f Date: 2012-03-03 22:47 -0500 http://bitbucket.org/pypy/pypy/changeset/7e4c821d088f/ Log: slightly cleaner diff --git a/pypy/rlib/rarithmetic.py b/pypy/rlib/rarithmetic.py --- a/pypy/rlib/rarithmetic.py +++ b/pypy/rlib/rarithmetic.py @@ -521,9 +521,8 @@ from pypy.rpython.lltypesystem import lltype, rffi T = lltype.typeOf(arg) - if T != rffi.LONGLONG and T != rffi.ULONGLONG and T != rffi.UINT: - arg = rffi.cast(lltype.Signed, arg) - # XXX we cannot do arithmetics on small ints + # XXX we cannot do arithmetics on small ints + arg = widen(arg) if rffi.sizeof(T) == 1: res = arg elif rffi.sizeof(T) == 2: From noreply at buildbot.pypy.org Sun Mar 4 04:47:59 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 4 Mar 2012 04:47:59 +0100 (CET) Subject: [pypy-commit] pypy numpy-record-dtypes: merged upstream Message-ID: <20120304034759.109068204C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: numpy-record-dtypes Changeset: r53170:f65fea78b04c Date: 2012-03-03 22:47 -0500 http://bitbucket.org/pypy/pypy/changeset/f65fea78b04c/ Log: merged upstream diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -241,7 +241,7 @@ arr = W_NDimArray([1], new_string_dtype(space, len(arg))) for i in range(len(arg)): arr.storage[i] = arg[i] - return W_StringBox(arr, 0) + return W_StringBox(arr, 0, arr.dtype) class W_UnicodeBox(W_CharacterBox): @@ -254,7 +254,7 @@ # XXX not this way, we need store #for i in range(len(arg)): # arr.storage[i] = arg[i] - return W_UnicodeBox(arr, 0) + return W_UnicodeBox(arr, 0, arr.dtype) W_GenericBox.typedef = TypeDef("generic", __module__ = "numpypy", From noreply at buildbot.pypy.org Sun Mar 4 04:59:04 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 4 Mar 2012 
04:59:04 +0100 (CET) Subject: [pypy-commit] pypy jit-frame-counter: test fix Message-ID: <20120304035904.4C1E58204C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: jit-frame-counter Changeset: r53171:b205daec6f30 Date: 2012-03-03 22:58 -0500 http://bitbucket.org/pypy/pypy/changeset/b205daec6f30/ Log: test fix diff --git a/pypy/jit/tool/test/test_oparser.py b/pypy/jit/tool/test/test_oparser.py --- a/pypy/jit/tool/test/test_oparser.py +++ b/pypy/jit/tool/test/test_oparser.py @@ -152,12 +152,12 @@ debug_merge_point(0, 0, '(stuff) #1') ''' loop = self.parse(x) - assert loop.operations[0].getarg(1)._get_str() == 'info' - assert loop.operations[0].getarg(2).getint() == 0 - assert loop.operations[1].getarg(1)._get_str() == 'info' - assert loop.operations[2].getarg(1)._get_str() == " info" - assert loop.operations[2].getarg(2).getint() == 1 - assert loop.operations[3].getarg(1)._get_str() == "(stuff) #1" + assert loop.operations[0].getarg(2)._get_str() == 'info' + assert loop.operations[0].getarg(1).value == 0 + assert loop.operations[1].getarg(2)._get_str() == 'info' + assert loop.operations[2].getarg(2)._get_str() == " info" + assert loop.operations[2].getarg(1).value == 1 + assert loop.operations[3].getarg(2)._get_str() == "(stuff) #1" def test_descr_with_obj_print(self): From noreply at buildbot.pypy.org Sun Mar 4 08:15:38 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 4 Mar 2012 08:15:38 +0100 (CET) Subject: [pypy-commit] pypy jit-frame-counter: branch done Message-ID: <20120304071538.1B25882008@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: jit-frame-counter Changeset: r53172:4470b3d0083c Date: 2012-03-04 02:14 -0500 http://bitbucket.org/pypy/pypy/changeset/4470b3d0083c/ Log: branch done From noreply at buildbot.pypy.org Sun Mar 4 08:15:40 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 4 Mar 2012 08:15:40 +0100 (CET) Subject: [pypy-commit] pypy default: Adds a unique call_id to each source-language call in the 
JIT Message-ID: <20120304071540.74D1282008@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r53173:d6976c97fc6c Date: 2012-03-04 02:15 -0500 http://bitbucket.org/pypy/pypy/changeset/d6976c97fc6c/ Log: Adds a unique call_id to each source-language call in the JIT diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -171,7 +171,7 @@ 'unicodesetitem' : (('ref', 'int', 'int'), 'int'), 'cast_ptr_to_int' : (('ref',), 'int'), 'cast_int_to_ptr' : (('int',), 'ref'), - 'debug_merge_point': (('ref', 'int'), None), + 'debug_merge_point': (('ref', 'int', 'int'), None), 'force_token' : ((), 'int'), 'call_may_force' : (('int', 'varargs'), 'intorptr'), 'guard_not_forced': ((), None), diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -604,7 +604,7 @@ [funcbox, BoxInt(arg1), BoxInt(arg2)], 'int', descr=calldescr) assert res.getint() == f(arg1, arg2) - + def test_call_stack_alignment(self): # test stack alignment issues, notably for Mac OS/X. # also test the ordering of the arguments. 
@@ -1490,7 +1490,8 @@ def test_noops(self): c_box = self.alloc_string("hi there").constbox() c_nest = ConstInt(0) - self.execute_operation(rop.DEBUG_MERGE_POINT, [c_box, c_nest], 'void') + c_id = ConstInt(0) + self.execute_operation(rop.DEBUG_MERGE_POINT, [c_box, c_nest, c_id], 'void') self.execute_operation(rop.JIT_DEBUG, [c_box, c_nest, c_nest, c_nest, c_nest], 'void') @@ -3061,7 +3062,7 @@ ResOperation(rop.JUMP, [i2], None, descr=targettoken2), ] self.cpu.compile_bridge(faildescr, inputargs, operations, looptoken) - + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 3 res = self.cpu.get_latest_value_int(0) @@ -3106,7 +3107,7 @@ assert len(mc) == len(ops) for i in range(len(mc)): assert mc[i].split("\t")[-1].startswith(ops[i]) - + data = ctypes.string_at(info.asmaddr, info.asmlen) mc = list(machine_code_dump(data, info.asmaddr, cpuname)) lines = [line for line in mc if line.count('\t') == 2] diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -371,7 +371,7 @@ operations = [ ResOperation(rop.LABEL, [i0], None, descr=targettoken), - ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("hello"), 0], None), + ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("hello"), 0, 0], None), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), @@ -390,7 +390,7 @@ bridge = [ ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), - ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("bye"), 0], None), + ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("bye"), 0, 0], None), ResOperation(rop.JUMP, [i1b], None, descr=targettoken), ] bridge[1].setfailargs([i1b]) @@ -531,12 +531,12 @@ loop = """ [i0] label(i0, descr=preambletoken) - debug_merge_point('xyz', 0) + 
debug_merge_point('xyz', 0, 0) i1 = int_add(i0, 1) i2 = int_ge(i1, 10) guard_false(i2) [] label(i1, descr=targettoken) - debug_merge_point('xyz', 0) + debug_merge_point('xyz', 0, 0) i11 = int_add(i1, 1) i12 = int_ge(i11, 10) guard_false(i12) [] @@ -569,7 +569,7 @@ loop = """ [i0] label(i0, descr=targettoken) - debug_merge_point('xyz', 0) + debug_merge_point('xyz', 0, 0) i1 = int_add(i0, 1) i2 = int_ge(i1, 10) guard_false(i2) [] diff --git a/pypy/jit/metainterp/graphpage.py b/pypy/jit/metainterp/graphpage.py --- a/pypy/jit/metainterp/graphpage.py +++ b/pypy/jit/metainterp/graphpage.py @@ -169,9 +169,9 @@ if op.getopnum() == rop.DEBUG_MERGE_POINT: jd_sd = self.metainterp_sd.jitdrivers_sd[op.getarg(0).getint()] if jd_sd._get_printable_location_ptr: - s = jd_sd.warmstate.get_location_str(op.getarglist()[2:]) + s = jd_sd.warmstate.get_location_str(op.getarglist()[3:]) s = s.replace(',', '.') # we use comma for argument splitting - op_repr = "debug_merge_point(%d, '%s')" % (op.getarg(1).getint(), s) + op_repr = "debug_merge_point(%d, %d, '%s')" % (op.getarg(1).getint(), op.getarg(2).getint(), s) lines.append(op_repr) if is_interesting_guard(op): tgt = op.getdescr()._debug_suboperations[0] diff --git a/pypy/jit/metainterp/logger.py b/pypy/jit/metainterp/logger.py --- a/pypy/jit/metainterp/logger.py +++ b/pypy/jit/metainterp/logger.py @@ -110,9 +110,9 @@ def repr_of_resop(self, op, ops_offset=None): if op.getopnum() == rop.DEBUG_MERGE_POINT: jd_sd = self.metainterp_sd.jitdrivers_sd[op.getarg(0).getint()] - s = jd_sd.warmstate.get_location_str(op.getarglist()[2:]) + s = jd_sd.warmstate.get_location_str(op.getarglist()[3:]) s = s.replace(',', '.') # we use comma for argument splitting - return "debug_merge_point(%d, '%s')" % (op.getarg(1).getint(), s) + return "debug_merge_point(%d, %d, '%s')" % (op.getarg(1).getint(), op.getarg(2).getint(), s) if ops_offset is None: offset = -1 else: @@ -149,7 +149,7 @@ if target_token.exported_state: for op in 
target_token.exported_state.inputarg_setup_ops: debug_print(' ' + self.repr_of_resop(op)) - + def _log_operations(self, inputargs, operations, ops_offset): if not have_debug_prints(): return diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -974,9 +974,11 @@ any_operation = len(self.metainterp.history.operations) > 0 jitdriver_sd = self.metainterp.staticdata.jitdrivers_sd[jdindex] self.verify_green_args(jitdriver_sd, greenboxes) - self.debug_merge_point(jitdriver_sd, jdindex, self.metainterp.portal_call_depth, + self.debug_merge_point(jitdriver_sd, jdindex, + self.metainterp.portal_call_depth, + self.metainterp.call_ids[-1], greenboxes) - + if self.metainterp.seen_loop_header_for_jdindex < 0: if not any_operation: return @@ -1028,11 +1030,11 @@ assembler_call=True) raise ChangeFrame - def debug_merge_point(self, jitdriver_sd, jd_index, portal_call_depth, greenkey): + def debug_merge_point(self, jitdriver_sd, jd_index, portal_call_depth, current_call_id, greenkey): # debugging: produce a DEBUG_MERGE_POINT operation loc = jitdriver_sd.warmstate.get_location_str(greenkey) debug_print(loc) - args = [ConstInt(jd_index), ConstInt(portal_call_depth)] + greenkey + args = [ConstInt(jd_index), ConstInt(portal_call_depth), ConstInt(current_call_id)] + greenkey self.metainterp.history.record(rop.DEBUG_MERGE_POINT, args, None) @arguments("box", "label") @@ -1574,11 +1576,14 @@ self.call_pure_results = args_dict_box() self.heapcache = HeapCache() + self.call_ids = [] + self.current_call_id = 0 + def retrace_needed(self, trace): self.partial_trace = trace self.retracing_from = len(self.history.operations) - 1 self.heapcache.reset() - + def perform_call(self, jitcode, boxes, greenkey=None): # causes the metainterp to enter the given subfunction @@ -1592,6 +1597,8 @@ def newframe(self, jitcode, greenkey=None): if jitcode.is_portal: self.portal_call_depth += 1 + 
self.call_ids.append(self.current_call_id) + self.current_call_id += 1 if greenkey is not None and self.is_main_jitcode(jitcode): self.portal_trace_positions.append( (greenkey, len(self.history.operations))) @@ -1608,6 +1615,7 @@ jitcode = frame.jitcode if jitcode.is_portal: self.portal_call_depth -= 1 + self.call_ids.pop() if frame.greenkey is not None and self.is_main_jitcode(jitcode): self.portal_trace_positions.append( (None, len(self.history.operations))) @@ -1976,7 +1984,7 @@ # Found! Compile it as a loop. # raises in case it works -- which is the common case if self.partial_trace: - if start != self.retracing_from: + if start != self.retracing_from: raise SwitchToBlackhole(ABORT_BAD_LOOP) # For now self.compile_loop(original_boxes, live_arg_boxes, start, resumedescr) # creation of the loop was cancelled! @@ -2085,7 +2093,7 @@ if not token.target_tokens: return None return token - + def compile_loop(self, original_boxes, live_arg_boxes, start, resume_at_jump_descr): num_green_args = self.jitdriver_sd.num_green_args greenkey = original_boxes[:num_green_args] diff --git a/pypy/jit/metainterp/test/test_logger.py b/pypy/jit/metainterp/test/test_logger.py --- a/pypy/jit/metainterp/test/test_logger.py +++ b/pypy/jit/metainterp/test/test_logger.py @@ -54,7 +54,7 @@ class FakeJitDriver(object): class warmstate(object): get_location_str = staticmethod(lambda args: "dupa") - + class FakeMetaInterpSd: cpu = AbstractCPU() cpu.ts = self.ts @@ -77,7 +77,7 @@ equaloplists(loop.operations, oloop.operations) assert oloop.inputargs == loop.inputargs return logger, loop, oloop - + def test_simple(self): inp = ''' [i0, i1, i2, p3, p4, p5] @@ -116,12 +116,13 @@ def test_debug_merge_point(self): inp = ''' [] - debug_merge_point(0, 0) + debug_merge_point(0, 0, 0) ''' _, loop, oloop = self.reparse(inp, check_equal=False) assert loop.operations[0].getarg(1).getint() == 0 - assert oloop.operations[0].getarg(1)._get_str() == "dupa" - + assert loop.operations[0].getarg(2).getint() == 0 
+ assert oloop.operations[0].getarg(2)._get_str() == "dupa" + def test_floats(self): inp = ''' [f0] @@ -142,7 +143,7 @@ output = logger.log_loop(loop) assert output.splitlines()[-1] == "jump(i0, descr=)" pure_parse(output) - + def test_guard_descr(self): namespace = {'fdescr': BasicFailDescr()} inp = ''' @@ -154,7 +155,7 @@ output = logger.log_loop(loop) assert output.splitlines()[-1] == "guard_true(i0, descr=) [i0]" pure_parse(output) - + logger = Logger(self.make_metainterp_sd(), guard_number=False) output = logger.log_loop(loop) lastline = output.splitlines()[-1] diff --git a/pypy/jit/metainterp/test/test_warmspot.py b/pypy/jit/metainterp/test/test_warmspot.py --- a/pypy/jit/metainterp/test/test_warmspot.py +++ b/pypy/jit/metainterp/test/test_warmspot.py @@ -13,7 +13,7 @@ class WarmspotTests(object): - + def test_basic(self): mydriver = JitDriver(reds=['a'], greens=['i']) @@ -77,16 +77,16 @@ self.meta_interp(f, [123, 10]) assert len(get_stats().locations) >= 4 for loc in get_stats().locations: - assert loc == (0, 123) + assert loc == (0, 0, 123) def test_set_param_enable_opts(self): from pypy.rpython.annlowlevel import llstr, hlstr - + myjitdriver = JitDriver(greens = [], reds = ['n']) class A(object): def m(self, n): return n-1 - + def g(n): while n > 0: myjitdriver.can_enter_jit(n=n) @@ -332,7 +332,7 @@ ts = llhelper translate_support_code = False stats = "stats" - + def get_fail_descr_number(self, d): return -1 @@ -352,7 +352,7 @@ return "not callable" driver = JitDriver(reds = ['red'], greens = ['green']) - + def f(green): red = 0 while red < 10: diff --git a/pypy/jit/tool/test/test_oparser.py b/pypy/jit/tool/test/test_oparser.py --- a/pypy/jit/tool/test/test_oparser.py +++ b/pypy/jit/tool/test/test_oparser.py @@ -146,16 +146,18 @@ def test_debug_merge_point(self): x = ''' [] - debug_merge_point(0, "info") - debug_merge_point(0, 'info') - debug_merge_point(1, ' info') - debug_merge_point(0, '(stuff) #1') + debug_merge_point(0, 0, "info") + 
debug_merge_point(0, 0, 'info') + debug_merge_point(1, 1, ' info') + debug_merge_point(0, 0, '(stuff) #1') ''' loop = self.parse(x) - assert loop.operations[0].getarg(1)._get_str() == 'info' - assert loop.operations[1].getarg(1)._get_str() == 'info' - assert loop.operations[2].getarg(1)._get_str() == " info" - assert loop.operations[3].getarg(1)._get_str() == "(stuff) #1" + assert loop.operations[0].getarg(2)._get_str() == 'info' + assert loop.operations[0].getarg(1).value == 0 + assert loop.operations[1].getarg(2)._get_str() == 'info' + assert loop.operations[2].getarg(2)._get_str() == " info" + assert loop.operations[2].getarg(1).value == 1 + assert loop.operations[3].getarg(2)._get_str() == "(stuff) #1" def test_descr_with_obj_print(self): diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py --- a/pypy/module/pypyjit/interp_resop.py +++ b/pypy/module/pypyjit/interp_resop.py @@ -72,7 +72,7 @@ Set a compiling hook that will be called each time a loop is optimized, but before assembler compilation. This allows to add additional optimizations on Python level. 
- + The hook will be called with the following signature: hook(jitdriver_name, loop_type, greenkey or guard_number, operations) @@ -121,13 +121,14 @@ ofs = ops_offset.get(op, 0) if op.opnum == rop.DEBUG_MERGE_POINT: jd_sd = jitdrivers_sd[op.getarg(0).getint()] - greenkey = op.getarglist()[2:] + greenkey = op.getarglist()[3:] repr = jd_sd.warmstate.get_location_str(greenkey) w_greenkey = wrap_greenkey(space, jd_sd.jitdriver, greenkey, repr) l_w.append(DebugMergePoint(space, jit_hooks._cast_to_gcref(op), logops.repr_of_resop(op), jd_sd.jitdriver.name, op.getarg(1).getint(), + op.getarg(2).getint(), w_greenkey)) else: l_w.append(WrappedOp(jit_hooks._cast_to_gcref(op), ofs, @@ -164,14 +165,16 @@ llres = res.llbox return WrappedOp(jit_hooks.resop_new(num, args, llres), offset, repr) - at unwrap_spec(repr=str, jd_name=str, call_depth=int) -def descr_new_dmp(space, w_tp, w_args, repr, jd_name, call_depth, w_greenkey): + at unwrap_spec(repr=str, jd_name=str, call_depth=int, call_id=int) +def descr_new_dmp(space, w_tp, w_args, repr, jd_name, call_depth, call_id, + w_greenkey): + args = [space.interp_w(WrappedBox, w_arg).llbox for w_arg in space.listview(w_args)] num = rop.DEBUG_MERGE_POINT return DebugMergePoint(space, jit_hooks.resop_new(num, args, jit_hooks.emptyval()), - repr, jd_name, call_depth, w_greenkey) + repr, jd_name, call_depth, call_id, w_greenkey) class WrappedOp(Wrappable): """ A class representing a single ResOperation, wrapped nicely @@ -206,10 +209,13 @@ jit_hooks.resop_setresult(self.op, box.llbox) class DebugMergePoint(WrappedOp): - def __init__(self, space, op, repr_of_resop, jd_name, call_depth, w_greenkey): + def __init__(self, space, op, repr_of_resop, jd_name, call_depth, call_id, + w_greenkey): + WrappedOp.__init__(self, op, -1, repr_of_resop) self.jd_name = jd_name self.call_depth = call_depth + self.call_id = call_id self.w_greenkey = w_greenkey def get_pycode(self, space): @@ -246,6 +252,7 @@ pycode = GetSetProperty(DebugMergePoint.get_pycode), 
bytecode_no = GetSetProperty(DebugMergePoint.get_bytecode_no), call_depth = interp_attrproperty("call_depth", cls=DebugMergePoint), + call_id = interp_attrproperty("call_id", cls=DebugMergePoint), jitdriver_name = GetSetProperty(DebugMergePoint.get_jitdriver_name), ) DebugMergePoint.acceptable_as_base_class = False diff --git a/pypy/module/pypyjit/test/test_jit_hook.py b/pypy/module/pypyjit/test/test_jit_hook.py --- a/pypy/module/pypyjit/test/test_jit_hook.py +++ b/pypy/module/pypyjit/test/test_jit_hook.py @@ -54,7 +54,7 @@ oplist = parse(""" [i1, i2, p2] i3 = int_add(i1, i2) - debug_merge_point(0, 0, 0, 0, ConstPtr(ptr0)) + debug_merge_point(0, 0, 0, 0, 0, ConstPtr(ptr0)) guard_nonnull(p2) [] guard_true(i3) [] """, namespace={'ptr0': code_gcref}).operations @@ -87,7 +87,7 @@ def interp_on_abort(): pypy_hooks.on_abort(ABORT_TOO_LONG, pypyjitdriver, greenkey, 'blah') - + cls.w_on_compile = space.wrap(interp2app(interp_on_compile)) cls.w_on_compile_bridge = space.wrap(interp2app(interp_on_compile_bridge)) cls.w_on_abort = space.wrap(interp2app(interp_on_abort)) @@ -105,7 +105,7 @@ def hook(name, looptype, tuple_or_guard_no, ops, asmstart, asmlen): all.append((name, looptype, tuple_or_guard_no, ops)) - + self.on_compile() pypyjit.set_compile_hook(hook) assert not all @@ -123,6 +123,7 @@ assert dmp.pycode is self.f.func_code assert dmp.greenkey == (self.f.func_code, 0, False) assert dmp.call_depth == 0 + assert dmp.call_id == 0 assert int_add.name == 'int_add' assert int_add.num == self.int_add_num self.on_compile_bridge() @@ -151,18 +152,18 @@ def test_non_reentrant(self): import pypyjit l = [] - + def hook(*args): l.append(None) self.on_compile() self.on_compile_bridge() - + pypyjit.set_compile_hook(hook) self.on_compile() assert len(l) == 1 # and did not crash self.on_compile_bridge() assert len(l) == 2 # and did not crash - + def test_on_compile_types(self): import pypyjit l = [] @@ -182,7 +183,7 @@ def hook(jitdriver_name, greenkey, reason): 
l.append((jitdriver_name, reason)) - + pypyjit.set_abort_hook(hook) self.on_abort() assert l == [('pypyjit', 'ABORT_TOO_LONG')] @@ -224,13 +225,14 @@ def f(): pass - op = DebugMergePoint([Box(0)], 'repr', 'pypyjit', 2, (f.func_code, 0, 0)) + op = DebugMergePoint([Box(0)], 'repr', 'pypyjit', 2, 3, (f.func_code, 0, 0)) assert op.bytecode_no == 0 assert op.pycode is f.func_code assert repr(op) == 'repr' assert op.jitdriver_name == 'pypyjit' assert op.num == self.dmp_num assert op.call_depth == 2 - op = DebugMergePoint([Box(0)], 'repr', 'notmain', 5, ('str',)) + assert op.call_id == 3 + op = DebugMergePoint([Box(0)], 'repr', 'notmain', 5, 4, ('str',)) raises(AttributeError, 'op.pycode') assert op.call_depth == 5 diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -93,7 +93,7 @@ end_index += 1 op.asm = '\n'.join([asm[i][1] for i in range(asm_index, end_index)]) return loop - + def _asm_disassemble(self, d, origin_addr, tp): from pypy.jit.backend.x86.tool.viewcode import machine_code_dump return list(machine_code_dump(d, tp, origin_addr)) @@ -109,7 +109,7 @@ if not argspec.strip(): return [], None if opname == 'debug_merge_point': - return argspec.split(", ", 1), None + return argspec.split(", ", 2), None else: args = argspec.split(', ') descr = None @@ -159,7 +159,7 @@ for op in operations: if op.name == 'debug_merge_point': self.inline_level = int(op.args[0]) - self.parse_code_data(op.args[1][1:-1]) + self.parse_code_data(op.args[2][1:-1]) break else: self.inline_level = 0 @@ -417,7 +417,7 @@ part.descr = descrs[i] part.comment = trace.comment parts.append(part) - + return parts def parse_log_counts(input, loops): diff --git a/pypy/tool/jitlogparser/test/test_parser.py b/pypy/tool/jitlogparser/test/test_parser.py --- a/pypy/tool/jitlogparser/test/test_parser.py +++ b/pypy/tool/jitlogparser/test/test_parser.py @@ -29,7 +29,7 @@ def test_parse_non_code(): ops = 
parse(''' [] - debug_merge_point(0, "SomeRandomStuff") + debug_merge_point(0, 0, "SomeRandomStuff") ''') res = Function.from_operations(ops.operations, LoopStorage()) assert len(res.chunks) == 1 @@ -39,10 +39,10 @@ ops = parse(''' [i0] label() - debug_merge_point(0, " #10 ADD") - debug_merge_point(0, " #11 SUB") + debug_merge_point(0, 0, " #10 ADD") + debug_merge_point(0, 0, " #11 SUB") i1 = int_add(i0, 1) - debug_merge_point(0, " #11 SUB") + debug_merge_point(0, 0, " #11 SUB") i2 = int_add(i1, 1) ''') res = Function.from_operations(ops.operations, LoopStorage(), loopname='') @@ -57,12 +57,12 @@ def test_inlined_call(): ops = parse(""" [] - debug_merge_point(0, ' #28 CALL_FUNCTION') + debug_merge_point(0, 0, ' #28 CALL_FUNCTION') i18 = getfield_gc(p0, descr=) - debug_merge_point(1, ' #0 LOAD_FAST') - debug_merge_point(1, ' #3 LOAD_CONST') - debug_merge_point(1, ' #7 RETURN_VALUE') - debug_merge_point(0, ' #31 STORE_FAST') + debug_merge_point(1, 1, ' #0 LOAD_FAST') + debug_merge_point(1, 1, ' #3 LOAD_CONST') + debug_merge_point(1, 1, ' #7 RETURN_VALUE') + debug_merge_point(0, 0, ' #31 STORE_FAST') """) res = Function.from_operations(ops.operations, LoopStorage()) assert len(res.chunks) == 3 # two chunks + inlined call @@ -75,10 +75,10 @@ def test_name(): ops = parse(''' [i0] - debug_merge_point(0, " #10 ADD") - debug_merge_point(0, " #11 SUB") + debug_merge_point(0, 0, " #10 ADD") + debug_merge_point(0, 0, " #11 SUB") i1 = int_add(i0, 1) - debug_merge_point(0, " #11 SUB") + debug_merge_point(0, 0, " #11 SUB") i2 = int_add(i1, 1) ''') res = Function.from_operations(ops.operations, LoopStorage()) @@ -92,10 +92,10 @@ ops = parse(''' [i0] i3 = int_add(i0, 1) - debug_merge_point(0, " #10 ADD") - debug_merge_point(0, " #11 SUB") + debug_merge_point(0, 0, " #10 ADD") + debug_merge_point(0, 0, " #11 SUB") i1 = int_add(i0, 1) - debug_merge_point(0, " #11 SUB") + debug_merge_point(0, 0, " #11 SUB") i2 = int_add(i1, 1) ''') res = Function.from_operations(ops.operations, 
LoopStorage()) @@ -105,10 +105,10 @@ fname = str(py.path.local(__file__).join('..', 'x.py')) ops = parse(''' [i0, i1] - debug_merge_point(0, " #0 LOAD_FAST") - debug_merge_point(0, " #3 LOAD_FAST") - debug_merge_point(0, " #6 BINARY_ADD") - debug_merge_point(0, " #7 RETURN_VALUE") + debug_merge_point(0, 0, " #0 LOAD_FAST") + debug_merge_point(0, 0, " #3 LOAD_FAST") + debug_merge_point(0, 0, " #6 BINARY_ADD") + debug_merge_point(0, 0, " #7 RETURN_VALUE") ''' % locals()) res = Function.from_operations(ops.operations, LoopStorage()) assert res.chunks[1].lineno == 3 @@ -119,11 +119,11 @@ fname = str(py.path.local(__file__).join('..', 'x.py')) ops = parse(''' [i0, i1] - debug_merge_point(0, " #9 LOAD_FAST") - debug_merge_point(0, " #12 LOAD_CONST") - debug_merge_point(0, " #22 LOAD_CONST") - debug_merge_point(0, " #28 LOAD_CONST") - debug_merge_point(0, " #6 SETUP_LOOP") + debug_merge_point(0, 0, " #9 LOAD_FAST") + debug_merge_point(0, 0, " #12 LOAD_CONST") + debug_merge_point(0, 0, " #22 LOAD_CONST") + debug_merge_point(0, 0, " #28 LOAD_CONST") + debug_merge_point(0, 0, " #6 SETUP_LOOP") ''' % locals()) res = Function.from_operations(ops.operations, LoopStorage()) assert res.linerange == (7, 9) @@ -135,7 +135,7 @@ fname = str(py.path.local(__file__).join('..', 'x.py')) ops = parse(""" [p6, p1] - debug_merge_point(0, ' #17 FOR_ITER') + debug_merge_point(0, 0, ' #17 FOR_ITER') guard_class(p6, 144264192, descr=) p12 = getfield_gc(p6, descr=) """ % locals()) @@ -181,7 +181,7 @@ def test_parsing_strliteral(): loop = parse(""" - debug_merge_point(0, 'StrLiteralSearch at 11/51 [17, 8, 3, 1, 1, 1, 1, 51, 0, 19, 51, 1]') + debug_merge_point(0, 0, 'StrLiteralSearch at 11/51 [17, 8, 3, 1, 1, 1, 1, 51, 0, 19, 51, 1]') """) ops = Function.from_operations(loop.operations, LoopStorage()) chunk = ops.chunks[0] @@ -193,12 +193,12 @@ loop = parse(""" # Loop 0 : loop with 19 ops [p0, p1, p2, p3, i4] - debug_merge_point(0, ' #15 COMPARE_OP') + debug_merge_point(0, 0, ' #15 COMPARE_OP') 
+166: i6 = int_lt(i4, 10000) guard_true(i6, descr=) [p1, p0, p2, p3, i4] - debug_merge_point(0, ' #27 INPLACE_ADD') + debug_merge_point(0, 0, ' #27 INPLACE_ADD') +179: i8 = int_add(i4, 1) - debug_merge_point(0, ' #31 JUMP_ABSOLUTE') + debug_merge_point(0, 0, ' #31 JUMP_ABSOLUTE') +183: i10 = getfield_raw(40564608, descr=) +191: i12 = int_sub(i10, 1) +195: setfield_raw(40564608, i12, descr=) @@ -287,8 +287,8 @@ def test_parse_nonpython(): loop = parse(""" [] - debug_merge_point(0, 'random') - debug_merge_point(0, ' #15 COMPARE_OP') + debug_merge_point(0, 0, 'random') + debug_merge_point(0, 0, ' #15 COMPARE_OP') """) f = Function.from_operations(loop.operations, LoopStorage()) assert f.chunks[-1].filename == 'x.py' From noreply at buildbot.pypy.org Sun Mar 4 12:25:18 2012 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Mar 2012 12:25:18 +0100 (CET) Subject: [pypy-commit] pypy default: Tentative simplification: kill 'param_depth'. See Message-ID: <20120304112518.67FC682008@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r53174:da0facae99dc Date: 2012-03-04 00:53 +0100 http://bitbucket.org/pypy/pypy/changeset/da0facae99dc/ Log: Tentative simplification: kill 'param_depth'. See needed_extra_stack_locations(). 
diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -88,7 +88,6 @@ self._debug = False self.debug_counter_descr = cpu.fielddescrof(DEBUG_COUNTER, 'i') self.fail_boxes_count = 0 - self._current_depths_cache = (0, 0) self.datablockwrapper = None self.stack_check_slowpath = 0 self.propagate_exception_path = 0 @@ -442,10 +441,8 @@ looppos = self.mc.get_relative_pos() looptoken._x86_loop_code = looppos clt.frame_depth = -1 # temporarily - clt.param_depth = -1 # temporarily - frame_depth, param_depth = self._assemble(regalloc, operations) + frame_depth = self._assemble(regalloc, operations) clt.frame_depth = frame_depth - clt.param_depth = param_depth # size_excluding_failure_stuff = self.mc.get_relative_pos() self.write_pending_failure_recoveries() @@ -459,8 +456,7 @@ rawstart + size_excluding_failure_stuff, rawstart)) debug_stop("jit-backend-addr") - self._patch_stackadjust(rawstart + stackadjustpos, - frame_depth + param_depth) + self._patch_stackadjust(rawstart + stackadjustpos, frame_depth) self.patch_pending_failure_recoveries(rawstart) # ops_offset = self.mc.ops_offset @@ -500,14 +496,13 @@ assert ([loc.assembler() for loc in arglocs] == [loc.assembler() for loc in faildescr._x86_debug_faillocs]) regalloc = RegAlloc(self, self.cpu.translate_support_code) - fail_depths = faildescr._x86_current_depths startpos = self.mc.get_relative_pos() - operations = regalloc.prepare_bridge(fail_depths, inputargs, arglocs, + operations = regalloc.prepare_bridge(inputargs, arglocs, operations, self.current_clt.allgcrefs) stackadjustpos = self._patchable_stackadjust() - frame_depth, param_depth = self._assemble(regalloc, operations) + frame_depth = self._assemble(regalloc, operations) codeendpos = self.mc.get_relative_pos() self.write_pending_failure_recoveries() fullsize = self.mc.get_relative_pos() @@ -517,19 +512,16 @@ debug_print("bridge out of Guard %d has address 
%x to %x" % (descr_number, rawstart, rawstart + codeendpos)) debug_stop("jit-backend-addr") - self._patch_stackadjust(rawstart + stackadjustpos, - frame_depth + param_depth) + self._patch_stackadjust(rawstart + stackadjustpos, frame_depth) self.patch_pending_failure_recoveries(rawstart) if not we_are_translated(): # for the benefit of tests faildescr._x86_bridge_frame_depth = frame_depth - faildescr._x86_bridge_param_depth = param_depth # patch the jump from original guard self.patch_jump_for_descr(faildescr, rawstart) ops_offset = self.mc.ops_offset self.fixup_target_tokens(rawstart) self.current_clt.frame_depth = max(self.current_clt.frame_depth, frame_depth) - self.current_clt.param_depth = max(self.current_clt.param_depth, param_depth) self.teardown() # oprofile support if self.cpu.profile_agent is not None: @@ -700,15 +692,12 @@ regalloc.walk_operations(operations) if we_are_translated() or self.cpu.dont_keepalive_stuff: self._regalloc = None # else keep it around for debugging - frame_depth = regalloc.fm.get_frame_depth() - param_depth = regalloc.param_depth + frame_depth = regalloc.get_final_frame_depth() jump_target_descr = regalloc.jump_target_descr if jump_target_descr is not None: target_frame_depth = jump_target_descr._x86_clt.frame_depth - target_param_depth = jump_target_descr._x86_clt.param_depth frame_depth = max(frame_depth, target_frame_depth) - param_depth = max(param_depth, target_param_depth) - return frame_depth, param_depth + return frame_depth def _patchable_stackadjust(self): # stack adjustment LEA @@ -892,10 +881,9 @@ genop_math_list[oopspecindex](self, op, arglocs, resloc) def regalloc_perform_with_guard(self, op, guard_op, faillocs, - arglocs, resloc, current_depths): + arglocs, resloc): faildescr = guard_op.getdescr() assert isinstance(faildescr, AbstractFailDescr) - faildescr._x86_current_depths = current_depths failargs = guard_op.getfailargs() guard_opnum = guard_op.getopnum() guard_token = self.implement_guard_recovery(guard_opnum, 
@@ -911,10 +899,9 @@ # must be added by the genop_guard_list[]() assert guard_token is self.pending_guard_tokens[-1] - def regalloc_perform_guard(self, guard_op, faillocs, arglocs, resloc, - current_depths): + def regalloc_perform_guard(self, guard_op, faillocs, arglocs, resloc): self.regalloc_perform_with_guard(None, guard_op, faillocs, arglocs, - resloc, current_depths) + resloc) def load_effective_addr(self, sizereg, baseofs, scale, result, frm=imm0): self.mc.LEA(result, addr_add(frm, sizereg, baseofs, scale)) @@ -1038,13 +1025,14 @@ self.mc.MOV(tmp, loc) self.mc.MOV_sr(p, tmp.value) p += loc.get_width() - self._regalloc.reserve_param(p//WORD) # x is a location self.mc.CALL(x) self.mark_gc_roots(force_index) # if callconv != FFI_DEFAULT_ABI: self._fix_stdcall(callconv, p) + # + self._regalloc.needed_extra_stack_locations(p//WORD) def _fix_stdcall(self, callconv, p): from pypy.rlib.clibffi import FFI_STDCALL @@ -1127,9 +1115,9 @@ x = r10 remap_frame_layout(self, src_locs, dst_locs, X86_64_SCRATCH_REG) - self._regalloc.reserve_param(len(pass_on_stack)) self.mc.CALL(x) self.mark_gc_roots(force_index) + self._regalloc.needed_extra_stack_locations(len(pass_on_stack)) def call(self, addr, args, res): force_index = self.write_new_force_index() @@ -2136,7 +2124,6 @@ if reg in save_registers: self.mc.MOV_sr(p, reg.value) p += WORD - self._regalloc.reserve_param(p//WORD) # if gcrootmap.is_shadow_stack: args = [] @@ -2192,6 +2179,7 @@ if reg in save_registers: self.mc.MOV_rs(reg.value, p) p += WORD + self._regalloc.needed_extra_stack_locations(p//WORD) def call_reacquire_gil(self, gcrootmap, save_loc): # save the previous result (eax/xmm0) into the stack temporarily. @@ -2199,7 +2187,6 @@ # to save xmm0 in this case. 
if isinstance(save_loc, RegLoc) and not save_loc.is_xmm: self.mc.MOV_sr(WORD, save_loc.value) - self._regalloc.reserve_param(2) # call the reopenstack() function (also reacquiring the GIL) if gcrootmap.is_shadow_stack: args = [] @@ -2219,6 +2206,7 @@ # restore the result from the stack if isinstance(save_loc, RegLoc) and not save_loc.is_xmm: self.mc.MOV_rs(save_loc.value, WORD) + self._regalloc.needed_extra_stack_locations(2) def genop_guard_call_assembler(self, op, guard_op, guard_token, arglocs, result_loc): @@ -2495,11 +2483,6 @@ # copy of heap(nursery_free_adr), so that the final MOV below is # a no-op. - # reserve room for the argument to the real malloc and the - # saved XMM regs (on 32 bit: 8 * 2 words; on 64 bit: 16 * 1 - # word) - self._regalloc.reserve_param(1+16) - gcrootmap = self.cpu.gc_ll_descr.gcrootmap shadow_stack = (gcrootmap is not None and gcrootmap.is_shadow_stack) if not shadow_stack: @@ -2510,6 +2493,11 @@ slowpath_addr2 = self.malloc_slowpath2 self.mc.CALL(imm(slowpath_addr2)) + # reserve room for the argument to the real malloc and the + # saved XMM regs (on 32 bit: 8 * 2 words; on 64 bit: 16 * 1 + # word) + self._regalloc.needed_extra_stack_locations(1+16) + offset = self.mc.get_relative_pos() - jmp_adr assert 0 < offset <= 127 self.mc.overwrite(jmp_adr-1, chr(offset)) diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -168,7 +168,7 @@ def _prepare(self, inputargs, operations, allgcrefs): self.fm = X86FrameManager() - self.param_depth = 0 + self.min_frame_depth = 0 cpu = self.assembler.cpu operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations, allgcrefs) @@ -193,11 +193,9 @@ self.min_bytes_before_label = 13 return operations - def prepare_bridge(self, prev_depths, inputargs, arglocs, operations, - allgcrefs): + def prepare_bridge(self, inputargs, arglocs, operations, allgcrefs): operations = self._prepare(inputargs, 
operations, allgcrefs) self._update_bindings(arglocs, inputargs) - self.param_depth = prev_depths[1] self.min_bytes_before_label = 0 return operations @@ -205,8 +203,15 @@ self.min_bytes_before_label = max(self.min_bytes_before_label, at_least_position) - def reserve_param(self, n): - self.param_depth = max(self.param_depth, n) + def needed_extra_stack_locations(self, n): + # call *after* you needed extra stack locations: (%esp), (%esp+4)... + min_frame_depth = self.fm.get_frame_depth() + n + if min_frame_depth > self.min_frame_depth: + self.min_frame_depth = min_frame_depth + + def get_final_frame_depth(self): + self.needed_extra_stack_locations(0) # update min_frame_depth + return self.min_frame_depth def _set_initial_bindings(self, inputargs): if IS_X86_64: @@ -376,25 +381,12 @@ def locs_for_fail(self, guard_op): return [self.loc(v) for v in guard_op.getfailargs()] - def get_current_depth(self): - # return (self.fm.frame_depth, self.param_depth), but trying to share - # the resulting tuple among several calls - arg0 = self.fm.get_frame_depth() - arg1 = self.param_depth - result = self.assembler._current_depths_cache - if result[0] != arg0 or result[1] != arg1: - result = (arg0, arg1) - self.assembler._current_depths_cache = result - return result - def perform_with_guard(self, op, guard_op, arglocs, result_loc): faillocs = self.locs_for_fail(guard_op) self.rm.position += 1 self.xrm.position += 1 - current_depths = self.get_current_depth() self.assembler.regalloc_perform_with_guard(op, guard_op, faillocs, - arglocs, result_loc, - current_depths) + arglocs, result_loc) if op.result is not None: self.possibly_free_var(op.result) self.possibly_free_vars(guard_op.getfailargs()) @@ -407,10 +399,8 @@ arglocs)) else: self.assembler.dump('%s(%s)' % (guard_op, arglocs)) - current_depths = self.get_current_depth() self.assembler.regalloc_perform_guard(guard_op, faillocs, arglocs, - result_loc, - current_depths) + result_loc) self.possibly_free_vars(guard_op.getfailargs()) 
def PerformDiscard(self, op, arglocs): From noreply at buildbot.pypy.org Sun Mar 4 12:25:20 2012 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Mar 2012 12:25:20 +0100 (CET) Subject: [pypy-commit] pypy default: Fix tests. Message-ID: <20120304112520.020818236C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r53175:0541ffa5b060 Date: 2012-03-04 11:57 +0100 http://bitbucket.org/pypy/pypy/changeset/0541ffa5b060/ Log: Fix tests. diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -28,7 +28,7 @@ class MockGcRootMap(object): is_shadow_stack = False - def get_basic_shape(self, is_64_bit): + def get_basic_shape(self): return ['shape'] def add_frame_offset(self, shape, offset): shape.append(offset) diff --git a/pypy/jit/backend/x86/test/test_recompilation.py b/pypy/jit/backend/x86/test/test_recompilation.py --- a/pypy/jit/backend/x86/test/test_recompilation.py +++ b/pypy/jit/backend/x86/test/test_recompilation.py @@ -34,7 +34,6 @@ ''' loop = self.interpret(ops, [0]) previous = loop._jitcelltoken.compiled_loop_token.frame_depth - assert loop._jitcelltoken.compiled_loop_token.param_depth == 0 assert self.getint(0) == 20 ops = ''' [i1] @@ -51,7 +50,6 @@ bridge = self.attach_bridge(ops, loop, -2) descr = loop.operations[3].getdescr() new = descr._x86_bridge_frame_depth - assert descr._x86_bridge_param_depth == 0 # the force_spill() forces the stack to grow assert new > previous fail = self.run(loop, 0) @@ -116,10 +114,8 @@ loop_frame_depth = loop._jitcelltoken.compiled_loop_token.frame_depth bridge = self.attach_bridge(ops, loop, 6) guard_op = loop.operations[6] - assert loop._jitcelltoken.compiled_loop_token.param_depth == 0 # the force_spill() forces the stack to grow assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth - assert 
guard_op.getdescr()._x86_bridge_param_depth == 0 self.run(loop, 0, 0, 0, 0, 0, 0) assert self.getint(0) == 1 assert self.getint(1) == 20 diff --git a/pypy/jit/backend/x86/test/test_regalloc.py b/pypy/jit/backend/x86/test/test_regalloc.py --- a/pypy/jit/backend/x86/test/test_regalloc.py +++ b/pypy/jit/backend/x86/test/test_regalloc.py @@ -606,23 +606,37 @@ assert self.getints(9) == [0, 1, 1, 1, 1, 1, 1, 1, 1] class TestRegAllocCallAndStackDepth(BaseTestRegalloc): - def expected_param_depth(self, num_args): + def expected_frame_depth(self, num_call_args, num_pushed_input_args=0): # Assumes the arguments are all non-float if IS_X86_32: - return num_args + extra_esp = num_call_args + return extra_esp elif IS_X86_64: - return max(num_args - 6, 0) + # 'num_pushed_input_args' is for X86_64 only + extra_esp = max(num_call_args - 6, 0) + return num_pushed_input_args + extra_esp def test_one_call(self): ops = ''' - [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9] + [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i9b] i10 = call(ConstClass(f1ptr), i0, descr=f1_calldescr) - finish(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9) + finish(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9, i9b) ''' - loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9]) - assert self.getints(10) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9] + loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9, 8]) + assert self.getints(11) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9, 8] clt = loop._jitcelltoken.compiled_loop_token - assert clt.param_depth == self.expected_param_depth(1) + assert clt.frame_depth == self.expected_frame_depth(1, 5) + + def test_one_call_reverse(self): + ops = ''' + [i1, i2, i3, i4, i5, i6, i7, i8, i9, i9b, i0] + i10 = call(ConstClass(f1ptr), i0, descr=f1_calldescr) + finish(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9, i9b) + ''' + loop = self.interpret(ops, [7, 9, 9 ,9, 9, 9, 9, 9, 9, 8, 4]) + assert self.getints(11) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9, 8] + clt = loop._jitcelltoken.compiled_loop_token + assert clt.frame_depth == 
self.expected_frame_depth(1, 6) def test_two_calls(self): ops = ''' @@ -634,7 +648,7 @@ loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9]) assert self.getints(10) == [5*7, 7, 9, 9, 9, 9, 9, 9, 9, 9] clt = loop._jitcelltoken.compiled_loop_token - assert clt.param_depth == self.expected_param_depth(2) + assert clt.frame_depth == self.expected_frame_depth(2, 5) def test_call_many_arguments(self): # NB: The first and last arguments in the call are constants. This @@ -648,25 +662,31 @@ loop = self.interpret(ops, [2, 3, 4, 5, 6, 7, 8, 9]) assert self.getint(0) == 55 clt = loop._jitcelltoken.compiled_loop_token - assert clt.param_depth == self.expected_param_depth(10) + assert clt.frame_depth == self.expected_frame_depth(10) def test_bridge_calls_1(self): ops = ''' [i0, i1] i2 = call(ConstClass(f1ptr), i0, descr=f1_calldescr) - guard_value(i2, 0, descr=fdescr1) [i2, i1] + guard_value(i2, 0, descr=fdescr1) [i2, i0, i1] finish(i1) ''' loop = self.interpret(ops, [4, 7]) assert self.getint(0) == 5 + clt = loop._jitcelltoken.compiled_loop_token + orgdepth = clt.frame_depth + assert orgdepth == self.expected_frame_depth(1, 2) + ops = ''' - [i2, i1] + [i2, i0, i1] i3 = call(ConstClass(f2ptr), i2, i1, descr=f2_calldescr) - finish(i3, descr=fdescr2) + finish(i3, i0, descr=fdescr2) ''' bridge = self.attach_bridge(ops, loop, -2) - assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2) + assert clt.frame_depth == max(orgdepth, self.expected_frame_depth(2, 2)) + assert loop.operations[-2].getdescr()._x86_bridge_frame_depth == \ + self.expected_frame_depth(2, 2) self.run(loop, 4, 7) assert self.getint(0) == 5*7 @@ -676,10 +696,14 @@ [i0, i1] i2 = call(ConstClass(f2ptr), i0, i1, descr=f2_calldescr) guard_value(i2, 0, descr=fdescr1) [i2] - finish(i1) + finish(i2) ''' loop = self.interpret(ops, [4, 7]) assert self.getint(0) == 4*7 + clt = loop._jitcelltoken.compiled_loop_token + orgdepth = clt.frame_depth + assert orgdepth == 
self.expected_frame_depth(2) + ops = ''' [i2] i3 = call(ConstClass(f1ptr), i2, descr=f1_calldescr) @@ -687,7 +711,9 @@ ''' bridge = self.attach_bridge(ops, loop, -2) - assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2) + assert clt.frame_depth == max(orgdepth, self.expected_frame_depth(1)) + assert loop.operations[-2].getdescr()._x86_bridge_frame_depth == \ + self.expected_frame_depth(1) self.run(loop, 4, 7) assert self.getint(0) == 29 From noreply at buildbot.pypy.org Sun Mar 4 12:25:21 2012 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Mar 2012 12:25:21 +0100 (CET) Subject: [pypy-commit] pypy default: hg merge default Message-ID: <20120304112521.A37EF82008@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r53176:dec7d899face Date: 2012-03-04 11:24 +0000 http://bitbucket.org/pypy/pypy/changeset/dec7d899face/ Log: hg merge default diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -171,7 +171,7 @@ 'unicodesetitem' : (('ref', 'int', 'int'), 'int'), 'cast_ptr_to_int' : (('ref',), 'int'), 'cast_int_to_ptr' : (('int',), 'ref'), - 'debug_merge_point': (('ref', 'int'), None), + 'debug_merge_point': (('ref', 'int', 'int'), None), 'force_token' : ((), 'int'), 'call_may_force' : (('int', 'varargs'), 'intorptr'), 'guard_not_forced': ((), None), diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -604,7 +604,7 @@ [funcbox, BoxInt(arg1), BoxInt(arg2)], 'int', descr=calldescr) assert res.getint() == f(arg1, arg2) - + def test_call_stack_alignment(self): # test stack alignment issues, notably for Mac OS/X. # also test the ordering of the arguments. 
@@ -1490,7 +1490,8 @@ def test_noops(self): c_box = self.alloc_string("hi there").constbox() c_nest = ConstInt(0) - self.execute_operation(rop.DEBUG_MERGE_POINT, [c_box, c_nest], 'void') + c_id = ConstInt(0) + self.execute_operation(rop.DEBUG_MERGE_POINT, [c_box, c_nest, c_id], 'void') self.execute_operation(rop.JIT_DEBUG, [c_box, c_nest, c_nest, c_nest, c_nest], 'void') @@ -3061,7 +3062,7 @@ ResOperation(rop.JUMP, [i2], None, descr=targettoken2), ] self.cpu.compile_bridge(faildescr, inputargs, operations, looptoken) - + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 3 res = self.cpu.get_latest_value_int(0) @@ -3106,7 +3107,7 @@ assert len(mc) == len(ops) for i in range(len(mc)): assert mc[i].split("\t")[-1].startswith(ops[i]) - + data = ctypes.string_at(info.asmaddr, info.asmlen) mc = list(machine_code_dump(data, info.asmaddr, cpuname)) lines = [line for line in mc if line.count('\t') == 2] diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -371,7 +371,7 @@ operations = [ ResOperation(rop.LABEL, [i0], None, descr=targettoken), - ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("hello"), 0], None), + ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("hello"), 0, 0], None), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), @@ -390,7 +390,7 @@ bridge = [ ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), - ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("bye"), 0], None), + ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("bye"), 0, 0], None), ResOperation(rop.JUMP, [i1b], None, descr=targettoken), ] bridge[1].setfailargs([i1b]) @@ -531,12 +531,12 @@ loop = """ [i0] label(i0, descr=preambletoken) - debug_merge_point('xyz', 0) + 
debug_merge_point('xyz', 0, 0) i1 = int_add(i0, 1) i2 = int_ge(i1, 10) guard_false(i2) [] label(i1, descr=targettoken) - debug_merge_point('xyz', 0) + debug_merge_point('xyz', 0, 0) i11 = int_add(i1, 1) i12 = int_ge(i11, 10) guard_false(i12) [] @@ -569,7 +569,7 @@ loop = """ [i0] label(i0, descr=targettoken) - debug_merge_point('xyz', 0) + debug_merge_point('xyz', 0, 0) i1 = int_add(i0, 1) i2 = int_ge(i1, 10) guard_false(i2) [] diff --git a/pypy/jit/metainterp/graphpage.py b/pypy/jit/metainterp/graphpage.py --- a/pypy/jit/metainterp/graphpage.py +++ b/pypy/jit/metainterp/graphpage.py @@ -169,9 +169,9 @@ if op.getopnum() == rop.DEBUG_MERGE_POINT: jd_sd = self.metainterp_sd.jitdrivers_sd[op.getarg(0).getint()] if jd_sd._get_printable_location_ptr: - s = jd_sd.warmstate.get_location_str(op.getarglist()[2:]) + s = jd_sd.warmstate.get_location_str(op.getarglist()[3:]) s = s.replace(',', '.') # we use comma for argument splitting - op_repr = "debug_merge_point(%d, '%s')" % (op.getarg(1).getint(), s) + op_repr = "debug_merge_point(%d, %d, '%s')" % (op.getarg(1).getint(), op.getarg(2).getint(), s) lines.append(op_repr) if is_interesting_guard(op): tgt = op.getdescr()._debug_suboperations[0] diff --git a/pypy/jit/metainterp/logger.py b/pypy/jit/metainterp/logger.py --- a/pypy/jit/metainterp/logger.py +++ b/pypy/jit/metainterp/logger.py @@ -110,9 +110,9 @@ def repr_of_resop(self, op, ops_offset=None): if op.getopnum() == rop.DEBUG_MERGE_POINT: jd_sd = self.metainterp_sd.jitdrivers_sd[op.getarg(0).getint()] - s = jd_sd.warmstate.get_location_str(op.getarglist()[2:]) + s = jd_sd.warmstate.get_location_str(op.getarglist()[3:]) s = s.replace(',', '.') # we use comma for argument splitting - return "debug_merge_point(%d, '%s')" % (op.getarg(1).getint(), s) + return "debug_merge_point(%d, %d, '%s')" % (op.getarg(1).getint(), op.getarg(2).getint(), s) if ops_offset is None: offset = -1 else: @@ -149,7 +149,7 @@ if target_token.exported_state: for op in 
target_token.exported_state.inputarg_setup_ops: debug_print(' ' + self.repr_of_resop(op)) - + def _log_operations(self, inputargs, operations, ops_offset): if not have_debug_prints(): return diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -974,9 +974,11 @@ any_operation = len(self.metainterp.history.operations) > 0 jitdriver_sd = self.metainterp.staticdata.jitdrivers_sd[jdindex] self.verify_green_args(jitdriver_sd, greenboxes) - self.debug_merge_point(jitdriver_sd, jdindex, self.metainterp.portal_call_depth, + self.debug_merge_point(jitdriver_sd, jdindex, + self.metainterp.portal_call_depth, + self.metainterp.call_ids[-1], greenboxes) - + if self.metainterp.seen_loop_header_for_jdindex < 0: if not any_operation: return @@ -1028,11 +1030,11 @@ assembler_call=True) raise ChangeFrame - def debug_merge_point(self, jitdriver_sd, jd_index, portal_call_depth, greenkey): + def debug_merge_point(self, jitdriver_sd, jd_index, portal_call_depth, current_call_id, greenkey): # debugging: produce a DEBUG_MERGE_POINT operation loc = jitdriver_sd.warmstate.get_location_str(greenkey) debug_print(loc) - args = [ConstInt(jd_index), ConstInt(portal_call_depth)] + greenkey + args = [ConstInt(jd_index), ConstInt(portal_call_depth), ConstInt(current_call_id)] + greenkey self.metainterp.history.record(rop.DEBUG_MERGE_POINT, args, None) @arguments("box", "label") @@ -1574,11 +1576,14 @@ self.call_pure_results = args_dict_box() self.heapcache = HeapCache() + self.call_ids = [] + self.current_call_id = 0 + def retrace_needed(self, trace): self.partial_trace = trace self.retracing_from = len(self.history.operations) - 1 self.heapcache.reset() - + def perform_call(self, jitcode, boxes, greenkey=None): # causes the metainterp to enter the given subfunction @@ -1592,6 +1597,8 @@ def newframe(self, jitcode, greenkey=None): if jitcode.is_portal: self.portal_call_depth += 1 + 
self.call_ids.append(self.current_call_id) + self.current_call_id += 1 if greenkey is not None and self.is_main_jitcode(jitcode): self.portal_trace_positions.append( (greenkey, len(self.history.operations))) @@ -1608,6 +1615,7 @@ jitcode = frame.jitcode if jitcode.is_portal: self.portal_call_depth -= 1 + self.call_ids.pop() if frame.greenkey is not None and self.is_main_jitcode(jitcode): self.portal_trace_positions.append( (None, len(self.history.operations))) @@ -1976,7 +1984,7 @@ # Found! Compile it as a loop. # raises in case it works -- which is the common case if self.partial_trace: - if start != self.retracing_from: + if start != self.retracing_from: raise SwitchToBlackhole(ABORT_BAD_LOOP) # For now self.compile_loop(original_boxes, live_arg_boxes, start, resumedescr) # creation of the loop was cancelled! @@ -2085,7 +2093,7 @@ if not token.target_tokens: return None return token - + def compile_loop(self, original_boxes, live_arg_boxes, start, resume_at_jump_descr): num_green_args = self.jitdriver_sd.num_green_args greenkey = original_boxes[:num_green_args] diff --git a/pypy/jit/metainterp/test/test_logger.py b/pypy/jit/metainterp/test/test_logger.py --- a/pypy/jit/metainterp/test/test_logger.py +++ b/pypy/jit/metainterp/test/test_logger.py @@ -54,7 +54,7 @@ class FakeJitDriver(object): class warmstate(object): get_location_str = staticmethod(lambda args: "dupa") - + class FakeMetaInterpSd: cpu = AbstractCPU() cpu.ts = self.ts @@ -77,7 +77,7 @@ equaloplists(loop.operations, oloop.operations) assert oloop.inputargs == loop.inputargs return logger, loop, oloop - + def test_simple(self): inp = ''' [i0, i1, i2, p3, p4, p5] @@ -116,12 +116,13 @@ def test_debug_merge_point(self): inp = ''' [] - debug_merge_point(0, 0) + debug_merge_point(0, 0, 0) ''' _, loop, oloop = self.reparse(inp, check_equal=False) assert loop.operations[0].getarg(1).getint() == 0 - assert oloop.operations[0].getarg(1)._get_str() == "dupa" - + assert loop.operations[0].getarg(2).getint() == 0 
+ assert oloop.operations[0].getarg(2)._get_str() == "dupa" + def test_floats(self): inp = ''' [f0] @@ -142,7 +143,7 @@ output = logger.log_loop(loop) assert output.splitlines()[-1] == "jump(i0, descr=)" pure_parse(output) - + def test_guard_descr(self): namespace = {'fdescr': BasicFailDescr()} inp = ''' @@ -154,7 +155,7 @@ output = logger.log_loop(loop) assert output.splitlines()[-1] == "guard_true(i0, descr=) [i0]" pure_parse(output) - + logger = Logger(self.make_metainterp_sd(), guard_number=False) output = logger.log_loop(loop) lastline = output.splitlines()[-1] diff --git a/pypy/jit/metainterp/test/test_warmspot.py b/pypy/jit/metainterp/test/test_warmspot.py --- a/pypy/jit/metainterp/test/test_warmspot.py +++ b/pypy/jit/metainterp/test/test_warmspot.py @@ -13,7 +13,7 @@ class WarmspotTests(object): - + def test_basic(self): mydriver = JitDriver(reds=['a'], greens=['i']) @@ -77,16 +77,16 @@ self.meta_interp(f, [123, 10]) assert len(get_stats().locations) >= 4 for loc in get_stats().locations: - assert loc == (0, 123) + assert loc == (0, 0, 123) def test_set_param_enable_opts(self): from pypy.rpython.annlowlevel import llstr, hlstr - + myjitdriver = JitDriver(greens = [], reds = ['n']) class A(object): def m(self, n): return n-1 - + def g(n): while n > 0: myjitdriver.can_enter_jit(n=n) @@ -332,7 +332,7 @@ ts = llhelper translate_support_code = False stats = "stats" - + def get_fail_descr_number(self, d): return -1 @@ -352,7 +352,7 @@ return "not callable" driver = JitDriver(reds = ['red'], greens = ['green']) - + def f(green): red = 0 while red < 10: diff --git a/pypy/jit/tool/test/test_oparser.py b/pypy/jit/tool/test/test_oparser.py --- a/pypy/jit/tool/test/test_oparser.py +++ b/pypy/jit/tool/test/test_oparser.py @@ -146,16 +146,18 @@ def test_debug_merge_point(self): x = ''' [] - debug_merge_point(0, "info") - debug_merge_point(0, 'info') - debug_merge_point(1, ' info') - debug_merge_point(0, '(stuff) #1') + debug_merge_point(0, 0, "info") + 
debug_merge_point(0, 0, 'info') + debug_merge_point(1, 1, ' info') + debug_merge_point(0, 0, '(stuff) #1') ''' loop = self.parse(x) - assert loop.operations[0].getarg(1)._get_str() == 'info' - assert loop.operations[1].getarg(1)._get_str() == 'info' - assert loop.operations[2].getarg(1)._get_str() == " info" - assert loop.operations[3].getarg(1)._get_str() == "(stuff) #1" + assert loop.operations[0].getarg(2)._get_str() == 'info' + assert loop.operations[0].getarg(1).value == 0 + assert loop.operations[1].getarg(2)._get_str() == 'info' + assert loop.operations[2].getarg(2)._get_str() == " info" + assert loop.operations[2].getarg(1).value == 1 + assert loop.operations[3].getarg(2)._get_str() == "(stuff) #1" def test_descr_with_obj_print(self): diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py --- a/pypy/module/pypyjit/interp_resop.py +++ b/pypy/module/pypyjit/interp_resop.py @@ -72,7 +72,7 @@ Set a compiling hook that will be called each time a loop is optimized, but before assembler compilation. This allows to add additional optimizations on Python level. 
- + The hook will be called with the following signature: hook(jitdriver_name, loop_type, greenkey or guard_number, operations) @@ -121,13 +121,14 @@ ofs = ops_offset.get(op, 0) if op.opnum == rop.DEBUG_MERGE_POINT: jd_sd = jitdrivers_sd[op.getarg(0).getint()] - greenkey = op.getarglist()[2:] + greenkey = op.getarglist()[3:] repr = jd_sd.warmstate.get_location_str(greenkey) w_greenkey = wrap_greenkey(space, jd_sd.jitdriver, greenkey, repr) l_w.append(DebugMergePoint(space, jit_hooks._cast_to_gcref(op), logops.repr_of_resop(op), jd_sd.jitdriver.name, op.getarg(1).getint(), + op.getarg(2).getint(), w_greenkey)) else: l_w.append(WrappedOp(jit_hooks._cast_to_gcref(op), ofs, @@ -164,14 +165,16 @@ llres = res.llbox return WrappedOp(jit_hooks.resop_new(num, args, llres), offset, repr) - at unwrap_spec(repr=str, jd_name=str, call_depth=int) -def descr_new_dmp(space, w_tp, w_args, repr, jd_name, call_depth, w_greenkey): + at unwrap_spec(repr=str, jd_name=str, call_depth=int, call_id=int) +def descr_new_dmp(space, w_tp, w_args, repr, jd_name, call_depth, call_id, + w_greenkey): + args = [space.interp_w(WrappedBox, w_arg).llbox for w_arg in space.listview(w_args)] num = rop.DEBUG_MERGE_POINT return DebugMergePoint(space, jit_hooks.resop_new(num, args, jit_hooks.emptyval()), - repr, jd_name, call_depth, w_greenkey) + repr, jd_name, call_depth, call_id, w_greenkey) class WrappedOp(Wrappable): """ A class representing a single ResOperation, wrapped nicely @@ -206,10 +209,13 @@ jit_hooks.resop_setresult(self.op, box.llbox) class DebugMergePoint(WrappedOp): - def __init__(self, space, op, repr_of_resop, jd_name, call_depth, w_greenkey): + def __init__(self, space, op, repr_of_resop, jd_name, call_depth, call_id, + w_greenkey): + WrappedOp.__init__(self, op, -1, repr_of_resop) self.jd_name = jd_name self.call_depth = call_depth + self.call_id = call_id self.w_greenkey = w_greenkey def get_pycode(self, space): @@ -246,6 +252,7 @@ pycode = GetSetProperty(DebugMergePoint.get_pycode), 
bytecode_no = GetSetProperty(DebugMergePoint.get_bytecode_no), call_depth = interp_attrproperty("call_depth", cls=DebugMergePoint), + call_id = interp_attrproperty("call_id", cls=DebugMergePoint), jitdriver_name = GetSetProperty(DebugMergePoint.get_jitdriver_name), ) DebugMergePoint.acceptable_as_base_class = False diff --git a/pypy/module/pypyjit/test/test_jit_hook.py b/pypy/module/pypyjit/test/test_jit_hook.py --- a/pypy/module/pypyjit/test/test_jit_hook.py +++ b/pypy/module/pypyjit/test/test_jit_hook.py @@ -54,7 +54,7 @@ oplist = parse(""" [i1, i2, p2] i3 = int_add(i1, i2) - debug_merge_point(0, 0, 0, 0, ConstPtr(ptr0)) + debug_merge_point(0, 0, 0, 0, 0, ConstPtr(ptr0)) guard_nonnull(p2) [] guard_true(i3) [] """, namespace={'ptr0': code_gcref}).operations @@ -87,7 +87,7 @@ def interp_on_abort(): pypy_hooks.on_abort(ABORT_TOO_LONG, pypyjitdriver, greenkey, 'blah') - + cls.w_on_compile = space.wrap(interp2app(interp_on_compile)) cls.w_on_compile_bridge = space.wrap(interp2app(interp_on_compile_bridge)) cls.w_on_abort = space.wrap(interp2app(interp_on_abort)) @@ -105,7 +105,7 @@ def hook(name, looptype, tuple_or_guard_no, ops, asmstart, asmlen): all.append((name, looptype, tuple_or_guard_no, ops)) - + self.on_compile() pypyjit.set_compile_hook(hook) assert not all @@ -123,6 +123,7 @@ assert dmp.pycode is self.f.func_code assert dmp.greenkey == (self.f.func_code, 0, False) assert dmp.call_depth == 0 + assert dmp.call_id == 0 assert int_add.name == 'int_add' assert int_add.num == self.int_add_num self.on_compile_bridge() @@ -151,18 +152,18 @@ def test_non_reentrant(self): import pypyjit l = [] - + def hook(*args): l.append(None) self.on_compile() self.on_compile_bridge() - + pypyjit.set_compile_hook(hook) self.on_compile() assert len(l) == 1 # and did not crash self.on_compile_bridge() assert len(l) == 2 # and did not crash - + def test_on_compile_types(self): import pypyjit l = [] @@ -182,7 +183,7 @@ def hook(jitdriver_name, greenkey, reason): 
l.append((jitdriver_name, reason)) - + pypyjit.set_abort_hook(hook) self.on_abort() assert l == [('pypyjit', 'ABORT_TOO_LONG')] @@ -224,13 +225,14 @@ def f(): pass - op = DebugMergePoint([Box(0)], 'repr', 'pypyjit', 2, (f.func_code, 0, 0)) + op = DebugMergePoint([Box(0)], 'repr', 'pypyjit', 2, 3, (f.func_code, 0, 0)) assert op.bytecode_no == 0 assert op.pycode is f.func_code assert repr(op) == 'repr' assert op.jitdriver_name == 'pypyjit' assert op.num == self.dmp_num assert op.call_depth == 2 - op = DebugMergePoint([Box(0)], 'repr', 'notmain', 5, ('str',)) + assert op.call_id == 3 + op = DebugMergePoint([Box(0)], 'repr', 'notmain', 5, 4, ('str',)) raises(AttributeError, 'op.pycode') assert op.call_depth == 5 diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -93,7 +93,7 @@ end_index += 1 op.asm = '\n'.join([asm[i][1] for i in range(asm_index, end_index)]) return loop - + def _asm_disassemble(self, d, origin_addr, tp): from pypy.jit.backend.x86.tool.viewcode import machine_code_dump return list(machine_code_dump(d, tp, origin_addr)) @@ -109,7 +109,7 @@ if not argspec.strip(): return [], None if opname == 'debug_merge_point': - return argspec.split(", ", 1), None + return argspec.split(", ", 2), None else: args = argspec.split(', ') descr = None @@ -159,7 +159,7 @@ for op in operations: if op.name == 'debug_merge_point': self.inline_level = int(op.args[0]) - self.parse_code_data(op.args[1][1:-1]) + self.parse_code_data(op.args[2][1:-1]) break else: self.inline_level = 0 @@ -417,7 +417,7 @@ part.descr = descrs[i] part.comment = trace.comment parts.append(part) - + return parts def parse_log_counts(input, loops): diff --git a/pypy/tool/jitlogparser/test/test_parser.py b/pypy/tool/jitlogparser/test/test_parser.py --- a/pypy/tool/jitlogparser/test/test_parser.py +++ b/pypy/tool/jitlogparser/test/test_parser.py @@ -29,7 +29,7 @@ def test_parse_non_code(): ops = 
parse(''' [] - debug_merge_point(0, "SomeRandomStuff") + debug_merge_point(0, 0, "SomeRandomStuff") ''') res = Function.from_operations(ops.operations, LoopStorage()) assert len(res.chunks) == 1 @@ -39,10 +39,10 @@ ops = parse(''' [i0] label() - debug_merge_point(0, " #10 ADD") - debug_merge_point(0, " #11 SUB") + debug_merge_point(0, 0, " #10 ADD") + debug_merge_point(0, 0, " #11 SUB") i1 = int_add(i0, 1) - debug_merge_point(0, " #11 SUB") + debug_merge_point(0, 0, " #11 SUB") i2 = int_add(i1, 1) ''') res = Function.from_operations(ops.operations, LoopStorage(), loopname='') @@ -57,12 +57,12 @@ def test_inlined_call(): ops = parse(""" [] - debug_merge_point(0, ' #28 CALL_FUNCTION') + debug_merge_point(0, 0, ' #28 CALL_FUNCTION') i18 = getfield_gc(p0, descr=) - debug_merge_point(1, ' #0 LOAD_FAST') - debug_merge_point(1, ' #3 LOAD_CONST') - debug_merge_point(1, ' #7 RETURN_VALUE') - debug_merge_point(0, ' #31 STORE_FAST') + debug_merge_point(1, 1, ' #0 LOAD_FAST') + debug_merge_point(1, 1, ' #3 LOAD_CONST') + debug_merge_point(1, 1, ' #7 RETURN_VALUE') + debug_merge_point(0, 0, ' #31 STORE_FAST') """) res = Function.from_operations(ops.operations, LoopStorage()) assert len(res.chunks) == 3 # two chunks + inlined call @@ -75,10 +75,10 @@ def test_name(): ops = parse(''' [i0] - debug_merge_point(0, " #10 ADD") - debug_merge_point(0, " #11 SUB") + debug_merge_point(0, 0, " #10 ADD") + debug_merge_point(0, 0, " #11 SUB") i1 = int_add(i0, 1) - debug_merge_point(0, " #11 SUB") + debug_merge_point(0, 0, " #11 SUB") i2 = int_add(i1, 1) ''') res = Function.from_operations(ops.operations, LoopStorage()) @@ -92,10 +92,10 @@ ops = parse(''' [i0] i3 = int_add(i0, 1) - debug_merge_point(0, " #10 ADD") - debug_merge_point(0, " #11 SUB") + debug_merge_point(0, 0, " #10 ADD") + debug_merge_point(0, 0, " #11 SUB") i1 = int_add(i0, 1) - debug_merge_point(0, " #11 SUB") + debug_merge_point(0, 0, " #11 SUB") i2 = int_add(i1, 1) ''') res = Function.from_operations(ops.operations, 
LoopStorage()) @@ -105,10 +105,10 @@ fname = str(py.path.local(__file__).join('..', 'x.py')) ops = parse(''' [i0, i1] - debug_merge_point(0, " #0 LOAD_FAST") - debug_merge_point(0, " #3 LOAD_FAST") - debug_merge_point(0, " #6 BINARY_ADD") - debug_merge_point(0, " #7 RETURN_VALUE") + debug_merge_point(0, 0, " #0 LOAD_FAST") + debug_merge_point(0, 0, " #3 LOAD_FAST") + debug_merge_point(0, 0, " #6 BINARY_ADD") + debug_merge_point(0, 0, " #7 RETURN_VALUE") ''' % locals()) res = Function.from_operations(ops.operations, LoopStorage()) assert res.chunks[1].lineno == 3 @@ -119,11 +119,11 @@ fname = str(py.path.local(__file__).join('..', 'x.py')) ops = parse(''' [i0, i1] - debug_merge_point(0, " #9 LOAD_FAST") - debug_merge_point(0, " #12 LOAD_CONST") - debug_merge_point(0, " #22 LOAD_CONST") - debug_merge_point(0, " #28 LOAD_CONST") - debug_merge_point(0, " #6 SETUP_LOOP") + debug_merge_point(0, 0, " #9 LOAD_FAST") + debug_merge_point(0, 0, " #12 LOAD_CONST") + debug_merge_point(0, 0, " #22 LOAD_CONST") + debug_merge_point(0, 0, " #28 LOAD_CONST") + debug_merge_point(0, 0, " #6 SETUP_LOOP") ''' % locals()) res = Function.from_operations(ops.operations, LoopStorage()) assert res.linerange == (7, 9) @@ -135,7 +135,7 @@ fname = str(py.path.local(__file__).join('..', 'x.py')) ops = parse(""" [p6, p1] - debug_merge_point(0, ' #17 FOR_ITER') + debug_merge_point(0, 0, ' #17 FOR_ITER') guard_class(p6, 144264192, descr=) p12 = getfield_gc(p6, descr=) """ % locals()) @@ -181,7 +181,7 @@ def test_parsing_strliteral(): loop = parse(""" - debug_merge_point(0, 'StrLiteralSearch at 11/51 [17, 8, 3, 1, 1, 1, 1, 51, 0, 19, 51, 1]') + debug_merge_point(0, 0, 'StrLiteralSearch at 11/51 [17, 8, 3, 1, 1, 1, 1, 51, 0, 19, 51, 1]') """) ops = Function.from_operations(loop.operations, LoopStorage()) chunk = ops.chunks[0] @@ -193,12 +193,12 @@ loop = parse(""" # Loop 0 : loop with 19 ops [p0, p1, p2, p3, i4] - debug_merge_point(0, ' #15 COMPARE_OP') + debug_merge_point(0, 0, ' #15 COMPARE_OP') 
+166: i6 = int_lt(i4, 10000) guard_true(i6, descr=) [p1, p0, p2, p3, i4] - debug_merge_point(0, ' #27 INPLACE_ADD') + debug_merge_point(0, 0, ' #27 INPLACE_ADD') +179: i8 = int_add(i4, 1) - debug_merge_point(0, ' #31 JUMP_ABSOLUTE') + debug_merge_point(0, 0, ' #31 JUMP_ABSOLUTE') +183: i10 = getfield_raw(40564608, descr=) +191: i12 = int_sub(i10, 1) +195: setfield_raw(40564608, i12, descr=) @@ -287,8 +287,8 @@ def test_parse_nonpython(): loop = parse(""" [] - debug_merge_point(0, 'random') - debug_merge_point(0, ' #15 COMPARE_OP') + debug_merge_point(0, 0, 'random') + debug_merge_point(0, 0, ' #15 COMPARE_OP') """) f = Function.from_operations(loop.operations, LoopStorage()) assert f.chunks[-1].filename == 'x.py' From noreply at buildbot.pypy.org Sun Mar 4 13:49:13 2012 From: noreply at buildbot.pypy.org (cfbolz) Date: Sun, 4 Mar 2012 13:49:13 +0100 (CET) Subject: [pypy-commit] pypy default: bah. of course the identity hash and the identity eq are fast. Message-ID: <20120304124913.3DCFD82008@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r53177:0f597ff4a6ba Date: 2012-03-01 21:12 +0100 http://bitbucket.org/pypy/pypy/changeset/0f597ff4a6ba/ Log: bah. of course the identity hash and the identity eq are fast. 
diff --git a/pypy/rpython/rclass.py b/pypy/rpython/rclass.py --- a/pypy/rpython/rclass.py +++ b/pypy/rpython/rclass.py @@ -364,6 +364,8 @@ def get_ll_hash_function(self): return ll_inst_hash + get_ll_fasthash_function = get_ll_hash_function + def rtype_type(self, hop): raise NotImplementedError diff --git a/pypy/rpython/test/test_rdict.py b/pypy/rpython/test/test_rdict.py --- a/pypy/rpython/test/test_rdict.py +++ b/pypy/rpython/test/test_rdict.py @@ -449,6 +449,21 @@ assert r_AB_dic.lowleveltype == r_BA_dic.lowleveltype + def test_identity_hash_is_fast(self): + class A(object): + pass + + def f(): + return {A(): 1} + + t = TranslationContext() + s = t.buildannotator().build_types(f, []) + rtyper = t.buildrtyper() + rtyper.specialize() + + r_dict = rtyper.getrepr(s) + assert not hasattr(r_dict.lowleveltype.TO.entries.TO.OF, "f_hash") + def test_tuple_dict(self): def f(i): d = {} From noreply at buildbot.pypy.org Sun Mar 4 13:49:16 2012 From: noreply at buildbot.pypy.org (cfbolz) Date: Sun, 4 Mar 2012 13:49:16 +0100 (CET) Subject: [pypy-commit] pypy default: merge Message-ID: <20120304124916.82B0382008@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r53178:8de7dbbbd436 Date: 2012-03-04 13:31 +0100 http://bitbucket.org/pypy/pypy/changeset/8de7dbbbd436/ Log: merge diff --git a/lib_pypy/cPickle.py b/lib_pypy/cPickle.py --- a/lib_pypy/cPickle.py +++ b/lib_pypy/cPickle.py @@ -2,16 +2,95 @@ # One-liner implementation of cPickle # -from pickle import * +from pickle import Pickler, dump, dumps, PickleError, PicklingError, UnpicklingError, _EmptyClass from pickle import __doc__, __version__, format_version, compatible_formats +from types import * +from copy_reg import dispatch_table +from copy_reg import _extension_registry, _inverted_registry, _extension_cache +import marshal, struct, sys try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f +# These are purely informational; no code uses these. 
+format_version = "2.0" # File format version we write +compatible_formats = ["1.0", # Original protocol 0 + "1.1", # Protocol 0 with INST added + "1.2", # Original protocol 1 + "1.3", # Protocol 1 with BINFLOAT added + "2.0", # Protocol 2 + ] # Old format versions we can read + +# Keep in synch with cPickle. This is the highest protocol number we +# know how to read. +HIGHEST_PROTOCOL = 2 BadPickleGet = KeyError UnpickleableError = PicklingError +MARK = ord('(') # push special markobject on stack +STOP = ord('.') # every pickle ends with STOP +POP = ord('0') # discard topmost stack item +POP_MARK = ord('1') # discard stack top through topmost markobject +DUP = ord('2') # duplicate top stack item +FLOAT = ord('F') # push float object; decimal string argument +INT = ord('I') # push integer or bool; decimal string argument +BININT = ord('J') # push four-byte signed int +BININT1 = ord('K') # push 1-byte unsigned int +LONG = ord('L') # push long; decimal string argument +BININT2 = ord('M') # push 2-byte unsigned int +NONE = ord('N') # push None +PERSID = ord('P') # push persistent object; id is taken from string arg +BINPERSID = ord('Q') # " " " ; " " " " stack +REDUCE = ord('R') # apply callable to argtuple, both on stack +STRING = ord('S') # push string; NL-terminated string argument +BINSTRING = ord('T') # push string; counted binary string argument +SHORT_BINSTRING = ord('U') # " " ; " " " " < 256 bytes +UNICODE = ord('V') # push Unicode string; raw-unicode-escaped'd argument +BINUNICODE = ord('X') # " " " ; counted UTF-8 string argument +APPEND = ord('a') # append stack top to list below it +BUILD = ord('b') # call __setstate__ or __dict__.update() +GLOBAL = ord('c') # push self.find_class(modname, name); 2 string args +DICT = ord('d') # build a dict from stack items +EMPTY_DICT = ord('}') # push empty dict +APPENDS = ord('e') # extend list on stack by topmost stack slice +GET = ord('g') # push item from memo on stack; index is string arg +BINGET = ord('h') # " " 
" " " " ; " " 1-byte arg +INST = ord('i') # build & push class instance +LONG_BINGET = ord('j') # push item from memo on stack; index is 4-byte arg +LIST = ord('l') # build list from topmost stack items +EMPTY_LIST = ord(']') # push empty list +OBJ = ord('o') # build & push class instance +PUT = ord('p') # store stack top in memo; index is string arg +BINPUT = ord('q') # " " " " " ; " " 1-byte arg +LONG_BINPUT = ord('r') # " " " " " ; " " 4-byte arg +SETITEM = ord('s') # add key+value pair to dict +TUPLE = ord('t') # build tuple from topmost stack items +EMPTY_TUPLE = ord(')') # push empty tuple +SETITEMS = ord('u') # modify dict by adding topmost key+value pairs +BINFLOAT = ord('G') # push float; arg is 8-byte float encoding + +TRUE = 'I01\n' # not an opcode; see INT docs in pickletools.py +FALSE = 'I00\n' # not an opcode; see INT docs in pickletools.py + +# Protocol 2 + +PROTO = ord('\x80') # identify pickle protocol +NEWOBJ = ord('\x81') # build object by applying cls.__new__ to argtuple +EXT1 = ord('\x82') # push object from extension registry; 1-byte index +EXT2 = ord('\x83') # ditto, but 2-byte index +EXT4 = ord('\x84') # ditto, but 4-byte index +TUPLE1 = ord('\x85') # build 1-tuple from stack top +TUPLE2 = ord('\x86') # build 2-tuple from two topmost stack items +TUPLE3 = ord('\x87') # build 3-tuple from three topmost stack items +NEWTRUE = ord('\x88') # push True +NEWFALSE = ord('\x89') # push False +LONG1 = ord('\x8a') # push long from < 256 bytes +LONG4 = ord('\x8b') # push really big long + +_tuplesize2code = [EMPTY_TUPLE, TUPLE1, TUPLE2, TUPLE3] + + # ____________________________________________________________ # XXX some temporary dark magic to produce pickled dumps that are # closer to the ones produced by cPickle in CPython @@ -44,3 +123,474 @@ file = StringIO() Pickler(file, protocol).dump(obj) return file.getvalue() + +# Why use struct.pack() for pickling but marshal.loads() for +# unpickling? 
struct.pack() is 40% faster than marshal.dumps(), but +# marshal.loads() is twice as fast as struct.unpack()! +mloads = marshal.loads + +# Unpickling machinery + +class Unpickler(object): + + def __init__(self, file): + """This takes a file-like object for reading a pickle data stream. + + The protocol version of the pickle is detected automatically, so no + proto argument is needed. + + The file-like object must have two methods, a read() method that + takes an integer argument, and a readline() method that requires no + arguments. Both methods should return a string. Thus file-like + object can be a file object opened for reading, a StringIO object, + or any other custom object that meets this interface. + """ + self.readline = file.readline + self.read = file.read + self.memo = {} + + def load(self): + """Read a pickled object representation from the open file. + + Return the reconstituted object hierarchy specified in the file. + """ + self.mark = object() # any new unique object + self.stack = [] + self.append = self.stack.append + try: + key = ord(self.read(1)) + while key != STOP: + self.dispatch[key](self) + key = ord(self.read(1)) + except TypeError: + if self.read(1) == '': + raise EOFError + raise + return self.stack.pop() + + # Return largest index k such that self.stack[k] is self.mark. + # If the stack doesn't contain a mark, eventually raises IndexError. + # This could be sped by maintaining another stack, of indices at which + # the mark appears. For that matter, the latter stack would suffice, + # and we wouldn't need to push mark objects on self.stack at all. + # Doing so is probably a good thing, though, since if the pickle is + # corrupt (or hostile) we may get a clue from finding self.mark embedded + # in unpickled objects. 
+ def marker(self): + k = len(self.stack)-1 + while self.stack[k] is not self.mark: k -= 1 + return k + + dispatch = {} + + def load_proto(self): + proto = ord(self.read(1)) + if not 0 <= proto <= 2: + raise ValueError, "unsupported pickle protocol: %d" % proto + dispatch[PROTO] = load_proto + + def load_persid(self): + pid = self.readline()[:-1] + self.append(self.persistent_load(pid)) + dispatch[PERSID] = load_persid + + def load_binpersid(self): + pid = self.stack.pop() + self.append(self.persistent_load(pid)) + dispatch[BINPERSID] = load_binpersid + + def load_none(self): + self.append(None) + dispatch[NONE] = load_none + + def load_false(self): + self.append(False) + dispatch[NEWFALSE] = load_false + + def load_true(self): + self.append(True) + dispatch[NEWTRUE] = load_true + + def load_int(self): + data = self.readline() + if data == FALSE[1:]: + val = False + elif data == TRUE[1:]: + val = True + else: + try: + val = int(data) + except ValueError: + val = long(data) + self.append(val) + dispatch[INT] = load_int + + def load_binint(self): + self.append(mloads('i' + self.read(4))) + dispatch[BININT] = load_binint + + def load_binint1(self): + self.append(ord(self.read(1))) + dispatch[BININT1] = load_binint1 + + def load_binint2(self): + self.append(mloads('i' + self.read(2) + '\000\000')) + dispatch[BININT2] = load_binint2 + + def load_long(self): + self.append(long(self.readline()[:-1], 0)) + dispatch[LONG] = load_long + + def load_long1(self): + n = ord(self.read(1)) + bytes = self.read(n) + self.append(decode_long(bytes)) + dispatch[LONG1] = load_long1 + + def load_long4(self): + n = mloads('i' + self.read(4)) + bytes = self.read(n) + self.append(decode_long(bytes)) + dispatch[LONG4] = load_long4 + + def load_float(self): + self.append(float(self.readline()[:-1])) + dispatch[FLOAT] = load_float + + def load_binfloat(self, unpack=struct.unpack): + self.append(unpack('>d', self.read(8))[0]) + dispatch[BINFLOAT] = load_binfloat + + def load_string(self): + rep 
= self.readline() + if len(rep) < 3: + raise ValueError, "insecure string pickle" + if rep[0] == "'" == rep[-2]: + rep = rep[1:-2] + elif rep[0] == '"' == rep[-2]: + rep = rep[1:-2] + else: + raise ValueError, "insecure string pickle" + self.append(rep.decode("string-escape")) + dispatch[STRING] = load_string + + def load_binstring(self): + L = mloads('i' + self.read(4)) + self.append(self.read(L)) + dispatch[BINSTRING] = load_binstring + + def load_unicode(self): + self.append(unicode(self.readline()[:-1],'raw-unicode-escape')) + dispatch[UNICODE] = load_unicode + + def load_binunicode(self): + L = mloads('i' + self.read(4)) + self.append(unicode(self.read(L),'utf-8')) + dispatch[BINUNICODE] = load_binunicode + + def load_short_binstring(self): + L = ord(self.read(1)) + self.append(self.read(L)) + dispatch[SHORT_BINSTRING] = load_short_binstring + + def load_tuple(self): + k = self.marker() + self.stack[k:] = [tuple(self.stack[k+1:])] + dispatch[TUPLE] = load_tuple + + def load_empty_tuple(self): + self.stack.append(()) + dispatch[EMPTY_TUPLE] = load_empty_tuple + + def load_tuple1(self): + self.stack[-1] = (self.stack[-1],) + dispatch[TUPLE1] = load_tuple1 + + def load_tuple2(self): + self.stack[-2:] = [(self.stack[-2], self.stack[-1])] + dispatch[TUPLE2] = load_tuple2 + + def load_tuple3(self): + self.stack[-3:] = [(self.stack[-3], self.stack[-2], self.stack[-1])] + dispatch[TUPLE3] = load_tuple3 + + def load_empty_list(self): + self.stack.append([]) + dispatch[EMPTY_LIST] = load_empty_list + + def load_empty_dictionary(self): + self.stack.append({}) + dispatch[EMPTY_DICT] = load_empty_dictionary + + def load_list(self): + k = self.marker() + self.stack[k:] = [self.stack[k+1:]] + dispatch[LIST] = load_list + + def load_dict(self): + k = self.marker() + d = {} + items = self.stack[k+1:] + for i in range(0, len(items), 2): + key = items[i] + value = items[i+1] + d[key] = value + self.stack[k:] = [d] + dispatch[DICT] = load_dict + + # INST and OBJ differ only in 
how they get a class object. It's not + # only sensible to do the rest in a common routine, the two routines + # previously diverged and grew different bugs. + # klass is the class to instantiate, and k points to the topmost mark + # object, following which are the arguments for klass.__init__. + def _instantiate(self, klass, k): + args = tuple(self.stack[k+1:]) + del self.stack[k:] + instantiated = 0 + if (not args and + type(klass) is ClassType and + not hasattr(klass, "__getinitargs__")): + try: + value = _EmptyClass() + value.__class__ = klass + instantiated = 1 + except RuntimeError: + # In restricted execution, assignment to inst.__class__ is + # prohibited + pass + if not instantiated: + try: + value = klass(*args) + except TypeError, err: + raise TypeError, "in constructor for %s: %s" % ( + klass.__name__, str(err)), sys.exc_info()[2] + self.append(value) + + def load_inst(self): + module = self.readline()[:-1] + name = self.readline()[:-1] + klass = self.find_class(module, name) + self._instantiate(klass, self.marker()) + dispatch[INST] = load_inst + + def load_obj(self): + # Stack is ... markobject classobject arg1 arg2 ... 
+ k = self.marker() + klass = self.stack.pop(k+1) + self._instantiate(klass, k) + dispatch[OBJ] = load_obj + + def load_newobj(self): + args = self.stack.pop() + cls = self.stack[-1] + obj = cls.__new__(cls, *args) + self.stack[-1] = obj + dispatch[NEWOBJ] = load_newobj + + def load_global(self): + module = self.readline()[:-1] + name = self.readline()[:-1] + klass = self.find_class(module, name) + self.append(klass) + dispatch[GLOBAL] = load_global + + def load_ext1(self): + code = ord(self.read(1)) + self.get_extension(code) + dispatch[EXT1] = load_ext1 + + def load_ext2(self): + code = mloads('i' + self.read(2) + '\000\000') + self.get_extension(code) + dispatch[EXT2] = load_ext2 + + def load_ext4(self): + code = mloads('i' + self.read(4)) + self.get_extension(code) + dispatch[EXT4] = load_ext4 + + def get_extension(self, code): + nil = [] + obj = _extension_cache.get(code, nil) + if obj is not nil: + self.append(obj) + return + key = _inverted_registry.get(code) + if not key: + raise ValueError("unregistered extension code %d" % code) + obj = self.find_class(*key) + _extension_cache[code] = obj + self.append(obj) + + def find_class(self, module, name): + # Subclasses may override this + __import__(module) + mod = sys.modules[module] + klass = getattr(mod, name) + return klass + + def load_reduce(self): + args = self.stack.pop() + func = self.stack[-1] + value = self.stack[-1](*args) + self.stack[-1] = value + dispatch[REDUCE] = load_reduce + + def load_pop(self): + del self.stack[-1] + dispatch[POP] = load_pop + + def load_pop_mark(self): + k = self.marker() + del self.stack[k:] + dispatch[POP_MARK] = load_pop_mark + + def load_dup(self): + self.append(self.stack[-1]) + dispatch[DUP] = load_dup + + def load_get(self): + self.append(self.memo[self.readline()[:-1]]) + dispatch[GET] = load_get + + def load_binget(self): + i = ord(self.read(1)) + self.append(self.memo[repr(i)]) + dispatch[BINGET] = load_binget + + def load_long_binget(self): + i = mloads('i' + 
self.read(4)) + self.append(self.memo[repr(i)]) + dispatch[LONG_BINGET] = load_long_binget + + def load_put(self): + self.memo[self.readline()[:-1]] = self.stack[-1] + dispatch[PUT] = load_put + + def load_binput(self): + i = ord(self.read(1)) + self.memo[repr(i)] = self.stack[-1] + dispatch[BINPUT] = load_binput + + def load_long_binput(self): + i = mloads('i' + self.read(4)) + self.memo[repr(i)] = self.stack[-1] + dispatch[LONG_BINPUT] = load_long_binput + + def load_append(self): + value = self.stack.pop() + self.stack[-1].append(value) + dispatch[APPEND] = load_append + + def load_appends(self): + stack = self.stack + mark = self.marker() + lst = stack[mark - 1] + lst.extend(stack[mark + 1:]) + del stack[mark:] + dispatch[APPENDS] = load_appends + + def load_setitem(self): + stack = self.stack + value = stack.pop() + key = stack.pop() + dict = stack[-1] + dict[key] = value + dispatch[SETITEM] = load_setitem + + def load_setitems(self): + stack = self.stack + mark = self.marker() + dict = stack[mark - 1] + for i in range(mark + 1, len(stack), 2): + dict[stack[i]] = stack[i + 1] + + del stack[mark:] + dispatch[SETITEMS] = load_setitems + + def load_build(self): + stack = self.stack + state = stack.pop() + inst = stack[-1] + setstate = getattr(inst, "__setstate__", None) + if setstate: + setstate(state) + return + slotstate = None + if isinstance(state, tuple) and len(state) == 2: + state, slotstate = state + if state: + try: + d = inst.__dict__ + try: + for k, v in state.iteritems(): + d[intern(k)] = v + # keys in state don't have to be strings + # don't blow up, but don't go out of our way + except TypeError: + d.update(state) + + except RuntimeError: + # XXX In restricted execution, the instance's __dict__ + # is not accessible. Use the old way of unpickling + # the instance variables. This is a semantic + # difference when unpickling in restricted + # vs. unrestricted modes. 
+ # Note, however, that cPickle has never tried to do the + # .update() business, and always uses + # PyObject_SetItem(inst.__dict__, key, value) in a + # loop over state.items(). + for k, v in state.items(): + setattr(inst, k, v) + if slotstate: + for k, v in slotstate.items(): + setattr(inst, k, v) + dispatch[BUILD] = load_build + + def load_mark(self): + self.append(self.mark) + dispatch[MARK] = load_mark + +#from pickle import decode_long + +def decode_long(data): + r"""Decode a long from a two's complement little-endian binary string. + + >>> decode_long('') + 0L + >>> decode_long("\xff\x00") + 255L + >>> decode_long("\xff\x7f") + 32767L + >>> decode_long("\x00\xff") + -256L + >>> decode_long("\x00\x80") + -32768L + >>> decode_long("\x80") + -128L + >>> decode_long("\x7f") + 127L + """ + + nbytes = len(data) + if nbytes == 0: + return 0L + ind = nbytes - 1 + while ind and ord(data[ind]) == 0: + ind -= 1 + n = ord(data[ind]) + while ind: + n <<= 8 + ind -= 1 + if ord(data[ind]): + n += ord(data[ind]) + if ord(data[nbytes - 1]) >= 128: + n -= 1L << (nbytes << 3) + return n + +def load(f): + return Unpickler(f).load() + +def loads(str): + f = StringIO(str) + return Unpickler(f).load() diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py --- a/lib_pypy/datetime.py +++ b/lib_pypy/datetime.py @@ -1032,8 +1032,8 @@ def __setstate(self, string): if len(string) != 4 or not (1 <= ord(string[2]) <= 12): raise TypeError("not enough arguments") - yhi, ylo, self._month, self._day = map(ord, string) - self._year = yhi * 256 + ylo + self._month, self._day = ord(string[2]), ord(string[3]) + self._year = ord(string[0]) * 256 + ord(string[1]) def __reduce__(self): return (self.__class__, self._getstate()) @@ -1421,9 +1421,10 @@ def __setstate(self, string, tzinfo): if len(string) != 6 or ord(string[0]) >= 24: raise TypeError("an integer is required") - self._hour, self._minute, self._second, us1, us2, us3 = \ - map(ord, string) - self._microsecond = (((us1 << 8) | us2) << 8) 
| us3 + self._hour, self._minute, self._second = ord(string[0]), \ + ord(string[1]), ord(string[2]) + self._microsecond = (((ord(string[3]) << 8) | \ + ord(string[4])) << 8) | ord(string[5]) self._tzinfo = tzinfo def __reduce__(self): @@ -1903,10 +1904,11 @@ return (basestate, self._tzinfo) def __setstate(self, string, tzinfo): - (yhi, ylo, self._month, self._day, self._hour, - self._minute, self._second, us1, us2, us3) = map(ord, string) - self._year = yhi * 256 + ylo - self._microsecond = (((us1 << 8) | us2) << 8) | us3 + (self._month, self._day, self._hour, self._minute, + self._second) = (ord(string[2]), ord(string[3]), ord(string[4]), + ord(string[5]), ord(string[6])) + self._year = ord(string[0]) * 256 + ord(string[1]) + self._microsecond = (((ord(string[7]) << 8) | ord(string[8])) << 8) | ord(string[9]) self._tzinfo = tzinfo def __reduce__(self): diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -13,7 +13,7 @@ and not p.basename.startswith('test')] essential_modules = dict.fromkeys( - ["exceptions", "_file", "sys", "__builtin__", "posix"] + ["exceptions", "_file", "sys", "__builtin__", "posix", "_warnings"] ) default_modules = essential_modules.copy() diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1471,8 +1471,8 @@ def warn(self, msg, w_warningcls): self.appexec([self.wrap(msg), w_warningcls], """(msg, warningcls): - import warnings - warnings.warn(msg, warningcls, stacklevel=2) + import _warnings + _warnings.warn(msg, warningcls, stacklevel=2) """) def resolve_target(self, w_obj): diff --git a/pypy/interpreter/pyparser/parsestring.py b/pypy/interpreter/pyparser/parsestring.py --- a/pypy/interpreter/pyparser/parsestring.py +++ b/pypy/interpreter/pyparser/parsestring.py @@ -1,5 +1,6 @@ from pypy.interpreter.error import OperationError from pypy.interpreter import 
unicodehelper +from pypy.rlib.rstring import StringBuilder def parsestr(space, encoding, s, unicode_literals=False): # compiler.transformer.Transformer.decode_literal depends on what @@ -115,21 +116,23 @@ the string is UTF-8 encoded and should be re-encoded in the specified encoding. """ - lis = [] + builder = StringBuilder(len(s)) ps = 0 end = len(s) - while ps < end: - if s[ps] != '\\': - # note that the C code has a label here. - # the logic is the same. + while 1: + ps2 = ps + while ps < end and s[ps] != '\\': if recode_encoding and ord(s[ps]) & 0x80: w, ps = decode_utf8(space, s, ps, end, recode_encoding) - # Append bytes to output buffer. - lis.append(w) + builder.append(w) + ps2 = ps else: - lis.append(s[ps]) ps += 1 - continue + if ps > ps2: + builder.append_slice(s, ps2, ps) + if ps == end: + break + ps += 1 if ps == end: raise_app_valueerror(space, 'Trailing \\ in string') @@ -140,25 +143,25 @@ if ch == '\n': pass elif ch == '\\': - lis.append('\\') + builder.append('\\') elif ch == "'": - lis.append("'") + builder.append("'") elif ch == '"': - lis.append('"') + builder.append('"') elif ch == 'b': - lis.append("\010") + builder.append("\010") elif ch == 'f': - lis.append('\014') # FF + builder.append('\014') # FF elif ch == 't': - lis.append('\t') + builder.append('\t') elif ch == 'n': - lis.append('\n') + builder.append('\n') elif ch == 'r': - lis.append('\r') + builder.append('\r') elif ch == 'v': - lis.append('\013') # VT + builder.append('\013') # VT elif ch == 'a': - lis.append('\007') # BEL, not classic C + builder.append('\007') # BEL, not classic C elif ch in '01234567': # Look for up to two more octal digits span = ps @@ -168,13 +171,13 @@ # emulate a strange wrap-around behavior of CPython: # \400 is the same as \000 because 0400 == 256 num = int(octal, 8) & 0xFF - lis.append(chr(num)) + builder.append(chr(num)) ps = span elif ch == 'x': if ps+2 <= end and isxdigit(s[ps]) and isxdigit(s[ps + 1]): hexa = s[ps : ps + 2] num = int(hexa, 16) - 
lis.append(chr(num)) + builder.append(chr(num)) ps += 2 else: raise_app_valueerror(space, 'invalid \\x escape') @@ -184,13 +187,13 @@ # this was not an escape, so the backslash # has to be added, and we start over in # non-escape mode. - lis.append('\\') + builder.append('\\') ps -= 1 assert ps >= 0 continue # an arbitry number of unescaped UTF-8 bytes may follow. - buf = ''.join(lis) + buf = builder.build() return buf diff --git a/pypy/interpreter/streamutil.py b/pypy/interpreter/streamutil.py new file mode 100644 --- /dev/null +++ b/pypy/interpreter/streamutil.py @@ -0,0 +1,17 @@ +from pypy.rlib.streamio import StreamError +from pypy.interpreter.error import OperationError, wrap_oserror2 + +def wrap_streamerror(space, e, w_filename=None): + if isinstance(e, StreamError): + return OperationError(space.w_ValueError, + space.wrap(e.message)) + elif isinstance(e, OSError): + return wrap_oserror_as_ioerror(space, e, w_filename) + else: + # should not happen: wrap_streamerror() is only called when + # StreamErrors = (OSError, StreamError) are raised + return OperationError(space.w_IOError, space.w_None) + +def wrap_oserror_as_ioerror(space, e, w_filename=None): + return wrap_oserror2(space, e, w_filename, + w_exception_class=space.w_IOError) diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py --- a/pypy/interpreter/test/test_typedef.py +++ b/pypy/interpreter/test/test_typedef.py @@ -304,6 +304,42 @@ assert_method(w_o1, "c", True) assert_method(w_o2, "c", False) + def test_total_ordering(self): + class W_SomeType(Wrappable): + def __init__(self, space, x): + self.space = space + self.x = x + + def descr__lt(self, w_other): + assert isinstance(w_other, W_SomeType) + return self.space.wrap(self.x < w_other.x) + + def descr__eq(self, w_other): + assert isinstance(w_other, W_SomeType) + return self.space.wrap(self.x == w_other.x) + + W_SomeType.typedef = typedef.TypeDef( + 'some_type', + __total_ordering__ = 'auto', + __lt__ = 
interp2app(W_SomeType.descr__lt), + __eq__ = interp2app(W_SomeType.descr__eq), + ) + space = self.space + w_b = space.wrap(W_SomeType(space, 2)) + w_c = space.wrap(W_SomeType(space, 2)) + w_a = space.wrap(W_SomeType(space, 1)) + # explicitly defined + assert space.is_true(space.lt(w_a, w_b)) + assert not space.is_true(space.eq(w_a, w_b)) + assert space.is_true(space.eq(w_b, w_c)) + # automatically defined + assert space.is_true(space.le(w_a, w_b)) + assert space.is_true(space.le(w_b, w_c)) + assert space.is_true(space.gt(w_b, w_a)) + assert space.is_true(space.ge(w_b, w_a)) + assert space.is_true(space.ge(w_b, w_c)) + assert space.is_true(space.ne(w_a, w_b)) + assert not space.is_true(space.ne(w_b, w_c)) class AppTestTypeDef: diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -12,7 +12,7 @@ from pypy.rlib.jit import promote class TypeDef: - def __init__(self, __name, __base=None, **rawdict): + def __init__(self, __name, __base=None, __total_ordering__=None, **rawdict): "NOT_RPYTHON: initialization-time only" self.name = __name if __base is None: @@ -34,6 +34,9 @@ # xxx used by faking self.fakedcpytype = None self.add_entries(**rawdict) + assert __total_ordering__ in (None, 'auto'), "Unknown value for __total_ordering" + if __total_ordering__ == 'auto': + self.auto_total_ordering() def add_entries(self, **rawdict): # xxx fix the names of the methods to match what app-level expects @@ -41,7 +44,15 @@ if isinstance(value, (interp2app, GetSetProperty)): value.name = key self.rawdict.update(rawdict) - + + def auto_total_ordering(self): + assert '__lt__' in self.rawdict, "__total_ordering='auto' requires __lt__" + assert '__eq__' in self.rawdict, "__total_ordering='auto' requires __eq__" + self.add_entries(__le__ = auto__le__, + __gt__ = auto__gt__, + __ge__ = auto__ge__, + __ne__ = auto__ne__) + def _freeze_(self): # hint for the annotator: track individual constant instances of 
TypeDef return True @@ -50,6 +61,26 @@ return "<%s name=%r>" % (self.__class__.__name__, self.name) +# generic special cmp methods defined on top of __lt__ and __eq__, used by +# automatic total ordering + + at interp2app +def auto__le__(space, w_self, w_other): + return space.not_(space.lt(w_other, w_self)) + + at interp2app +def auto__gt__(space, w_self, w_other): + return space.lt(w_other, w_self) + + at interp2app +def auto__ge__(space, w_self, w_other): + return space.not_(space.lt(w_self, w_other)) + + at interp2app +def auto__ne__(space, w_self, w_other): + return space.not_(space.eq(w_self, w_other)) + + # ____________________________________________________________ # Hash support diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -171,7 +171,7 @@ 'unicodesetitem' : (('ref', 'int', 'int'), 'int'), 'cast_ptr_to_int' : (('ref',), 'int'), 'cast_int_to_ptr' : (('int',), 'ref'), - 'debug_merge_point': (('ref', 'int'), None), + 'debug_merge_point': (('ref', 'int', 'int'), None), 'force_token' : ((), 'int'), 'call_may_force' : (('int', 'varargs'), 'intorptr'), 'guard_not_forced': ((), None), diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -208,6 +208,7 @@ This is the class supporting --gcrootfinder=asmgcc. """ is_shadow_stack = False + is_64_bit = (WORD == 8) LOC_REG = 0 LOC_ESP_PLUS = 1 @@ -336,17 +337,17 @@ self._gcmap_deadentries += 1 item += asmgcroot.arrayitemsize - def get_basic_shape(self, is_64_bit=False): + def get_basic_shape(self): # XXX: Should this code even really know about stack frame layout of # the JIT? 
- if is_64_bit: - return [chr(self.LOC_EBP_PLUS | 8), - chr(self.LOC_EBP_MINUS | 8), - chr(self.LOC_EBP_MINUS | 16), - chr(self.LOC_EBP_MINUS | 24), - chr(self.LOC_EBP_MINUS | 32), - chr(self.LOC_EBP_MINUS | 40), - chr(self.LOC_EBP_PLUS | 0), + if self.is_64_bit: + return [chr(self.LOC_EBP_PLUS | 4), # return addr: at 8(%rbp) + chr(self.LOC_EBP_MINUS | 4), # saved %rbx: at -8(%rbp) + chr(self.LOC_EBP_MINUS | 8), # saved %r12: at -16(%rbp) + chr(self.LOC_EBP_MINUS | 12), # saved %r13: at -24(%rbp) + chr(self.LOC_EBP_MINUS | 16), # saved %r14: at -32(%rbp) + chr(self.LOC_EBP_MINUS | 20), # saved %r15: at -40(%rbp) + chr(self.LOC_EBP_PLUS | 0), # saved %rbp: at (%rbp) chr(0)] else: return [chr(self.LOC_EBP_PLUS | 4), # return addr: at 4(%ebp) @@ -366,7 +367,11 @@ shape.append(chr(number | flag)) def add_frame_offset(self, shape, offset): - assert (offset & 3) == 0 + if self.is_64_bit: + assert (offset & 7) == 0 + offset >>= 1 + else: + assert (offset & 3) == 0 if offset >= 0: num = self.LOC_EBP_PLUS | offset else: @@ -518,7 +523,7 @@ def initialize(self): pass - def get_basic_shape(self, is_64_bit=False): + def get_basic_shape(self): return [] def add_frame_offset(self, shape, offset): diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -57,6 +57,7 @@ def frame_pos(n): return -4*(4+n) gcrootmap = GcRootMap_asmgcc() + gcrootmap.is_64_bit = False num1 = frame_pos(-5) num1a = num1|2 num2 = frame_pos(55) diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -266,6 +266,38 @@ res = self.cpu.get_latest_value_int(0) assert res == 20 + def test_compile_big_bridge_out_of_small_loop(self): + i0 = BoxInt() + faildescr1 = BasicFailDescr(1) + looptoken = JitCellToken() + operations = [ + 
ResOperation(rop.GUARD_FALSE, [i0], None, descr=faildescr1), + ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(2)), + ] + inputargs = [i0] + operations[0].setfailargs([i0]) + self.cpu.compile_loop(inputargs, operations, looptoken) + + i1list = [BoxInt() for i in range(1000)] + bridge = [] + iprev = i0 + for i1 in i1list: + bridge.append(ResOperation(rop.INT_ADD, [iprev, ConstInt(1)], i1)) + iprev = i1 + bridge.append(ResOperation(rop.GUARD_FALSE, [i0], None, + descr=BasicFailDescr(3))) + bridge.append(ResOperation(rop.FINISH, [], None, + descr=BasicFailDescr(4))) + bridge[-2].setfailargs(i1list) + + self.cpu.compile_bridge(faildescr1, [i0], bridge, looptoken) + + fail = self.cpu.execute_token(looptoken, 1) + assert fail.identifier == 3 + for i in range(1000): + res = self.cpu.get_latest_value_int(i) + assert res == 2 + i + def test_get_latest_value_count(self): i0 = BoxInt() i1 = BoxInt() @@ -572,7 +604,7 @@ [funcbox, BoxInt(arg1), BoxInt(arg2)], 'int', descr=calldescr) assert res.getint() == f(arg1, arg2) - + def test_call_stack_alignment(self): # test stack alignment issues, notably for Mac OS/X. # also test the ordering of the arguments. 
@@ -1458,7 +1490,8 @@ def test_noops(self): c_box = self.alloc_string("hi there").constbox() c_nest = ConstInt(0) - self.execute_operation(rop.DEBUG_MERGE_POINT, [c_box, c_nest], 'void') + c_id = ConstInt(0) + self.execute_operation(rop.DEBUG_MERGE_POINT, [c_box, c_nest, c_id], 'void') self.execute_operation(rop.JIT_DEBUG, [c_box, c_nest, c_nest, c_nest, c_nest], 'void') @@ -3029,7 +3062,7 @@ ResOperation(rop.JUMP, [i2], None, descr=targettoken2), ] self.cpu.compile_bridge(faildescr, inputargs, operations, looptoken) - + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 3 res = self.cpu.get_latest_value_int(0) @@ -3074,7 +3107,7 @@ assert len(mc) == len(ops) for i in range(len(mc)): assert mc[i].split("\t")[-1].startswith(ops[i]) - + data = ctypes.string_at(info.asmaddr, info.asmlen) mc = list(machine_code_dump(data, info.asmaddr, cpuname)) lines = [line for line in mc if line.count('\t') == 2] diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -88,7 +88,6 @@ self._debug = False self.debug_counter_descr = cpu.fielddescrof(DEBUG_COUNTER, 'i') self.fail_boxes_count = 0 - self._current_depths_cache = (0, 0) self.datablockwrapper = None self.stack_check_slowpath = 0 self.propagate_exception_path = 0 @@ -442,10 +441,8 @@ looppos = self.mc.get_relative_pos() looptoken._x86_loop_code = looppos clt.frame_depth = -1 # temporarily - clt.param_depth = -1 # temporarily - frame_depth, param_depth = self._assemble(regalloc, operations) + frame_depth = self._assemble(regalloc, operations) clt.frame_depth = frame_depth - clt.param_depth = param_depth # size_excluding_failure_stuff = self.mc.get_relative_pos() self.write_pending_failure_recoveries() @@ -459,8 +456,7 @@ rawstart + size_excluding_failure_stuff, rawstart)) debug_stop("jit-backend-addr") - self._patch_stackadjust(rawstart + stackadjustpos, - frame_depth + param_depth) + 
self._patch_stackadjust(rawstart + stackadjustpos, frame_depth) self.patch_pending_failure_recoveries(rawstart) # ops_offset = self.mc.ops_offset @@ -500,14 +496,13 @@ assert ([loc.assembler() for loc in arglocs] == [loc.assembler() for loc in faildescr._x86_debug_faillocs]) regalloc = RegAlloc(self, self.cpu.translate_support_code) - fail_depths = faildescr._x86_current_depths startpos = self.mc.get_relative_pos() - operations = regalloc.prepare_bridge(fail_depths, inputargs, arglocs, + operations = regalloc.prepare_bridge(inputargs, arglocs, operations, self.current_clt.allgcrefs) stackadjustpos = self._patchable_stackadjust() - frame_depth, param_depth = self._assemble(regalloc, operations) + frame_depth = self._assemble(regalloc, operations) codeendpos = self.mc.get_relative_pos() self.write_pending_failure_recoveries() fullsize = self.mc.get_relative_pos() @@ -517,19 +512,16 @@ debug_print("bridge out of Guard %d has address %x to %x" % (descr_number, rawstart, rawstart + codeendpos)) debug_stop("jit-backend-addr") - self._patch_stackadjust(rawstart + stackadjustpos, - frame_depth + param_depth) + self._patch_stackadjust(rawstart + stackadjustpos, frame_depth) self.patch_pending_failure_recoveries(rawstart) if not we_are_translated(): # for the benefit of tests faildescr._x86_bridge_frame_depth = frame_depth - faildescr._x86_bridge_param_depth = param_depth # patch the jump from original guard self.patch_jump_for_descr(faildescr, rawstart) ops_offset = self.mc.ops_offset self.fixup_target_tokens(rawstart) self.current_clt.frame_depth = max(self.current_clt.frame_depth, frame_depth) - self.current_clt.param_depth = max(self.current_clt.param_depth, param_depth) self.teardown() # oprofile support if self.cpu.profile_agent is not None: @@ -700,15 +692,12 @@ regalloc.walk_operations(operations) if we_are_translated() or self.cpu.dont_keepalive_stuff: self._regalloc = None # else keep it around for debugging - frame_depth = regalloc.fm.get_frame_depth() - 
param_depth = regalloc.param_depth + frame_depth = regalloc.get_final_frame_depth() jump_target_descr = regalloc.jump_target_descr if jump_target_descr is not None: target_frame_depth = jump_target_descr._x86_clt.frame_depth - target_param_depth = jump_target_descr._x86_clt.param_depth frame_depth = max(frame_depth, target_frame_depth) - param_depth = max(param_depth, target_param_depth) - return frame_depth, param_depth + return frame_depth def _patchable_stackadjust(self): # stack adjustment LEA @@ -892,10 +881,9 @@ genop_math_list[oopspecindex](self, op, arglocs, resloc) def regalloc_perform_with_guard(self, op, guard_op, faillocs, - arglocs, resloc, current_depths): + arglocs, resloc): faildescr = guard_op.getdescr() assert isinstance(faildescr, AbstractFailDescr) - faildescr._x86_current_depths = current_depths failargs = guard_op.getfailargs() guard_opnum = guard_op.getopnum() guard_token = self.implement_guard_recovery(guard_opnum, @@ -911,10 +899,9 @@ # must be added by the genop_guard_list[]() assert guard_token is self.pending_guard_tokens[-1] - def regalloc_perform_guard(self, guard_op, faillocs, arglocs, resloc, - current_depths): + def regalloc_perform_guard(self, guard_op, faillocs, arglocs, resloc): self.regalloc_perform_with_guard(None, guard_op, faillocs, arglocs, - resloc, current_depths) + resloc) def load_effective_addr(self, sizereg, baseofs, scale, result, frm=imm0): self.mc.LEA(result, addr_add(frm, sizereg, baseofs, scale)) @@ -1038,13 +1025,14 @@ self.mc.MOV(tmp, loc) self.mc.MOV_sr(p, tmp.value) p += loc.get_width() - self._regalloc.reserve_param(p//WORD) # x is a location self.mc.CALL(x) self.mark_gc_roots(force_index) # if callconv != FFI_DEFAULT_ABI: self._fix_stdcall(callconv, p) + # + self._regalloc.needed_extra_stack_locations(p//WORD) def _fix_stdcall(self, callconv, p): from pypy.rlib.clibffi import FFI_STDCALL @@ -1127,9 +1115,9 @@ x = r10 remap_frame_layout(self, src_locs, dst_locs, X86_64_SCRATCH_REG) - 
self._regalloc.reserve_param(len(pass_on_stack)) self.mc.CALL(x) self.mark_gc_roots(force_index) + self._regalloc.needed_extra_stack_locations(len(pass_on_stack)) def call(self, addr, args, res): force_index = self.write_new_force_index() @@ -2136,7 +2124,6 @@ if reg in save_registers: self.mc.MOV_sr(p, reg.value) p += WORD - self._regalloc.reserve_param(p//WORD) # if gcrootmap.is_shadow_stack: args = [] @@ -2192,6 +2179,7 @@ if reg in save_registers: self.mc.MOV_rs(reg.value, p) p += WORD + self._regalloc.needed_extra_stack_locations(p//WORD) def call_reacquire_gil(self, gcrootmap, save_loc): # save the previous result (eax/xmm0) into the stack temporarily. @@ -2199,7 +2187,6 @@ # to save xmm0 in this case. if isinstance(save_loc, RegLoc) and not save_loc.is_xmm: self.mc.MOV_sr(WORD, save_loc.value) - self._regalloc.reserve_param(2) # call the reopenstack() function (also reacquiring the GIL) if gcrootmap.is_shadow_stack: args = [] @@ -2219,6 +2206,7 @@ # restore the result from the stack if isinstance(save_loc, RegLoc) and not save_loc.is_xmm: self.mc.MOV_rs(save_loc.value, WORD) + self._regalloc.needed_extra_stack_locations(2) def genop_guard_call_assembler(self, op, guard_op, guard_token, arglocs, result_loc): @@ -2495,11 +2483,6 @@ # copy of heap(nursery_free_adr), so that the final MOV below is # a no-op. 
- # reserve room for the argument to the real malloc and the - # saved XMM regs (on 32 bit: 8 * 2 words; on 64 bit: 16 * 1 - # word) - self._regalloc.reserve_param(1+16) - gcrootmap = self.cpu.gc_ll_descr.gcrootmap shadow_stack = (gcrootmap is not None and gcrootmap.is_shadow_stack) if not shadow_stack: @@ -2510,6 +2493,11 @@ slowpath_addr2 = self.malloc_slowpath2 self.mc.CALL(imm(slowpath_addr2)) + # reserve room for the argument to the real malloc and the + # saved XMM regs (on 32 bit: 8 * 2 words; on 64 bit: 16 * 1 + # word) + self._regalloc.needed_extra_stack_locations(1+16) + offset = self.mc.get_relative_pos() - jmp_adr assert 0 < offset <= 127 self.mc.overwrite(jmp_adr-1, chr(offset)) diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -168,7 +168,7 @@ def _prepare(self, inputargs, operations, allgcrefs): self.fm = X86FrameManager() - self.param_depth = 0 + self.min_frame_depth = 0 cpu = self.assembler.cpu operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations, allgcrefs) @@ -193,11 +193,9 @@ self.min_bytes_before_label = 13 return operations - def prepare_bridge(self, prev_depths, inputargs, arglocs, operations, - allgcrefs): + def prepare_bridge(self, inputargs, arglocs, operations, allgcrefs): operations = self._prepare(inputargs, operations, allgcrefs) self._update_bindings(arglocs, inputargs) - self.param_depth = prev_depths[1] self.min_bytes_before_label = 0 return operations @@ -205,8 +203,15 @@ self.min_bytes_before_label = max(self.min_bytes_before_label, at_least_position) - def reserve_param(self, n): - self.param_depth = max(self.param_depth, n) + def needed_extra_stack_locations(self, n): + # call *after* you needed extra stack locations: (%esp), (%esp+4)... 
+ min_frame_depth = self.fm.get_frame_depth() + n + if min_frame_depth > self.min_frame_depth: + self.min_frame_depth = min_frame_depth + + def get_final_frame_depth(self): + self.needed_extra_stack_locations(0) # update min_frame_depth + return self.min_frame_depth def _set_initial_bindings(self, inputargs): if IS_X86_64: @@ -376,25 +381,12 @@ def locs_for_fail(self, guard_op): return [self.loc(v) for v in guard_op.getfailargs()] - def get_current_depth(self): - # return (self.fm.frame_depth, self.param_depth), but trying to share - # the resulting tuple among several calls - arg0 = self.fm.get_frame_depth() - arg1 = self.param_depth - result = self.assembler._current_depths_cache - if result[0] != arg0 or result[1] != arg1: - result = (arg0, arg1) - self.assembler._current_depths_cache = result - return result - def perform_with_guard(self, op, guard_op, arglocs, result_loc): faillocs = self.locs_for_fail(guard_op) self.rm.position += 1 self.xrm.position += 1 - current_depths = self.get_current_depth() self.assembler.regalloc_perform_with_guard(op, guard_op, faillocs, - arglocs, result_loc, - current_depths) + arglocs, result_loc) if op.result is not None: self.possibly_free_var(op.result) self.possibly_free_vars(guard_op.getfailargs()) @@ -407,10 +399,8 @@ arglocs)) else: self.assembler.dump('%s(%s)' % (guard_op, arglocs)) - current_depths = self.get_current_depth() self.assembler.regalloc_perform_guard(guard_op, faillocs, arglocs, - result_loc, - current_depths) + result_loc) self.possibly_free_vars(guard_op.getfailargs()) def PerformDiscard(self, op, arglocs): @@ -1393,7 +1383,7 @@ self.force_spill_var(op.getarg(0)) def get_mark_gc_roots(self, gcrootmap, use_copy_area=False): - shape = gcrootmap.get_basic_shape(IS_X86_64) + shape = gcrootmap.get_basic_shape() for v, val in self.fm.bindings.items(): if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): assert isinstance(val, StackLoc) diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py 
b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -28,7 +28,7 @@ class MockGcRootMap(object): is_shadow_stack = False - def get_basic_shape(self, is_64_bit): + def get_basic_shape(self): return ['shape'] def add_frame_offset(self, shape, offset): shape.append(offset) @@ -184,6 +184,8 @@ self.addrs[1] = self.addrs[0] + 64 self.calls = [] def malloc_slowpath(size): + if self.gcrootmap is not None: # hook + self.gcrootmap.hook_malloc_slowpath() self.calls.append(size) # reset the nursery nadr = rffi.cast(lltype.Signed, self.nursery) @@ -257,3 +259,218 @@ assert gc_ll_descr.addrs[0] == nurs_adr + 24 # this should call slow path once assert gc_ll_descr.calls == [24] + + def test_save_regs_around_malloc(self): + S1 = lltype.GcStruct('S1') + S2 = lltype.GcStruct('S2', ('s0', lltype.Ptr(S1)), + ('s1', lltype.Ptr(S1)), + ('s2', lltype.Ptr(S1)), + ('s3', lltype.Ptr(S1)), + ('s4', lltype.Ptr(S1)), + ('s5', lltype.Ptr(S1)), + ('s6', lltype.Ptr(S1)), + ('s7', lltype.Ptr(S1)), + ('s8', lltype.Ptr(S1)), + ('s9', lltype.Ptr(S1)), + ('s10', lltype.Ptr(S1)), + ('s11', lltype.Ptr(S1)), + ('s12', lltype.Ptr(S1)), + ('s13', lltype.Ptr(S1)), + ('s14', lltype.Ptr(S1)), + ('s15', lltype.Ptr(S1))) + cpu = self.cpu + self.namespace = self.namespace.copy() + for i in range(16): + self.namespace['ds%i' % i] = cpu.fielddescrof(S2, 's%d' % i) + ops = ''' + [p0] + p1 = getfield_gc(p0, descr=ds0) + p2 = getfield_gc(p0, descr=ds1) + p3 = getfield_gc(p0, descr=ds2) + p4 = getfield_gc(p0, descr=ds3) + p5 = getfield_gc(p0, descr=ds4) + p6 = getfield_gc(p0, descr=ds5) + p7 = getfield_gc(p0, descr=ds6) + p8 = getfield_gc(p0, descr=ds7) + p9 = getfield_gc(p0, descr=ds8) + p10 = getfield_gc(p0, descr=ds9) + p11 = getfield_gc(p0, descr=ds10) + p12 = getfield_gc(p0, descr=ds11) + p13 = getfield_gc(p0, descr=ds12) + p14 = getfield_gc(p0, descr=ds13) + p15 = getfield_gc(p0, descr=ds14) + p16 = 
getfield_gc(p0, descr=ds15) + # + # now all registers are in use + p17 = call_malloc_nursery(40) + p18 = call_malloc_nursery(40) # overflow + # + finish(p1, p2, p3, p4, p5, p6, p7, p8, \ + p9, p10, p11, p12, p13, p14, p15, p16) + ''' + s2 = lltype.malloc(S2) + for i in range(16): + setattr(s2, 's%d' % i, lltype.malloc(S1)) + s2ref = lltype.cast_opaque_ptr(llmemory.GCREF, s2) + # + self.interpret(ops, [s2ref]) + gc_ll_descr = cpu.gc_ll_descr + gc_ll_descr.check_nothing_in_nursery() + assert gc_ll_descr.calls == [40] + # check the returned pointers + for i in range(16): + s1ref = self.cpu.get_latest_value_ref(i) + s1 = lltype.cast_opaque_ptr(lltype.Ptr(S1), s1ref) + assert s1 == getattr(s2, 's%d' % i) + + +class MockShadowStackRootMap(MockGcRootMap): + is_shadow_stack = True + MARKER_FRAME = 88 # this marker follows the frame addr + S1 = lltype.GcStruct('S1') + + def __init__(self): + self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 20, + flavor='raw') + # root_stack_top + self.addrs[0] = rffi.cast(lltype.Signed, self.addrs) + 3*WORD + # random stuff + self.addrs[1] = 123456 + self.addrs[2] = 654321 + self.check_initial_and_final_state() + self.callshapes = {} + self.should_see = [] + + def check_initial_and_final_state(self): + assert self.addrs[0] == rffi.cast(lltype.Signed, self.addrs) + 3*WORD + assert self.addrs[1] == 123456 + assert self.addrs[2] == 654321 + + def get_root_stack_top_addr(self): + return rffi.cast(lltype.Signed, self.addrs) + + def compress_callshape(self, shape, datablockwrapper): + assert shape[0] == 'shape' + return ['compressed'] + shape[1:] + + def write_callshape(self, mark, force_index): + assert mark[0] == 'compressed' + assert force_index not in self.callshapes + assert force_index == 42 + len(self.callshapes) + self.callshapes[force_index] = mark + + def hook_malloc_slowpath(self): + num_entries = self.addrs[0] - rffi.cast(lltype.Signed, self.addrs) + assert num_entries == 5*WORD # 3 initially, plus 2 by the asm frame + assert 
self.addrs[1] == 123456 # unchanged + assert self.addrs[2] == 654321 # unchanged + frame_addr = self.addrs[3] # pushed by the asm frame + assert self.addrs[4] == self.MARKER_FRAME # pushed by the asm frame + # + from pypy.jit.backend.x86.arch import FORCE_INDEX_OFS + addr = rffi.cast(rffi.CArrayPtr(lltype.Signed), + frame_addr + FORCE_INDEX_OFS) + force_index = addr[0] + assert force_index == 43 # in this test: the 2nd call_malloc_nursery + # + # The callshapes[43] saved above should list addresses both in the + # COPY_AREA and in the "normal" stack, where all the 16 values p1-p16 + # of test_save_regs_at_correct_place should have been stored. Here + # we replace them with new addresses, to emulate a moving GC. + shape = self.callshapes[force_index] + assert len(shape[1:]) == len(self.should_see) + new_objects = [None] * len(self.should_see) + for ofs in shape[1:]: + assert isinstance(ofs, int) # not a register at all here + addr = rffi.cast(rffi.CArrayPtr(lltype.Signed), frame_addr + ofs) + contains = addr[0] + for j in range(len(self.should_see)): + obj = self.should_see[j] + if contains == rffi.cast(lltype.Signed, obj): + assert new_objects[j] is None # duplicate? + break + else: + assert 0 # the value read from the stack looks random? 
+ new_objects[j] = lltype.malloc(self.S1) + addr[0] = rffi.cast(lltype.Signed, new_objects[j]) + self.should_see[:] = new_objects + + +class TestMallocShadowStack(BaseTestRegalloc): + + def setup_method(self, method): + cpu = CPU(None, None) + cpu.gc_ll_descr = GCDescrFastpathMalloc() + cpu.gc_ll_descr.gcrootmap = MockShadowStackRootMap() + cpu.setup_once() + for i in range(42): + cpu.reserve_some_free_fail_descr_number() + self.cpu = cpu + + def test_save_regs_at_correct_place(self): + cpu = self.cpu + gc_ll_descr = cpu.gc_ll_descr + S1 = gc_ll_descr.gcrootmap.S1 + S2 = lltype.GcStruct('S2', ('s0', lltype.Ptr(S1)), + ('s1', lltype.Ptr(S1)), + ('s2', lltype.Ptr(S1)), + ('s3', lltype.Ptr(S1)), + ('s4', lltype.Ptr(S1)), + ('s5', lltype.Ptr(S1)), + ('s6', lltype.Ptr(S1)), + ('s7', lltype.Ptr(S1)), + ('s8', lltype.Ptr(S1)), + ('s9', lltype.Ptr(S1)), + ('s10', lltype.Ptr(S1)), + ('s11', lltype.Ptr(S1)), + ('s12', lltype.Ptr(S1)), + ('s13', lltype.Ptr(S1)), + ('s14', lltype.Ptr(S1)), + ('s15', lltype.Ptr(S1))) + self.namespace = self.namespace.copy() + for i in range(16): + self.namespace['ds%i' % i] = cpu.fielddescrof(S2, 's%d' % i) + ops = ''' + [p0] + p1 = getfield_gc(p0, descr=ds0) + p2 = getfield_gc(p0, descr=ds1) + p3 = getfield_gc(p0, descr=ds2) + p4 = getfield_gc(p0, descr=ds3) + p5 = getfield_gc(p0, descr=ds4) + p6 = getfield_gc(p0, descr=ds5) + p7 = getfield_gc(p0, descr=ds6) + p8 = getfield_gc(p0, descr=ds7) + p9 = getfield_gc(p0, descr=ds8) + p10 = getfield_gc(p0, descr=ds9) + p11 = getfield_gc(p0, descr=ds10) + p12 = getfield_gc(p0, descr=ds11) + p13 = getfield_gc(p0, descr=ds12) + p14 = getfield_gc(p0, descr=ds13) + p15 = getfield_gc(p0, descr=ds14) + p16 = getfield_gc(p0, descr=ds15) + # + # now all registers are in use + p17 = call_malloc_nursery(40) + p18 = call_malloc_nursery(40) # overflow + # + finish(p1, p2, p3, p4, p5, p6, p7, p8, \ + p9, p10, p11, p12, p13, p14, p15, p16) + ''' + s2 = lltype.malloc(S2) + for i in range(16): + s1 = lltype.malloc(S1) 
+ setattr(s2, 's%d' % i, s1) + gc_ll_descr.gcrootmap.should_see.append(s1) + s2ref = lltype.cast_opaque_ptr(llmemory.GCREF, s2) + # + self.interpret(ops, [s2ref]) + gc_ll_descr.check_nothing_in_nursery() + assert gc_ll_descr.calls == [40] + gc_ll_descr.gcrootmap.check_initial_and_final_state() + # check the returned pointers + for i in range(16): + s1ref = self.cpu.get_latest_value_ref(i) + s1 = lltype.cast_opaque_ptr(lltype.Ptr(S1), s1ref) + for j in range(16): + assert s1 != getattr(s2, 's%d' % j) + assert s1 == gc_ll_descr.gcrootmap.should_see[i] diff --git a/pypy/jit/backend/x86/test/test_recompilation.py b/pypy/jit/backend/x86/test/test_recompilation.py --- a/pypy/jit/backend/x86/test/test_recompilation.py +++ b/pypy/jit/backend/x86/test/test_recompilation.py @@ -34,7 +34,6 @@ ''' loop = self.interpret(ops, [0]) previous = loop._jitcelltoken.compiled_loop_token.frame_depth - assert loop._jitcelltoken.compiled_loop_token.param_depth == 0 assert self.getint(0) == 20 ops = ''' [i1] @@ -51,7 +50,6 @@ bridge = self.attach_bridge(ops, loop, -2) descr = loop.operations[3].getdescr() new = descr._x86_bridge_frame_depth - assert descr._x86_bridge_param_depth == 0 # the force_spill() forces the stack to grow assert new > previous fail = self.run(loop, 0) @@ -116,10 +114,8 @@ loop_frame_depth = loop._jitcelltoken.compiled_loop_token.frame_depth bridge = self.attach_bridge(ops, loop, 6) guard_op = loop.operations[6] - assert loop._jitcelltoken.compiled_loop_token.param_depth == 0 # the force_spill() forces the stack to grow assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth - assert guard_op.getdescr()._x86_bridge_param_depth == 0 self.run(loop, 0, 0, 0, 0, 0, 0) assert self.getint(0) == 1 assert self.getint(1) == 20 diff --git a/pypy/jit/backend/x86/test/test_regalloc.py b/pypy/jit/backend/x86/test/test_regalloc.py --- a/pypy/jit/backend/x86/test/test_regalloc.py +++ b/pypy/jit/backend/x86/test/test_regalloc.py @@ -606,23 +606,37 @@ assert 
self.getints(9) == [0, 1, 1, 1, 1, 1, 1, 1, 1] class TestRegAllocCallAndStackDepth(BaseTestRegalloc): - def expected_param_depth(self, num_args): + def expected_frame_depth(self, num_call_args, num_pushed_input_args=0): # Assumes the arguments are all non-float if IS_X86_32: - return num_args + extra_esp = num_call_args + return extra_esp elif IS_X86_64: - return max(num_args - 6, 0) + # 'num_pushed_input_args' is for X86_64 only + extra_esp = max(num_call_args - 6, 0) + return num_pushed_input_args + extra_esp def test_one_call(self): ops = ''' - [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9] + [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i9b] i10 = call(ConstClass(f1ptr), i0, descr=f1_calldescr) - finish(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9) + finish(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9, i9b) ''' - loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9]) - assert self.getints(10) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9] + loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9, 8]) + assert self.getints(11) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9, 8] clt = loop._jitcelltoken.compiled_loop_token - assert clt.param_depth == self.expected_param_depth(1) + assert clt.frame_depth == self.expected_frame_depth(1, 5) + + def test_one_call_reverse(self): + ops = ''' + [i1, i2, i3, i4, i5, i6, i7, i8, i9, i9b, i0] + i10 = call(ConstClass(f1ptr), i0, descr=f1_calldescr) + finish(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9, i9b) + ''' + loop = self.interpret(ops, [7, 9, 9 ,9, 9, 9, 9, 9, 9, 8, 4]) + assert self.getints(11) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9, 8] + clt = loop._jitcelltoken.compiled_loop_token + assert clt.frame_depth == self.expected_frame_depth(1, 6) def test_two_calls(self): ops = ''' @@ -634,7 +648,7 @@ loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9]) assert self.getints(10) == [5*7, 7, 9, 9, 9, 9, 9, 9, 9, 9] clt = loop._jitcelltoken.compiled_loop_token - assert clt.param_depth == self.expected_param_depth(2) + assert clt.frame_depth == self.expected_frame_depth(2, 
5) def test_call_many_arguments(self): # NB: The first and last arguments in the call are constants. This @@ -648,25 +662,31 @@ loop = self.interpret(ops, [2, 3, 4, 5, 6, 7, 8, 9]) assert self.getint(0) == 55 clt = loop._jitcelltoken.compiled_loop_token - assert clt.param_depth == self.expected_param_depth(10) + assert clt.frame_depth == self.expected_frame_depth(10) def test_bridge_calls_1(self): ops = ''' [i0, i1] i2 = call(ConstClass(f1ptr), i0, descr=f1_calldescr) - guard_value(i2, 0, descr=fdescr1) [i2, i1] + guard_value(i2, 0, descr=fdescr1) [i2, i0, i1] finish(i1) ''' loop = self.interpret(ops, [4, 7]) assert self.getint(0) == 5 + clt = loop._jitcelltoken.compiled_loop_token + orgdepth = clt.frame_depth + assert orgdepth == self.expected_frame_depth(1, 2) + ops = ''' - [i2, i1] + [i2, i0, i1] i3 = call(ConstClass(f2ptr), i2, i1, descr=f2_calldescr) - finish(i3, descr=fdescr2) + finish(i3, i0, descr=fdescr2) ''' bridge = self.attach_bridge(ops, loop, -2) - assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2) + assert clt.frame_depth == max(orgdepth, self.expected_frame_depth(2, 2)) + assert loop.operations[-2].getdescr()._x86_bridge_frame_depth == \ + self.expected_frame_depth(2, 2) self.run(loop, 4, 7) assert self.getint(0) == 5*7 @@ -676,10 +696,14 @@ [i0, i1] i2 = call(ConstClass(f2ptr), i0, i1, descr=f2_calldescr) guard_value(i2, 0, descr=fdescr1) [i2] - finish(i1) + finish(i2) ''' loop = self.interpret(ops, [4, 7]) assert self.getint(0) == 4*7 + clt = loop._jitcelltoken.compiled_loop_token + orgdepth = clt.frame_depth + assert orgdepth == self.expected_frame_depth(2) + ops = ''' [i2] i3 = call(ConstClass(f1ptr), i2, descr=f1_calldescr) @@ -687,7 +711,9 @@ ''' bridge = self.attach_bridge(ops, loop, -2) - assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2) + assert clt.frame_depth == max(orgdepth, self.expected_frame_depth(1)) + assert 
loop.operations[-2].getdescr()._x86_bridge_frame_depth == \ + self.expected_frame_depth(1) self.run(loop, 4, 7) assert self.getint(0) == 29 diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -371,7 +371,7 @@ operations = [ ResOperation(rop.LABEL, [i0], None, descr=targettoken), - ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("hello"), 0], None), + ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("hello"), 0, 0], None), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), @@ -390,7 +390,7 @@ bridge = [ ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), - ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("bye"), 0], None), + ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("bye"), 0, 0], None), ResOperation(rop.JUMP, [i1b], None, descr=targettoken), ] bridge[1].setfailargs([i1b]) @@ -531,12 +531,12 @@ loop = """ [i0] label(i0, descr=preambletoken) - debug_merge_point('xyz', 0) + debug_merge_point('xyz', 0, 0) i1 = int_add(i0, 1) i2 = int_ge(i1, 10) guard_false(i2) [] label(i1, descr=targettoken) - debug_merge_point('xyz', 0) + debug_merge_point('xyz', 0, 0) i11 = int_add(i1, 1) i12 = int_ge(i11, 10) guard_false(i12) [] @@ -569,7 +569,7 @@ loop = """ [i0] label(i0, descr=targettoken) - debug_merge_point('xyz', 0) + debug_merge_point('xyz', 0, 0) i1 = int_add(i0, 1) i2 = int_ge(i1, 10) guard_false(i2) [] diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -1379,7 +1379,8 @@ elif opnum == rop.GUARD_NO_OVERFLOW: # Produced by int_xxx_ovf(). The pc is just after the opcode. # We get here because it did not used to overflow, but now it does. 
- return get_llexception(self.cpu, OverflowError()) + if not dont_change_position: + return get_llexception(self.cpu, OverflowError()) # elif opnum == rop.GUARD_OVERFLOW: # Produced by int_xxx_ovf(). The pc is just after the opcode. diff --git a/pypy/jit/metainterp/graphpage.py b/pypy/jit/metainterp/graphpage.py --- a/pypy/jit/metainterp/graphpage.py +++ b/pypy/jit/metainterp/graphpage.py @@ -169,9 +169,9 @@ if op.getopnum() == rop.DEBUG_MERGE_POINT: jd_sd = self.metainterp_sd.jitdrivers_sd[op.getarg(0).getint()] if jd_sd._get_printable_location_ptr: - s = jd_sd.warmstate.get_location_str(op.getarglist()[2:]) + s = jd_sd.warmstate.get_location_str(op.getarglist()[3:]) s = s.replace(',', '.') # we use comma for argument splitting - op_repr = "debug_merge_point(%d, '%s')" % (op.getarg(1).getint(), s) + op_repr = "debug_merge_point(%d, %d, '%s')" % (op.getarg(1).getint(), op.getarg(2).getint(), s) lines.append(op_repr) if is_interesting_guard(op): tgt = op.getdescr()._debug_suboperations[0] diff --git a/pypy/jit/metainterp/logger.py b/pypy/jit/metainterp/logger.py --- a/pypy/jit/metainterp/logger.py +++ b/pypy/jit/metainterp/logger.py @@ -110,9 +110,9 @@ def repr_of_resop(self, op, ops_offset=None): if op.getopnum() == rop.DEBUG_MERGE_POINT: jd_sd = self.metainterp_sd.jitdrivers_sd[op.getarg(0).getint()] - s = jd_sd.warmstate.get_location_str(op.getarglist()[2:]) + s = jd_sd.warmstate.get_location_str(op.getarglist()[3:]) s = s.replace(',', '.') # we use comma for argument splitting - return "debug_merge_point(%d, '%s')" % (op.getarg(1).getint(), s) + return "debug_merge_point(%d, %d, '%s')" % (op.getarg(1).getint(), op.getarg(2).getint(), s) if ops_offset is None: offset = -1 else: @@ -149,7 +149,7 @@ if target_token.exported_state: for op in target_token.exported_state.inputarg_setup_ops: debug_print(' ' + self.repr_of_resop(op)) - + def _log_operations(self, inputargs, operations, ops_offset): if not have_debug_prints(): return diff --git 
a/pypy/jit/metainterp/optimizeopt/__init__.py b/pypy/jit/metainterp/optimizeopt/__init__.py --- a/pypy/jit/metainterp/optimizeopt/__init__.py +++ b/pypy/jit/metainterp/optimizeopt/__init__.py @@ -9,7 +9,7 @@ from pypy.jit.metainterp.optimizeopt.simplify import OptSimplify from pypy.jit.metainterp.optimizeopt.pure import OptPure from pypy.jit.metainterp.optimizeopt.earlyforce import OptEarlyForce -from pypy.rlib.jit import PARAMETERS +from pypy.rlib.jit import PARAMETERS, ENABLE_ALL_OPTS from pypy.rlib.unroll import unrolling_iterable from pypy.rlib.debug import debug_start, debug_stop, debug_print @@ -30,6 +30,9 @@ ALL_OPTS_LIST = [name for name, _ in ALL_OPTS] ALL_OPTS_NAMES = ':'.join([name for name, _ in ALL_OPTS]) +assert ENABLE_ALL_OPTS == ALL_OPTS_NAMES, ( + 'please fix rlib/jit.py to say ENABLE_ALL_OPTS = %r' % (ALL_OPTS_NAMES,)) + def build_opt_chain(metainterp_sd, enable_opts): config = metainterp_sd.config optimizations = [] diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py --- a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -398,6 +398,40 @@ with raises(InvalidLoop): self.optimize_loop(ops, ops) + def test_issue1045(self): + ops = """ + [i55] + i73 = int_mod(i55, 2) + i75 = int_rshift(i73, 63) + i76 = int_and(2, i75) + i77 = int_add(i73, i76) + i81 = int_eq(i77, 1) + i0 = int_ge(i55, 1) + guard_true(i0) [] + label(i55) + i3 = int_mod(i55, 2) + i5 = int_rshift(i3, 63) + i6 = int_and(2, i5) + i7 = int_add(i3, i6) + i8 = int_eq(i7, 1) + escape(i8) + jump(i55) + """ + expected = """ + [i55] + i73 = int_mod(i55, 2) + i75 = int_rshift(i73, 63) + i76 = int_and(2, i75) + i77 = int_add(i73, i76) + i81 = int_eq(i77, 1) + i0 = int_ge(i55, 1) + guard_true(i0) [] + label(i55, i81) + escape(i81) + jump(i55, i81) + """ + self.optimize_loop(ops, expected) + class OptRenameStrlen(Optimization): def propagate_forward(self, 
op): dispatch_opt(self, op) @@ -423,7 +457,7 @@ metainterp_sd = FakeMetaInterpStaticData(self.cpu) optimize_unroll(metainterp_sd, loop, [OptRenameStrlen(), OptPure()], True) - def test_optimizer_renaming_boxes(self): + def test_optimizer_renaming_boxes1(self): ops = """ [p1] i1 = strlen(p1) @@ -457,7 +491,6 @@ jump(p1, i11) """ self.optimize_loop(ops, expected) - class TestLLtype(OptimizeoptTestMultiLabel, LLtypeMixin): diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -9,7 +9,6 @@ from pypy.jit.metainterp.inliner import Inliner from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.metainterp.resume import Snapshot -from pypy.rlib.debug import debug_print import sys, os # FIXME: Introduce some VirtualOptimizer super class instead @@ -121,9 +120,9 @@ limit = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.retrace_limit if cell_token.retraced_count < limit: cell_token.retraced_count += 1 - debug_print('Retracing (%d/%d)' % (cell_token.retraced_count, limit)) + #debug_print('Retracing (%d/%d)' % (cell_token.retraced_count, limit)) else: - debug_print("Retrace count reached, jumping to preamble") + #debug_print("Retrace count reached, jumping to preamble") assert cell_token.target_tokens[0].virtual_state is None jumpop.setdescr(cell_token.target_tokens[0]) self.optimizer.send_extra_operation(jumpop) @@ -260,7 +259,7 @@ if op and op.result: preamble_value = exported_state.exported_values[op.result] value = self.optimizer.getvalue(op.result) - if not value.is_virtual(): + if not value.is_virtual() and not value.is_constant(): imp = ValueImporter(self, preamble_value, op) self.optimizer.importable_values[value] = imp newvalue = self.optimizer.getvalue(op.result) @@ -268,12 +267,14 @@ # note that emitting here SAME_AS should not happen, but # in case it does, we would prefer to be suboptimal in 
asm # to a fatal RPython exception. - if newresult is not op.result and not newvalue.is_constant(): + if newresult is not op.result and \ + not self.short_boxes.has_producer(newresult) and \ + not newvalue.is_constant(): op = ResOperation(rop.SAME_AS, [op.result], newresult) self.optimizer._newoperations.append(op) - if self.optimizer.loop.logops: - debug_print(' Falling back to add extra: ' + - self.optimizer.loop.logops.repr_of_resop(op)) + #if self.optimizer.loop.logops: + # debug_print(' Falling back to add extra: ' + + # self.optimizer.loop.logops.repr_of_resop(op)) self.optimizer.flush() self.optimizer.emitting_dissabled = False @@ -339,8 +340,8 @@ if i == len(newoperations): while j < len(jumpargs): a = jumpargs[j] - if self.optimizer.loop.logops: - debug_print('J: ' + self.optimizer.loop.logops.repr_of_arg(a)) + #if self.optimizer.loop.logops: + # debug_print('J: ' + self.optimizer.loop.logops.repr_of_arg(a)) self.import_box(a, inputargs, short_jumpargs, jumpargs) j += 1 else: @@ -351,11 +352,11 @@ if op.is_guard(): args = args + op.getfailargs() - if self.optimizer.loop.logops: - debug_print('OP: ' + self.optimizer.loop.logops.repr_of_resop(op)) + #if self.optimizer.loop.logops: + # debug_print('OP: ' + self.optimizer.loop.logops.repr_of_resop(op)) for a in args: - if self.optimizer.loop.logops: - debug_print('A: ' + self.optimizer.loop.logops.repr_of_arg(a)) + #if self.optimizer.loop.logops: + # debug_print('A: ' + self.optimizer.loop.logops.repr_of_arg(a)) self.import_box(a, inputargs, short_jumpargs, jumpargs) i += 1 newoperations = self.optimizer.get_newoperations() @@ -368,18 +369,18 @@ # that is compatible with the virtual state at the start of the loop modifier = VirtualStateAdder(self.optimizer) final_virtual_state = modifier.get_virtual_state(original_jumpargs) - debug_start('jit-log-virtualstate') - virtual_state.debug_print('Closed loop with ') + #debug_start('jit-log-virtualstate') + #virtual_state.debug_print('Closed loop with ') bad = {} if 
not virtual_state.generalization_of(final_virtual_state, bad): # We ended up with a virtual state that is not compatible # and we are thus unable to jump to the start of the loop - final_virtual_state.debug_print("Bad virtual state at end of loop, ", - bad) - debug_stop('jit-log-virtualstate') + #final_virtual_state.debug_print("Bad virtual state at end of loop, ", + # bad) + #debug_stop('jit-log-virtualstate') raise InvalidLoop - debug_stop('jit-log-virtualstate') + #debug_stop('jit-log-virtualstate') maxguards = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.max_retrace_guards if self.optimizer.emitted_guards > maxguards: @@ -442,9 +443,9 @@ self.ensure_short_op_emitted(self.short_boxes.producer(a), optimizer, seen) - if self.optimizer.loop.logops: - debug_print(' Emitting short op: ' + - self.optimizer.loop.logops.repr_of_resop(op)) + #if self.optimizer.loop.logops: + # debug_print(' Emitting short op: ' + + # self.optimizer.loop.logops.repr_of_resop(op)) optimizer.send_extra_operation(op) seen[op.result] = True @@ -525,8 +526,8 @@ args = jumpop.getarglist() modifier = VirtualStateAdder(self.optimizer) virtual_state = modifier.get_virtual_state(args) - debug_start('jit-log-virtualstate') - virtual_state.debug_print("Looking for ") + #debug_start('jit-log-virtualstate') + #virtual_state.debug_print("Looking for ") for target in cell_token.target_tokens: if not target.virtual_state: @@ -535,10 +536,10 @@ extra_guards = [] bad = {} - debugmsg = 'Did not match ' + #debugmsg = 'Did not match ' if target.virtual_state.generalization_of(virtual_state, bad): ok = True - debugmsg = 'Matched ' + #debugmsg = 'Matched ' else: try: cpu = self.optimizer.cpu @@ -547,13 +548,13 @@ extra_guards) ok = True - debugmsg = 'Guarded to match ' + #debugmsg = 'Guarded to match ' except InvalidLoop: pass - target.virtual_state.debug_print(debugmsg, bad) + #target.virtual_state.debug_print(debugmsg, bad) if ok: - debug_stop('jit-log-virtualstate') + 
#debug_stop('jit-log-virtualstate') values = [self.getvalue(arg) for arg in jumpop.getarglist()] @@ -574,13 +575,13 @@ newop = inliner.inline_op(shop) self.optimizer.send_extra_operation(newop) except InvalidLoop: - debug_print("Inlining failed unexpectedly", - "jumping to preamble instead") + #debug_print("Inlining failed unexpectedly", + # "jumping to preamble instead") assert cell_token.target_tokens[0].virtual_state is None jumpop.setdescr(cell_token.target_tokens[0]) self.optimizer.send_extra_operation(jumpop) return True - debug_stop('jit-log-virtualstate') + #debug_stop('jit-log-virtualstate') return False class ValueImporter(object): diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py --- a/pypy/jit/metainterp/optimizeopt/virtualstate.py +++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py @@ -681,13 +681,14 @@ self.synthetic[op] = True def debug_print(self, logops): - debug_start('jit-short-boxes') - for box, op in self.short_boxes.items(): - if op: - debug_print(logops.repr_of_arg(box) + ': ' + logops.repr_of_resop(op)) - else: - debug_print(logops.repr_of_arg(box) + ': None') - debug_stop('jit-short-boxes') + if 0: + debug_start('jit-short-boxes') + for box, op in self.short_boxes.items(): + if op: + debug_print(logops.repr_of_arg(box) + ': ' + logops.repr_of_resop(op)) + else: + debug_print(logops.repr_of_arg(box) + ': None') + debug_stop('jit-short-boxes') def operations(self): if not we_are_translated(): # For tests diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -974,9 +974,11 @@ any_operation = len(self.metainterp.history.operations) > 0 jitdriver_sd = self.metainterp.staticdata.jitdrivers_sd[jdindex] self.verify_green_args(jitdriver_sd, greenboxes) - self.debug_merge_point(jitdriver_sd, jdindex, self.metainterp.portal_call_depth, + self.debug_merge_point(jitdriver_sd, jdindex, + 
self.metainterp.portal_call_depth, + self.metainterp.call_ids[-1], greenboxes) - + if self.metainterp.seen_loop_header_for_jdindex < 0: if not any_operation: return @@ -1028,11 +1030,11 @@ assembler_call=True) raise ChangeFrame - def debug_merge_point(self, jitdriver_sd, jd_index, portal_call_depth, greenkey): + def debug_merge_point(self, jitdriver_sd, jd_index, portal_call_depth, current_call_id, greenkey): # debugging: produce a DEBUG_MERGE_POINT operation loc = jitdriver_sd.warmstate.get_location_str(greenkey) debug_print(loc) - args = [ConstInt(jd_index), ConstInt(portal_call_depth)] + greenkey + args = [ConstInt(jd_index), ConstInt(portal_call_depth), ConstInt(current_call_id)] + greenkey self.metainterp.history.record(rop.DEBUG_MERGE_POINT, args, None) @arguments("box", "label") @@ -1574,11 +1576,14 @@ self.call_pure_results = args_dict_box() self.heapcache = HeapCache() + self.call_ids = [] + self.current_call_id = 0 + def retrace_needed(self, trace): self.partial_trace = trace self.retracing_from = len(self.history.operations) - 1 self.heapcache.reset() - + def perform_call(self, jitcode, boxes, greenkey=None): # causes the metainterp to enter the given subfunction @@ -1592,6 +1597,8 @@ def newframe(self, jitcode, greenkey=None): if jitcode.is_portal: self.portal_call_depth += 1 + self.call_ids.append(self.current_call_id) + self.current_call_id += 1 if greenkey is not None and self.is_main_jitcode(jitcode): self.portal_trace_positions.append( (greenkey, len(self.history.operations))) @@ -1608,6 +1615,7 @@ jitcode = frame.jitcode if jitcode.is_portal: self.portal_call_depth -= 1 + self.call_ids.pop() if frame.greenkey is not None and self.is_main_jitcode(jitcode): self.portal_trace_positions.append( (None, len(self.history.operations))) @@ -1976,7 +1984,7 @@ # Found! Compile it as a loop. 
# raises in case it works -- which is the common case if self.partial_trace: - if start != self.retracing_from: + if start != self.retracing_from: raise SwitchToBlackhole(ABORT_BAD_LOOP) # For now self.compile_loop(original_boxes, live_arg_boxes, start, resumedescr) # creation of the loop was cancelled! @@ -2064,11 +2072,12 @@ pass # XXX we want to do something special in resume descr, # but not now elif opnum == rop.GUARD_NO_OVERFLOW: # an overflow now detected - self.execute_raised(OverflowError(), constant=True) - try: - self.finishframe_exception() - except ChangeFrame: - pass + if not dont_change_position: + self.execute_raised(OverflowError(), constant=True) + try: + self.finishframe_exception() + except ChangeFrame: + pass elif opnum == rop.GUARD_OVERFLOW: # no longer overflowing self.clear_exception() else: @@ -2084,7 +2093,7 @@ if not token.target_tokens: return None return token - + def compile_loop(self, original_boxes, live_arg_boxes, start, resume_at_jump_descr): num_green_args = self.jitdriver_sd.num_green_args greenkey = original_boxes[:num_green_args] @@ -2349,7 +2358,7 @@ # warmstate.py. 
virtualizable_box = self.virtualizable_boxes[-1] virtualizable = vinfo.unwrap_virtualizable_box(virtualizable_box) - assert not vinfo.gettoken(virtualizable) + assert not vinfo.is_token_nonnull_gcref(virtualizable) # fill the virtualizable with the local boxes self.synchronize_virtualizable() # diff --git a/pypy/jit/metainterp/resume.py b/pypy/jit/metainterp/resume.py --- a/pypy/jit/metainterp/resume.py +++ b/pypy/jit/metainterp/resume.py @@ -1101,14 +1101,14 @@ virtualizable = self.decode_ref(numb.nums[index]) if self.resume_after_guard_not_forced == 1: # in the middle of handle_async_forcing() - assert vinfo.gettoken(virtualizable) - vinfo.settoken(virtualizable, vinfo.TOKEN_NONE) + assert vinfo.is_token_nonnull_gcref(virtualizable) + vinfo.reset_token_gcref(virtualizable) else: # just jumped away from assembler (case 4 in the comment in # virtualizable.py) into tracing (case 2); check that vable_token # is and stays 0. Note the call to reset_vable_token() in # warmstate.py. - assert not vinfo.gettoken(virtualizable) + assert not vinfo.is_token_nonnull_gcref(virtualizable) return vinfo.write_from_resume_data_partial(virtualizable, self, numb) def load_value_of_type(self, TYPE, tagged): diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -144,7 +144,7 @@ 'int_mul': 1, 'guard_true': 2, 'int_sub': 2}) - def test_loop_invariant_mul_ovf(self): + def test_loop_invariant_mul_ovf1(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) def f(x, y): res = 0 @@ -235,6 +235,65 @@ 'guard_true': 4, 'int_sub': 4, 'jump': 3, 'int_mul': 3, 'int_add': 4}) + def test_loop_invariant_mul_ovf2(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) + def f(x, y): + res = 0 + while y > 0: + myjitdriver.can_enter_jit(x=x, y=y, res=res) + myjitdriver.jit_merge_point(x=x, y=y, res=res) + b = y * 2 + try: + res += ovfcheck(x * x) + 
b + except OverflowError: + res += 1 + y -= 1 + return res + res = self.meta_interp(f, [sys.maxint, 7]) + assert res == f(sys.maxint, 7) + self.check_trace_count(1) + res = self.meta_interp(f, [6, 7]) + assert res == 308 + + def test_loop_invariant_mul_bridge_ovf1(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x1', 'x2']) + def f(x1, x2, y): + res = 0 + while y > 0: + myjitdriver.can_enter_jit(x1=x1, x2=x2, y=y, res=res) + myjitdriver.jit_merge_point(x1=x1, x2=x2, y=y, res=res) + try: + res += ovfcheck(x1 * x1) + except OverflowError: + res += 1 + if y<32 and (y>>2)&1==0: + x1, x2 = x2, x1 + y -= 1 + return res + res = self.meta_interp(f, [6, sys.maxint, 48]) + assert res == f(6, sys.maxint, 48) + + def test_loop_invariant_mul_bridge_ovf2(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x1', 'x2', 'n']) + def f(x1, x2, n, y): + res = 0 + while y > 0: + myjitdriver.can_enter_jit(x1=x1, x2=x2, y=y, res=res, n=n) + myjitdriver.jit_merge_point(x1=x1, x2=x2, y=y, res=res, n=n) + try: + res += ovfcheck(x1 * x1) + except OverflowError: + res += 1 + y -= 1 + if y&4 == 0: + x1, x2 = x2, x1 + return res + res = self.meta_interp(f, [6, sys.maxint, 32, 48]) + assert res == f(6, sys.maxint, 32, 48) + res = self.meta_interp(f, [sys.maxint, 6, 32, 48]) + assert res == f(sys.maxint, 6, 32, 48) + + def test_loop_invariant_intbox(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) class I: diff --git a/pypy/jit/metainterp/test/test_compile.py b/pypy/jit/metainterp/test/test_compile.py --- a/pypy/jit/metainterp/test/test_compile.py +++ b/pypy/jit/metainterp/test/test_compile.py @@ -14,7 +14,7 @@ ts = typesystem.llhelper def __init__(self): self.seen = [] - def compile_loop(self, inputargs, operations, token, name=''): + def compile_loop(self, inputargs, operations, token, log=True, name=''): self.seen.append((inputargs, operations, token)) class FakeLogger(object): diff --git a/pypy/jit/metainterp/test/test_logger.py 
b/pypy/jit/metainterp/test/test_logger.py --- a/pypy/jit/metainterp/test/test_logger.py +++ b/pypy/jit/metainterp/test/test_logger.py @@ -54,7 +54,7 @@ class FakeJitDriver(object): class warmstate(object): get_location_str = staticmethod(lambda args: "dupa") - + class FakeMetaInterpSd: cpu = AbstractCPU() cpu.ts = self.ts @@ -77,7 +77,7 @@ equaloplists(loop.operations, oloop.operations) assert oloop.inputargs == loop.inputargs return logger, loop, oloop - + def test_simple(self): inp = ''' [i0, i1, i2, p3, p4, p5] @@ -116,12 +116,13 @@ def test_debug_merge_point(self): inp = ''' [] - debug_merge_point(0, 0) + debug_merge_point(0, 0, 0) ''' _, loop, oloop = self.reparse(inp, check_equal=False) assert loop.operations[0].getarg(1).getint() == 0 - assert oloop.operations[0].getarg(1)._get_str() == "dupa" - + assert loop.operations[0].getarg(2).getint() == 0 + assert oloop.operations[0].getarg(2)._get_str() == "dupa" + def test_floats(self): inp = ''' [f0] @@ -142,7 +143,7 @@ output = logger.log_loop(loop) assert output.splitlines()[-1] == "jump(i0, descr=)" pure_parse(output) - + def test_guard_descr(self): namespace = {'fdescr': BasicFailDescr()} inp = ''' @@ -154,7 +155,7 @@ output = logger.log_loop(loop) assert output.splitlines()[-1] == "guard_true(i0, descr=) [i0]" pure_parse(output) - + logger = Logger(self.make_metainterp_sd(), guard_number=False) output = logger.log_loop(loop) lastline = output.splitlines()[-1] diff --git a/pypy/jit/metainterp/test/test_warmspot.py b/pypy/jit/metainterp/test/test_warmspot.py --- a/pypy/jit/metainterp/test/test_warmspot.py +++ b/pypy/jit/metainterp/test/test_warmspot.py @@ -13,7 +13,7 @@ class WarmspotTests(object): - + def test_basic(self): mydriver = JitDriver(reds=['a'], greens=['i']) @@ -77,16 +77,16 @@ self.meta_interp(f, [123, 10]) assert len(get_stats().locations) >= 4 for loc in get_stats().locations: - assert loc == (0, 123) + assert loc == (0, 0, 123) def test_set_param_enable_opts(self): from pypy.rpython.annlowlevel 
import llstr, hlstr - + myjitdriver = JitDriver(greens = [], reds = ['n']) class A(object): def m(self, n): return n-1 - + def g(n): while n > 0: myjitdriver.can_enter_jit(n=n) @@ -332,7 +332,7 @@ ts = llhelper translate_support_code = False stats = "stats" - + def get_fail_descr_number(self, d): return -1 @@ -352,7 +352,7 @@ return "not callable" driver = JitDriver(reds = ['red'], greens = ['green']) - + def f(green): red = 0 while red < 10: diff --git a/pypy/jit/metainterp/virtualizable.py b/pypy/jit/metainterp/virtualizable.py --- a/pypy/jit/metainterp/virtualizable.py +++ b/pypy/jit/metainterp/virtualizable.py @@ -262,15 +262,15 @@ force_now._dont_inline_ = True self.force_now = force_now - def gettoken(virtualizable): + def is_token_nonnull_gcref(virtualizable): virtualizable = cast_gcref_to_vtype(virtualizable) - return virtualizable.vable_token - self.gettoken = gettoken + return bool(virtualizable.vable_token) + self.is_token_nonnull_gcref = is_token_nonnull_gcref - def settoken(virtualizable, token): + def reset_token_gcref(virtualizable): virtualizable = cast_gcref_to_vtype(virtualizable) - virtualizable.vable_token = token - self.settoken = settoken + virtualizable.vable_token = VirtualizableInfo.TOKEN_NONE + self.reset_token_gcref = reset_token_gcref def _freeze_(self): return True diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -100,7 +100,7 @@ if not kwds.get('translate_support_code', False): warmrunnerdesc.metainterp_sd.profiler.finish() warmrunnerdesc.metainterp_sd.cpu.finish_once() - print '~~~ return value:', res + print '~~~ return value:', repr(res) while repeat > 1: print '~' * 79 res1 = interp.eval_graph(graph, args) diff --git a/pypy/jit/tool/test/test_oparser.py b/pypy/jit/tool/test/test_oparser.py --- a/pypy/jit/tool/test/test_oparser.py +++ b/pypy/jit/tool/test/test_oparser.py @@ -146,16 +146,18 @@ def test_debug_merge_point(self): 
x = ''' [] - debug_merge_point(0, "info") - debug_merge_point(0, 'info') - debug_merge_point(1, ' info') - debug_merge_point(0, '(stuff) #1') + debug_merge_point(0, 0, "info") + debug_merge_point(0, 0, 'info') + debug_merge_point(1, 1, ' info') + debug_merge_point(0, 0, '(stuff) #1') ''' loop = self.parse(x) - assert loop.operations[0].getarg(1)._get_str() == 'info' - assert loop.operations[1].getarg(1)._get_str() == 'info' - assert loop.operations[2].getarg(1)._get_str() == " info" - assert loop.operations[3].getarg(1)._get_str() == "(stuff) #1" + assert loop.operations[0].getarg(2)._get_str() == 'info' + assert loop.operations[0].getarg(1).value == 0 + assert loop.operations[1].getarg(2)._get_str() == 'info' + assert loop.operations[2].getarg(2)._get_str() == " info" + assert loop.operations[2].getarg(1).value == 1 + assert loop.operations[3].getarg(2)._get_str() == "(stuff) #1" def test_descr_with_obj_print(self): diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -5,14 +5,13 @@ from pypy.rlib import streamio from pypy.rlib.rarithmetic import r_longlong from pypy.rlib.rstring import StringBuilder -from pypy.module._file.interp_stream import (W_AbstractStream, StreamErrors, - wrap_streamerror, wrap_oserror_as_ioerror) +from pypy.module._file.interp_stream import W_AbstractStream, StreamErrors from pypy.module.posix.interp_posix import dispatch_filename from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.typedef import (TypeDef, GetSetProperty, interp_attrproperty, make_weakref_descr, interp_attrproperty_w) from pypy.interpreter.gateway import interp2app, unwrap_spec - +from pypy.interpreter.streamutil import wrap_streamerror, wrap_oserror_as_ioerror class W_File(W_AbstractStream): """An interp-level file object. 
This implements the same interface than diff --git a/pypy/module/_file/interp_stream.py b/pypy/module/_file/interp_stream.py --- a/pypy/module/_file/interp_stream.py +++ b/pypy/module/_file/interp_stream.py @@ -2,27 +2,13 @@ from pypy.rlib import streamio from pypy.rlib.streamio import StreamErrors -from pypy.interpreter.error import OperationError, wrap_oserror2 +from pypy.interpreter.error import OperationError from pypy.interpreter.baseobjspace import ObjSpace, Wrappable from pypy.interpreter.typedef import TypeDef from pypy.interpreter.gateway import interp2app +from pypy.interpreter.streamutil import wrap_streamerror, wrap_oserror_as_ioerror -def wrap_streamerror(space, e, w_filename=None): - if isinstance(e, streamio.StreamError): - return OperationError(space.w_ValueError, - space.wrap(e.message)) - elif isinstance(e, OSError): - return wrap_oserror_as_ioerror(space, e, w_filename) - else: - # should not happen: wrap_streamerror() is only called when - # StreamErrors = (OSError, StreamError) are raised - return OperationError(space.w_IOError, space.w_None) - -def wrap_oserror_as_ioerror(space, e, w_filename=None): - return wrap_oserror2(space, e, w_filename, - w_exception_class=space.w_IOError) - class W_AbstractStream(Wrappable): """Base class for interp-level objects that expose streams to app-level""" slock = None diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py --- a/pypy/module/_io/interp_iobase.py +++ b/pypy/module/_io/interp_iobase.py @@ -326,8 +326,11 @@ try: space.call_method(w_iobase, 'flush') except OperationError, e: - # if it's an IOError, ignore it - if not e.match(space, space.w_IOError): + # if it's an IOError or ValueError, ignore it (ValueError is + # raised if by chance we are trying to flush a file which has + # already been closed) + if not (e.match(space, space.w_IOError) or + e.match(space, space.w_ValueError)): raise diff --git a/pypy/module/_io/test/test_fileio.py b/pypy/module/_io/test/test_fileio.py 
--- a/pypy/module/_io/test/test_fileio.py +++ b/pypy/module/_io/test/test_fileio.py @@ -178,7 +178,7 @@ space.finish() assert tmpfile.read() == '42' -def test_flush_at_exit_IOError(): +def test_flush_at_exit_IOError_and_ValueError(): from pypy import conftest from pypy.tool.option import make_config, make_objspace @@ -190,7 +190,12 @@ def flush(self): raise IOError + class MyStream2(io.IOBase): + def flush(self): + raise ValueError + s = MyStream() + s2 = MyStream2() import sys; sys._keepalivesomewhereobscure = s """) space.finish() # the IOError has been ignored diff --git a/pypy/module/_md5/test/test_md5.py b/pypy/module/_md5/test/test_md5.py --- a/pypy/module/_md5/test/test_md5.py +++ b/pypy/module/_md5/test/test_md5.py @@ -28,7 +28,7 @@ assert self.md5.digest_size == 16 #assert self.md5.digestsize == 16 -- not on CPython assert self.md5.md5().digest_size == 16 - if sys.version >= (2, 5): + if sys.version_info >= (2, 5): assert self.md5.blocksize == 1 assert self.md5.md5().digestsize == 16 diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -11,6 +11,7 @@ from pypy.objspace.std.register_all import register_all from pypy.rlib.rarithmetic import ovfcheck from pypy.rlib.unroll import unrolling_iterable +from pypy.rlib.objectmodel import specialize from pypy.rpython.lltypesystem import lltype, rffi @@ -159,13 +160,15 @@ def make_array(mytype): + W_ArrayBase = globals()['W_ArrayBase'] + class W_Array(W_ArrayBase): itemsize = mytype.bytes typecode = mytype.typecode @staticmethod def register(typeorder): - typeorder[W_Array] = [] + typeorder[W_Array] = [(W_ArrayBase, None)] def __init__(self, space): self.space = space @@ -583,13 +586,29 @@ raise OperationError(space.w_ValueError, space.wrap(msg)) # Compare methods - def cmp__Array_ANY(space, self, other): - if isinstance(other, W_ArrayBase): - w_lst1 = array_tolist__Array(space, self) - w_lst2 = 
space.call_method(other, 'tolist') - return space.cmp(w_lst1, w_lst2) - else: - return space.w_NotImplemented + @specialize.arg(3) + def _cmp_impl(space, self, other, space_fn): + w_lst1 = array_tolist__Array(space, self) + w_lst2 = space.call_method(other, 'tolist') + return space_fn(w_lst1, w_lst2) + + def eq__Array_ArrayBase(space, self, other): + return _cmp_impl(space, self, other, space.eq) + + def ne__Array_ArrayBase(space, self, other): + return _cmp_impl(space, self, other, space.ne) + + def lt__Array_ArrayBase(space, self, other): + return _cmp_impl(space, self, other, space.lt) + + def le__Array_ArrayBase(space, self, other): + return _cmp_impl(space, self, other, space.le) + + def gt__Array_ArrayBase(space, self, other): + return _cmp_impl(space, self, other, space.gt) + + def ge__Array_ArrayBase(space, self, other): + return _cmp_impl(space, self, other, space.ge) # Misc methods diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -536,12 +536,6 @@ assert (a >= c) is False assert (c >= a) is True - assert cmp(a, a) == 0 - assert cmp(a, b) == 0 - assert cmp(a, c) < 0 - assert cmp(b, a) == 0 - assert cmp(c, a) > 0 - def test_reduce(self): import pickle a = self.array('i', [1, 2, 3]) @@ -851,8 +845,11 @@ cls.maxint = sys.maxint class AppTestArray(BaseArrayTests): + OPTIONS = {} + def setup_class(cls): - cls.space = gettestobjspace(usemodules=('array', 'struct', '_rawffi')) + cls.space = gettestobjspace(usemodules=('array', 'struct', '_rawffi'), + **cls.OPTIONS) cls.w_array = cls.space.appexec([], """(): import array return array.array @@ -874,3 +871,7 @@ a = self.array('b', range(4)) a[::-1] = a assert a == self.array('b', [3, 2, 1, 0]) + + +class AppTestArrayBuiltinShortcut(AppTestArray): + OPTIONS = {'objspace.std.builtinshortcut': True} diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h --- 
a/pypy/module/cpyext/include/object.h +++ b/pypy/module/cpyext/include/object.h @@ -56,6 +56,8 @@ #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) +#define _Py_ForgetReference(ob) /* nothing */ + #define Py_None (&_Py_NoneStruct) /* diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -1,10 +1,11 @@ from pypy.module.imp import importing from pypy.module._file.interp_file import W_File from pypy.rlib import streamio +from pypy.rlib.streamio import StreamErrors from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.module import Module from pypy.interpreter.gateway import unwrap_spec -from pypy.module._file.interp_stream import StreamErrors, wrap_streamerror +from pypy.interpreter.streamutil import wrap_streamerror def get_suffixes(space): diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py --- a/pypy/module/pypyjit/interp_resop.py +++ b/pypy/module/pypyjit/interp_resop.py @@ -72,7 +72,7 @@ Set a compiling hook that will be called each time a loop is optimized, but before assembler compilation. This allows to add additional optimizations on Python level. 
- + The hook will be called with the following signature: hook(jitdriver_name, loop_type, greenkey or guard_number, operations) @@ -121,13 +121,14 @@ ofs = ops_offset.get(op, 0) if op.opnum == rop.DEBUG_MERGE_POINT: jd_sd = jitdrivers_sd[op.getarg(0).getint()] - greenkey = op.getarglist()[2:] + greenkey = op.getarglist()[3:] repr = jd_sd.warmstate.get_location_str(greenkey) w_greenkey = wrap_greenkey(space, jd_sd.jitdriver, greenkey, repr) l_w.append(DebugMergePoint(space, jit_hooks._cast_to_gcref(op), logops.repr_of_resop(op), jd_sd.jitdriver.name, op.getarg(1).getint(), + op.getarg(2).getint(), w_greenkey)) else: l_w.append(WrappedOp(jit_hooks._cast_to_gcref(op), ofs, @@ -164,14 +165,16 @@ llres = res.llbox return WrappedOp(jit_hooks.resop_new(num, args, llres), offset, repr) - at unwrap_spec(repr=str, jd_name=str, call_depth=int) -def descr_new_dmp(space, w_tp, w_args, repr, jd_name, call_depth, w_greenkey): + at unwrap_spec(repr=str, jd_name=str, call_depth=int, call_id=int) +def descr_new_dmp(space, w_tp, w_args, repr, jd_name, call_depth, call_id, + w_greenkey): + args = [space.interp_w(WrappedBox, w_arg).llbox for w_arg in space.listview(w_args)] num = rop.DEBUG_MERGE_POINT return DebugMergePoint(space, jit_hooks.resop_new(num, args, jit_hooks.emptyval()), - repr, jd_name, call_depth, w_greenkey) + repr, jd_name, call_depth, call_id, w_greenkey) class WrappedOp(Wrappable): """ A class representing a single ResOperation, wrapped nicely @@ -206,10 +209,13 @@ jit_hooks.resop_setresult(self.op, box.llbox) class DebugMergePoint(WrappedOp): - def __init__(self, space, op, repr_of_resop, jd_name, call_depth, w_greenkey): + def __init__(self, space, op, repr_of_resop, jd_name, call_depth, call_id, + w_greenkey): + WrappedOp.__init__(self, op, -1, repr_of_resop) self.jd_name = jd_name self.call_depth = call_depth + self.call_id = call_id self.w_greenkey = w_greenkey def get_pycode(self, space): @@ -246,6 +252,7 @@ pycode = GetSetProperty(DebugMergePoint.get_pycode), 
bytecode_no = GetSetProperty(DebugMergePoint.get_bytecode_no), call_depth = interp_attrproperty("call_depth", cls=DebugMergePoint), + call_id = interp_attrproperty("call_id", cls=DebugMergePoint), jitdriver_name = GetSetProperty(DebugMergePoint.get_jitdriver_name), ) DebugMergePoint.acceptable_as_base_class = False diff --git a/pypy/module/pypyjit/test/test_jit_hook.py b/pypy/module/pypyjit/test/test_jit_hook.py --- a/pypy/module/pypyjit/test/test_jit_hook.py +++ b/pypy/module/pypyjit/test/test_jit_hook.py @@ -54,7 +54,7 @@ oplist = parse(""" [i1, i2, p2] i3 = int_add(i1, i2) - debug_merge_point(0, 0, 0, 0, ConstPtr(ptr0)) + debug_merge_point(0, 0, 0, 0, 0, ConstPtr(ptr0)) guard_nonnull(p2) [] guard_true(i3) [] """, namespace={'ptr0': code_gcref}).operations @@ -87,7 +87,7 @@ def interp_on_abort(): pypy_hooks.on_abort(ABORT_TOO_LONG, pypyjitdriver, greenkey, 'blah') - + cls.w_on_compile = space.wrap(interp2app(interp_on_compile)) cls.w_on_compile_bridge = space.wrap(interp2app(interp_on_compile_bridge)) cls.w_on_abort = space.wrap(interp2app(interp_on_abort)) @@ -105,7 +105,7 @@ def hook(name, looptype, tuple_or_guard_no, ops, asmstart, asmlen): all.append((name, looptype, tuple_or_guard_no, ops)) - + self.on_compile() pypyjit.set_compile_hook(hook) assert not all @@ -123,6 +123,7 @@ assert dmp.pycode is self.f.func_code assert dmp.greenkey == (self.f.func_code, 0, False) assert dmp.call_depth == 0 + assert dmp.call_id == 0 assert int_add.name == 'int_add' assert int_add.num == self.int_add_num self.on_compile_bridge() @@ -151,18 +152,18 @@ def test_non_reentrant(self): import pypyjit l = [] - + def hook(*args): l.append(None) self.on_compile() self.on_compile_bridge() - + pypyjit.set_compile_hook(hook) self.on_compile() assert len(l) == 1 # and did not crash self.on_compile_bridge() assert len(l) == 2 # and did not crash - + def test_on_compile_types(self): import pypyjit l = [] @@ -182,7 +183,7 @@ def hook(jitdriver_name, greenkey, reason): 
l.append((jitdriver_name, reason)) - + pypyjit.set_abort_hook(hook) self.on_abort() assert l == [('pypyjit', 'ABORT_TOO_LONG')] @@ -224,13 +225,14 @@ def f(): pass - op = DebugMergePoint([Box(0)], 'repr', 'pypyjit', 2, (f.func_code, 0, 0)) + op = DebugMergePoint([Box(0)], 'repr', 'pypyjit', 2, 3, (f.func_code, 0, 0)) assert op.bytecode_no == 0 assert op.pycode is f.func_code assert repr(op) == 'repr' assert op.jitdriver_name == 'pypyjit' assert op.num == self.dmp_num assert op.call_depth == 2 - op = DebugMergePoint([Box(0)], 'repr', 'notmain', 5, ('str',)) + assert op.call_id == 3 + op = DebugMergePoint([Box(0)], 'repr', 'notmain', 5, 4, ('str',)) raises(AttributeError, 'op.pycode') assert op.call_depth == 5 diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -60,6 +60,9 @@ stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = pipe.communicate() + if getattr(pipe, 'returncode', 0) < 0: + raise IOError("subprocess was killed by signal %d" % ( + pipe.returncode,)) if stderr.startswith('SKIP:'): py.test.skip(stderr) if stderr.startswith('debug_alloc.h:'): # lldebug builds diff --git a/pypy/module/pypyjit/test_pypy_c/test_alloc.py b/pypy/module/pypyjit/test_pypy_c/test_alloc.py new file mode 100644 --- /dev/null +++ b/pypy/module/pypyjit/test_pypy_c/test_alloc.py @@ -0,0 +1,26 @@ +import py, sys +from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC + +class TestAlloc(BaseTestPyPyC): + + SIZES = dict.fromkeys([2 ** n for n in range(26)] + # up to 32MB + [2 ** n - 1 for n in range(26)]) + + def test_newstr_constant_size(self): + for size in TestAlloc.SIZES: + yield self.newstr_constant_size, size + + def newstr_constant_size(self, size): + src = """if 1: + N = %(size)d + part_a = 'a' * N + part_b = 'b' * N + for i in xrange(20): + ao = '%%s%%s' %% (part_a, part_b) + def 
main(): + return 42 +""" % {'size': size} + log = self.run(src, [], threshold=10) + assert log.result == 42 + loop, = log.loops_by_filename(self.filepath) + # assert did not crash diff --git a/pypy/module/select/test/test_ztranslation.py b/pypy/module/select/test/test_ztranslation.py new file mode 100644 --- /dev/null +++ b/pypy/module/select/test/test_ztranslation.py @@ -0,0 +1,5 @@ + +from pypy.objspace.fake.checkmodule import checkmodule + +def test_select_translates(): + checkmodule('select') diff --git a/pypy/module/test_lib_pypy/test_collections.py b/pypy/module/test_lib_pypy/test_collections.py --- a/pypy/module/test_lib_pypy/test_collections.py +++ b/pypy/module/test_lib_pypy/test_collections.py @@ -6,7 +6,7 @@ from pypy.conftest import gettestobjspace -class AppTestcStringIO: +class AppTestCollections: def test_copy(self): import _collections def f(): diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -392,6 +392,9 @@ class JitHintError(Exception): """Inconsistency in the JIT hints.""" +ENABLE_ALL_OPTS = ( + 'intbounds:rewrite:virtualize:string:earlyforce:pure:heap:ffi:unroll') + PARAMETER_DOCS = { 'threshold': 'number of times a loop has to run for it to become hot', 'function_threshold': 'number of times a function must run for it to become traced from start', @@ -402,7 +405,8 @@ 'retrace_limit': 'how many times we can try retracing before giving up', 'max_retrace_guards': 'number of extra guards a retrace can cause', 'max_unroll_loops': 'number of extra unrollings a loop can cause', - 'enable_opts': 'optimizations to enable or all, INTERNAL USE ONLY' + 'enable_opts': 'INTERNAL USE ONLY: optimizations to enable, or all = %s' % + ENABLE_ALL_OPTS, } PARAMETERS = {'threshold': 1039, # just above 1024, prime diff --git a/pypy/rpython/lltypesystem/rstr.py b/pypy/rpython/lltypesystem/rstr.py --- a/pypy/rpython/lltypesystem/rstr.py +++ b/pypy/rpython/lltypesystem/rstr.py @@ -62,6 +62,14 @@ 
@jit.oopspec('stroruni.copy_contents(src, dst, srcstart, dststart, length)') @enforceargs(None, None, int, int, int) def copy_string_contents(src, dst, srcstart, dststart, length): + """Copies 'length' characters from the 'src' string to the 'dst' + string, starting at position 'srcstart' and 'dststart'.""" + # xxx Warning: don't try to do this at home. It relies on a lot + # of details to be sure that it works correctly in all cases. + # Notably: no GC operation at all from the first cast_ptr_to_adr() + # because it might move the strings. The keepalive_until_here() + # are obscurely essential to make sure that the strings stay alive + # longer than the raw_memcopy(). assert srcstart >= 0 assert dststart >= 0 assert length >= 0 diff --git a/pypy/rpython/memory/gctransform/asmgcroot.py b/pypy/rpython/memory/gctransform/asmgcroot.py --- a/pypy/rpython/memory/gctransform/asmgcroot.py +++ b/pypy/rpython/memory/gctransform/asmgcroot.py @@ -442,6 +442,8 @@ ll_assert(location >= 0, "negative location") kind = location & LOC_MASK offset = location & ~ LOC_MASK + if IS_64_BITS: + offset <<= 1 if kind == LOC_REG: # register if location == LOC_NOWHERE: return llmemory.NULL diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -93,7 +93,7 @@ end_index += 1 op.asm = '\n'.join([asm[i][1] for i in range(asm_index, end_index)]) return loop - + def _asm_disassemble(self, d, origin_addr, tp): from pypy.jit.backend.x86.tool.viewcode import machine_code_dump return list(machine_code_dump(d, tp, origin_addr)) @@ -109,7 +109,7 @@ if not argspec.strip(): return [], None if opname == 'debug_merge_point': - return argspec.split(", ", 1), None + return argspec.split(", ", 2), None else: args = argspec.split(', ') descr = None @@ -159,7 +159,7 @@ for op in operations: if op.name == 'debug_merge_point': self.inline_level = int(op.args[0]) - self.parse_code_data(op.args[1][1:-1]) + 
self.parse_code_data(op.args[2][1:-1]) break else: self.inline_level = 0 @@ -417,7 +417,7 @@ part.descr = descrs[i] part.comment = trace.comment parts.append(part) - + return parts def parse_log_counts(input, loops): diff --git a/pypy/tool/jitlogparser/test/test_parser.py b/pypy/tool/jitlogparser/test/test_parser.py --- a/pypy/tool/jitlogparser/test/test_parser.py +++ b/pypy/tool/jitlogparser/test/test_parser.py @@ -29,7 +29,7 @@ def test_parse_non_code(): ops = parse(''' [] - debug_merge_point(0, "SomeRandomStuff") + debug_merge_point(0, 0, "SomeRandomStuff") ''') res = Function.from_operations(ops.operations, LoopStorage()) assert len(res.chunks) == 1 @@ -39,10 +39,10 @@ ops = parse(''' [i0] label() - debug_merge_point(0, " #10 ADD") - debug_merge_point(0, " #11 SUB") + debug_merge_point(0, 0, " #10 ADD") + debug_merge_point(0, 0, " #11 SUB") i1 = int_add(i0, 1) - debug_merge_point(0, " #11 SUB") + debug_merge_point(0, 0, " #11 SUB") i2 = int_add(i1, 1) ''') res = Function.from_operations(ops.operations, LoopStorage(), loopname='') @@ -57,12 +57,12 @@ def test_inlined_call(): ops = parse(""" [] - debug_merge_point(0, ' #28 CALL_FUNCTION') + debug_merge_point(0, 0, ' #28 CALL_FUNCTION') i18 = getfield_gc(p0, descr=) - debug_merge_point(1, ' #0 LOAD_FAST') - debug_merge_point(1, ' #3 LOAD_CONST') - debug_merge_point(1, ' #7 RETURN_VALUE') - debug_merge_point(0, ' #31 STORE_FAST') + debug_merge_point(1, 1, ' #0 LOAD_FAST') + debug_merge_point(1, 1, ' #3 LOAD_CONST') + debug_merge_point(1, 1, ' #7 RETURN_VALUE') + debug_merge_point(0, 0, ' #31 STORE_FAST') """) res = Function.from_operations(ops.operations, LoopStorage()) assert len(res.chunks) == 3 # two chunks + inlined call @@ -75,10 +75,10 @@ def test_name(): ops = parse(''' [i0] - debug_merge_point(0, " #10 ADD") - debug_merge_point(0, " #11 SUB") + debug_merge_point(0, 0, " #10 ADD") + debug_merge_point(0, 0, " #11 SUB") i1 = int_add(i0, 1) - debug_merge_point(0, " #11 SUB") + debug_merge_point(0, 0, " #11 
SUB") i2 = int_add(i1, 1) ''') res = Function.from_operations(ops.operations, LoopStorage()) @@ -92,10 +92,10 @@ ops = parse(''' [i0] i3 = int_add(i0, 1) - debug_merge_point(0, " #10 ADD") - debug_merge_point(0, " #11 SUB") + debug_merge_point(0, 0, " #10 ADD") + debug_merge_point(0, 0, " #11 SUB") i1 = int_add(i0, 1) - debug_merge_point(0, " #11 SUB") + debug_merge_point(0, 0, " #11 SUB") i2 = int_add(i1, 1) ''') res = Function.from_operations(ops.operations, LoopStorage()) @@ -105,10 +105,10 @@ fname = str(py.path.local(__file__).join('..', 'x.py')) ops = parse(''' [i0, i1] - debug_merge_point(0, " #0 LOAD_FAST") - debug_merge_point(0, " #3 LOAD_FAST") - debug_merge_point(0, " #6 BINARY_ADD") - debug_merge_point(0, " #7 RETURN_VALUE") + debug_merge_point(0, 0, " #0 LOAD_FAST") + debug_merge_point(0, 0, " #3 LOAD_FAST") + debug_merge_point(0, 0, " #6 BINARY_ADD") + debug_merge_point(0, 0, " #7 RETURN_VALUE") ''' % locals()) res = Function.from_operations(ops.operations, LoopStorage()) assert res.chunks[1].lineno == 3 @@ -119,11 +119,11 @@ fname = str(py.path.local(__file__).join('..', 'x.py')) ops = parse(''' [i0, i1] - debug_merge_point(0, " #9 LOAD_FAST") - debug_merge_point(0, " #12 LOAD_CONST") - debug_merge_point(0, " #22 LOAD_CONST") - debug_merge_point(0, " #28 LOAD_CONST") - debug_merge_point(0, " #6 SETUP_LOOP") + debug_merge_point(0, 0, " #9 LOAD_FAST") + debug_merge_point(0, 0, " #12 LOAD_CONST") + debug_merge_point(0, 0, " #22 LOAD_CONST") + debug_merge_point(0, 0, " #28 LOAD_CONST") + debug_merge_point(0, 0, " #6 SETUP_LOOP") ''' % locals()) res = Function.from_operations(ops.operations, LoopStorage()) assert res.linerange == (7, 9) @@ -135,7 +135,7 @@ fname = str(py.path.local(__file__).join('..', 'x.py')) ops = parse(""" [p6, p1] - debug_merge_point(0, ' #17 FOR_ITER') + debug_merge_point(0, 0, ' #17 FOR_ITER') guard_class(p6, 144264192, descr=) p12 = getfield_gc(p6, descr=) """ % locals()) @@ -181,7 +181,7 @@ def test_parsing_strliteral(): loop = 
parse(""" - debug_merge_point(0, 'StrLiteralSearch at 11/51 [17, 8, 3, 1, 1, 1, 1, 51, 0, 19, 51, 1]') + debug_merge_point(0, 0, 'StrLiteralSearch at 11/51 [17, 8, 3, 1, 1, 1, 1, 51, 0, 19, 51, 1]') """) ops = Function.from_operations(loop.operations, LoopStorage()) chunk = ops.chunks[0] @@ -193,12 +193,12 @@ loop = parse(""" # Loop 0 : loop with 19 ops [p0, p1, p2, p3, i4] - debug_merge_point(0, ' #15 COMPARE_OP') + debug_merge_point(0, 0, ' #15 COMPARE_OP') +166: i6 = int_lt(i4, 10000) guard_true(i6, descr=) [p1, p0, p2, p3, i4] - debug_merge_point(0, ' #27 INPLACE_ADD') + debug_merge_point(0, 0, ' #27 INPLACE_ADD') +179: i8 = int_add(i4, 1) - debug_merge_point(0, ' #31 JUMP_ABSOLUTE') + debug_merge_point(0, 0, ' #31 JUMP_ABSOLUTE') +183: i10 = getfield_raw(40564608, descr=) +191: i12 = int_sub(i10, 1) +195: setfield_raw(40564608, i12, descr=) @@ -287,8 +287,8 @@ def test_parse_nonpython(): loop = parse(""" [] - debug_merge_point(0, 'random') - debug_merge_point(0, ' #15 COMPARE_OP') + debug_merge_point(0, 0, 'random') + debug_merge_point(0, 0, ' #15 COMPARE_OP') """) f = Function.from_operations(loop.operations, LoopStorage()) assert f.chunks[-1].filename == 'x.py' diff --git a/pypy/translator/c/gcc/instruction.py b/pypy/translator/c/gcc/instruction.py --- a/pypy/translator/c/gcc/instruction.py +++ b/pypy/translator/c/gcc/instruction.py @@ -13,13 +13,17 @@ ARGUMENT_REGISTERS_64 = ('%rdi', '%rsi', '%rdx', '%rcx', '%r8', '%r9') -def frameloc_esp(offset): +def frameloc_esp(offset, wordsize): assert offset >= 0 - assert offset % 4 == 0 + assert offset % wordsize == 0 + if wordsize == 8: # in this case, there are 3 null bits, but we + offset >>= 1 # only need 2 of them return LOC_ESP_PLUS | offset -def frameloc_ebp(offset): - assert offset % 4 == 0 +def frameloc_ebp(offset, wordsize): + assert offset % wordsize == 0 + if wordsize == 8: # in this case, there are 3 null bits, but we + offset >>= 1 # only need 2 of them if offset >= 0: return LOC_EBP_PLUS | offset else: 
@@ -57,12 +61,12 @@ # try to use esp-relative addressing ofs_from_esp = framesize + self.ofs_from_frame_end if ofs_from_esp % 2 == 0: - return frameloc_esp(ofs_from_esp) + return frameloc_esp(ofs_from_esp, wordsize) # we can get an odd value if the framesize is marked as bogus # by visit_andl() assert uses_frame_pointer ofs_from_ebp = self.ofs_from_frame_end + wordsize - return frameloc_ebp(ofs_from_ebp) + return frameloc_ebp(ofs_from_ebp, wordsize) class Insn(object): diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -78,9 +78,9 @@ if self.is_stack_bottom: retaddr = LOC_NOWHERE # end marker for asmgcroot.py elif self.uses_frame_pointer: - retaddr = frameloc_ebp(self.WORD) + retaddr = frameloc_ebp(self.WORD, self.WORD) else: - retaddr = frameloc_esp(insn.framesize) + retaddr = frameloc_esp(insn.framesize, self.WORD) shape = [retaddr] # the first gcroots are always the ones corresponding to # the callee-saved registers @@ -894,6 +894,8 @@ return '%' + cls.CALLEE_SAVE_REGISTERS[reg].replace("%", "") else: offset = loc & ~ LOC_MASK + if cls.WORD == 8: + offset <<= 1 if kind == LOC_EBP_PLUS: result = '(%' + cls.EBP.replace("%", "") + ')' elif kind == LOC_EBP_MINUS: diff --git a/pypy/translator/goal/app_main.py b/pypy/translator/goal/app_main.py --- a/pypy/translator/goal/app_main.py +++ b/pypy/translator/goal/app_main.py @@ -130,30 +130,46 @@ sys.executable,) print __doc__.rstrip() if 'pypyjit' in sys.builtin_module_names: - _print_jit_help() + print " --jit OPTIONS advanced JIT options: try 'off' or 'help'" print raise SystemExit def _print_jit_help(): - import pypyjit + try: + import pypyjit + except ImportError: + print >> sys.stderr, "No jit support in %s" % (sys.executable,) + return items = pypyjit.defaults.items() items.sort() + print 'Advanced JIT options: a comma-separated list of OPTION=VALUE:' for key, value in items: - prefix = ' 
--jit %s=N %s' % (key, ' '*(18-len(key))) + print + print ' %s=N' % (key,) doc = '%s (default %s)' % (pypyjit.PARAMETER_DOCS[key], value) - while len(doc) > 51: - i = doc[:51].rfind(' ') - print prefix + doc[:i] + while len(doc) > 72: + i = doc[:74].rfind(' ') + if i < 0: + i = doc.find(' ') + if i < 0: + i = len(doc) + print ' ' + doc[:i] doc = doc[i+1:] - prefix = ' '*len(prefix) - print prefix + doc - print ' --jit off turn off the JIT' + print ' ' + doc + print + print ' off' + print ' turn off the JIT' + print ' help' + print ' print this page' def print_version(*args): print >> sys.stderr, "Python", sys.version raise SystemExit def set_jit_option(options, jitparam, *args): + if jitparam == 'help': + _print_jit_help() + raise SystemExit if 'pypyjit' not in sys.builtin_module_names: print >> sys.stderr, ("Warning: No jit support in %s" % (sys.executable,)) From noreply at buildbot.pypy.org Sun Mar 4 15:03:25 2012 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Mar 2012 15:03:25 +0100 (CET) Subject: [pypy-commit] pypy default: These names are fine, but are bound to conflict with some other name Message-ID: <20120304140325.D226082008@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r53179:f1509bac7c63 Date: 2012-03-04 15:03 +0100 http://bitbucket.org/pypy/pypy/changeset/f1509bac7c63/ Log: These names are fine, but are bound to conflict with some other name from somewhere else :-( Added a 'ffi_' prefix. 
diff --git a/pypy/translator/c/src/libffi_msvc/ffi.c b/pypy/translator/c/src/libffi_msvc/ffi.c --- a/pypy/translator/c/src/libffi_msvc/ffi.c +++ b/pypy/translator/c/src/libffi_msvc/ffi.c @@ -71,31 +71,31 @@ switch ((*p_arg)->type) { case FFI_TYPE_SINT8: - *(signed int *) argp = (signed int)*(SINT8 *)(* p_argv); + *(signed int *) argp = (signed int)*(ffi_SINT8 *)(* p_argv); break; case FFI_TYPE_UINT8: - *(unsigned int *) argp = (unsigned int)*(UINT8 *)(* p_argv); + *(unsigned int *) argp = (unsigned int)*(ffi_UINT8 *)(* p_argv); break; case FFI_TYPE_SINT16: - *(signed int *) argp = (signed int)*(SINT16 *)(* p_argv); + *(signed int *) argp = (signed int)*(ffi_SINT16 *)(* p_argv); break; case FFI_TYPE_UINT16: - *(unsigned int *) argp = (unsigned int)*(UINT16 *)(* p_argv); + *(unsigned int *) argp = (unsigned int)*(ffi_UINT16 *)(* p_argv); break; case FFI_TYPE_SINT32: - *(signed int *) argp = (signed int)*(SINT32 *)(* p_argv); + *(signed int *) argp = (signed int)*(ffi_SINT32 *)(* p_argv); break; case FFI_TYPE_UINT32: - *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + *(unsigned int *) argp = (unsigned int)*(ffi_UINT32 *)(* p_argv); break; case FFI_TYPE_STRUCT: - *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + *(unsigned int *) argp = (unsigned int)*(ffi_UINT32 *)(* p_argv); break; default: diff --git a/pypy/translator/c/src/libffi_msvc/ffi_common.h b/pypy/translator/c/src/libffi_msvc/ffi_common.h --- a/pypy/translator/c/src/libffi_msvc/ffi_common.h +++ b/pypy/translator/c/src/libffi_msvc/ffi_common.h @@ -56,16 +56,18 @@ } extended_cif; /* Terse sized type definitions. 
*/ -typedef unsigned int UINT8 __attribute__((__mode__(__QI__))); -typedef signed int SINT8 __attribute__((__mode__(__QI__))); -typedef unsigned int UINT16 __attribute__((__mode__(__HI__))); -typedef signed int SINT16 __attribute__((__mode__(__HI__))); -typedef unsigned int UINT32 __attribute__((__mode__(__SI__))); -typedef signed int SINT32 __attribute__((__mode__(__SI__))); -typedef unsigned int UINT64 __attribute__((__mode__(__DI__))); -typedef signed int SINT64 __attribute__((__mode__(__DI__))); +/* Fix for PyPy: these names are fine, but are bound to conflict with + * some other name from somewhere else :-( Added a 'ffi_' prefix. */ +typedef unsigned int ffi_UINT8 __attribute__((__mode__(__QI__))); +typedef signed int ffi_SINT8 __attribute__((__mode__(__QI__))); +typedef unsigned int ffi_UINT16 __attribute__((__mode__(__HI__))); +typedef signed int ffi_SINT16 __attribute__((__mode__(__HI__))); +typedef unsigned int ffi_UINT32 __attribute__((__mode__(__SI__))); +typedef signed int ffi_SINT32 __attribute__((__mode__(__SI__))); +typedef unsigned int ffi_UINT64 __attribute__((__mode__(__DI__))); +typedef signed int ffi_SINT64 __attribute__((__mode__(__DI__))); -typedef float FLOAT32; +typedef float ffi_FLOAT32; #ifdef __cplusplus From pullrequests-noreply at bitbucket.org Sun Mar 4 17:04:32 2012 From: pullrequests-noreply at bitbucket.org (Bitbucket) Date: Sun, 04 Mar 2012 16:04:32 -0000 Subject: [pypy-commit] [OPEN] Pull request #29 for pypy/pypy: Finish kqueue support In-Reply-To: References: Message-ID: <20120304160432.5463.83886@bitbucket05.managed.contegix.com> Pull request #29 has been updated by Tobias Oberstein to include new changes. https://bitbucket.org/pypy/pypy/pull-request/29/finish-kqueue-support Title: Finish kqueue support Creator: Tobias Oberstein Working kqueue support. Translator test, all unit tests succeed, PyPy builds, Twisted trunk runs, Autobahn WebSocket testsuite runs. 
Updated list of changes: d107b907c070 by Tobias Oberstein: "Merge upstream." 5c06272fbaa9 by Tobias Oberstein: "Correct use of integer types." 87f0c2c83a66 by Tobias Oberstein: "Closely follow the CPy impl.." ee259270972a by Tobias Oberstein: "Implement all comparison ops." 58b8b3f12824 by Tobias Oberstein: "Add missing test from CPy, fix types." 3da8ca8d473f by Tobias Oberstein: "Enable kqueue on all BSDish platforms." 1205ef1c2c35 by Tobias Oberstein: "Cosmetical changes." 696dedf6f58f by Tobias Oberstein: "Merge upstream." a701fa0e939b by Tobias Oberstein: "Make all unit tests pass." fa4020666f4c by Tobias Oberstein: "Various fixes, implement kevent comparison." b11d2bf7c116 by Tobias Oberstein: "Merge upstream." 3d6ef205bbaa by Tobias Oberstein: "Some build fixes." 1996736bd620 by Tobias Oberstein: "Implement timeout forwarding." 39358fbeb957 by Tobias Oberstein: "Implement kqueue control." ff829d29d644 by Tobias Oberstein: "Complete symbols, streamline code, fix include, whitespace." e46a58cf8ee6 by Tobias Oberstein: "Fix module init, test init." b12df2b6eaa8 by Tobias Oberstein: "Merging trunk and resolving conflicts." cc37b011ea0f by Alex Gaynor: "a little work" 0b624568902e by Alex Gaynor: "merged upstream" 38bc1b0542fa by Alex Gaynor: "resolved merge conflicts" 32456c2f21a2 by Alex Gaynor: "Implemented kevent and started on kqueue." 1a5f26bde444 by Alex Gaynor: "Allow submodules for MixedModules, imported from my psycopg2 fork." 0a34a5b002d1 by Alex Gaynor: "Final typo fix." 4e7c33ca4b88 by Alex Gaynor: "One more typo fix." 089f3314fd8b by Alex Gaynor: "typo fix." 0b8353f1a1b4 by Alex Gaynor: "Missing import." 1ef20b0124c0 by Alex Gaynor: "Remove this test, it doesn't seem to pass under CPython." dae97b70bdfe by Alex Gaynor: "Fix a typo in the tests." 682046107ffe by Alex Gaynor: "Added tests and skeleton." -- This is an issue notification from bitbucket.org. 
You are receiving this either because you are the participating in a pull request, or you are following it. From notifications-noreply at bitbucket.org Sun Mar 4 17:08:08 2012 From: notifications-noreply at bitbucket.org (Bitbucket) Date: Sun, 04 Mar 2012 16:08:08 -0000 Subject: [pypy-commit] [COMMENT] Pull request #29 for pypy/pypy: Finish kqueue support In-Reply-To: References: Message-ID: <20120304160808.13701.51584@bitbucket13.managed.contegix.com> New comment on pull request: https://bitbucket.org/pypy/pypy/pull-request/29/finish-kqueue-support#comment-3546 Tobias Oberstein (oberstet) said: Hi Alex, I've done all of above, plus a couple of things which bring the implementation more in line with CPy. After discussion with arigato: I've also corrected the use of integer types. In that context I've added intptr_t, uintptr_t to TYPES in rffi.py. In general the types: int, ssize_t, intptr_t can be equal, or not. Same for unsigned ones. See also: http://stackoverflow.com/questions/1464174/size-t-vs-intptr-t I have merged current upstream, run translate/unit tests. PyPy is building right now. Let me know if there are further things you'd like to have changed. -- This is a pull request comment notification from bitbucket.org. You are receiving this either because you are participating in a pull request, or you are following it. 
From noreply at buildbot.pypy.org Sun Mar 4 17:09:00 2012 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Mar 2012 17:09:00 +0100 (CET) Subject: [pypy-commit] pypy continulet-jit-2: hg merge default Message-ID: <20120304160900.C544882008@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-jit-2 Changeset: r53180:387d2db68baf Date: 2012-03-04 16:31 +0100 http://bitbucket.org/pypy/pypy/changeset/387d2db68baf/ Log: hg merge default diff --git a/lib_pypy/cPickle.py b/lib_pypy/cPickle.py --- a/lib_pypy/cPickle.py +++ b/lib_pypy/cPickle.py @@ -2,16 +2,95 @@ # One-liner implementation of cPickle # -from pickle import * +from pickle import Pickler, dump, dumps, PickleError, PicklingError, UnpicklingError, _EmptyClass from pickle import __doc__, __version__, format_version, compatible_formats +from types import * +from copy_reg import dispatch_table +from copy_reg import _extension_registry, _inverted_registry, _extension_cache +import marshal, struct, sys try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f +# These are purely informational; no code uses these. +format_version = "2.0" # File format version we write +compatible_formats = ["1.0", # Original protocol 0 + "1.1", # Protocol 0 with INST added + "1.2", # Original protocol 1 + "1.3", # Protocol 1 with BINFLOAT added + "2.0", # Protocol 2 + ] # Old format versions we can read + +# Keep in synch with cPickle. This is the highest protocol number we +# know how to read. 
+HIGHEST_PROTOCOL = 2 BadPickleGet = KeyError UnpickleableError = PicklingError +MARK = ord('(') # push special markobject on stack +STOP = ord('.') # every pickle ends with STOP +POP = ord('0') # discard topmost stack item +POP_MARK = ord('1') # discard stack top through topmost markobject +DUP = ord('2') # duplicate top stack item +FLOAT = ord('F') # push float object; decimal string argument +INT = ord('I') # push integer or bool; decimal string argument +BININT = ord('J') # push four-byte signed int +BININT1 = ord('K') # push 1-byte unsigned int +LONG = ord('L') # push long; decimal string argument +BININT2 = ord('M') # push 2-byte unsigned int +NONE = ord('N') # push None +PERSID = ord('P') # push persistent object; id is taken from string arg +BINPERSID = ord('Q') # " " " ; " " " " stack +REDUCE = ord('R') # apply callable to argtuple, both on stack +STRING = ord('S') # push string; NL-terminated string argument +BINSTRING = ord('T') # push string; counted binary string argument +SHORT_BINSTRING = ord('U') # " " ; " " " " < 256 bytes +UNICODE = ord('V') # push Unicode string; raw-unicode-escaped'd argument +BINUNICODE = ord('X') # " " " ; counted UTF-8 string argument +APPEND = ord('a') # append stack top to list below it +BUILD = ord('b') # call __setstate__ or __dict__.update() +GLOBAL = ord('c') # push self.find_class(modname, name); 2 string args +DICT = ord('d') # build a dict from stack items +EMPTY_DICT = ord('}') # push empty dict +APPENDS = ord('e') # extend list on stack by topmost stack slice +GET = ord('g') # push item from memo on stack; index is string arg +BINGET = ord('h') # " " " " " " ; " " 1-byte arg +INST = ord('i') # build & push class instance +LONG_BINGET = ord('j') # push item from memo on stack; index is 4-byte arg +LIST = ord('l') # build list from topmost stack items +EMPTY_LIST = ord(']') # push empty list +OBJ = ord('o') # build & push class instance +PUT = ord('p') # store stack top in memo; index is string arg +BINPUT = ord('q') 
# " " " " " ; " " 1-byte arg +LONG_BINPUT = ord('r') # " " " " " ; " " 4-byte arg +SETITEM = ord('s') # add key+value pair to dict +TUPLE = ord('t') # build tuple from topmost stack items +EMPTY_TUPLE = ord(')') # push empty tuple +SETITEMS = ord('u') # modify dict by adding topmost key+value pairs +BINFLOAT = ord('G') # push float; arg is 8-byte float encoding + +TRUE = 'I01\n' # not an opcode; see INT docs in pickletools.py +FALSE = 'I00\n' # not an opcode; see INT docs in pickletools.py + +# Protocol 2 + +PROTO = ord('\x80') # identify pickle protocol +NEWOBJ = ord('\x81') # build object by applying cls.__new__ to argtuple +EXT1 = ord('\x82') # push object from extension registry; 1-byte index +EXT2 = ord('\x83') # ditto, but 2-byte index +EXT4 = ord('\x84') # ditto, but 4-byte index +TUPLE1 = ord('\x85') # build 1-tuple from stack top +TUPLE2 = ord('\x86') # build 2-tuple from two topmost stack items +TUPLE3 = ord('\x87') # build 3-tuple from three topmost stack items +NEWTRUE = ord('\x88') # push True +NEWFALSE = ord('\x89') # push False +LONG1 = ord('\x8a') # push long from < 256 bytes +LONG4 = ord('\x8b') # push really big long + +_tuplesize2code = [EMPTY_TUPLE, TUPLE1, TUPLE2, TUPLE3] + + # ____________________________________________________________ # XXX some temporary dark magic to produce pickled dumps that are # closer to the ones produced by cPickle in CPython @@ -44,3 +123,474 @@ file = StringIO() Pickler(file, protocol).dump(obj) return file.getvalue() + +# Why use struct.pack() for pickling but marshal.loads() for +# unpickling? struct.pack() is 40% faster than marshal.dumps(), but +# marshal.loads() is twice as fast as struct.unpack()! +mloads = marshal.loads + +# Unpickling machinery + +class Unpickler(object): + + def __init__(self, file): + """This takes a file-like object for reading a pickle data stream. + + The protocol version of the pickle is detected automatically, so no + proto argument is needed. 
+ + The file-like object must have two methods, a read() method that + takes an integer argument, and a readline() method that requires no + arguments. Both methods should return a string. Thus file-like + object can be a file object opened for reading, a StringIO object, + or any other custom object that meets this interface. + """ + self.readline = file.readline + self.read = file.read + self.memo = {} + + def load(self): + """Read a pickled object representation from the open file. + + Return the reconstituted object hierarchy specified in the file. + """ + self.mark = object() # any new unique object + self.stack = [] + self.append = self.stack.append + try: + key = ord(self.read(1)) + while key != STOP: + self.dispatch[key](self) + key = ord(self.read(1)) + except TypeError: + if self.read(1) == '': + raise EOFError + raise + return self.stack.pop() + + # Return largest index k such that self.stack[k] is self.mark. + # If the stack doesn't contain a mark, eventually raises IndexError. + # This could be sped by maintaining another stack, of indices at which + # the mark appears. For that matter, the latter stack would suffice, + # and we wouldn't need to push mark objects on self.stack at all. + # Doing so is probably a good thing, though, since if the pickle is + # corrupt (or hostile) we may get a clue from finding self.mark embedded + # in unpickled objects. 
+ def marker(self): + k = len(self.stack)-1 + while self.stack[k] is not self.mark: k -= 1 + return k + + dispatch = {} + + def load_proto(self): + proto = ord(self.read(1)) + if not 0 <= proto <= 2: + raise ValueError, "unsupported pickle protocol: %d" % proto + dispatch[PROTO] = load_proto + + def load_persid(self): + pid = self.readline()[:-1] + self.append(self.persistent_load(pid)) + dispatch[PERSID] = load_persid + + def load_binpersid(self): + pid = self.stack.pop() + self.append(self.persistent_load(pid)) + dispatch[BINPERSID] = load_binpersid + + def load_none(self): + self.append(None) + dispatch[NONE] = load_none + + def load_false(self): + self.append(False) + dispatch[NEWFALSE] = load_false + + def load_true(self): + self.append(True) + dispatch[NEWTRUE] = load_true + + def load_int(self): + data = self.readline() + if data == FALSE[1:]: + val = False + elif data == TRUE[1:]: + val = True + else: + try: + val = int(data) + except ValueError: + val = long(data) + self.append(val) + dispatch[INT] = load_int + + def load_binint(self): + self.append(mloads('i' + self.read(4))) + dispatch[BININT] = load_binint + + def load_binint1(self): + self.append(ord(self.read(1))) + dispatch[BININT1] = load_binint1 + + def load_binint2(self): + self.append(mloads('i' + self.read(2) + '\000\000')) + dispatch[BININT2] = load_binint2 + + def load_long(self): + self.append(long(self.readline()[:-1], 0)) + dispatch[LONG] = load_long + + def load_long1(self): + n = ord(self.read(1)) + bytes = self.read(n) + self.append(decode_long(bytes)) + dispatch[LONG1] = load_long1 + + def load_long4(self): + n = mloads('i' + self.read(4)) + bytes = self.read(n) + self.append(decode_long(bytes)) + dispatch[LONG4] = load_long4 + + def load_float(self): + self.append(float(self.readline()[:-1])) + dispatch[FLOAT] = load_float + + def load_binfloat(self, unpack=struct.unpack): + self.append(unpack('>d', self.read(8))[0]) + dispatch[BINFLOAT] = load_binfloat + + def load_string(self): + rep 
= self.readline() + if len(rep) < 3: + raise ValueError, "insecure string pickle" + if rep[0] == "'" == rep[-2]: + rep = rep[1:-2] + elif rep[0] == '"' == rep[-2]: + rep = rep[1:-2] + else: + raise ValueError, "insecure string pickle" + self.append(rep.decode("string-escape")) + dispatch[STRING] = load_string + + def load_binstring(self): + L = mloads('i' + self.read(4)) + self.append(self.read(L)) + dispatch[BINSTRING] = load_binstring + + def load_unicode(self): + self.append(unicode(self.readline()[:-1],'raw-unicode-escape')) + dispatch[UNICODE] = load_unicode + + def load_binunicode(self): + L = mloads('i' + self.read(4)) + self.append(unicode(self.read(L),'utf-8')) + dispatch[BINUNICODE] = load_binunicode + + def load_short_binstring(self): + L = ord(self.read(1)) + self.append(self.read(L)) + dispatch[SHORT_BINSTRING] = load_short_binstring + + def load_tuple(self): + k = self.marker() + self.stack[k:] = [tuple(self.stack[k+1:])] + dispatch[TUPLE] = load_tuple + + def load_empty_tuple(self): + self.stack.append(()) + dispatch[EMPTY_TUPLE] = load_empty_tuple + + def load_tuple1(self): + self.stack[-1] = (self.stack[-1],) + dispatch[TUPLE1] = load_tuple1 + + def load_tuple2(self): + self.stack[-2:] = [(self.stack[-2], self.stack[-1])] + dispatch[TUPLE2] = load_tuple2 + + def load_tuple3(self): + self.stack[-3:] = [(self.stack[-3], self.stack[-2], self.stack[-1])] + dispatch[TUPLE3] = load_tuple3 + + def load_empty_list(self): + self.stack.append([]) + dispatch[EMPTY_LIST] = load_empty_list + + def load_empty_dictionary(self): + self.stack.append({}) + dispatch[EMPTY_DICT] = load_empty_dictionary + + def load_list(self): + k = self.marker() + self.stack[k:] = [self.stack[k+1:]] + dispatch[LIST] = load_list + + def load_dict(self): + k = self.marker() + d = {} + items = self.stack[k+1:] + for i in range(0, len(items), 2): + key = items[i] + value = items[i+1] + d[key] = value + self.stack[k:] = [d] + dispatch[DICT] = load_dict + + # INST and OBJ differ only in 
how they get a class object. It's not + # only sensible to do the rest in a common routine, the two routines + # previously diverged and grew different bugs. + # klass is the class to instantiate, and k points to the topmost mark + # object, following which are the arguments for klass.__init__. + def _instantiate(self, klass, k): + args = tuple(self.stack[k+1:]) + del self.stack[k:] + instantiated = 0 + if (not args and + type(klass) is ClassType and + not hasattr(klass, "__getinitargs__")): + try: + value = _EmptyClass() + value.__class__ = klass + instantiated = 1 + except RuntimeError: + # In restricted execution, assignment to inst.__class__ is + # prohibited + pass + if not instantiated: + try: + value = klass(*args) + except TypeError, err: + raise TypeError, "in constructor for %s: %s" % ( + klass.__name__, str(err)), sys.exc_info()[2] + self.append(value) + + def load_inst(self): + module = self.readline()[:-1] + name = self.readline()[:-1] + klass = self.find_class(module, name) + self._instantiate(klass, self.marker()) + dispatch[INST] = load_inst + + def load_obj(self): + # Stack is ... markobject classobject arg1 arg2 ... 
+ k = self.marker() + klass = self.stack.pop(k+1) + self._instantiate(klass, k) + dispatch[OBJ] = load_obj + + def load_newobj(self): + args = self.stack.pop() + cls = self.stack[-1] + obj = cls.__new__(cls, *args) + self.stack[-1] = obj + dispatch[NEWOBJ] = load_newobj + + def load_global(self): + module = self.readline()[:-1] + name = self.readline()[:-1] + klass = self.find_class(module, name) + self.append(klass) + dispatch[GLOBAL] = load_global + + def load_ext1(self): + code = ord(self.read(1)) + self.get_extension(code) + dispatch[EXT1] = load_ext1 + + def load_ext2(self): + code = mloads('i' + self.read(2) + '\000\000') + self.get_extension(code) + dispatch[EXT2] = load_ext2 + + def load_ext4(self): + code = mloads('i' + self.read(4)) + self.get_extension(code) + dispatch[EXT4] = load_ext4 + + def get_extension(self, code): + nil = [] + obj = _extension_cache.get(code, nil) + if obj is not nil: + self.append(obj) + return + key = _inverted_registry.get(code) + if not key: + raise ValueError("unregistered extension code %d" % code) + obj = self.find_class(*key) + _extension_cache[code] = obj + self.append(obj) + + def find_class(self, module, name): + # Subclasses may override this + __import__(module) + mod = sys.modules[module] + klass = getattr(mod, name) + return klass + + def load_reduce(self): + args = self.stack.pop() + func = self.stack[-1] + value = self.stack[-1](*args) + self.stack[-1] = value + dispatch[REDUCE] = load_reduce + + def load_pop(self): + del self.stack[-1] + dispatch[POP] = load_pop + + def load_pop_mark(self): + k = self.marker() + del self.stack[k:] + dispatch[POP_MARK] = load_pop_mark + + def load_dup(self): + self.append(self.stack[-1]) + dispatch[DUP] = load_dup + + def load_get(self): + self.append(self.memo[self.readline()[:-1]]) + dispatch[GET] = load_get + + def load_binget(self): + i = ord(self.read(1)) + self.append(self.memo[repr(i)]) + dispatch[BINGET] = load_binget + + def load_long_binget(self): + i = mloads('i' + 
self.read(4)) + self.append(self.memo[repr(i)]) + dispatch[LONG_BINGET] = load_long_binget + + def load_put(self): + self.memo[self.readline()[:-1]] = self.stack[-1] + dispatch[PUT] = load_put + + def load_binput(self): + i = ord(self.read(1)) + self.memo[repr(i)] = self.stack[-1] + dispatch[BINPUT] = load_binput + + def load_long_binput(self): + i = mloads('i' + self.read(4)) + self.memo[repr(i)] = self.stack[-1] + dispatch[LONG_BINPUT] = load_long_binput + + def load_append(self): + value = self.stack.pop() + self.stack[-1].append(value) + dispatch[APPEND] = load_append + + def load_appends(self): + stack = self.stack + mark = self.marker() + lst = stack[mark - 1] + lst.extend(stack[mark + 1:]) + del stack[mark:] + dispatch[APPENDS] = load_appends + + def load_setitem(self): + stack = self.stack + value = stack.pop() + key = stack.pop() + dict = stack[-1] + dict[key] = value + dispatch[SETITEM] = load_setitem + + def load_setitems(self): + stack = self.stack + mark = self.marker() + dict = stack[mark - 1] + for i in range(mark + 1, len(stack), 2): + dict[stack[i]] = stack[i + 1] + + del stack[mark:] + dispatch[SETITEMS] = load_setitems + + def load_build(self): + stack = self.stack + state = stack.pop() + inst = stack[-1] + setstate = getattr(inst, "__setstate__", None) + if setstate: + setstate(state) + return + slotstate = None + if isinstance(state, tuple) and len(state) == 2: + state, slotstate = state + if state: + try: + d = inst.__dict__ + try: + for k, v in state.iteritems(): + d[intern(k)] = v + # keys in state don't have to be strings + # don't blow up, but don't go out of our way + except TypeError: + d.update(state) + + except RuntimeError: + # XXX In restricted execution, the instance's __dict__ + # is not accessible. Use the old way of unpickling + # the instance variables. This is a semantic + # difference when unpickling in restricted + # vs. unrestricted modes. 
+ # Note, however, that cPickle has never tried to do the + # .update() business, and always uses + # PyObject_SetItem(inst.__dict__, key, value) in a + # loop over state.items(). + for k, v in state.items(): + setattr(inst, k, v) + if slotstate: + for k, v in slotstate.items(): + setattr(inst, k, v) + dispatch[BUILD] = load_build + + def load_mark(self): + self.append(self.mark) + dispatch[MARK] = load_mark + +#from pickle import decode_long + +def decode_long(data): + r"""Decode a long from a two's complement little-endian binary string. + + >>> decode_long('') + 0L + >>> decode_long("\xff\x00") + 255L + >>> decode_long("\xff\x7f") + 32767L + >>> decode_long("\x00\xff") + -256L + >>> decode_long("\x00\x80") + -32768L + >>> decode_long("\x80") + -128L + >>> decode_long("\x7f") + 127L + """ + + nbytes = len(data) + if nbytes == 0: + return 0L + ind = nbytes - 1 + while ind and ord(data[ind]) == 0: + ind -= 1 + n = ord(data[ind]) + while ind: + n <<= 8 + ind -= 1 + if ord(data[ind]): + n += ord(data[ind]) + if ord(data[nbytes - 1]) >= 128: + n -= 1L << (nbytes << 3) + return n + +def load(f): + return Unpickler(f).load() + +def loads(str): + f = StringIO(str) + return Unpickler(f).load() diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py --- a/lib_pypy/datetime.py +++ b/lib_pypy/datetime.py @@ -1032,8 +1032,8 @@ def __setstate(self, string): if len(string) != 4 or not (1 <= ord(string[2]) <= 12): raise TypeError("not enough arguments") - yhi, ylo, self._month, self._day = map(ord, string) - self._year = yhi * 256 + ylo + self._month, self._day = ord(string[2]), ord(string[3]) + self._year = ord(string[0]) * 256 + ord(string[1]) def __reduce__(self): return (self.__class__, self._getstate()) @@ -1421,9 +1421,10 @@ def __setstate(self, string, tzinfo): if len(string) != 6 or ord(string[0]) >= 24: raise TypeError("an integer is required") - self._hour, self._minute, self._second, us1, us2, us3 = \ - map(ord, string) - self._microsecond = (((us1 << 8) | us2) << 8) 
| us3 + self._hour, self._minute, self._second = ord(string[0]), \ + ord(string[1]), ord(string[2]) + self._microsecond = (((ord(string[3]) << 8) | \ + ord(string[4])) << 8) | ord(string[5]) self._tzinfo = tzinfo def __reduce__(self): @@ -1903,10 +1904,11 @@ return (basestate, self._tzinfo) def __setstate(self, string, tzinfo): - (yhi, ylo, self._month, self._day, self._hour, - self._minute, self._second, us1, us2, us3) = map(ord, string) - self._year = yhi * 256 + ylo - self._microsecond = (((us1 << 8) | us2) << 8) | us3 + (self._month, self._day, self._hour, self._minute, + self._second) = (ord(string[2]), ord(string[3]), ord(string[4]), + ord(string[5]), ord(string[6])) + self._year = ord(string[0]) * 256 + ord(string[1]) + self._microsecond = (((ord(string[7]) << 8) | ord(string[8])) << 8) | ord(string[9]) self._tzinfo = tzinfo def __reduce__(self): diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -171,7 +171,7 @@ 'unicodesetitem' : (('ref', 'int', 'int'), 'int'), 'cast_ptr_to_int' : (('ref',), 'int'), 'cast_int_to_ptr' : (('int',), 'ref'), - 'debug_merge_point': (('ref', 'int'), None), + 'debug_merge_point': (('ref', 'int', 'int'), None), 'force_token' : ((), 'int'), 'call_may_force' : (('int', 'varargs'), 'intorptr'), 'guard_not_forced': ((), None), diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -604,7 +604,7 @@ [funcbox, BoxInt(arg1), BoxInt(arg2)], 'int', descr=calldescr) assert res.getint() == f(arg1, arg2) - + def test_call_stack_alignment(self): # test stack alignment issues, notably for Mac OS/X. # also test the ordering of the arguments. 
@@ -1490,7 +1490,8 @@ def test_noops(self): c_box = self.alloc_string("hi there").constbox() c_nest = ConstInt(0) - self.execute_operation(rop.DEBUG_MERGE_POINT, [c_box, c_nest], 'void') + c_id = ConstInt(0) + self.execute_operation(rop.DEBUG_MERGE_POINT, [c_box, c_nest, c_id], 'void') self.execute_operation(rop.JIT_DEBUG, [c_box, c_nest, c_nest, c_nest, c_nest], 'void') @@ -3061,7 +3062,7 @@ ResOperation(rop.JUMP, [i2], None, descr=targettoken2), ] self.cpu.compile_bridge(faildescr, inputargs, operations, looptoken) - + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 3 res = self.cpu.get_latest_value_int(0) @@ -3106,7 +3107,7 @@ assert len(mc) == len(ops) for i in range(len(mc)): assert mc[i].split("\t")[-1].startswith(ops[i]) - + data = ctypes.string_at(info.asmaddr, info.asmlen) mc = list(machine_code_dump(data, info.asmaddr, cpuname)) lines = [line for line in mc if line.count('\t') == 2] diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -94,7 +94,6 @@ self._debug = False self.debug_counter_descr = cpu.fielddescrof(DEBUG_COUNTER, 'i') self.fail_boxes_count = 0 - self._current_depths_cache = (0, 0) self.datablockwrapper = None self.stack_check_slowpath = 0 self.propagate_exception_path = 0 @@ -532,18 +531,15 @@ regalloc = RegAlloc(self, self.cpu.translate_support_code) # - frame_size_pos = self._call_header_with_stack_check() + stackadjustpos = self._call_header_with_stack_check() clt._debug_nbargs = len(inputargs) operations = regalloc.prepare_loop(inputargs, operations, looptoken, clt.allgcrefs) looppos = self.mc.get_relative_pos() looptoken._x86_loop_code = looppos clt.frame_depth = -1 # temporarily - #clt.param_depth = -1 # temporarily - (frame_depth#, param_depth - ) = self._assemble(regalloc, operations) + frame_depth = self._assemble(regalloc, operations) clt.frame_depth = frame_depth - #clt.param_depth = param_depth # 
size_excluding_failure_stuff = self.mc.get_relative_pos() self.write_pending_failure_recoveries() @@ -557,8 +553,7 @@ rawstart + size_excluding_failure_stuff, rawstart)) debug_stop("jit-backend-addr") - self._patch_stackadjust(rawstart + frame_size_pos, - frame_depth )#+ param_depth) + self._patch_stackadjust(rawstart + stackadjustpos, frame_depth) self.patch_pending_failure_recoveries(rawstart) # ops_offset = self.mc.ops_offset @@ -598,15 +593,13 @@ assert ([loc.assembler() for loc in arglocs] == [loc.assembler() for loc in faildescr._x86_debug_faillocs]) regalloc = RegAlloc(self, self.cpu.translate_support_code) - fail_depths = faildescr._x86_current_depths startpos = self.mc.get_relative_pos() - operations = regalloc.prepare_bridge(fail_depths, inputargs, arglocs, + operations = regalloc.prepare_bridge(inputargs, arglocs, operations, self.current_clt.allgcrefs) - frame_size_pos = self._enter_bridge_code(regalloc) - (frame_depth #, param_depth - ) = self._assemble(regalloc, operations) + stackadjustpos = self._enter_bridge_code(regalloc) + frame_depth = self._assemble(regalloc, operations) codeendpos = self.mc.get_relative_pos() self.write_pending_failure_recoveries() fullsize = self.mc.get_relative_pos() @@ -616,19 +609,16 @@ debug_print("bridge out of Guard %d has address %x to %x" % (descr_number, rawstart, rawstart + codeendpos)) debug_stop("jit-backend-addr") - self._patch_stackadjust(rawstart + frame_size_pos, - frame_depth) # + param_depth) + self._patch_stackadjust(rawstart + stackadjustpos, frame_depth) self.patch_pending_failure_recoveries(rawstart) if not we_are_translated(): # for the benefit of tests faildescr._x86_bridge_frame_depth = frame_depth - #faildescr._x86_bridge_param_depth = param_depth # patch the jump from original guard self.patch_jump_for_descr(faildescr, rawstart) ops_offset = self.mc.ops_offset self.fixup_target_tokens(rawstart) self.current_clt.frame_depth = max(self.current_clt.frame_depth, frame_depth) - 
#self.current_clt.param_depth = max(self.current_clt.param_depth, param_depth) self.teardown() # oprofile support if self.cpu.profile_agent is not None: @@ -800,14 +790,11 @@ if we_are_translated() or self.cpu.dont_keepalive_stuff: self._regalloc = None # else keep it around for debugging frame_depth = regalloc.fm.get_frame_depth() - #param_depth = regalloc.param_depth jump_target_descr = regalloc.jump_target_descr if jump_target_descr is not None: target_frame_depth = jump_target_descr._x86_clt.frame_depth - #target_param_depth = jump_target_descr._x86_clt.param_depth frame_depth = max(frame_depth, target_frame_depth) - #param_depth = max(param_depth, target_param_depth) - return frame_depth#, param_depth + return frame_depth def _patchable_stackadjust(self): xxx @@ -1074,10 +1061,9 @@ genop_math_list[oopspecindex](self, op, arglocs, resloc) def regalloc_perform_with_guard(self, op, guard_op, faillocs, - arglocs, resloc, current_depths): + arglocs, resloc): faildescr = guard_op.getdescr() assert isinstance(faildescr, AbstractFailDescr) - faildescr._x86_current_depths = current_depths failargs = guard_op.getfailargs() guard_opnum = guard_op.getopnum() guard_token = self.implement_guard_recovery(guard_opnum, @@ -1093,10 +1079,9 @@ # must be added by the genop_guard_list[]() assert guard_token is self.pending_guard_tokens[-1] - def regalloc_perform_guard(self, guard_op, faillocs, arglocs, resloc, - current_depths): + def regalloc_perform_guard(self, guard_op, faillocs, arglocs, resloc): self.regalloc_perform_with_guard(None, guard_op, faillocs, arglocs, - resloc, current_depths) + resloc) def load_effective_addr(self, sizereg, baseofs, scale, result, frm=imm0): self.mc.LEA(result, addr_add(frm, sizereg, baseofs, scale)) @@ -1231,7 +1216,6 @@ self.mc.MOV(tmp, loc) self.mc.MOV_sr(p, tmp.value) p += loc.get_width() - #self._regalloc.reserve_param(p//WORD) # x is a location self.mc.CALL(x) self.mark_gc_roots(force_index, extra_esp=extra_esp) @@ -1329,7 +1313,6 @@ x = r10 
remap_frame_layout(self, src_locs, dst_locs, X86_64_SCRATCH_REG) - #self._regalloc.reserve_param(len(pass_on_stack)) self.mc.CALL(x) self.mark_gc_roots(force_index, extra_esp=extra_esp) @@ -2344,7 +2327,6 @@ if reg in save_registers: self.mc.MOV_sr(p, reg.value) p += WORD - self._regalloc.reserve_param(p//WORD) # if gcrootmap.is_shadow_stack: args = [] @@ -2400,6 +2382,7 @@ if reg in save_registers: self.mc.MOV_rs(reg.value, p) p += WORD + self._regalloc.needed_extra_stack_locations(p//WORD) def call_reacquire_gil(self, gcrootmap, save_loc): # save the previous result (eax/xmm0) into the stack temporarily. @@ -2407,7 +2390,6 @@ # to save xmm0 in this case. if isinstance(save_loc, RegLoc) and not save_loc.is_xmm: self.mc.MOV_sr(WORD, save_loc.value) - self._regalloc.reserve_param(2) # call the reopenstack() function (also reacquiring the GIL) if gcrootmap.is_shadow_stack: args = [] @@ -2427,6 +2409,7 @@ # restore the result from the stack if isinstance(save_loc, RegLoc) and not save_loc.is_xmm: self.mc.MOV_rs(save_loc.value, WORD) + self._regalloc.needed_extra_stack_locations(2) def genop_guard_call_assembler(self, op, guard_op, guard_token, arglocs, result_loc): @@ -2704,11 +2687,6 @@ # copy of heap(nursery_free_adr), so that the final MOV below is # a no-op. 
- # reserve room for the argument to the real malloc and the - # saved XMM regs (on 32 bit: 8 * 2 words; on 64 bit: 16 * 1 - # word) - self._regalloc.reserve_param(1+16) - gcrootmap = self.cpu.gc_ll_descr.gcrootmap shadow_stack = (gcrootmap is not None and gcrootmap.is_shadow_stack) if not shadow_stack: @@ -2719,6 +2697,11 @@ slowpath_addr2 = self.malloc_slowpath2 self.mc.CALL(imm(slowpath_addr2)) + # reserve room for the argument to the real malloc and the + # saved XMM regs (on 32 bit: 8 * 2 words; on 64 bit: 16 * 1 + # word) + self._regalloc.needed_extra_stack_locations(1+16) + offset = self.mc.get_relative_pos() - jmp_adr assert 0 < offset <= 127 self.mc.overwrite(jmp_adr-1, chr(offset)) diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -170,7 +170,7 @@ def _prepare(self, inputargs, operations, allgcrefs): self.fm = X86FrameManager() - #self.param_depth = 0 + self.min_frame_depth = 0 cpu = self.assembler.cpu operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations, allgcrefs) @@ -195,11 +195,9 @@ self.min_bytes_before_label = 13 return operations - def prepare_bridge(self, prev_depths, inputargs, arglocs, operations, - allgcrefs): + def prepare_bridge(self, inputargs, arglocs, operations, allgcrefs): operations = self._prepare(inputargs, operations, allgcrefs) self._update_bindings(arglocs, inputargs) - #self.param_depth = prev_depths[1] self.min_bytes_before_label = 0 return operations @@ -400,27 +398,12 @@ def locs_for_fail(self, guard_op): return [self.loc(v) for v in guard_op.getfailargs()] - def get_current_depth(self): - # return (self.fm.frame_depth, self.param_depth), but trying to share - # the resulting tuple among several calls - arg0 = self.fm.get_frame_depth() - return arg0 - # - arg1 = self.param_depth - result = self.assembler._current_depths_cache - if result[0] != arg0 or result[1] != arg1: - result = (arg0, arg1) - 
self.assembler._current_depths_cache = result - return result - def perform_with_guard(self, op, guard_op, arglocs, result_loc): faillocs = self.locs_for_fail(guard_op) self.rm.position += 1 self.xrm.position += 1 - current_depths = self.get_current_depth() self.assembler.regalloc_perform_with_guard(op, guard_op, faillocs, - arglocs, result_loc, - current_depths) + arglocs, result_loc) if op.result is not None: self.possibly_free_var(op.result) self.possibly_free_vars(guard_op.getfailargs()) @@ -433,10 +416,8 @@ arglocs)) else: self.assembler.dump('%s(%s)' % (guard_op, arglocs)) - current_depths = self.get_current_depth() self.assembler.regalloc_perform_guard(guard_op, faillocs, arglocs, - result_loc, - current_depths) + result_loc) self.possibly_free_vars(guard_op.getfailargs()) def PerformDiscard(self, op, arglocs): diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -28,7 +28,7 @@ class MockGcRootMap(object): is_shadow_stack = False - def get_basic_shape(self, is_64_bit): + def get_basic_shape(self): return ['shape'] def add_frame_offset(self, shape, offset): shape.append(offset) diff --git a/pypy/jit/backend/x86/test/test_recompilation.py b/pypy/jit/backend/x86/test/test_recompilation.py --- a/pypy/jit/backend/x86/test/test_recompilation.py +++ b/pypy/jit/backend/x86/test/test_recompilation.py @@ -34,7 +34,6 @@ ''' loop = self.interpret(ops, [0]) previous = loop._jitcelltoken.compiled_loop_token.frame_depth - assert loop._jitcelltoken.compiled_loop_token.param_depth == 0 assert self.getint(0) == 20 ops = ''' [i1] @@ -51,7 +50,6 @@ bridge = self.attach_bridge(ops, loop, -2) descr = loop.operations[3].getdescr() new = descr._x86_bridge_frame_depth - assert descr._x86_bridge_param_depth == 0 # the force_spill() forces the stack to grow assert new > previous fail = self.run(loop, 0) @@ -116,10 
+114,8 @@ loop_frame_depth = loop._jitcelltoken.compiled_loop_token.frame_depth bridge = self.attach_bridge(ops, loop, 6) guard_op = loop.operations[6] - assert loop._jitcelltoken.compiled_loop_token.param_depth == 0 # the force_spill() forces the stack to grow assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth - assert guard_op.getdescr()._x86_bridge_param_depth == 0 self.run(loop, 0, 0, 0, 0, 0, 0) assert self.getint(0) == 1 assert self.getint(1) == 20 diff --git a/pypy/jit/backend/x86/test/test_regalloc.py b/pypy/jit/backend/x86/test/test_regalloc.py --- a/pypy/jit/backend/x86/test/test_regalloc.py +++ b/pypy/jit/backend/x86/test/test_regalloc.py @@ -606,23 +606,37 @@ assert self.getints(9) == [0, 1, 1, 1, 1, 1, 1, 1, 1] class TestRegAllocCallAndStackDepth(BaseTestRegalloc): - def expected_param_depth(self, num_args): + def expected_frame_depth(self, num_call_args, num_pushed_input_args=0): # Assumes the arguments are all non-float if IS_X86_32: - return num_args + extra_esp = num_call_args + return extra_esp elif IS_X86_64: - return max(num_args - 6, 0) + # 'num_pushed_input_args' is for X86_64 only + extra_esp = max(num_call_args - 6, 0) + return num_pushed_input_args + extra_esp def test_one_call(self): ops = ''' - [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9] + [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i9b] i10 = call(ConstClass(f1ptr), i0, descr=f1_calldescr) - finish(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9) + finish(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9, i9b) ''' - loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9]) - assert self.getints(10) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9] + loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9, 8]) + assert self.getints(11) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9, 8] clt = loop._jitcelltoken.compiled_loop_token - assert clt.param_depth == self.expected_param_depth(1) + assert clt.frame_depth == self.expected_frame_depth(1, 5) + + def test_one_call_reverse(self): + ops = ''' + [i1, i2, i3, i4, i5, i6, 
i7, i8, i9, i9b, i0] + i10 = call(ConstClass(f1ptr), i0, descr=f1_calldescr) + finish(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9, i9b) + ''' + loop = self.interpret(ops, [7, 9, 9 ,9, 9, 9, 9, 9, 9, 8, 4]) + assert self.getints(11) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9, 8] + clt = loop._jitcelltoken.compiled_loop_token + assert clt.frame_depth == self.expected_frame_depth(1, 6) def test_two_calls(self): ops = ''' @@ -634,7 +648,7 @@ loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9]) assert self.getints(10) == [5*7, 7, 9, 9, 9, 9, 9, 9, 9, 9] clt = loop._jitcelltoken.compiled_loop_token - assert clt.param_depth == self.expected_param_depth(2) + assert clt.frame_depth == self.expected_frame_depth(2, 5) def test_call_many_arguments(self): # NB: The first and last arguments in the call are constants. This @@ -648,25 +662,31 @@ loop = self.interpret(ops, [2, 3, 4, 5, 6, 7, 8, 9]) assert self.getint(0) == 55 clt = loop._jitcelltoken.compiled_loop_token - assert clt.param_depth == self.expected_param_depth(10) + assert clt.frame_depth == self.expected_frame_depth(10) def test_bridge_calls_1(self): ops = ''' [i0, i1] i2 = call(ConstClass(f1ptr), i0, descr=f1_calldescr) - guard_value(i2, 0, descr=fdescr1) [i2, i1] + guard_value(i2, 0, descr=fdescr1) [i2, i0, i1] finish(i1) ''' loop = self.interpret(ops, [4, 7]) assert self.getint(0) == 5 + clt = loop._jitcelltoken.compiled_loop_token + orgdepth = clt.frame_depth + assert orgdepth == self.expected_frame_depth(1, 2) + ops = ''' - [i2, i1] + [i2, i0, i1] i3 = call(ConstClass(f2ptr), i2, i1, descr=f2_calldescr) - finish(i3, descr=fdescr2) + finish(i3, i0, descr=fdescr2) ''' bridge = self.attach_bridge(ops, loop, -2) - assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2) + assert clt.frame_depth == max(orgdepth, self.expected_frame_depth(2, 2)) + assert loop.operations[-2].getdescr()._x86_bridge_frame_depth == \ + self.expected_frame_depth(2, 2) self.run(loop, 4, 7) assert self.getint(0) 
== 5*7 @@ -676,10 +696,14 @@ [i0, i1] i2 = call(ConstClass(f2ptr), i0, i1, descr=f2_calldescr) guard_value(i2, 0, descr=fdescr1) [i2] - finish(i1) + finish(i2) ''' loop = self.interpret(ops, [4, 7]) assert self.getint(0) == 4*7 + clt = loop._jitcelltoken.compiled_loop_token + orgdepth = clt.frame_depth + assert orgdepth == self.expected_frame_depth(2) + ops = ''' [i2] i3 = call(ConstClass(f1ptr), i2, descr=f1_calldescr) @@ -687,7 +711,9 @@ ''' bridge = self.attach_bridge(ops, loop, -2) - assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2) + assert clt.frame_depth == max(orgdepth, self.expected_frame_depth(1)) + assert loop.operations[-2].getdescr()._x86_bridge_frame_depth == \ + self.expected_frame_depth(1) self.run(loop, 4, 7) assert self.getint(0) == 29 diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -371,7 +371,7 @@ operations = [ ResOperation(rop.LABEL, [i0], None, descr=targettoken), - ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("hello"), 0], None), + ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("hello"), 0, 0], None), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), @@ -390,7 +390,7 @@ bridge = [ ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), - ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("bye"), 0], None), + ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("bye"), 0, 0], None), ResOperation(rop.JUMP, [i1b], None, descr=targettoken), ] bridge[1].setfailargs([i1b]) @@ -531,12 +531,12 @@ loop = """ [i0] label(i0, descr=preambletoken) - debug_merge_point('xyz', 0) + debug_merge_point('xyz', 0, 0) i1 = int_add(i0, 1) i2 = int_ge(i1, 10) guard_false(i2) [] label(i1, descr=targettoken) - 
debug_merge_point('xyz', 0) + debug_merge_point('xyz', 0, 0) i11 = int_add(i1, 1) i12 = int_ge(i11, 10) guard_false(i12) [] @@ -569,7 +569,7 @@ loop = """ [i0] label(i0, descr=targettoken) - debug_merge_point('xyz', 0) + debug_merge_point('xyz', 0, 0) i1 = int_add(i0, 1) i2 = int_ge(i1, 10) guard_false(i2) [] diff --git a/pypy/jit/metainterp/graphpage.py b/pypy/jit/metainterp/graphpage.py --- a/pypy/jit/metainterp/graphpage.py +++ b/pypy/jit/metainterp/graphpage.py @@ -169,9 +169,9 @@ if op.getopnum() == rop.DEBUG_MERGE_POINT: jd_sd = self.metainterp_sd.jitdrivers_sd[op.getarg(0).getint()] if jd_sd._get_printable_location_ptr: - s = jd_sd.warmstate.get_location_str(op.getarglist()[2:]) + s = jd_sd.warmstate.get_location_str(op.getarglist()[3:]) s = s.replace(',', '.') # we use comma for argument splitting - op_repr = "debug_merge_point(%d, '%s')" % (op.getarg(1).getint(), s) + op_repr = "debug_merge_point(%d, %d, '%s')" % (op.getarg(1).getint(), op.getarg(2).getint(), s) lines.append(op_repr) if is_interesting_guard(op): tgt = op.getdescr()._debug_suboperations[0] diff --git a/pypy/jit/metainterp/logger.py b/pypy/jit/metainterp/logger.py --- a/pypy/jit/metainterp/logger.py +++ b/pypy/jit/metainterp/logger.py @@ -110,9 +110,9 @@ def repr_of_resop(self, op, ops_offset=None): if op.getopnum() == rop.DEBUG_MERGE_POINT: jd_sd = self.metainterp_sd.jitdrivers_sd[op.getarg(0).getint()] - s = jd_sd.warmstate.get_location_str(op.getarglist()[2:]) + s = jd_sd.warmstate.get_location_str(op.getarglist()[3:]) s = s.replace(',', '.') # we use comma for argument splitting - return "debug_merge_point(%d, '%s')" % (op.getarg(1).getint(), s) + return "debug_merge_point(%d, %d, '%s')" % (op.getarg(1).getint(), op.getarg(2).getint(), s) if ops_offset is None: offset = -1 else: @@ -149,7 +149,7 @@ if target_token.exported_state: for op in target_token.exported_state.inputarg_setup_ops: debug_print(' ' + self.repr_of_resop(op)) - + def _log_operations(self, inputargs, operations, 
ops_offset): if not have_debug_prints(): return diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -9,7 +9,6 @@ from pypy.jit.metainterp.inliner import Inliner from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.metainterp.resume import Snapshot -from pypy.rlib.debug import debug_print import sys, os # FIXME: Introduce some VirtualOptimizer super class instead @@ -121,9 +120,9 @@ limit = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.retrace_limit if cell_token.retraced_count < limit: cell_token.retraced_count += 1 - debug_print('Retracing (%d/%d)' % (cell_token.retraced_count, limit)) + #debug_print('Retracing (%d/%d)' % (cell_token.retraced_count, limit)) else: - debug_print("Retrace count reached, jumping to preamble") + #debug_print("Retrace count reached, jumping to preamble") assert cell_token.target_tokens[0].virtual_state is None jumpop.setdescr(cell_token.target_tokens[0]) self.optimizer.send_extra_operation(jumpop) @@ -273,9 +272,9 @@ not newvalue.is_constant(): op = ResOperation(rop.SAME_AS, [op.result], newresult) self.optimizer._newoperations.append(op) - if self.optimizer.loop.logops: - debug_print(' Falling back to add extra: ' + - self.optimizer.loop.logops.repr_of_resop(op)) + #if self.optimizer.loop.logops: + # debug_print(' Falling back to add extra: ' + + # self.optimizer.loop.logops.repr_of_resop(op)) self.optimizer.flush() self.optimizer.emitting_dissabled = False @@ -341,8 +340,8 @@ if i == len(newoperations): while j < len(jumpargs): a = jumpargs[j] - if self.optimizer.loop.logops: - debug_print('J: ' + self.optimizer.loop.logops.repr_of_arg(a)) + #if self.optimizer.loop.logops: + # debug_print('J: ' + self.optimizer.loop.logops.repr_of_arg(a)) self.import_box(a, inputargs, short_jumpargs, jumpargs) j += 1 else: @@ -353,11 +352,11 @@ if op.is_guard(): args = 
args + op.getfailargs() - if self.optimizer.loop.logops: - debug_print('OP: ' + self.optimizer.loop.logops.repr_of_resop(op)) + #if self.optimizer.loop.logops: + # debug_print('OP: ' + self.optimizer.loop.logops.repr_of_resop(op)) for a in args: - if self.optimizer.loop.logops: - debug_print('A: ' + self.optimizer.loop.logops.repr_of_arg(a)) + #if self.optimizer.loop.logops: + # debug_print('A: ' + self.optimizer.loop.logops.repr_of_arg(a)) self.import_box(a, inputargs, short_jumpargs, jumpargs) i += 1 newoperations = self.optimizer.get_newoperations() @@ -370,18 +369,18 @@ # that is compatible with the virtual state at the start of the loop modifier = VirtualStateAdder(self.optimizer) final_virtual_state = modifier.get_virtual_state(original_jumpargs) - debug_start('jit-log-virtualstate') - virtual_state.debug_print('Closed loop with ') + #debug_start('jit-log-virtualstate') + #virtual_state.debug_print('Closed loop with ') bad = {} if not virtual_state.generalization_of(final_virtual_state, bad): # We ended up with a virtual state that is not compatible # and we are thus unable to jump to the start of the loop - final_virtual_state.debug_print("Bad virtual state at end of loop, ", - bad) - debug_stop('jit-log-virtualstate') + #final_virtual_state.debug_print("Bad virtual state at end of loop, ", + # bad) + #debug_stop('jit-log-virtualstate') raise InvalidLoop - debug_stop('jit-log-virtualstate') + #debug_stop('jit-log-virtualstate') maxguards = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.max_retrace_guards if self.optimizer.emitted_guards > maxguards: @@ -444,9 +443,9 @@ self.ensure_short_op_emitted(self.short_boxes.producer(a), optimizer, seen) - if self.optimizer.loop.logops: - debug_print(' Emitting short op: ' + - self.optimizer.loop.logops.repr_of_resop(op)) + #if self.optimizer.loop.logops: + # debug_print(' Emitting short op: ' + + # self.optimizer.loop.logops.repr_of_resop(op)) optimizer.send_extra_operation(op) seen[op.result] = True @@ 
-527,8 +526,8 @@ args = jumpop.getarglist() modifier = VirtualStateAdder(self.optimizer) virtual_state = modifier.get_virtual_state(args) - debug_start('jit-log-virtualstate') - virtual_state.debug_print("Looking for ") + #debug_start('jit-log-virtualstate') + #virtual_state.debug_print("Looking for ") for target in cell_token.target_tokens: if not target.virtual_state: @@ -537,10 +536,10 @@ extra_guards = [] bad = {} - debugmsg = 'Did not match ' + #debugmsg = 'Did not match ' if target.virtual_state.generalization_of(virtual_state, bad): ok = True - debugmsg = 'Matched ' + #debugmsg = 'Matched ' else: try: cpu = self.optimizer.cpu @@ -549,13 +548,13 @@ extra_guards) ok = True - debugmsg = 'Guarded to match ' + #debugmsg = 'Guarded to match ' except InvalidLoop: pass - target.virtual_state.debug_print(debugmsg, bad) + #target.virtual_state.debug_print(debugmsg, bad) if ok: - debug_stop('jit-log-virtualstate') + #debug_stop('jit-log-virtualstate') values = [self.getvalue(arg) for arg in jumpop.getarglist()] @@ -576,13 +575,13 @@ newop = inliner.inline_op(shop) self.optimizer.send_extra_operation(newop) except InvalidLoop: - debug_print("Inlining failed unexpectedly", - "jumping to preamble instead") + #debug_print("Inlining failed unexpectedly", + # "jumping to preamble instead") assert cell_token.target_tokens[0].virtual_state is None jumpop.setdescr(cell_token.target_tokens[0]) self.optimizer.send_extra_operation(jumpop) return True - debug_stop('jit-log-virtualstate') + #debug_stop('jit-log-virtualstate') return False class ValueImporter(object): diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py --- a/pypy/jit/metainterp/optimizeopt/virtualstate.py +++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py @@ -681,13 +681,14 @@ self.synthetic[op] = True def debug_print(self, logops): - debug_start('jit-short-boxes') - for box, op in self.short_boxes.items(): - if op: - debug_print(logops.repr_of_arg(box) + 
': ' + logops.repr_of_resop(op)) - else: - debug_print(logops.repr_of_arg(box) + ': None') - debug_stop('jit-short-boxes') + if 0: + debug_start('jit-short-boxes') + for box, op in self.short_boxes.items(): + if op: + debug_print(logops.repr_of_arg(box) + ': ' + logops.repr_of_resop(op)) + else: + debug_print(logops.repr_of_arg(box) + ': None') + debug_stop('jit-short-boxes') def operations(self): if not we_are_translated(): # For tests diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -974,9 +974,11 @@ any_operation = len(self.metainterp.history.operations) > 0 jitdriver_sd = self.metainterp.staticdata.jitdrivers_sd[jdindex] self.verify_green_args(jitdriver_sd, greenboxes) - self.debug_merge_point(jitdriver_sd, jdindex, self.metainterp.portal_call_depth, + self.debug_merge_point(jitdriver_sd, jdindex, + self.metainterp.portal_call_depth, + self.metainterp.call_ids[-1], greenboxes) - + if self.metainterp.seen_loop_header_for_jdindex < 0: if not any_operation: return @@ -1028,11 +1030,11 @@ assembler_call=True) raise ChangeFrame - def debug_merge_point(self, jitdriver_sd, jd_index, portal_call_depth, greenkey): + def debug_merge_point(self, jitdriver_sd, jd_index, portal_call_depth, current_call_id, greenkey): # debugging: produce a DEBUG_MERGE_POINT operation loc = jitdriver_sd.warmstate.get_location_str(greenkey) debug_print(loc) - args = [ConstInt(jd_index), ConstInt(portal_call_depth)] + greenkey + args = [ConstInt(jd_index), ConstInt(portal_call_depth), ConstInt(current_call_id)] + greenkey self.metainterp.history.record(rop.DEBUG_MERGE_POINT, args, None) @arguments("box", "label") @@ -1574,11 +1576,14 @@ self.call_pure_results = args_dict_box() self.heapcache = HeapCache() + self.call_ids = [] + self.current_call_id = 0 + def retrace_needed(self, trace): self.partial_trace = trace self.retracing_from = len(self.history.operations) - 1 self.heapcache.reset() 
- + def perform_call(self, jitcode, boxes, greenkey=None): # causes the metainterp to enter the given subfunction @@ -1592,6 +1597,8 @@ def newframe(self, jitcode, greenkey=None): if jitcode.is_portal: self.portal_call_depth += 1 + self.call_ids.append(self.current_call_id) + self.current_call_id += 1 if greenkey is not None and self.is_main_jitcode(jitcode): self.portal_trace_positions.append( (greenkey, len(self.history.operations))) @@ -1608,6 +1615,7 @@ jitcode = frame.jitcode if jitcode.is_portal: self.portal_call_depth -= 1 + self.call_ids.pop() if frame.greenkey is not None and self.is_main_jitcode(jitcode): self.portal_trace_positions.append( (None, len(self.history.operations))) @@ -1976,7 +1984,7 @@ # Found! Compile it as a loop. # raises in case it works -- which is the common case if self.partial_trace: - if start != self.retracing_from: + if start != self.retracing_from: raise SwitchToBlackhole(ABORT_BAD_LOOP) # For now self.compile_loop(original_boxes, live_arg_boxes, start, resumedescr) # creation of the loop was cancelled! 
@@ -2085,7 +2093,7 @@ if not token.target_tokens: return None return token - + def compile_loop(self, original_boxes, live_arg_boxes, start, resume_at_jump_descr): num_green_args = self.jitdriver_sd.num_green_args greenkey = original_boxes[:num_green_args] diff --git a/pypy/jit/metainterp/test/test_logger.py b/pypy/jit/metainterp/test/test_logger.py --- a/pypy/jit/metainterp/test/test_logger.py +++ b/pypy/jit/metainterp/test/test_logger.py @@ -54,7 +54,7 @@ class FakeJitDriver(object): class warmstate(object): get_location_str = staticmethod(lambda args: "dupa") - + class FakeMetaInterpSd: cpu = AbstractCPU() cpu.ts = self.ts @@ -77,7 +77,7 @@ equaloplists(loop.operations, oloop.operations) assert oloop.inputargs == loop.inputargs return logger, loop, oloop - + def test_simple(self): inp = ''' [i0, i1, i2, p3, p4, p5] @@ -116,12 +116,13 @@ def test_debug_merge_point(self): inp = ''' [] - debug_merge_point(0, 0) + debug_merge_point(0, 0, 0) ''' _, loop, oloop = self.reparse(inp, check_equal=False) assert loop.operations[0].getarg(1).getint() == 0 - assert oloop.operations[0].getarg(1)._get_str() == "dupa" - + assert loop.operations[0].getarg(2).getint() == 0 + assert oloop.operations[0].getarg(2)._get_str() == "dupa" + def test_floats(self): inp = ''' [f0] @@ -142,7 +143,7 @@ output = logger.log_loop(loop) assert output.splitlines()[-1] == "jump(i0, descr=)" pure_parse(output) - + def test_guard_descr(self): namespace = {'fdescr': BasicFailDescr()} inp = ''' @@ -154,7 +155,7 @@ output = logger.log_loop(loop) assert output.splitlines()[-1] == "guard_true(i0, descr=) [i0]" pure_parse(output) - + logger = Logger(self.make_metainterp_sd(), guard_number=False) output = logger.log_loop(loop) lastline = output.splitlines()[-1] diff --git a/pypy/jit/metainterp/test/test_warmspot.py b/pypy/jit/metainterp/test/test_warmspot.py --- a/pypy/jit/metainterp/test/test_warmspot.py +++ b/pypy/jit/metainterp/test/test_warmspot.py @@ -13,7 +13,7 @@ class WarmspotTests(object): - + def 
test_basic(self): mydriver = JitDriver(reds=['a'], greens=['i']) @@ -77,16 +77,16 @@ self.meta_interp(f, [123, 10]) assert len(get_stats().locations) >= 4 for loc in get_stats().locations: - assert loc == (0, 123) + assert loc == (0, 0, 123) def test_set_param_enable_opts(self): from pypy.rpython.annlowlevel import llstr, hlstr - + myjitdriver = JitDriver(greens = [], reds = ['n']) class A(object): def m(self, n): return n-1 - + def g(n): while n > 0: myjitdriver.can_enter_jit(n=n) @@ -332,7 +332,7 @@ ts = llhelper translate_support_code = False stats = "stats" - + def get_fail_descr_number(self, d): return -1 @@ -352,7 +352,7 @@ return "not callable" driver = JitDriver(reds = ['red'], greens = ['green']) - + def f(green): red = 0 while red < 10: diff --git a/pypy/jit/tool/test/test_oparser.py b/pypy/jit/tool/test/test_oparser.py --- a/pypy/jit/tool/test/test_oparser.py +++ b/pypy/jit/tool/test/test_oparser.py @@ -146,16 +146,18 @@ def test_debug_merge_point(self): x = ''' [] - debug_merge_point(0, "info") - debug_merge_point(0, 'info') - debug_merge_point(1, ' info') - debug_merge_point(0, '(stuff) #1') + debug_merge_point(0, 0, "info") + debug_merge_point(0, 0, 'info') + debug_merge_point(1, 1, ' info') + debug_merge_point(0, 0, '(stuff) #1') ''' loop = self.parse(x) - assert loop.operations[0].getarg(1)._get_str() == 'info' - assert loop.operations[1].getarg(1)._get_str() == 'info' - assert loop.operations[2].getarg(1)._get_str() == " info" - assert loop.operations[3].getarg(1)._get_str() == "(stuff) #1" + assert loop.operations[0].getarg(2)._get_str() == 'info' + assert loop.operations[0].getarg(1).value == 0 + assert loop.operations[1].getarg(2)._get_str() == 'info' + assert loop.operations[2].getarg(2)._get_str() == " info" + assert loop.operations[2].getarg(1).value == 1 + assert loop.operations[3].getarg(2)._get_str() == "(stuff) #1" def test_descr_with_obj_print(self): diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py 
--- a/pypy/module/pypyjit/interp_resop.py +++ b/pypy/module/pypyjit/interp_resop.py @@ -72,7 +72,7 @@ Set a compiling hook that will be called each time a loop is optimized, but before assembler compilation. This allows to add additional optimizations on Python level. - + The hook will be called with the following signature: hook(jitdriver_name, loop_type, greenkey or guard_number, operations) @@ -121,13 +121,14 @@ ofs = ops_offset.get(op, 0) if op.opnum == rop.DEBUG_MERGE_POINT: jd_sd = jitdrivers_sd[op.getarg(0).getint()] - greenkey = op.getarglist()[2:] + greenkey = op.getarglist()[3:] repr = jd_sd.warmstate.get_location_str(greenkey) w_greenkey = wrap_greenkey(space, jd_sd.jitdriver, greenkey, repr) l_w.append(DebugMergePoint(space, jit_hooks._cast_to_gcref(op), logops.repr_of_resop(op), jd_sd.jitdriver.name, op.getarg(1).getint(), + op.getarg(2).getint(), w_greenkey)) else: l_w.append(WrappedOp(jit_hooks._cast_to_gcref(op), ofs, @@ -164,14 +165,16 @@ llres = res.llbox return WrappedOp(jit_hooks.resop_new(num, args, llres), offset, repr) - at unwrap_spec(repr=str, jd_name=str, call_depth=int) -def descr_new_dmp(space, w_tp, w_args, repr, jd_name, call_depth, w_greenkey): + at unwrap_spec(repr=str, jd_name=str, call_depth=int, call_id=int) +def descr_new_dmp(space, w_tp, w_args, repr, jd_name, call_depth, call_id, + w_greenkey): + args = [space.interp_w(WrappedBox, w_arg).llbox for w_arg in space.listview(w_args)] num = rop.DEBUG_MERGE_POINT return DebugMergePoint(space, jit_hooks.resop_new(num, args, jit_hooks.emptyval()), - repr, jd_name, call_depth, w_greenkey) + repr, jd_name, call_depth, call_id, w_greenkey) class WrappedOp(Wrappable): """ A class representing a single ResOperation, wrapped nicely @@ -206,10 +209,13 @@ jit_hooks.resop_setresult(self.op, box.llbox) class DebugMergePoint(WrappedOp): - def __init__(self, space, op, repr_of_resop, jd_name, call_depth, w_greenkey): + def __init__(self, space, op, repr_of_resop, jd_name, call_depth, call_id, + 
w_greenkey): + WrappedOp.__init__(self, op, -1, repr_of_resop) self.jd_name = jd_name self.call_depth = call_depth + self.call_id = call_id self.w_greenkey = w_greenkey def get_pycode(self, space): @@ -246,6 +252,7 @@ pycode = GetSetProperty(DebugMergePoint.get_pycode), bytecode_no = GetSetProperty(DebugMergePoint.get_bytecode_no), call_depth = interp_attrproperty("call_depth", cls=DebugMergePoint), + call_id = interp_attrproperty("call_id", cls=DebugMergePoint), jitdriver_name = GetSetProperty(DebugMergePoint.get_jitdriver_name), ) DebugMergePoint.acceptable_as_base_class = False diff --git a/pypy/module/pypyjit/test/test_jit_hook.py b/pypy/module/pypyjit/test/test_jit_hook.py --- a/pypy/module/pypyjit/test/test_jit_hook.py +++ b/pypy/module/pypyjit/test/test_jit_hook.py @@ -54,7 +54,7 @@ oplist = parse(""" [i1, i2, p2] i3 = int_add(i1, i2) - debug_merge_point(0, 0, 0, 0, ConstPtr(ptr0)) + debug_merge_point(0, 0, 0, 0, 0, ConstPtr(ptr0)) guard_nonnull(p2) [] guard_true(i3) [] """, namespace={'ptr0': code_gcref}).operations @@ -87,7 +87,7 @@ def interp_on_abort(): pypy_hooks.on_abort(ABORT_TOO_LONG, pypyjitdriver, greenkey, 'blah') - + cls.w_on_compile = space.wrap(interp2app(interp_on_compile)) cls.w_on_compile_bridge = space.wrap(interp2app(interp_on_compile_bridge)) cls.w_on_abort = space.wrap(interp2app(interp_on_abort)) @@ -105,7 +105,7 @@ def hook(name, looptype, tuple_or_guard_no, ops, asmstart, asmlen): all.append((name, looptype, tuple_or_guard_no, ops)) - + self.on_compile() pypyjit.set_compile_hook(hook) assert not all @@ -123,6 +123,7 @@ assert dmp.pycode is self.f.func_code assert dmp.greenkey == (self.f.func_code, 0, False) assert dmp.call_depth == 0 + assert dmp.call_id == 0 assert int_add.name == 'int_add' assert int_add.num == self.int_add_num self.on_compile_bridge() @@ -151,18 +152,18 @@ def test_non_reentrant(self): import pypyjit l = [] - + def hook(*args): l.append(None) self.on_compile() self.on_compile_bridge() - + 
pypyjit.set_compile_hook(hook) self.on_compile() assert len(l) == 1 # and did not crash self.on_compile_bridge() assert len(l) == 2 # and did not crash - + def test_on_compile_types(self): import pypyjit l = [] @@ -182,7 +183,7 @@ def hook(jitdriver_name, greenkey, reason): l.append((jitdriver_name, reason)) - + pypyjit.set_abort_hook(hook) self.on_abort() assert l == [('pypyjit', 'ABORT_TOO_LONG')] @@ -224,13 +225,14 @@ def f(): pass - op = DebugMergePoint([Box(0)], 'repr', 'pypyjit', 2, (f.func_code, 0, 0)) + op = DebugMergePoint([Box(0)], 'repr', 'pypyjit', 2, 3, (f.func_code, 0, 0)) assert op.bytecode_no == 0 assert op.pycode is f.func_code assert repr(op) == 'repr' assert op.jitdriver_name == 'pypyjit' assert op.num == self.dmp_num assert op.call_depth == 2 - op = DebugMergePoint([Box(0)], 'repr', 'notmain', 5, ('str',)) + assert op.call_id == 3 + op = DebugMergePoint([Box(0)], 'repr', 'notmain', 5, 4, ('str',)) raises(AttributeError, 'op.pycode') assert op.call_depth == 5 diff --git a/pypy/module/select/test/test_ztranslation.py b/pypy/module/select/test/test_ztranslation.py new file mode 100644 --- /dev/null +++ b/pypy/module/select/test/test_ztranslation.py @@ -0,0 +1,5 @@ + +from pypy.objspace.fake.checkmodule import checkmodule + +def test_select_translates(): + checkmodule('select') diff --git a/pypy/rpython/lltypesystem/rstr.py b/pypy/rpython/lltypesystem/rstr.py --- a/pypy/rpython/lltypesystem/rstr.py +++ b/pypy/rpython/lltypesystem/rstr.py @@ -62,6 +62,14 @@ @jit.oopspec('stroruni.copy_contents(src, dst, srcstart, dststart, length)') @enforceargs(None, None, int, int, int) def copy_string_contents(src, dst, srcstart, dststart, length): + """Copies 'length' characters from the 'src' string to the 'dst' + string, starting at position 'srcstart' and 'dststart'.""" + # xxx Warning: don't try to do this at home. It relies on a lot + # of details to be sure that it works correctly in all cases. 
+ # Notably: no GC operation at all from the first cast_ptr_to_adr() + # because it might move the strings. The keepalive_until_here() + # are obscurely essential to make sure that the strings stay alive + # longer than the raw_memcopy(). assert srcstart >= 0 assert dststart >= 0 assert length >= 0 diff --git a/pypy/rpython/rclass.py b/pypy/rpython/rclass.py --- a/pypy/rpython/rclass.py +++ b/pypy/rpython/rclass.py @@ -364,6 +364,8 @@ def get_ll_hash_function(self): return ll_inst_hash + get_ll_fasthash_function = get_ll_hash_function + def rtype_type(self, hop): raise NotImplementedError diff --git a/pypy/rpython/test/test_rdict.py b/pypy/rpython/test/test_rdict.py --- a/pypy/rpython/test/test_rdict.py +++ b/pypy/rpython/test/test_rdict.py @@ -449,6 +449,21 @@ assert r_AB_dic.lowleveltype == r_BA_dic.lowleveltype + def test_identity_hash_is_fast(self): + class A(object): + pass + + def f(): + return {A(): 1} + + t = TranslationContext() + s = t.buildannotator().build_types(f, []) + rtyper = t.buildrtyper() + rtyper.specialize() + + r_dict = rtyper.getrepr(s) + assert not hasattr(r_dict.lowleveltype.TO.entries.TO.OF, "f_hash") + def test_tuple_dict(self): def f(i): d = {} diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -93,7 +93,7 @@ end_index += 1 op.asm = '\n'.join([asm[i][1] for i in range(asm_index, end_index)]) return loop - + def _asm_disassemble(self, d, origin_addr, tp): from pypy.jit.backend.x86.tool.viewcode import machine_code_dump return list(machine_code_dump(d, tp, origin_addr)) @@ -109,7 +109,7 @@ if not argspec.strip(): return [], None if opname == 'debug_merge_point': - return argspec.split(", ", 1), None + return argspec.split(", ", 2), None else: args = argspec.split(', ') descr = None @@ -159,7 +159,7 @@ for op in operations: if op.name == 'debug_merge_point': self.inline_level = int(op.args[0]) - 
self.parse_code_data(op.args[1][1:-1]) + self.parse_code_data(op.args[2][1:-1]) break else: self.inline_level = 0 @@ -417,7 +417,7 @@ part.descr = descrs[i] part.comment = trace.comment parts.append(part) - + return parts def parse_log_counts(input, loops): diff --git a/pypy/tool/jitlogparser/test/test_parser.py b/pypy/tool/jitlogparser/test/test_parser.py --- a/pypy/tool/jitlogparser/test/test_parser.py +++ b/pypy/tool/jitlogparser/test/test_parser.py @@ -29,7 +29,7 @@ def test_parse_non_code(): ops = parse(''' [] - debug_merge_point(0, "SomeRandomStuff") + debug_merge_point(0, 0, "SomeRandomStuff") ''') res = Function.from_operations(ops.operations, LoopStorage()) assert len(res.chunks) == 1 @@ -39,10 +39,10 @@ ops = parse(''' [i0] label() - debug_merge_point(0, " #10 ADD") - debug_merge_point(0, " #11 SUB") + debug_merge_point(0, 0, " #10 ADD") + debug_merge_point(0, 0, " #11 SUB") i1 = int_add(i0, 1) - debug_merge_point(0, " #11 SUB") + debug_merge_point(0, 0, " #11 SUB") i2 = int_add(i1, 1) ''') res = Function.from_operations(ops.operations, LoopStorage(), loopname='') @@ -57,12 +57,12 @@ def test_inlined_call(): ops = parse(""" [] - debug_merge_point(0, ' #28 CALL_FUNCTION') + debug_merge_point(0, 0, ' #28 CALL_FUNCTION') i18 = getfield_gc(p0, descr=) - debug_merge_point(1, ' #0 LOAD_FAST') - debug_merge_point(1, ' #3 LOAD_CONST') - debug_merge_point(1, ' #7 RETURN_VALUE') - debug_merge_point(0, ' #31 STORE_FAST') + debug_merge_point(1, 1, ' #0 LOAD_FAST') + debug_merge_point(1, 1, ' #3 LOAD_CONST') + debug_merge_point(1, 1, ' #7 RETURN_VALUE') + debug_merge_point(0, 0, ' #31 STORE_FAST') """) res = Function.from_operations(ops.operations, LoopStorage()) assert len(res.chunks) == 3 # two chunks + inlined call @@ -75,10 +75,10 @@ def test_name(): ops = parse(''' [i0] - debug_merge_point(0, " #10 ADD") - debug_merge_point(0, " #11 SUB") + debug_merge_point(0, 0, " #10 ADD") + debug_merge_point(0, 0, " #11 SUB") i1 = int_add(i0, 1) - debug_merge_point(0, " #11 
SUB") + debug_merge_point(0, 0, " #11 SUB") i2 = int_add(i1, 1) ''') res = Function.from_operations(ops.operations, LoopStorage()) @@ -92,10 +92,10 @@ ops = parse(''' [i0] i3 = int_add(i0, 1) - debug_merge_point(0, " #10 ADD") - debug_merge_point(0, " #11 SUB") + debug_merge_point(0, 0, " #10 ADD") + debug_merge_point(0, 0, " #11 SUB") i1 = int_add(i0, 1) - debug_merge_point(0, " #11 SUB") + debug_merge_point(0, 0, " #11 SUB") i2 = int_add(i1, 1) ''') res = Function.from_operations(ops.operations, LoopStorage()) @@ -105,10 +105,10 @@ fname = str(py.path.local(__file__).join('..', 'x.py')) ops = parse(''' [i0, i1] - debug_merge_point(0, " #0 LOAD_FAST") - debug_merge_point(0, " #3 LOAD_FAST") - debug_merge_point(0, " #6 BINARY_ADD") - debug_merge_point(0, " #7 RETURN_VALUE") + debug_merge_point(0, 0, " #0 LOAD_FAST") + debug_merge_point(0, 0, " #3 LOAD_FAST") + debug_merge_point(0, 0, " #6 BINARY_ADD") + debug_merge_point(0, 0, " #7 RETURN_VALUE") ''' % locals()) res = Function.from_operations(ops.operations, LoopStorage()) assert res.chunks[1].lineno == 3 @@ -119,11 +119,11 @@ fname = str(py.path.local(__file__).join('..', 'x.py')) ops = parse(''' [i0, i1] - debug_merge_point(0, " #9 LOAD_FAST") - debug_merge_point(0, " #12 LOAD_CONST") - debug_merge_point(0, " #22 LOAD_CONST") - debug_merge_point(0, " #28 LOAD_CONST") - debug_merge_point(0, " #6 SETUP_LOOP") + debug_merge_point(0, 0, " #9 LOAD_FAST") + debug_merge_point(0, 0, " #12 LOAD_CONST") + debug_merge_point(0, 0, " #22 LOAD_CONST") + debug_merge_point(0, 0, " #28 LOAD_CONST") + debug_merge_point(0, 0, " #6 SETUP_LOOP") ''' % locals()) res = Function.from_operations(ops.operations, LoopStorage()) assert res.linerange == (7, 9) @@ -135,7 +135,7 @@ fname = str(py.path.local(__file__).join('..', 'x.py')) ops = parse(""" [p6, p1] - debug_merge_point(0, ' #17 FOR_ITER') + debug_merge_point(0, 0, ' #17 FOR_ITER') guard_class(p6, 144264192, descr=) p12 = getfield_gc(p6, descr=) """ % locals()) @@ -181,7 +181,7 @@ 
def test_parsing_strliteral(): loop = parse(""" - debug_merge_point(0, 'StrLiteralSearch at 11/51 [17, 8, 3, 1, 1, 1, 1, 51, 0, 19, 51, 1]') + debug_merge_point(0, 0, 'StrLiteralSearch at 11/51 [17, 8, 3, 1, 1, 1, 1, 51, 0, 19, 51, 1]') """) ops = Function.from_operations(loop.operations, LoopStorage()) chunk = ops.chunks[0] @@ -193,12 +193,12 @@ loop = parse(""" # Loop 0 : loop with 19 ops [p0, p1, p2, p3, i4] - debug_merge_point(0, ' #15 COMPARE_OP') + debug_merge_point(0, 0, ' #15 COMPARE_OP') +166: i6 = int_lt(i4, 10000) guard_true(i6, descr=) [p1, p0, p2, p3, i4] - debug_merge_point(0, ' #27 INPLACE_ADD') + debug_merge_point(0, 0, ' #27 INPLACE_ADD') +179: i8 = int_add(i4, 1) - debug_merge_point(0, ' #31 JUMP_ABSOLUTE') + debug_merge_point(0, 0, ' #31 JUMP_ABSOLUTE') +183: i10 = getfield_raw(40564608, descr=) +191: i12 = int_sub(i10, 1) +195: setfield_raw(40564608, i12, descr=) @@ -287,8 +287,8 @@ def test_parse_nonpython(): loop = parse(""" [] - debug_merge_point(0, 'random') - debug_merge_point(0, ' #15 COMPARE_OP') + debug_merge_point(0, 0, 'random') + debug_merge_point(0, 0, ' #15 COMPARE_OP') """) f = Function.from_operations(loop.operations, LoopStorage()) assert f.chunks[-1].filename == 'x.py' diff --git a/pypy/translator/c/src/libffi_msvc/ffi.c b/pypy/translator/c/src/libffi_msvc/ffi.c --- a/pypy/translator/c/src/libffi_msvc/ffi.c +++ b/pypy/translator/c/src/libffi_msvc/ffi.c @@ -71,31 +71,31 @@ switch ((*p_arg)->type) { case FFI_TYPE_SINT8: - *(signed int *) argp = (signed int)*(SINT8 *)(* p_argv); + *(signed int *) argp = (signed int)*(ffi_SINT8 *)(* p_argv); break; case FFI_TYPE_UINT8: - *(unsigned int *) argp = (unsigned int)*(UINT8 *)(* p_argv); + *(unsigned int *) argp = (unsigned int)*(ffi_UINT8 *)(* p_argv); break; case FFI_TYPE_SINT16: - *(signed int *) argp = (signed int)*(SINT16 *)(* p_argv); + *(signed int *) argp = (signed int)*(ffi_SINT16 *)(* p_argv); break; case FFI_TYPE_UINT16: - *(unsigned int *) argp = (unsigned int)*(UINT16 *)(* 
p_argv); + *(unsigned int *) argp = (unsigned int)*(ffi_UINT16 *)(* p_argv); break; case FFI_TYPE_SINT32: - *(signed int *) argp = (signed int)*(SINT32 *)(* p_argv); + *(signed int *) argp = (signed int)*(ffi_SINT32 *)(* p_argv); break; case FFI_TYPE_UINT32: - *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + *(unsigned int *) argp = (unsigned int)*(ffi_UINT32 *)(* p_argv); break; case FFI_TYPE_STRUCT: - *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + *(unsigned int *) argp = (unsigned int)*(ffi_UINT32 *)(* p_argv); break; default: diff --git a/pypy/translator/c/src/libffi_msvc/ffi_common.h b/pypy/translator/c/src/libffi_msvc/ffi_common.h --- a/pypy/translator/c/src/libffi_msvc/ffi_common.h +++ b/pypy/translator/c/src/libffi_msvc/ffi_common.h @@ -56,16 +56,18 @@ } extended_cif; /* Terse sized type definitions. */ -typedef unsigned int UINT8 __attribute__((__mode__(__QI__))); -typedef signed int SINT8 __attribute__((__mode__(__QI__))); -typedef unsigned int UINT16 __attribute__((__mode__(__HI__))); -typedef signed int SINT16 __attribute__((__mode__(__HI__))); -typedef unsigned int UINT32 __attribute__((__mode__(__SI__))); -typedef signed int SINT32 __attribute__((__mode__(__SI__))); -typedef unsigned int UINT64 __attribute__((__mode__(__DI__))); -typedef signed int SINT64 __attribute__((__mode__(__DI__))); +/* Fix for PyPy: these names are fine, but are bound to conflict with + * some other name from somewhere else :-( Added a 'ffi_' prefix. 
*/ +typedef unsigned int ffi_UINT8 __attribute__((__mode__(__QI__))); +typedef signed int ffi_SINT8 __attribute__((__mode__(__QI__))); +typedef unsigned int ffi_UINT16 __attribute__((__mode__(__HI__))); +typedef signed int ffi_SINT16 __attribute__((__mode__(__HI__))); +typedef unsigned int ffi_UINT32 __attribute__((__mode__(__SI__))); +typedef signed int ffi_SINT32 __attribute__((__mode__(__SI__))); +typedef unsigned int ffi_UINT64 __attribute__((__mode__(__DI__))); +typedef signed int ffi_SINT64 __attribute__((__mode__(__DI__))); -typedef float FLOAT32; +typedef float ffi_FLOAT32; #ifdef __cplusplus From noreply at buildbot.pypy.org Sun Mar 4 17:09:02 2012 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Mar 2012 17:09:02 +0100 (CET) Subject: [pypy-commit] pypy continulet-jit-2: Optionally save the xmm regs around calls to malloc() and Message-ID: <20120304160902.0932E82008@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-jit-2 Changeset: r53181:ad6a18612ff0 Date: 2012-03-04 17:08 +0100 http://bitbucket.org/pypy/pypy/changeset/ad6a18612ff0/ Log: Optionally save the xmm regs around calls to malloc() and realloc(). We'll see if it should be enabled by default. diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -67,6 +67,11 @@ # 'e'ntry point ('number', lltype.Signed)) +# It's probably fine to assume that malloc() and realloc() don't touch the +# xmm registers, but the following constant can be used to change that. +MALLOC_REALLOC_USE_XMM_REGISTERS = False + + class Assembler386(object): _regalloc = None _output_loop_log = None @@ -284,6 +289,7 @@ def _build_realloc_bridge_slowpath(self): from pypy.jit.backend.x86.regalloc import gpr_reg_mgr_cls + from pypy.jit.backend.x86.regalloc import xmm_reg_mgr_cls # This defines a function called at the start of a bridge to # increase the size of the off-stack frame. 
It must preserve # all registers. @@ -307,8 +313,17 @@ # will save some registers in the caller's frame, in the # temporary OFFSTACK_REAL_FRAME words. save_regs = gpr_reg_mgr_cls.save_around_call_regs + if self.cpu.supports_floats and ( + MALLOC_REALLOC_USE_XMM_REGISTERS or not we_are_translated()): + save_xmm_regs = xmm_reg_mgr_cls.save_around_call_regs + else: + save_xmm_regs = [] + # if IS_X86_32: - assert OFFSTACK_REAL_FRAME >= 2 + assert OFFSTACK_REAL_FRAME + 1 >= 3 + 2 * len(save_xmm_regs) + # \_ size incl retaddr _/ \___ max ofs from esp ___/ + for i in range(len(save_xmm_regs)): + self.mc.MOVSD_sx((3+2*i) * WORD, save_xmm_regs[i].value) assert len(save_regs) == 3 # there are 3 PUSHes in total here. With the retaddr, the # stack remains aligned. @@ -324,15 +339,21 @@ self.mc.PUSH_r(eax.value) # elif IS_X86_64: - assert OFFSTACK_REAL_FRAME >= len(save_regs) - 1 - # there is only 1 PUSH in total here. With the retaddr, the + NUMPUSHES = 5 # an odd number + ofsbase = 1 + len(save_regs) - NUMPUSHES + assert OFFSTACK_REAL_FRAME + 1 >= ofsbase + len(save_xmm_regs) + # \_ size incl retaddr _/ \____ max ofs from esp ____/ + for i in range(len(save_xmm_regs)): + self.mc.MOVSD_sx((ofsbase+i) * WORD, save_xmm_regs[i].value) + # there are NUMPUSHES PUSHes in total here. With the retaddr, the # stack remains aligned. 
- for j in range(len(save_regs)-1, 0, -1): - self.mc.MOV_sr(j*WORD, save_regs[j].value) - self.mc.PUSH_r(save_regs[0].value) + for j in range(NUMPUSHES, len(save_regs)): + self.mc.MOV_sr((j-NUMPUSHES+1)*WORD, save_regs[j].value) + for j in range(NUMPUSHES): + self.mc.PUSH_r(save_regs[j].value) # # fish fish fish (see above) - self.mc.MOV_rs(esi.value, WORD) # load the retaddr + self.mc.MOV_rs(esi.value, NUMPUSHES*WORD) # load the retaddr self.mc.MOV32_rm(esi.value, (esi.value, -self.realloc_bridge_ofs)) # @@ -346,7 +367,9 @@ # fix the OFFSTACK_SIZE_ALLOCATED in the updated memory location if IS_X86_32: self.mc.ADD_ri(esp.value, 2*WORD) - self.mc.MOV_rs(eax.value, WORD) # load the retaddr again + self.mc.MOV_rs(eax.value, WORD) # load the retaddr again + elif IS_X86_64: + self.mc.MOV_rs(eax.value, NUMPUSHES*WORD) # load the retaddr again self.mc.MOV32_rm(eax.value, (eax.value, -self.realloc_bridge_ofs)) self.mc.MOV_br(WORD * OFFSTACK_SIZE_ALLOCATED, eax.value) # @@ -359,10 +382,15 @@ self.mc.POP_r(save_regs[2].value) self.mc.MOV_rs(save_regs[1].value, 2*WORD) self.mc.MOV_rs(save_regs[0].value, 1*WORD) + for i in range(len(save_xmm_regs)): + self.mc.MOVSD_xs(save_xmm_regs[i].value, (3+2*i) * WORD) elif IS_X86_64: - self.mc.POP_r(save_regs[0].value) - for j in range(len(save_regs)-1, 0, -1): - self.mc.MOV_rs(save_regs[j].value, j*WORD) + for j in range(NUMPUSHES-1, -1, -1): + self.mc.POP_r(save_regs[j].value) + for j in range(NUMPUSHES, len(save_regs)): + self.mc.MOV_rs(save_regs[j].value, (j-NUMPUSHES+1)*WORD) + for i in range(len(save_xmm_regs)): + self.mc.MOVSD_xs(save_xmm_regs[i].value, (ofsbase+i) * WORD) self.mc.RET() # rawstart = self.mc.materialize(self.cpu.asmmemmgr, []) @@ -852,10 +880,16 @@ elif IS_X86_64: # XXX need to save and restore all possible argument registers save_regs = [r9, r8, ecx, edx, esi, edi] - assert OFFSTACK_REAL_FRAME > len(save_regs) + if MALLOC_REALLOC_USE_XMM_REGISTERS or not we_are_translated(): + save_xmm_regs = 
[xmm7,xmm6,xmm5,xmm4,xmm3,xmm2,xmm1,xmm0] + else: + save_xmm_regs = [] + assert OFFSTACK_REAL_FRAME > len(save_regs) + len(save_xmm_regs) for i in range(len(save_regs)): self.mc.MOV_sr(WORD * (1 + i), save_regs[i].value) - # assume that the XMM registers are safe. + base = 1 + len(save_regs) + for i in range(len(save_xmm_regs)): + self.mc.MOVSD_sx(WORD * (base + i), save_xmm_regs[i].value) self.mc.MOV_riu32(edi.value, 0x77777777) # temporary frame_size_pos = self.mc.get_relative_pos() - 4 # @@ -882,6 +916,9 @@ # reload the original value of the save_regs (including edi) for i in range(len(save_regs)): self.mc.MOV_rs(save_regs[i].value, WORD * (1 + i)) + base = 1 + len(save_regs) + for i in range(len(save_xmm_regs)): + self.mc.MOVSD_xs(save_xmm_regs[i].value, WORD * (base + i)) # # save in the freshly malloc'ed block the original value of # all other callee-saved registers From noreply at buildbot.pypy.org Sun Mar 4 17:31:50 2012 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Mar 2012 17:31:50 +0100 (CET) Subject: [pypy-commit] pypy continulet-jit-2: Fix: should not have killed needed_extra_stack_locations() from 'default'. Message-ID: <20120304163150.AAC1182008@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-jit-2 Changeset: r53182:344d1fecc21d Date: 2012-03-04 17:29 +0100 http://bitbucket.org/pypy/pypy/changeset/344d1fecc21d/ Log: Fix: should not have killed needed_extra_stack_locations() from 'default'. 
diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -817,7 +817,7 @@ regalloc.walk_operations(operations) if we_are_translated() or self.cpu.dont_keepalive_stuff: self._regalloc = None # else keep it around for debugging - frame_depth = regalloc.fm.get_frame_depth() + frame_depth = regalloc.get_final_frame_depth() jump_target_descr = regalloc.jump_target_descr if jump_target_descr is not None: target_frame_depth = jump_target_descr._x86_clt.frame_depth diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -205,10 +205,18 @@ self.min_bytes_before_label = max(self.min_bytes_before_label, at_least_position) - @staticmethod - def reserve_param(n): + def needed_extra_stack_locations(self, n): assert n <= OFFSTACK_REAL_FRAME - #self.param_depth = max(self.param_depth, n) + self.min_frame_depth = self.fm.get_frame_depth() + return + # call *after* you needed extra stack locations: (%esp), (%esp+4)... 
+ min_frame_depth = self.fm.get_frame_depth() + n + if min_frame_depth > self.min_frame_depth: + self.min_frame_depth = min_frame_depth + + def get_final_frame_depth(self): + self.needed_extra_stack_locations(0) # update min_frame_depth + return self.min_frame_depth def _set_initial_bindings(self, inputargs): if IS_X86_64: From noreply at buildbot.pypy.org Sun Mar 4 17:41:15 2012 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 4 Mar 2012 17:41:15 +0100 (CET) Subject: [pypy-commit] pypy default: cpyext: Steal from CPython the implementation of TLS functions: Message-ID: <20120304164115.3568282008@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: r53183:fc46fb928ddb Date: 2012-03-04 17:29 +0100 http://bitbucket.org/pypy/pypy/changeset/fc46fb928ddb/ Log: cpyext: Steal from CPython the implementation of TLS functions: PyThread_set_key_value and friends. diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -352,6 +352,9 @@ 'PyObject_AsReadBuffer', 'PyObject_AsWriteBuffer', 'PyObject_CheckReadBuffer', 'PyOS_getsig', 'PyOS_setsig', + 'PyThread_create_key', 'PyThread_delete_key', 'PyThread_set_key_value', + 'PyThread_get_key_value', 'PyThread_delete_key_value', + 'PyThread_ReInitTLS', 'PyStructSequence_InitType', 'PyStructSequence_New', ] @@ -617,6 +620,10 @@ lambda space: init_pycobject(), lambda space: init_capsule(), ]) + from pypy.module.posix.interp_posix import add_fork_hook + reinit_tls = rffi.llexternal('PyThread_ReInitTLS', [], lltype.Void, + compilation_info=eci) + add_fork_hook('child', reinit_tls) def init_function(func): INIT_FUNCTIONS.append(func) @@ -926,6 +933,7 @@ source_dir / "structseq.c", source_dir / "capsule.c", source_dir / "pysignals.c", + source_dir / "thread.c", ], separate_module_sources=separate_module_sources, export_symbols=export_symbols_eci, diff --git a/pypy/module/cpyext/include/pythread.h 
b/pypy/module/cpyext/include/pythread.h --- a/pypy/module/cpyext/include/pythread.h +++ b/pypy/module/cpyext/include/pythread.h @@ -3,8 +3,26 @@ #define WITH_THREAD +#ifdef __cplusplus +extern "C" { +#endif + typedef void *PyThread_type_lock; #define WAIT_LOCK 1 #define NOWAIT_LOCK 0 +/* Thread Local Storage (TLS) API */ +PyAPI_FUNC(int) PyThread_create_key(void); +PyAPI_FUNC(void) PyThread_delete_key(int); +PyAPI_FUNC(int) PyThread_set_key_value(int, void *); +PyAPI_FUNC(void *) PyThread_get_key_value(int); +PyAPI_FUNC(void) PyThread_delete_key_value(int key); + +/* Cleanup after a fork */ +PyAPI_FUNC(void) PyThread_ReInitTLS(void); + +#ifdef __cplusplus +} #endif + +#endif diff --git a/pypy/module/cpyext/src/thread.c b/pypy/module/cpyext/src/thread.c new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/src/thread.c @@ -0,0 +1,313 @@ +#include +#include "pythread.h" + +/* ------------------------------------------------------------------------ +Per-thread data ("key") support. + +Use PyThread_create_key() to create a new key. This is typically shared +across threads. + +Use PyThread_set_key_value(thekey, value) to associate void* value with +thekey in the current thread. Each thread has a distinct mapping of thekey +to a void* value. Caution: if the current thread already has a mapping +for thekey, value is ignored. + +Use PyThread_get_key_value(thekey) to retrieve the void* value associated +with thekey in the current thread. This returns NULL if no value is +associated with thekey in the current thread. + +Use PyThread_delete_key_value(thekey) to forget the current thread's associated +value for thekey. PyThread_delete_key(thekey) forgets the values associated +with thekey across *all* threads. + +While some of these functions have error-return values, none set any +Python exception. + +None of the functions does memory management on behalf of the void* values. +You need to allocate and deallocate them yourself. 
If the void* values +happen to be PyObject*, these functions don't do refcount operations on +them either. + +The GIL does not need to be held when calling these functions; they supply +their own locking. This isn't true of PyThread_create_key(), though (see +next paragraph). + +There's a hidden assumption that PyThread_create_key() will be called before +any of the other functions are called. There's also a hidden assumption +that calls to PyThread_create_key() are serialized externally. +------------------------------------------------------------------------ */ + +#ifdef MS_WINDOWS +#include + +/* use native Windows TLS functions */ +#define Py_HAVE_NATIVE_TLS + +int +PyThread_create_key(void) +{ + return (int) TlsAlloc(); +} + +void +PyThread_delete_key(int key) +{ + TlsFree(key); +} + +/* We must be careful to emulate the strange semantics implemented in thread.c, + * where the value is only set if it hasn't been set before. + */ +int +PyThread_set_key_value(int key, void *value) +{ + BOOL ok; + void *oldvalue; + + assert(value != NULL); + oldvalue = TlsGetValue(key); + if (oldvalue != NULL) + /* ignore value if already set */ + return 0; + ok = TlsSetValue(key, value); + if (!ok) + return -1; + return 0; +} + +void * +PyThread_get_key_value(int key) +{ + /* because TLS is used in the Py_END_ALLOW_THREAD macro, + * it is necessary to preserve the windows error state, because + * it is assumed to be preserved across the call to the macro. + * Ideally, the macro should be fixed, but it is simpler to + * do it here. + */ + DWORD error = GetLastError(); + void *result = TlsGetValue(key); + SetLastError(error); + return result; +} + +void +PyThread_delete_key_value(int key) +{ + /* NULL is used as "key missing", and it is also the default + * given by TlsGetValue() if nothing has been set yet. + */ + TlsSetValue(key, NULL); +} + +/* reinitialization of TLS is not necessary after fork when using + * the native TLS functions. 
And forking isn't supported on Windows either. + */ +void +PyThread_ReInitTLS(void) +{} + +#else /* MS_WINDOWS */ + +/* A singly-linked list of struct key objects remembers all the key->value + * associations. File static keyhead heads the list. keymutex is used + * to enforce exclusion internally. + */ +struct key { + /* Next record in the list, or NULL if this is the last record. */ + struct key *next; + + /* The thread id, according to PyThread_get_thread_ident(). */ + long id; + + /* The key and its associated value. */ + int key; + void *value; +}; + +static struct key *keyhead = NULL; +static PyThread_type_lock keymutex = NULL; +static int nkeys = 0; /* PyThread_create_key() hands out nkeys+1 next */ + +/* Internal helper. + * If the current thread has a mapping for key, the appropriate struct key* + * is returned. NB: value is ignored in this case! + * If there is no mapping for key in the current thread, then: + * If value is NULL, NULL is returned. + * Else a mapping of key to value is created for the current thread, + * and a pointer to a new struct key* is returned; except that if + * malloc() can't find room for a new struct key*, NULL is returned. + * So when value==NULL, this acts like a pure lookup routine, and when + * value!=NULL, this acts like dict.setdefault(), returning an existing + * mapping if one exists, else creating a new mapping. + * + * Caution: this used to be too clever, trying to hold keymutex only + * around the "p->next = keyhead; keyhead = p" pair. That allowed + * another thread to mutate the list, via key deletion, concurrent with + * find_key() crawling over the list. Hilarity ensued. For example, when + * the for-loop here does "p = p->next", p could end up pointing at a + * record that PyThread_delete_key_value() was concurrently free()'ing. + * That could lead to anything, from failing to find a key that exists, to + * segfaults. Now we lock the whole routine. 
+ */ +static struct key * +find_key(int key, void *value) +{ + struct key *p, *prev_p; + long id = PyThread_get_thread_ident(); + + if (!keymutex) + return NULL; + PyThread_acquire_lock(keymutex, 1); + prev_p = NULL; + for (p = keyhead; p != NULL; p = p->next) { + if (p->id == id && p->key == key) + goto Done; + /* Sanity check. These states should never happen but if + * they do we must abort. Otherwise we'll end up spinning in + * in a tight loop with the lock held. A similar check is done + * in pystate.c tstate_delete_common(). */ + if (p == prev_p) + Py_FatalError("tls find_key: small circular list(!)"); + prev_p = p; + if (p->next == keyhead) + Py_FatalError("tls find_key: circular list(!)"); + } + if (value == NULL) { + assert(p == NULL); + goto Done; + } + p = (struct key *)malloc(sizeof(struct key)); + if (p != NULL) { + p->id = id; + p->key = key; + p->value = value; + p->next = keyhead; + keyhead = p; + } + Done: + PyThread_release_lock(keymutex); + return p; +} + +/* Return a new key. This must be called before any other functions in + * this family, and callers must arrange to serialize calls to this + * function. No violations are detected. + */ +int +PyThread_create_key(void) +{ + /* All parts of this function are wrong if it's called by multiple + * threads simultaneously. + */ + if (keymutex == NULL) + keymutex = PyThread_allocate_lock(); + return ++nkeys; +} + +/* Forget the associations for key across *all* threads. */ +void +PyThread_delete_key(int key) +{ + struct key *p, **q; + + PyThread_acquire_lock(keymutex, 1); + q = &keyhead; + while ((p = *q) != NULL) { + if (p->key == key) { + *q = p->next; + free((void *)p); + /* NB This does *not* free p->value! */ + } + else + q = &p->next; + } + PyThread_release_lock(keymutex); +} + +/* Confusing: If the current thread has an association for key, + * value is ignored, and 0 is returned. Else an attempt is made to create + * an association of key to value for the current thread. 
0 is returned + * if that succeeds, but -1 is returned if there's not enough memory + * to create the association. value must not be NULL. + */ +int +PyThread_set_key_value(int key, void *value) +{ + struct key *p; + + assert(value != NULL); + p = find_key(key, value); + if (p == NULL) + return -1; + else + return 0; +} + +/* Retrieve the value associated with key in the current thread, or NULL + * if the current thread doesn't have an association for key. + */ +void * +PyThread_get_key_value(int key) +{ + struct key *p = find_key(key, NULL); + + if (p == NULL) + return NULL; + else + return p->value; +} + +/* Forget the current thread's association for key, if any. */ +void +PyThread_delete_key_value(int key) +{ + long id = PyThread_get_thread_ident(); + struct key *p, **q; + + PyThread_acquire_lock(keymutex, 1); + q = &keyhead; + while ((p = *q) != NULL) { + if (p->key == key && p->id == id) { + *q = p->next; + free((void *)p); + /* NB This does *not* free p->value! */ + break; + } + else + q = &p->next; + } + PyThread_release_lock(keymutex); +} + +/* Forget everything not associated with the current thread id. + * This function is called from PyOS_AfterFork(). It is necessary + * because other thread ids which were in use at the time of the fork + * may be reused for new threads created in the forked process. + */ +void +PyThread_ReInitTLS(void) +{ + long id = PyThread_get_thread_ident(); + struct key *p, **q; + + if (!keymutex) + return; + + /* As with interpreter_lock in PyEval_ReInitThreads() + we just create a new lock without freeing the old one */ + keymutex = PyThread_allocate_lock(); + + /* Delete all keys which do not match the current thread id */ + q = &keyhead; + while ((p = *q) != NULL) { + if (p->id != id) { + *q = p->next; + free((void *)p); + /* NB This does *not* free p->value! 
*/ + } + else + q = &p->next; + } +} + +#endif /* !MS_WINDOWS */ diff --git a/pypy/module/cpyext/test/test_thread.py b/pypy/module/cpyext/test/test_thread.py --- a/pypy/module/cpyext/test/test_thread.py +++ b/pypy/module/cpyext/test/test_thread.py @@ -5,6 +5,7 @@ from pypy.module.thread.ll_thread import allocate_ll_lock from pypy.module.cpyext.test.test_api import BaseApiTest +from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase class TestPyThread(BaseApiTest): @@ -38,3 +39,51 @@ api.PyThread_release_lock(lock) assert api.PyThread_acquire_lock(lock, 0) == 1 api.PyThread_free_lock(lock) + + +class AppTestThread(AppTestCpythonExtensionBase): + def test_tls(self): + module = self.import_extension('foo', [ + ("create_key", "METH_NOARGS", + """ + return PyInt_FromLong(PyThread_create_key()); + """), + ("test_key", "METH_O", + """ + int key = PyInt_AsLong(args); + if (PyThread_get_key_value(key) != NULL) { + PyErr_SetNone(PyExc_ValueError); + return NULL; + } + if (PyThread_set_key_value(key, (void*)123) < 0) { + PyErr_SetNone(PyExc_ValueError); + return NULL; + } + if (PyThread_get_key_value(key) != (void*)123) { + PyErr_SetNone(PyExc_ValueError); + return NULL; + } + Py_RETURN_NONE; + """), + ]) + key = module.create_key() + assert key > 0 + # Test value in main thread. + module.test_key(key) + raises(ValueError, module.test_key, key) + # Same test, in another thread. + result = [] + import thread, time + def in_thread(): + try: + module.test_key(key) + raises(ValueError, module.test_key, key) + except Exception, e: + result.append(e) + else: + result.append(True) + thread.start_new_thread(in_thread, ()) + while not result: + print "." + time.sleep(.5) + assert result == [True] From noreply at buildbot.pypy.org Sun Mar 4 17:41:16 2012 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 4 Mar 2012 17:41:16 +0100 (CET) Subject: [pypy-commit] pypy default: Attempt to fix translation on macosx, which uses a narrow unicode build of CPython. 
Message-ID: <20120304164116.C5D6982008@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: r53184:2f06eb672d0e Date: 2012-03-04 17:39 +0100 http://bitbucket.org/pypy/pypy/changeset/2f06eb672d0e/ Log: Attempt to fix translation on macosx, which uses a narrow unicode build of CPython. diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -178,7 +178,7 @@ @cpython_api([], Py_UNICODE, error=CANNOT_FAIL) def PyUnicode_GetMax(space): """Get the maximum ordinal for a Unicode character.""" - return unichr(runicode.MAXUNICODE) + return runicode.UNICHR(runicode.MAXUNICODE) @cpython_api([PyObject], rffi.CCHARP, error=CANNOT_FAIL) def PyUnicode_AS_DATA(space, ref): From noreply at buildbot.pypy.org Sun Mar 4 18:13:10 2012 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 4 Mar 2012 18:13:10 +0100 (CET) Subject: [pypy-commit] pypy numpypy-out: merge with default, still waiting for review Message-ID: <20120304171310.D97B782008@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: numpypy-out Changeset: r53185:5c09a846eaaa Date: 2012-03-04 17:45 +0200 http://bitbucket.org/pypy/pypy/changeset/5c09a846eaaa/ Log: merge with default, still waiting for review diff --git a/lib-python/modified-2.7/ctypes/test/test_arrays.py b/lib-python/modified-2.7/ctypes/test/test_arrays.py --- a/lib-python/modified-2.7/ctypes/test/test_arrays.py +++ b/lib-python/modified-2.7/ctypes/test/test_arrays.py @@ -1,12 +1,23 @@ import unittest from ctypes import * +from test.test_support import impl_detail formats = "bBhHiIlLqQfd" +# c_longdouble commented out for PyPy, look at the commend in test_longdouble formats = c_byte, c_ubyte, c_short, c_ushort, c_int, c_uint, \ - c_long, c_ulonglong, c_float, c_double, c_longdouble + c_long, c_ulonglong, c_float, c_double #, c_longdouble class ArrayTestCase(unittest.TestCase): + + @impl_detail('long double not supported 
by PyPy', pypy=False) + def test_longdouble(self): + """ + This test is empty. It's just here to remind that we commented out + c_longdouble in "formats". If pypy will ever supports c_longdouble, we + should kill this test and uncomment c_longdouble inside formats. + """ + def test_simple(self): # create classes holding simple numeric types, and check # various properties. diff --git a/lib_pypy/cPickle.py b/lib_pypy/cPickle.py --- a/lib_pypy/cPickle.py +++ b/lib_pypy/cPickle.py @@ -2,16 +2,95 @@ # One-liner implementation of cPickle # -from pickle import * +from pickle import Pickler, dump, dumps, PickleError, PicklingError, UnpicklingError, _EmptyClass from pickle import __doc__, __version__, format_version, compatible_formats +from types import * +from copy_reg import dispatch_table +from copy_reg import _extension_registry, _inverted_registry, _extension_cache +import marshal, struct, sys try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f +# These are purely informational; no code uses these. +format_version = "2.0" # File format version we write +compatible_formats = ["1.0", # Original protocol 0 + "1.1", # Protocol 0 with INST added + "1.2", # Original protocol 1 + "1.3", # Protocol 1 with BINFLOAT added + "2.0", # Protocol 2 + ] # Old format versions we can read + +# Keep in synch with cPickle. This is the highest protocol number we +# know how to read. 
+HIGHEST_PROTOCOL = 2 BadPickleGet = KeyError UnpickleableError = PicklingError +MARK = ord('(') # push special markobject on stack +STOP = ord('.') # every pickle ends with STOP +POP = ord('0') # discard topmost stack item +POP_MARK = ord('1') # discard stack top through topmost markobject +DUP = ord('2') # duplicate top stack item +FLOAT = ord('F') # push float object; decimal string argument +INT = ord('I') # push integer or bool; decimal string argument +BININT = ord('J') # push four-byte signed int +BININT1 = ord('K') # push 1-byte unsigned int +LONG = ord('L') # push long; decimal string argument +BININT2 = ord('M') # push 2-byte unsigned int +NONE = ord('N') # push None +PERSID = ord('P') # push persistent object; id is taken from string arg +BINPERSID = ord('Q') # " " " ; " " " " stack +REDUCE = ord('R') # apply callable to argtuple, both on stack +STRING = ord('S') # push string; NL-terminated string argument +BINSTRING = ord('T') # push string; counted binary string argument +SHORT_BINSTRING = ord('U') # " " ; " " " " < 256 bytes +UNICODE = ord('V') # push Unicode string; raw-unicode-escaped'd argument +BINUNICODE = ord('X') # " " " ; counted UTF-8 string argument +APPEND = ord('a') # append stack top to list below it +BUILD = ord('b') # call __setstate__ or __dict__.update() +GLOBAL = ord('c') # push self.find_class(modname, name); 2 string args +DICT = ord('d') # build a dict from stack items +EMPTY_DICT = ord('}') # push empty dict +APPENDS = ord('e') # extend list on stack by topmost stack slice +GET = ord('g') # push item from memo on stack; index is string arg +BINGET = ord('h') # " " " " " " ; " " 1-byte arg +INST = ord('i') # build & push class instance +LONG_BINGET = ord('j') # push item from memo on stack; index is 4-byte arg +LIST = ord('l') # build list from topmost stack items +EMPTY_LIST = ord(']') # push empty list +OBJ = ord('o') # build & push class instance +PUT = ord('p') # store stack top in memo; index is string arg +BINPUT = ord('q') 
# " " " " " ; " " 1-byte arg +LONG_BINPUT = ord('r') # " " " " " ; " " 4-byte arg +SETITEM = ord('s') # add key+value pair to dict +TUPLE = ord('t') # build tuple from topmost stack items +EMPTY_TUPLE = ord(')') # push empty tuple +SETITEMS = ord('u') # modify dict by adding topmost key+value pairs +BINFLOAT = ord('G') # push float; arg is 8-byte float encoding + +TRUE = 'I01\n' # not an opcode; see INT docs in pickletools.py +FALSE = 'I00\n' # not an opcode; see INT docs in pickletools.py + +# Protocol 2 + +PROTO = ord('\x80') # identify pickle protocol +NEWOBJ = ord('\x81') # build object by applying cls.__new__ to argtuple +EXT1 = ord('\x82') # push object from extension registry; 1-byte index +EXT2 = ord('\x83') # ditto, but 2-byte index +EXT4 = ord('\x84') # ditto, but 4-byte index +TUPLE1 = ord('\x85') # build 1-tuple from stack top +TUPLE2 = ord('\x86') # build 2-tuple from two topmost stack items +TUPLE3 = ord('\x87') # build 3-tuple from three topmost stack items +NEWTRUE = ord('\x88') # push True +NEWFALSE = ord('\x89') # push False +LONG1 = ord('\x8a') # push long from < 256 bytes +LONG4 = ord('\x8b') # push really big long + +_tuplesize2code = [EMPTY_TUPLE, TUPLE1, TUPLE2, TUPLE3] + + # ____________________________________________________________ # XXX some temporary dark magic to produce pickled dumps that are # closer to the ones produced by cPickle in CPython @@ -44,3 +123,474 @@ file = StringIO() Pickler(file, protocol).dump(obj) return file.getvalue() + +# Why use struct.pack() for pickling but marshal.loads() for +# unpickling? struct.pack() is 40% faster than marshal.dumps(), but +# marshal.loads() is twice as fast as struct.unpack()! +mloads = marshal.loads + +# Unpickling machinery + +class Unpickler(object): + + def __init__(self, file): + """This takes a file-like object for reading a pickle data stream. + + The protocol version of the pickle is detected automatically, so no + proto argument is needed. 
+ + The file-like object must have two methods, a read() method that + takes an integer argument, and a readline() method that requires no + arguments. Both methods should return a string. Thus file-like + object can be a file object opened for reading, a StringIO object, + or any other custom object that meets this interface. + """ + self.readline = file.readline + self.read = file.read + self.memo = {} + + def load(self): + """Read a pickled object representation from the open file. + + Return the reconstituted object hierarchy specified in the file. + """ + self.mark = object() # any new unique object + self.stack = [] + self.append = self.stack.append + try: + key = ord(self.read(1)) + while key != STOP: + self.dispatch[key](self) + key = ord(self.read(1)) + except TypeError: + if self.read(1) == '': + raise EOFError + raise + return self.stack.pop() + + # Return largest index k such that self.stack[k] is self.mark. + # If the stack doesn't contain a mark, eventually raises IndexError. + # This could be sped by maintaining another stack, of indices at which + # the mark appears. For that matter, the latter stack would suffice, + # and we wouldn't need to push mark objects on self.stack at all. + # Doing so is probably a good thing, though, since if the pickle is + # corrupt (or hostile) we may get a clue from finding self.mark embedded + # in unpickled objects. 
+ def marker(self): + k = len(self.stack)-1 + while self.stack[k] is not self.mark: k -= 1 + return k + + dispatch = {} + + def load_proto(self): + proto = ord(self.read(1)) + if not 0 <= proto <= 2: + raise ValueError, "unsupported pickle protocol: %d" % proto + dispatch[PROTO] = load_proto + + def load_persid(self): + pid = self.readline()[:-1] + self.append(self.persistent_load(pid)) + dispatch[PERSID] = load_persid + + def load_binpersid(self): + pid = self.stack.pop() + self.append(self.persistent_load(pid)) + dispatch[BINPERSID] = load_binpersid + + def load_none(self): + self.append(None) + dispatch[NONE] = load_none + + def load_false(self): + self.append(False) + dispatch[NEWFALSE] = load_false + + def load_true(self): + self.append(True) + dispatch[NEWTRUE] = load_true + + def load_int(self): + data = self.readline() + if data == FALSE[1:]: + val = False + elif data == TRUE[1:]: + val = True + else: + try: + val = int(data) + except ValueError: + val = long(data) + self.append(val) + dispatch[INT] = load_int + + def load_binint(self): + self.append(mloads('i' + self.read(4))) + dispatch[BININT] = load_binint + + def load_binint1(self): + self.append(ord(self.read(1))) + dispatch[BININT1] = load_binint1 + + def load_binint2(self): + self.append(mloads('i' + self.read(2) + '\000\000')) + dispatch[BININT2] = load_binint2 + + def load_long(self): + self.append(long(self.readline()[:-1], 0)) + dispatch[LONG] = load_long + + def load_long1(self): + n = ord(self.read(1)) + bytes = self.read(n) + self.append(decode_long(bytes)) + dispatch[LONG1] = load_long1 + + def load_long4(self): + n = mloads('i' + self.read(4)) + bytes = self.read(n) + self.append(decode_long(bytes)) + dispatch[LONG4] = load_long4 + + def load_float(self): + self.append(float(self.readline()[:-1])) + dispatch[FLOAT] = load_float + + def load_binfloat(self, unpack=struct.unpack): + self.append(unpack('>d', self.read(8))[0]) + dispatch[BINFLOAT] = load_binfloat + + def load_string(self): + rep 
= self.readline() + if len(rep) < 3: + raise ValueError, "insecure string pickle" + if rep[0] == "'" == rep[-2]: + rep = rep[1:-2] + elif rep[0] == '"' == rep[-2]: + rep = rep[1:-2] + else: + raise ValueError, "insecure string pickle" + self.append(rep.decode("string-escape")) + dispatch[STRING] = load_string + + def load_binstring(self): + L = mloads('i' + self.read(4)) + self.append(self.read(L)) + dispatch[BINSTRING] = load_binstring + + def load_unicode(self): + self.append(unicode(self.readline()[:-1],'raw-unicode-escape')) + dispatch[UNICODE] = load_unicode + + def load_binunicode(self): + L = mloads('i' + self.read(4)) + self.append(unicode(self.read(L),'utf-8')) + dispatch[BINUNICODE] = load_binunicode + + def load_short_binstring(self): + L = ord(self.read(1)) + self.append(self.read(L)) + dispatch[SHORT_BINSTRING] = load_short_binstring + + def load_tuple(self): + k = self.marker() + self.stack[k:] = [tuple(self.stack[k+1:])] + dispatch[TUPLE] = load_tuple + + def load_empty_tuple(self): + self.stack.append(()) + dispatch[EMPTY_TUPLE] = load_empty_tuple + + def load_tuple1(self): + self.stack[-1] = (self.stack[-1],) + dispatch[TUPLE1] = load_tuple1 + + def load_tuple2(self): + self.stack[-2:] = [(self.stack[-2], self.stack[-1])] + dispatch[TUPLE2] = load_tuple2 + + def load_tuple3(self): + self.stack[-3:] = [(self.stack[-3], self.stack[-2], self.stack[-1])] + dispatch[TUPLE3] = load_tuple3 + + def load_empty_list(self): + self.stack.append([]) + dispatch[EMPTY_LIST] = load_empty_list + + def load_empty_dictionary(self): + self.stack.append({}) + dispatch[EMPTY_DICT] = load_empty_dictionary + + def load_list(self): + k = self.marker() + self.stack[k:] = [self.stack[k+1:]] + dispatch[LIST] = load_list + + def load_dict(self): + k = self.marker() + d = {} + items = self.stack[k+1:] + for i in range(0, len(items), 2): + key = items[i] + value = items[i+1] + d[key] = value + self.stack[k:] = [d] + dispatch[DICT] = load_dict + + # INST and OBJ differ only in 
how they get a class object. It's not + # only sensible to do the rest in a common routine, the two routines + # previously diverged and grew different bugs. + # klass is the class to instantiate, and k points to the topmost mark + # object, following which are the arguments for klass.__init__. + def _instantiate(self, klass, k): + args = tuple(self.stack[k+1:]) + del self.stack[k:] + instantiated = 0 + if (not args and + type(klass) is ClassType and + not hasattr(klass, "__getinitargs__")): + try: + value = _EmptyClass() + value.__class__ = klass + instantiated = 1 + except RuntimeError: + # In restricted execution, assignment to inst.__class__ is + # prohibited + pass + if not instantiated: + try: + value = klass(*args) + except TypeError, err: + raise TypeError, "in constructor for %s: %s" % ( + klass.__name__, str(err)), sys.exc_info()[2] + self.append(value) + + def load_inst(self): + module = self.readline()[:-1] + name = self.readline()[:-1] + klass = self.find_class(module, name) + self._instantiate(klass, self.marker()) + dispatch[INST] = load_inst + + def load_obj(self): + # Stack is ... markobject classobject arg1 arg2 ... 
+ k = self.marker() + klass = self.stack.pop(k+1) + self._instantiate(klass, k) + dispatch[OBJ] = load_obj + + def load_newobj(self): + args = self.stack.pop() + cls = self.stack[-1] + obj = cls.__new__(cls, *args) + self.stack[-1] = obj + dispatch[NEWOBJ] = load_newobj + + def load_global(self): + module = self.readline()[:-1] + name = self.readline()[:-1] + klass = self.find_class(module, name) + self.append(klass) + dispatch[GLOBAL] = load_global + + def load_ext1(self): + code = ord(self.read(1)) + self.get_extension(code) + dispatch[EXT1] = load_ext1 + + def load_ext2(self): + code = mloads('i' + self.read(2) + '\000\000') + self.get_extension(code) + dispatch[EXT2] = load_ext2 + + def load_ext4(self): + code = mloads('i' + self.read(4)) + self.get_extension(code) + dispatch[EXT4] = load_ext4 + + def get_extension(self, code): + nil = [] + obj = _extension_cache.get(code, nil) + if obj is not nil: + self.append(obj) + return + key = _inverted_registry.get(code) + if not key: + raise ValueError("unregistered extension code %d" % code) + obj = self.find_class(*key) + _extension_cache[code] = obj + self.append(obj) + + def find_class(self, module, name): + # Subclasses may override this + __import__(module) + mod = sys.modules[module] + klass = getattr(mod, name) + return klass + + def load_reduce(self): + args = self.stack.pop() + func = self.stack[-1] + value = self.stack[-1](*args) + self.stack[-1] = value + dispatch[REDUCE] = load_reduce + + def load_pop(self): + del self.stack[-1] + dispatch[POP] = load_pop + + def load_pop_mark(self): + k = self.marker() + del self.stack[k:] + dispatch[POP_MARK] = load_pop_mark + + def load_dup(self): + self.append(self.stack[-1]) + dispatch[DUP] = load_dup + + def load_get(self): + self.append(self.memo[self.readline()[:-1]]) + dispatch[GET] = load_get + + def load_binget(self): + i = ord(self.read(1)) + self.append(self.memo[repr(i)]) + dispatch[BINGET] = load_binget + + def load_long_binget(self): + i = mloads('i' + 
self.read(4)) + self.append(self.memo[repr(i)]) + dispatch[LONG_BINGET] = load_long_binget + + def load_put(self): + self.memo[self.readline()[:-1]] = self.stack[-1] + dispatch[PUT] = load_put + + def load_binput(self): + i = ord(self.read(1)) + self.memo[repr(i)] = self.stack[-1] + dispatch[BINPUT] = load_binput + + def load_long_binput(self): + i = mloads('i' + self.read(4)) + self.memo[repr(i)] = self.stack[-1] + dispatch[LONG_BINPUT] = load_long_binput + + def load_append(self): + value = self.stack.pop() + self.stack[-1].append(value) + dispatch[APPEND] = load_append + + def load_appends(self): + stack = self.stack + mark = self.marker() + lst = stack[mark - 1] + lst.extend(stack[mark + 1:]) + del stack[mark:] + dispatch[APPENDS] = load_appends + + def load_setitem(self): + stack = self.stack + value = stack.pop() + key = stack.pop() + dict = stack[-1] + dict[key] = value + dispatch[SETITEM] = load_setitem + + def load_setitems(self): + stack = self.stack + mark = self.marker() + dict = stack[mark - 1] + for i in range(mark + 1, len(stack), 2): + dict[stack[i]] = stack[i + 1] + + del stack[mark:] + dispatch[SETITEMS] = load_setitems + + def load_build(self): + stack = self.stack + state = stack.pop() + inst = stack[-1] + setstate = getattr(inst, "__setstate__", None) + if setstate: + setstate(state) + return + slotstate = None + if isinstance(state, tuple) and len(state) == 2: + state, slotstate = state + if state: + try: + d = inst.__dict__ + try: + for k, v in state.iteritems(): + d[intern(k)] = v + # keys in state don't have to be strings + # don't blow up, but don't go out of our way + except TypeError: + d.update(state) + + except RuntimeError: + # XXX In restricted execution, the instance's __dict__ + # is not accessible. Use the old way of unpickling + # the instance variables. This is a semantic + # difference when unpickling in restricted + # vs. unrestricted modes. 
+ # Note, however, that cPickle has never tried to do the + # .update() business, and always uses + # PyObject_SetItem(inst.__dict__, key, value) in a + # loop over state.items(). + for k, v in state.items(): + setattr(inst, k, v) + if slotstate: + for k, v in slotstate.items(): + setattr(inst, k, v) + dispatch[BUILD] = load_build + + def load_mark(self): + self.append(self.mark) + dispatch[MARK] = load_mark + +#from pickle import decode_long + +def decode_long(data): + r"""Decode a long from a two's complement little-endian binary string. + + >>> decode_long('') + 0L + >>> decode_long("\xff\x00") + 255L + >>> decode_long("\xff\x7f") + 32767L + >>> decode_long("\x00\xff") + -256L + >>> decode_long("\x00\x80") + -32768L + >>> decode_long("\x80") + -128L + >>> decode_long("\x7f") + 127L + """ + + nbytes = len(data) + if nbytes == 0: + return 0L + ind = nbytes - 1 + while ind and ord(data[ind]) == 0: + ind -= 1 + n = ord(data[ind]) + while ind: + n <<= 8 + ind -= 1 + if ord(data[ind]): + n += ord(data[ind]) + if ord(data[nbytes - 1]) >= 128: + n -= 1L << (nbytes << 3) + return n + +def load(f): + return Unpickler(f).load() + +def loads(str): + f = StringIO(str) + return Unpickler(f).load() diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py --- a/lib_pypy/datetime.py +++ b/lib_pypy/datetime.py @@ -1032,8 +1032,8 @@ def __setstate(self, string): if len(string) != 4 or not (1 <= ord(string[2]) <= 12): raise TypeError("not enough arguments") - yhi, ylo, self._month, self._day = map(ord, string) - self._year = yhi * 256 + ylo + self._month, self._day = ord(string[2]), ord(string[3]) + self._year = ord(string[0]) * 256 + ord(string[1]) def __reduce__(self): return (self.__class__, self._getstate()) @@ -1421,9 +1421,10 @@ def __setstate(self, string, tzinfo): if len(string) != 6 or ord(string[0]) >= 24: raise TypeError("an integer is required") - self._hour, self._minute, self._second, us1, us2, us3 = \ - map(ord, string) - self._microsecond = (((us1 << 8) | us2) << 8) 
| us3 + self._hour, self._minute, self._second = ord(string[0]), \ + ord(string[1]), ord(string[2]) + self._microsecond = (((ord(string[3]) << 8) | \ + ord(string[4])) << 8) | ord(string[5]) self._tzinfo = tzinfo def __reduce__(self): @@ -1903,10 +1904,11 @@ return (basestate, self._tzinfo) def __setstate(self, string, tzinfo): - (yhi, ylo, self._month, self._day, self._hour, - self._minute, self._second, us1, us2, us3) = map(ord, string) - self._year = yhi * 256 + ylo - self._microsecond = (((us1 << 8) | us2) << 8) | us3 + (self._month, self._day, self._hour, self._minute, + self._second) = (ord(string[2]), ord(string[3]), ord(string[4]), + ord(string[5]), ord(string[6])) + self._year = ord(string[0]) * 256 + ord(string[1]) + self._microsecond = (((ord(string[7]) << 8) | ord(string[8])) << 8) | ord(string[9]) self._tzinfo = tzinfo def __reduce__(self): diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -13,7 +13,7 @@ and not p.basename.startswith('test')] essential_modules = dict.fromkeys( - ["exceptions", "_file", "sys", "__builtin__", "posix"] + ["exceptions", "_file", "sys", "__builtin__", "posix", "_warnings"] ) default_modules = essential_modules.copy() diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1471,8 +1471,8 @@ def warn(self, msg, w_warningcls): self.appexec([self.wrap(msg), w_warningcls], """(msg, warningcls): - import warnings - warnings.warn(msg, warningcls, stacklevel=2) + import _warnings + _warnings.warn(msg, warningcls, stacklevel=2) """) def resolve_target(self, w_obj): diff --git a/pypy/interpreter/pyparser/parsestring.py b/pypy/interpreter/pyparser/parsestring.py --- a/pypy/interpreter/pyparser/parsestring.py +++ b/pypy/interpreter/pyparser/parsestring.py @@ -1,5 +1,6 @@ from pypy.interpreter.error import OperationError from pypy.interpreter import 
unicodehelper +from pypy.rlib.rstring import StringBuilder def parsestr(space, encoding, s, unicode_literals=False): # compiler.transformer.Transformer.decode_literal depends on what @@ -115,21 +116,23 @@ the string is UTF-8 encoded and should be re-encoded in the specified encoding. """ - lis = [] + builder = StringBuilder(len(s)) ps = 0 end = len(s) - while ps < end: - if s[ps] != '\\': - # note that the C code has a label here. - # the logic is the same. + while 1: + ps2 = ps + while ps < end and s[ps] != '\\': if recode_encoding and ord(s[ps]) & 0x80: w, ps = decode_utf8(space, s, ps, end, recode_encoding) - # Append bytes to output buffer. - lis.append(w) + builder.append(w) + ps2 = ps else: - lis.append(s[ps]) ps += 1 - continue + if ps > ps2: + builder.append_slice(s, ps2, ps) + if ps == end: + break + ps += 1 if ps == end: raise_app_valueerror(space, 'Trailing \\ in string') @@ -140,25 +143,25 @@ if ch == '\n': pass elif ch == '\\': - lis.append('\\') + builder.append('\\') elif ch == "'": - lis.append("'") + builder.append("'") elif ch == '"': - lis.append('"') + builder.append('"') elif ch == 'b': - lis.append("\010") + builder.append("\010") elif ch == 'f': - lis.append('\014') # FF + builder.append('\014') # FF elif ch == 't': - lis.append('\t') + builder.append('\t') elif ch == 'n': - lis.append('\n') + builder.append('\n') elif ch == 'r': - lis.append('\r') + builder.append('\r') elif ch == 'v': - lis.append('\013') # VT + builder.append('\013') # VT elif ch == 'a': - lis.append('\007') # BEL, not classic C + builder.append('\007') # BEL, not classic C elif ch in '01234567': # Look for up to two more octal digits span = ps @@ -168,13 +171,13 @@ # emulate a strange wrap-around behavior of CPython: # \400 is the same as \000 because 0400 == 256 num = int(octal, 8) & 0xFF - lis.append(chr(num)) + builder.append(chr(num)) ps = span elif ch == 'x': if ps+2 <= end and isxdigit(s[ps]) and isxdigit(s[ps + 1]): hexa = s[ps : ps + 2] num = int(hexa, 16) - 
lis.append(chr(num)) + builder.append(chr(num)) ps += 2 else: raise_app_valueerror(space, 'invalid \\x escape') @@ -184,13 +187,13 @@ # this was not an escape, so the backslash # has to be added, and we start over in # non-escape mode. - lis.append('\\') + builder.append('\\') ps -= 1 assert ps >= 0 continue # an arbitry number of unescaped UTF-8 bytes may follow. - buf = ''.join(lis) + buf = builder.build() return buf diff --git a/pypy/interpreter/streamutil.py b/pypy/interpreter/streamutil.py new file mode 100644 --- /dev/null +++ b/pypy/interpreter/streamutil.py @@ -0,0 +1,17 @@ +from pypy.rlib.streamio import StreamError +from pypy.interpreter.error import OperationError, wrap_oserror2 + +def wrap_streamerror(space, e, w_filename=None): + if isinstance(e, StreamError): + return OperationError(space.w_ValueError, + space.wrap(e.message)) + elif isinstance(e, OSError): + return wrap_oserror_as_ioerror(space, e, w_filename) + else: + # should not happen: wrap_streamerror() is only called when + # StreamErrors = (OSError, StreamError) are raised + return OperationError(space.w_IOError, space.w_None) + +def wrap_oserror_as_ioerror(space, e, w_filename=None): + return wrap_oserror2(space, e, w_filename, + w_exception_class=space.w_IOError) diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py --- a/pypy/interpreter/test/test_typedef.py +++ b/pypy/interpreter/test/test_typedef.py @@ -304,6 +304,42 @@ assert_method(w_o1, "c", True) assert_method(w_o2, "c", False) + def test_total_ordering(self): + class W_SomeType(Wrappable): + def __init__(self, space, x): + self.space = space + self.x = x + + def descr__lt(self, w_other): + assert isinstance(w_other, W_SomeType) + return self.space.wrap(self.x < w_other.x) + + def descr__eq(self, w_other): + assert isinstance(w_other, W_SomeType) + return self.space.wrap(self.x == w_other.x) + + W_SomeType.typedef = typedef.TypeDef( + 'some_type', + __total_ordering__ = 'auto', + __lt__ = 
interp2app(W_SomeType.descr__lt), + __eq__ = interp2app(W_SomeType.descr__eq), + ) + space = self.space + w_b = space.wrap(W_SomeType(space, 2)) + w_c = space.wrap(W_SomeType(space, 2)) + w_a = space.wrap(W_SomeType(space, 1)) + # explicitly defined + assert space.is_true(space.lt(w_a, w_b)) + assert not space.is_true(space.eq(w_a, w_b)) + assert space.is_true(space.eq(w_b, w_c)) + # automatically defined + assert space.is_true(space.le(w_a, w_b)) + assert space.is_true(space.le(w_b, w_c)) + assert space.is_true(space.gt(w_b, w_a)) + assert space.is_true(space.ge(w_b, w_a)) + assert space.is_true(space.ge(w_b, w_c)) + assert space.is_true(space.ne(w_a, w_b)) + assert not space.is_true(space.ne(w_b, w_c)) class AppTestTypeDef: diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -12,7 +12,7 @@ from pypy.rlib.jit import promote class TypeDef: - def __init__(self, __name, __base=None, **rawdict): + def __init__(self, __name, __base=None, __total_ordering__=None, **rawdict): "NOT_RPYTHON: initialization-time only" self.name = __name if __base is None: @@ -34,6 +34,9 @@ # xxx used by faking self.fakedcpytype = None self.add_entries(**rawdict) + assert __total_ordering__ in (None, 'auto'), "Unknown value for __total_ordering" + if __total_ordering__ == 'auto': + self.auto_total_ordering() def add_entries(self, **rawdict): # xxx fix the names of the methods to match what app-level expects @@ -41,7 +44,15 @@ if isinstance(value, (interp2app, GetSetProperty)): value.name = key self.rawdict.update(rawdict) - + + def auto_total_ordering(self): + assert '__lt__' in self.rawdict, "__total_ordering='auto' requires __lt__" + assert '__eq__' in self.rawdict, "__total_ordering='auto' requires __eq__" + self.add_entries(__le__ = auto__le__, + __gt__ = auto__gt__, + __ge__ = auto__ge__, + __ne__ = auto__ne__) + def _freeze_(self): # hint for the annotator: track individual constant instances of 
TypeDef return True @@ -50,6 +61,26 @@ return "<%s name=%r>" % (self.__class__.__name__, self.name) +# generic special cmp methods defined on top of __lt__ and __eq__, used by +# automatic total ordering + + at interp2app +def auto__le__(space, w_self, w_other): + return space.not_(space.lt(w_other, w_self)) + + at interp2app +def auto__gt__(space, w_self, w_other): + return space.lt(w_other, w_self) + + at interp2app +def auto__ge__(space, w_self, w_other): + return space.not_(space.lt(w_self, w_other)) + + at interp2app +def auto__ne__(space, w_self, w_other): + return space.not_(space.eq(w_self, w_other)) + + # ____________________________________________________________ # Hash support diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -171,7 +171,7 @@ 'unicodesetitem' : (('ref', 'int', 'int'), 'int'), 'cast_ptr_to_int' : (('ref',), 'int'), 'cast_int_to_ptr' : (('int',), 'ref'), - 'debug_merge_point': (('ref', 'int'), None), + 'debug_merge_point': (('ref', 'int', 'int'), None), 'force_token' : ((), 'int'), 'call_may_force' : (('int', 'varargs'), 'intorptr'), 'guard_not_forced': ((), None), diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -208,6 +208,7 @@ This is the class supporting --gcrootfinder=asmgcc. """ is_shadow_stack = False + is_64_bit = (WORD == 8) LOC_REG = 0 LOC_ESP_PLUS = 1 @@ -336,17 +337,17 @@ self._gcmap_deadentries += 1 item += asmgcroot.arrayitemsize - def get_basic_shape(self, is_64_bit=False): + def get_basic_shape(self): # XXX: Should this code even really know about stack frame layout of # the JIT? 
- if is_64_bit: - return [chr(self.LOC_EBP_PLUS | 8), - chr(self.LOC_EBP_MINUS | 8), - chr(self.LOC_EBP_MINUS | 16), - chr(self.LOC_EBP_MINUS | 24), - chr(self.LOC_EBP_MINUS | 32), - chr(self.LOC_EBP_MINUS | 40), - chr(self.LOC_EBP_PLUS | 0), + if self.is_64_bit: + return [chr(self.LOC_EBP_PLUS | 4), # return addr: at 8(%rbp) + chr(self.LOC_EBP_MINUS | 4), # saved %rbx: at -8(%rbp) + chr(self.LOC_EBP_MINUS | 8), # saved %r12: at -16(%rbp) + chr(self.LOC_EBP_MINUS | 12), # saved %r13: at -24(%rbp) + chr(self.LOC_EBP_MINUS | 16), # saved %r14: at -32(%rbp) + chr(self.LOC_EBP_MINUS | 20), # saved %r15: at -40(%rbp) + chr(self.LOC_EBP_PLUS | 0), # saved %rbp: at (%rbp) chr(0)] else: return [chr(self.LOC_EBP_PLUS | 4), # return addr: at 4(%ebp) @@ -366,7 +367,11 @@ shape.append(chr(number | flag)) def add_frame_offset(self, shape, offset): - assert (offset & 3) == 0 + if self.is_64_bit: + assert (offset & 7) == 0 + offset >>= 1 + else: + assert (offset & 3) == 0 if offset >= 0: num = self.LOC_EBP_PLUS | offset else: @@ -518,7 +523,7 @@ def initialize(self): pass - def get_basic_shape(self, is_64_bit=False): + def get_basic_shape(self): return [] def add_frame_offset(self, shape, offset): @@ -769,11 +774,19 @@ self.generate_function('malloc_unicode', malloc_unicode, [lltype.Signed]) - # Rarely called: allocate a fixed-size amount of bytes, but - # not in the nursery, because it is too big. Implemented like - # malloc_nursery_slowpath() above. - self.generate_function('malloc_fixedsize', malloc_nursery_slowpath, - [lltype.Signed]) + # Never called as far as I can tell, but there for completeness: + # allocate a fixed-size object, but not in the nursery, because + # it is too big. 
+ def malloc_big_fixedsize(size, tid): + if self.DEBUG: + self._random_usage_of_xmm_registers() + type_id = llop.extract_ushort(llgroup.HALFWORD, tid) + check_typeid(type_id) + return llop1.do_malloc_fixedsize_clear(llmemory.GCREF, + type_id, size, + False, False, False) + self.generate_function('malloc_big_fixedsize', malloc_big_fixedsize, + [lltype.Signed] * 2) def _bh_malloc(self, sizedescr): from pypy.rpython.memory.gctypelayout import check_typeid diff --git a/pypy/jit/backend/llsupport/rewrite.py b/pypy/jit/backend/llsupport/rewrite.py --- a/pypy/jit/backend/llsupport/rewrite.py +++ b/pypy/jit/backend/llsupport/rewrite.py @@ -96,8 +96,10 @@ def handle_new_fixedsize(self, descr, op): assert isinstance(descr, SizeDescr) size = descr.size - self.gen_malloc_nursery(size, op.result) - self.gen_initialize_tid(op.result, descr.tid) + if self.gen_malloc_nursery(size, op.result): + self.gen_initialize_tid(op.result, descr.tid) + else: + self.gen_malloc_fixedsize(size, descr.tid, op.result) def handle_new_array(self, arraydescr, op): v_length = op.getarg(0) @@ -112,8 +114,8 @@ pass # total_size is still -1 elif arraydescr.itemsize == 0: total_size = arraydescr.basesize - if 0 <= total_size <= 0xffffff: # up to 16MB, arbitrarily - self.gen_malloc_nursery(total_size, op.result) + if (total_size >= 0 and + self.gen_malloc_nursery(total_size, op.result)): self.gen_initialize_tid(op.result, arraydescr.tid) self.gen_initialize_len(op.result, v_length, arraydescr.lendescr) elif self.gc_ll_descr.kind == 'boehm': @@ -147,13 +149,22 @@ # mark 'v_result' as freshly malloced self.recent_mallocs[v_result] = None - def gen_malloc_fixedsize(self, size, v_result): - """Generate a CALL_MALLOC_GC(malloc_fixedsize_fn, Const(size)). - Note that with the framework GC, this should be called very rarely. + def gen_malloc_fixedsize(self, size, typeid, v_result): + """Generate a CALL_MALLOC_GC(malloc_fixedsize_fn, ...). + Used on Boehm, and on the framework GC for large fixed-size + mallocs. 
(For all I know this latter case never occurs in + practice, but better safe than sorry.) """ - addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_fixedsize') - self._gen_call_malloc_gc([ConstInt(addr), ConstInt(size)], v_result, - self.gc_ll_descr.malloc_fixedsize_descr) + if self.gc_ll_descr.fielddescr_tid is not None: # framework GC + assert (size & (WORD-1)) == 0, "size not aligned?" + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_big_fixedsize') + args = [ConstInt(addr), ConstInt(size), ConstInt(typeid)] + descr = self.gc_ll_descr.malloc_big_fixedsize_descr + else: # Boehm + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_fixedsize') + args = [ConstInt(addr), ConstInt(size)] + descr = self.gc_ll_descr.malloc_fixedsize_descr + self._gen_call_malloc_gc(args, v_result, descr) def gen_boehm_malloc_array(self, arraydescr, v_num_elem, v_result): """Generate a CALL_MALLOC_GC(malloc_array_fn, ...) for Boehm.""" @@ -211,8 +222,7 @@ """ size = self.round_up_for_allocation(size) if not self.gc_ll_descr.can_use_nursery_malloc(size): - self.gen_malloc_fixedsize(size, v_result) - return + return False # op = None if self._op_malloc_nursery is not None: @@ -238,6 +248,7 @@ self._previous_size = size self._v_last_malloced_nursery = v_result self.recent_mallocs[v_result] = None + return True def gen_initialize_tid(self, v_newgcobj, tid): if self.gc_ll_descr.fielddescr_tid is not None: diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -57,6 +57,7 @@ def frame_pos(n): return -4*(4+n) gcrootmap = GcRootMap_asmgcc() + gcrootmap.is_64_bit = False num1 = frame_pos(-5) num1a = num1|2 num2 = frame_pos(55) diff --git a/pypy/jit/backend/llsupport/test/test_rewrite.py b/pypy/jit/backend/llsupport/test/test_rewrite.py --- a/pypy/jit/backend/llsupport/test/test_rewrite.py +++ b/pypy/jit/backend/llsupport/test/test_rewrite.py @@ 
-119,12 +119,19 @@ jump() """, """ [] - p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ - %(adescr.basesize + 10 * adescr.itemsize)d, \ - descr=malloc_fixedsize_descr) - setfield_gc(p0, 10, descr=alendescr) + p0 = call_malloc_gc(ConstClass(malloc_array), \ + %(adescr.basesize)d, \ + 10, \ + %(adescr.itemsize)d, \ + %(adescr.lendescr.offset)d, \ + descr=malloc_array_descr) jump() """) +## should ideally be: +## p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ +## %(adescr.basesize + 10 * adescr.itemsize)d, \ +## descr=malloc_fixedsize_descr) +## setfield_gc(p0, 10, descr=alendescr) def test_new_array_variable(self): self.check_rewrite(""" @@ -178,13 +185,20 @@ jump() """, """ [i1] - p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ - %(unicodedescr.basesize + \ - 10 * unicodedescr.itemsize)d, \ - descr=malloc_fixedsize_descr) - setfield_gc(p0, 10, descr=unicodelendescr) + p0 = call_malloc_gc(ConstClass(malloc_array), \ + %(unicodedescr.basesize)d, \ + 10, \ + %(unicodedescr.itemsize)d, \ + %(unicodelendescr.offset)d, \ + descr=malloc_array_descr) jump() """) +## should ideally be: +## p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ +## %(unicodedescr.basesize + \ +## 10 * unicodedescr.itemsize)d, \ +## descr=malloc_fixedsize_descr) +## setfield_gc(p0, 10, descr=unicodelendescr) class TestFramework(RewriteTests): @@ -203,7 +217,7 @@ # class FakeCPU(object): def sizeof(self, STRUCT): - descr = SizeDescrWithVTable(102) + descr = SizeDescrWithVTable(104) descr.tid = 9315 return descr self.cpu = FakeCPU() @@ -368,11 +382,9 @@ jump() """, """ [] - p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ - %(bdescr.basesize + 104)d, \ - descr=malloc_fixedsize_descr) - setfield_gc(p0, 8765, descr=tiddescr) - setfield_gc(p0, 103, descr=blendescr) + p0 = call_malloc_gc(ConstClass(malloc_array), 1, \ + %(bdescr.tid)d, 103, \ + descr=malloc_array_descr) jump() """) @@ -435,9 +447,8 @@ jump() """, """ [p1] - p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 104, \ - 
descr=malloc_fixedsize_descr) - setfield_gc(p0, 9315, descr=tiddescr) + p0 = call_malloc_gc(ConstClass(malloc_big_fixedsize), 104, 9315, \ + descr=malloc_big_fixedsize_descr) setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) jump() """) diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -266,6 +266,38 @@ res = self.cpu.get_latest_value_int(0) assert res == 20 + def test_compile_big_bridge_out_of_small_loop(self): + i0 = BoxInt() + faildescr1 = BasicFailDescr(1) + looptoken = JitCellToken() + operations = [ + ResOperation(rop.GUARD_FALSE, [i0], None, descr=faildescr1), + ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(2)), + ] + inputargs = [i0] + operations[0].setfailargs([i0]) + self.cpu.compile_loop(inputargs, operations, looptoken) + + i1list = [BoxInt() for i in range(1000)] + bridge = [] + iprev = i0 + for i1 in i1list: + bridge.append(ResOperation(rop.INT_ADD, [iprev, ConstInt(1)], i1)) + iprev = i1 + bridge.append(ResOperation(rop.GUARD_FALSE, [i0], None, + descr=BasicFailDescr(3))) + bridge.append(ResOperation(rop.FINISH, [], None, + descr=BasicFailDescr(4))) + bridge[-2].setfailargs(i1list) + + self.cpu.compile_bridge(faildescr1, [i0], bridge, looptoken) + + fail = self.cpu.execute_token(looptoken, 1) + assert fail.identifier == 3 + for i in range(1000): + res = self.cpu.get_latest_value_int(i) + assert res == 2 + i + def test_get_latest_value_count(self): i0 = BoxInt() i1 = BoxInt() @@ -572,7 +604,7 @@ [funcbox, BoxInt(arg1), BoxInt(arg2)], 'int', descr=calldescr) assert res.getint() == f(arg1, arg2) - + def test_call_stack_alignment(self): # test stack alignment issues, notably for Mac OS/X. # also test the ordering of the arguments. 
@@ -1458,7 +1490,8 @@ def test_noops(self): c_box = self.alloc_string("hi there").constbox() c_nest = ConstInt(0) - self.execute_operation(rop.DEBUG_MERGE_POINT, [c_box, c_nest], 'void') + c_id = ConstInt(0) + self.execute_operation(rop.DEBUG_MERGE_POINT, [c_box, c_nest, c_id], 'void') self.execute_operation(rop.JIT_DEBUG, [c_box, c_nest, c_nest, c_nest, c_nest], 'void') @@ -3029,7 +3062,7 @@ ResOperation(rop.JUMP, [i2], None, descr=targettoken2), ] self.cpu.compile_bridge(faildescr, inputargs, operations, looptoken) - + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 3 res = self.cpu.get_latest_value_int(0) @@ -3074,7 +3107,7 @@ assert len(mc) == len(ops) for i in range(len(mc)): assert mc[i].split("\t")[-1].startswith(ops[i]) - + data = ctypes.string_at(info.asmaddr, info.asmlen) mc = list(machine_code_dump(data, info.asmaddr, cpuname)) lines = [line for line in mc if line.count('\t') == 2] diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -88,7 +88,6 @@ self._debug = False self.debug_counter_descr = cpu.fielddescrof(DEBUG_COUNTER, 'i') self.fail_boxes_count = 0 - self._current_depths_cache = (0, 0) self.datablockwrapper = None self.stack_check_slowpath = 0 self.propagate_exception_path = 0 @@ -442,10 +441,8 @@ looppos = self.mc.get_relative_pos() looptoken._x86_loop_code = looppos clt.frame_depth = -1 # temporarily - clt.param_depth = -1 # temporarily - frame_depth, param_depth = self._assemble(regalloc, operations) + frame_depth = self._assemble(regalloc, operations) clt.frame_depth = frame_depth - clt.param_depth = param_depth # size_excluding_failure_stuff = self.mc.get_relative_pos() self.write_pending_failure_recoveries() @@ -459,8 +456,7 @@ rawstart + size_excluding_failure_stuff, rawstart)) debug_stop("jit-backend-addr") - self._patch_stackadjust(rawstart + stackadjustpos, - frame_depth + param_depth) + 
self._patch_stackadjust(rawstart + stackadjustpos, frame_depth) self.patch_pending_failure_recoveries(rawstart) # ops_offset = self.mc.ops_offset @@ -500,14 +496,13 @@ assert ([loc.assembler() for loc in arglocs] == [loc.assembler() for loc in faildescr._x86_debug_faillocs]) regalloc = RegAlloc(self, self.cpu.translate_support_code) - fail_depths = faildescr._x86_current_depths startpos = self.mc.get_relative_pos() - operations = regalloc.prepare_bridge(fail_depths, inputargs, arglocs, + operations = regalloc.prepare_bridge(inputargs, arglocs, operations, self.current_clt.allgcrefs) stackadjustpos = self._patchable_stackadjust() - frame_depth, param_depth = self._assemble(regalloc, operations) + frame_depth = self._assemble(regalloc, operations) codeendpos = self.mc.get_relative_pos() self.write_pending_failure_recoveries() fullsize = self.mc.get_relative_pos() @@ -517,19 +512,16 @@ debug_print("bridge out of Guard %d has address %x to %x" % (descr_number, rawstart, rawstart + codeendpos)) debug_stop("jit-backend-addr") - self._patch_stackadjust(rawstart + stackadjustpos, - frame_depth + param_depth) + self._patch_stackadjust(rawstart + stackadjustpos, frame_depth) self.patch_pending_failure_recoveries(rawstart) if not we_are_translated(): # for the benefit of tests faildescr._x86_bridge_frame_depth = frame_depth - faildescr._x86_bridge_param_depth = param_depth # patch the jump from original guard self.patch_jump_for_descr(faildescr, rawstart) ops_offset = self.mc.ops_offset self.fixup_target_tokens(rawstart) self.current_clt.frame_depth = max(self.current_clt.frame_depth, frame_depth) - self.current_clt.param_depth = max(self.current_clt.param_depth, param_depth) self.teardown() # oprofile support if self.cpu.profile_agent is not None: @@ -700,15 +692,12 @@ regalloc.walk_operations(operations) if we_are_translated() or self.cpu.dont_keepalive_stuff: self._regalloc = None # else keep it around for debugging - frame_depth = regalloc.fm.get_frame_depth() - 
param_depth = regalloc.param_depth + frame_depth = regalloc.get_final_frame_depth() jump_target_descr = regalloc.jump_target_descr if jump_target_descr is not None: target_frame_depth = jump_target_descr._x86_clt.frame_depth - target_param_depth = jump_target_descr._x86_clt.param_depth frame_depth = max(frame_depth, target_frame_depth) - param_depth = max(param_depth, target_param_depth) - return frame_depth, param_depth + return frame_depth def _patchable_stackadjust(self): # stack adjustment LEA @@ -892,10 +881,9 @@ genop_math_list[oopspecindex](self, op, arglocs, resloc) def regalloc_perform_with_guard(self, op, guard_op, faillocs, - arglocs, resloc, current_depths): + arglocs, resloc): faildescr = guard_op.getdescr() assert isinstance(faildescr, AbstractFailDescr) - faildescr._x86_current_depths = current_depths failargs = guard_op.getfailargs() guard_opnum = guard_op.getopnum() guard_token = self.implement_guard_recovery(guard_opnum, @@ -911,10 +899,9 @@ # must be added by the genop_guard_list[]() assert guard_token is self.pending_guard_tokens[-1] - def regalloc_perform_guard(self, guard_op, faillocs, arglocs, resloc, - current_depths): + def regalloc_perform_guard(self, guard_op, faillocs, arglocs, resloc): self.regalloc_perform_with_guard(None, guard_op, faillocs, arglocs, - resloc, current_depths) + resloc) def load_effective_addr(self, sizereg, baseofs, scale, result, frm=imm0): self.mc.LEA(result, addr_add(frm, sizereg, baseofs, scale)) @@ -1038,13 +1025,14 @@ self.mc.MOV(tmp, loc) self.mc.MOV_sr(p, tmp.value) p += loc.get_width() - self._regalloc.reserve_param(p//WORD) # x is a location self.mc.CALL(x) self.mark_gc_roots(force_index) # if callconv != FFI_DEFAULT_ABI: self._fix_stdcall(callconv, p) + # + self._regalloc.needed_extra_stack_locations(p//WORD) def _fix_stdcall(self, callconv, p): from pypy.rlib.clibffi import FFI_STDCALL @@ -1127,9 +1115,9 @@ x = r10 remap_frame_layout(self, src_locs, dst_locs, X86_64_SCRATCH_REG) - 
self._regalloc.reserve_param(len(pass_on_stack)) self.mc.CALL(x) self.mark_gc_roots(force_index) + self._regalloc.needed_extra_stack_locations(len(pass_on_stack)) def call(self, addr, args, res): force_index = self.write_new_force_index() @@ -2136,7 +2124,6 @@ if reg in save_registers: self.mc.MOV_sr(p, reg.value) p += WORD - self._regalloc.reserve_param(p//WORD) # if gcrootmap.is_shadow_stack: args = [] @@ -2192,6 +2179,7 @@ if reg in save_registers: self.mc.MOV_rs(reg.value, p) p += WORD + self._regalloc.needed_extra_stack_locations(p//WORD) def call_reacquire_gil(self, gcrootmap, save_loc): # save the previous result (eax/xmm0) into the stack temporarily. @@ -2199,7 +2187,6 @@ # to save xmm0 in this case. if isinstance(save_loc, RegLoc) and not save_loc.is_xmm: self.mc.MOV_sr(WORD, save_loc.value) - self._regalloc.reserve_param(2) # call the reopenstack() function (also reacquiring the GIL) if gcrootmap.is_shadow_stack: args = [] @@ -2219,6 +2206,7 @@ # restore the result from the stack if isinstance(save_loc, RegLoc) and not save_loc.is_xmm: self.mc.MOV_rs(save_loc.value, WORD) + self._regalloc.needed_extra_stack_locations(2) def genop_guard_call_assembler(self, op, guard_op, guard_token, arglocs, result_loc): @@ -2495,11 +2483,6 @@ # copy of heap(nursery_free_adr), so that the final MOV below is # a no-op. 
- # reserve room for the argument to the real malloc and the - # saved XMM regs (on 32 bit: 8 * 2 words; on 64 bit: 16 * 1 - # word) - self._regalloc.reserve_param(1+16) - gcrootmap = self.cpu.gc_ll_descr.gcrootmap shadow_stack = (gcrootmap is not None and gcrootmap.is_shadow_stack) if not shadow_stack: @@ -2510,6 +2493,11 @@ slowpath_addr2 = self.malloc_slowpath2 self.mc.CALL(imm(slowpath_addr2)) + # reserve room for the argument to the real malloc and the + # saved XMM regs (on 32 bit: 8 * 2 words; on 64 bit: 16 * 1 + # word) + self._regalloc.needed_extra_stack_locations(1+16) + offset = self.mc.get_relative_pos() - jmp_adr assert 0 < offset <= 127 self.mc.overwrite(jmp_adr-1, chr(offset)) diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -168,7 +168,7 @@ def _prepare(self, inputargs, operations, allgcrefs): self.fm = X86FrameManager() - self.param_depth = 0 + self.min_frame_depth = 0 cpu = self.assembler.cpu operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations, allgcrefs) @@ -193,11 +193,9 @@ self.min_bytes_before_label = 13 return operations - def prepare_bridge(self, prev_depths, inputargs, arglocs, operations, - allgcrefs): + def prepare_bridge(self, inputargs, arglocs, operations, allgcrefs): operations = self._prepare(inputargs, operations, allgcrefs) self._update_bindings(arglocs, inputargs) - self.param_depth = prev_depths[1] self.min_bytes_before_label = 0 return operations @@ -205,8 +203,15 @@ self.min_bytes_before_label = max(self.min_bytes_before_label, at_least_position) - def reserve_param(self, n): - self.param_depth = max(self.param_depth, n) + def needed_extra_stack_locations(self, n): + # call *after* you needed extra stack locations: (%esp), (%esp+4)... 
+ min_frame_depth = self.fm.get_frame_depth() + n + if min_frame_depth > self.min_frame_depth: + self.min_frame_depth = min_frame_depth + + def get_final_frame_depth(self): + self.needed_extra_stack_locations(0) # update min_frame_depth + return self.min_frame_depth def _set_initial_bindings(self, inputargs): if IS_X86_64: @@ -376,25 +381,12 @@ def locs_for_fail(self, guard_op): return [self.loc(v) for v in guard_op.getfailargs()] - def get_current_depth(self): - # return (self.fm.frame_depth, self.param_depth), but trying to share - # the resulting tuple among several calls - arg0 = self.fm.get_frame_depth() - arg1 = self.param_depth - result = self.assembler._current_depths_cache - if result[0] != arg0 or result[1] != arg1: - result = (arg0, arg1) - self.assembler._current_depths_cache = result - return result - def perform_with_guard(self, op, guard_op, arglocs, result_loc): faillocs = self.locs_for_fail(guard_op) self.rm.position += 1 self.xrm.position += 1 - current_depths = self.get_current_depth() self.assembler.regalloc_perform_with_guard(op, guard_op, faillocs, - arglocs, result_loc, - current_depths) + arglocs, result_loc) if op.result is not None: self.possibly_free_var(op.result) self.possibly_free_vars(guard_op.getfailargs()) @@ -407,10 +399,8 @@ arglocs)) else: self.assembler.dump('%s(%s)' % (guard_op, arglocs)) - current_depths = self.get_current_depth() self.assembler.regalloc_perform_guard(guard_op, faillocs, arglocs, - result_loc, - current_depths) + result_loc) self.possibly_free_vars(guard_op.getfailargs()) def PerformDiscard(self, op, arglocs): @@ -1393,7 +1383,7 @@ self.force_spill_var(op.getarg(0)) def get_mark_gc_roots(self, gcrootmap, use_copy_area=False): - shape = gcrootmap.get_basic_shape(IS_X86_64) + shape = gcrootmap.get_basic_shape() for v, val in self.fm.bindings.items(): if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): assert isinstance(val, StackLoc) diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py 
b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -28,7 +28,7 @@ class MockGcRootMap(object): is_shadow_stack = False - def get_basic_shape(self, is_64_bit): + def get_basic_shape(self): return ['shape'] def add_frame_offset(self, shape, offset): shape.append(offset) @@ -184,6 +184,8 @@ self.addrs[1] = self.addrs[0] + 64 self.calls = [] def malloc_slowpath(size): + if self.gcrootmap is not None: # hook + self.gcrootmap.hook_malloc_slowpath() self.calls.append(size) # reset the nursery nadr = rffi.cast(lltype.Signed, self.nursery) @@ -257,3 +259,218 @@ assert gc_ll_descr.addrs[0] == nurs_adr + 24 # this should call slow path once assert gc_ll_descr.calls == [24] + + def test_save_regs_around_malloc(self): + S1 = lltype.GcStruct('S1') + S2 = lltype.GcStruct('S2', ('s0', lltype.Ptr(S1)), + ('s1', lltype.Ptr(S1)), + ('s2', lltype.Ptr(S1)), + ('s3', lltype.Ptr(S1)), + ('s4', lltype.Ptr(S1)), + ('s5', lltype.Ptr(S1)), + ('s6', lltype.Ptr(S1)), + ('s7', lltype.Ptr(S1)), + ('s8', lltype.Ptr(S1)), + ('s9', lltype.Ptr(S1)), + ('s10', lltype.Ptr(S1)), + ('s11', lltype.Ptr(S1)), + ('s12', lltype.Ptr(S1)), + ('s13', lltype.Ptr(S1)), + ('s14', lltype.Ptr(S1)), + ('s15', lltype.Ptr(S1))) + cpu = self.cpu + self.namespace = self.namespace.copy() + for i in range(16): + self.namespace['ds%i' % i] = cpu.fielddescrof(S2, 's%d' % i) + ops = ''' + [p0] + p1 = getfield_gc(p0, descr=ds0) + p2 = getfield_gc(p0, descr=ds1) + p3 = getfield_gc(p0, descr=ds2) + p4 = getfield_gc(p0, descr=ds3) + p5 = getfield_gc(p0, descr=ds4) + p6 = getfield_gc(p0, descr=ds5) + p7 = getfield_gc(p0, descr=ds6) + p8 = getfield_gc(p0, descr=ds7) + p9 = getfield_gc(p0, descr=ds8) + p10 = getfield_gc(p0, descr=ds9) + p11 = getfield_gc(p0, descr=ds10) + p12 = getfield_gc(p0, descr=ds11) + p13 = getfield_gc(p0, descr=ds12) + p14 = getfield_gc(p0, descr=ds13) + p15 = getfield_gc(p0, descr=ds14) + p16 = 
getfield_gc(p0, descr=ds15) + # + # now all registers are in use + p17 = call_malloc_nursery(40) + p18 = call_malloc_nursery(40) # overflow + # + finish(p1, p2, p3, p4, p5, p6, p7, p8, \ + p9, p10, p11, p12, p13, p14, p15, p16) + ''' + s2 = lltype.malloc(S2) + for i in range(16): + setattr(s2, 's%d' % i, lltype.malloc(S1)) + s2ref = lltype.cast_opaque_ptr(llmemory.GCREF, s2) + # + self.interpret(ops, [s2ref]) + gc_ll_descr = cpu.gc_ll_descr + gc_ll_descr.check_nothing_in_nursery() + assert gc_ll_descr.calls == [40] + # check the returned pointers + for i in range(16): + s1ref = self.cpu.get_latest_value_ref(i) + s1 = lltype.cast_opaque_ptr(lltype.Ptr(S1), s1ref) + assert s1 == getattr(s2, 's%d' % i) + + +class MockShadowStackRootMap(MockGcRootMap): + is_shadow_stack = True + MARKER_FRAME = 88 # this marker follows the frame addr + S1 = lltype.GcStruct('S1') + + def __init__(self): + self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 20, + flavor='raw') + # root_stack_top + self.addrs[0] = rffi.cast(lltype.Signed, self.addrs) + 3*WORD + # random stuff + self.addrs[1] = 123456 + self.addrs[2] = 654321 + self.check_initial_and_final_state() + self.callshapes = {} + self.should_see = [] + + def check_initial_and_final_state(self): + assert self.addrs[0] == rffi.cast(lltype.Signed, self.addrs) + 3*WORD + assert self.addrs[1] == 123456 + assert self.addrs[2] == 654321 + + def get_root_stack_top_addr(self): + return rffi.cast(lltype.Signed, self.addrs) + + def compress_callshape(self, shape, datablockwrapper): + assert shape[0] == 'shape' + return ['compressed'] + shape[1:] + + def write_callshape(self, mark, force_index): + assert mark[0] == 'compressed' + assert force_index not in self.callshapes + assert force_index == 42 + len(self.callshapes) + self.callshapes[force_index] = mark + + def hook_malloc_slowpath(self): + num_entries = self.addrs[0] - rffi.cast(lltype.Signed, self.addrs) + assert num_entries == 5*WORD # 3 initially, plus 2 by the asm frame + assert 
self.addrs[1] == 123456 # unchanged + assert self.addrs[2] == 654321 # unchanged + frame_addr = self.addrs[3] # pushed by the asm frame + assert self.addrs[4] == self.MARKER_FRAME # pushed by the asm frame + # + from pypy.jit.backend.x86.arch import FORCE_INDEX_OFS + addr = rffi.cast(rffi.CArrayPtr(lltype.Signed), + frame_addr + FORCE_INDEX_OFS) + force_index = addr[0] + assert force_index == 43 # in this test: the 2nd call_malloc_nursery + # + # The callshapes[43] saved above should list addresses both in the + # COPY_AREA and in the "normal" stack, where all the 16 values p1-p16 + # of test_save_regs_at_correct_place should have been stored. Here + # we replace them with new addresses, to emulate a moving GC. + shape = self.callshapes[force_index] + assert len(shape[1:]) == len(self.should_see) + new_objects = [None] * len(self.should_see) + for ofs in shape[1:]: + assert isinstance(ofs, int) # not a register at all here + addr = rffi.cast(rffi.CArrayPtr(lltype.Signed), frame_addr + ofs) + contains = addr[0] + for j in range(len(self.should_see)): + obj = self.should_see[j] + if contains == rffi.cast(lltype.Signed, obj): + assert new_objects[j] is None # duplicate? + break + else: + assert 0 # the value read from the stack looks random? 
+ new_objects[j] = lltype.malloc(self.S1) + addr[0] = rffi.cast(lltype.Signed, new_objects[j]) + self.should_see[:] = new_objects + + +class TestMallocShadowStack(BaseTestRegalloc): + + def setup_method(self, method): + cpu = CPU(None, None) + cpu.gc_ll_descr = GCDescrFastpathMalloc() + cpu.gc_ll_descr.gcrootmap = MockShadowStackRootMap() + cpu.setup_once() + for i in range(42): + cpu.reserve_some_free_fail_descr_number() + self.cpu = cpu + + def test_save_regs_at_correct_place(self): + cpu = self.cpu + gc_ll_descr = cpu.gc_ll_descr + S1 = gc_ll_descr.gcrootmap.S1 + S2 = lltype.GcStruct('S2', ('s0', lltype.Ptr(S1)), + ('s1', lltype.Ptr(S1)), + ('s2', lltype.Ptr(S1)), + ('s3', lltype.Ptr(S1)), + ('s4', lltype.Ptr(S1)), + ('s5', lltype.Ptr(S1)), + ('s6', lltype.Ptr(S1)), + ('s7', lltype.Ptr(S1)), + ('s8', lltype.Ptr(S1)), + ('s9', lltype.Ptr(S1)), + ('s10', lltype.Ptr(S1)), + ('s11', lltype.Ptr(S1)), + ('s12', lltype.Ptr(S1)), + ('s13', lltype.Ptr(S1)), + ('s14', lltype.Ptr(S1)), + ('s15', lltype.Ptr(S1))) + self.namespace = self.namespace.copy() + for i in range(16): + self.namespace['ds%i' % i] = cpu.fielddescrof(S2, 's%d' % i) + ops = ''' + [p0] + p1 = getfield_gc(p0, descr=ds0) + p2 = getfield_gc(p0, descr=ds1) + p3 = getfield_gc(p0, descr=ds2) + p4 = getfield_gc(p0, descr=ds3) + p5 = getfield_gc(p0, descr=ds4) + p6 = getfield_gc(p0, descr=ds5) + p7 = getfield_gc(p0, descr=ds6) + p8 = getfield_gc(p0, descr=ds7) + p9 = getfield_gc(p0, descr=ds8) + p10 = getfield_gc(p0, descr=ds9) + p11 = getfield_gc(p0, descr=ds10) + p12 = getfield_gc(p0, descr=ds11) + p13 = getfield_gc(p0, descr=ds12) + p14 = getfield_gc(p0, descr=ds13) + p15 = getfield_gc(p0, descr=ds14) + p16 = getfield_gc(p0, descr=ds15) + # + # now all registers are in use + p17 = call_malloc_nursery(40) + p18 = call_malloc_nursery(40) # overflow + # + finish(p1, p2, p3, p4, p5, p6, p7, p8, \ + p9, p10, p11, p12, p13, p14, p15, p16) + ''' + s2 = lltype.malloc(S2) + for i in range(16): + s1 = lltype.malloc(S1) 
+ setattr(s2, 's%d' % i, s1) + gc_ll_descr.gcrootmap.should_see.append(s1) + s2ref = lltype.cast_opaque_ptr(llmemory.GCREF, s2) + # + self.interpret(ops, [s2ref]) + gc_ll_descr.check_nothing_in_nursery() + assert gc_ll_descr.calls == [40] + gc_ll_descr.gcrootmap.check_initial_and_final_state() + # check the returned pointers + for i in range(16): + s1ref = self.cpu.get_latest_value_ref(i) + s1 = lltype.cast_opaque_ptr(lltype.Ptr(S1), s1ref) + for j in range(16): + assert s1 != getattr(s2, 's%d' % j) + assert s1 == gc_ll_descr.gcrootmap.should_see[i] diff --git a/pypy/jit/backend/x86/test/test_recompilation.py b/pypy/jit/backend/x86/test/test_recompilation.py --- a/pypy/jit/backend/x86/test/test_recompilation.py +++ b/pypy/jit/backend/x86/test/test_recompilation.py @@ -34,7 +34,6 @@ ''' loop = self.interpret(ops, [0]) previous = loop._jitcelltoken.compiled_loop_token.frame_depth - assert loop._jitcelltoken.compiled_loop_token.param_depth == 0 assert self.getint(0) == 20 ops = ''' [i1] @@ -51,7 +50,6 @@ bridge = self.attach_bridge(ops, loop, -2) descr = loop.operations[3].getdescr() new = descr._x86_bridge_frame_depth - assert descr._x86_bridge_param_depth == 0 # the force_spill() forces the stack to grow assert new > previous fail = self.run(loop, 0) @@ -116,10 +114,8 @@ loop_frame_depth = loop._jitcelltoken.compiled_loop_token.frame_depth bridge = self.attach_bridge(ops, loop, 6) guard_op = loop.operations[6] - assert loop._jitcelltoken.compiled_loop_token.param_depth == 0 # the force_spill() forces the stack to grow assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth - assert guard_op.getdescr()._x86_bridge_param_depth == 0 self.run(loop, 0, 0, 0, 0, 0, 0) assert self.getint(0) == 1 assert self.getint(1) == 20 diff --git a/pypy/jit/backend/x86/test/test_regalloc.py b/pypy/jit/backend/x86/test/test_regalloc.py --- a/pypy/jit/backend/x86/test/test_regalloc.py +++ b/pypy/jit/backend/x86/test/test_regalloc.py @@ -606,23 +606,37 @@ assert 
self.getints(9) == [0, 1, 1, 1, 1, 1, 1, 1, 1] class TestRegAllocCallAndStackDepth(BaseTestRegalloc): - def expected_param_depth(self, num_args): + def expected_frame_depth(self, num_call_args, num_pushed_input_args=0): # Assumes the arguments are all non-float if IS_X86_32: - return num_args + extra_esp = num_call_args + return extra_esp elif IS_X86_64: - return max(num_args - 6, 0) + # 'num_pushed_input_args' is for X86_64 only + extra_esp = max(num_call_args - 6, 0) + return num_pushed_input_args + extra_esp def test_one_call(self): ops = ''' - [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9] + [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i9b] i10 = call(ConstClass(f1ptr), i0, descr=f1_calldescr) - finish(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9) + finish(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9, i9b) ''' - loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9]) - assert self.getints(10) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9] + loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9, 8]) + assert self.getints(11) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9, 8] clt = loop._jitcelltoken.compiled_loop_token - assert clt.param_depth == self.expected_param_depth(1) + assert clt.frame_depth == self.expected_frame_depth(1, 5) + + def test_one_call_reverse(self): + ops = ''' + [i1, i2, i3, i4, i5, i6, i7, i8, i9, i9b, i0] + i10 = call(ConstClass(f1ptr), i0, descr=f1_calldescr) + finish(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9, i9b) + ''' + loop = self.interpret(ops, [7, 9, 9 ,9, 9, 9, 9, 9, 9, 8, 4]) + assert self.getints(11) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9, 8] + clt = loop._jitcelltoken.compiled_loop_token + assert clt.frame_depth == self.expected_frame_depth(1, 6) def test_two_calls(self): ops = ''' @@ -634,7 +648,7 @@ loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9]) assert self.getints(10) == [5*7, 7, 9, 9, 9, 9, 9, 9, 9, 9] clt = loop._jitcelltoken.compiled_loop_token - assert clt.param_depth == self.expected_param_depth(2) + assert clt.frame_depth == self.expected_frame_depth(2, 
5) def test_call_many_arguments(self): # NB: The first and last arguments in the call are constants. This @@ -648,25 +662,31 @@ loop = self.interpret(ops, [2, 3, 4, 5, 6, 7, 8, 9]) assert self.getint(0) == 55 clt = loop._jitcelltoken.compiled_loop_token - assert clt.param_depth == self.expected_param_depth(10) + assert clt.frame_depth == self.expected_frame_depth(10) def test_bridge_calls_1(self): ops = ''' [i0, i1] i2 = call(ConstClass(f1ptr), i0, descr=f1_calldescr) - guard_value(i2, 0, descr=fdescr1) [i2, i1] + guard_value(i2, 0, descr=fdescr1) [i2, i0, i1] finish(i1) ''' loop = self.interpret(ops, [4, 7]) assert self.getint(0) == 5 + clt = loop._jitcelltoken.compiled_loop_token + orgdepth = clt.frame_depth + assert orgdepth == self.expected_frame_depth(1, 2) + ops = ''' - [i2, i1] + [i2, i0, i1] i3 = call(ConstClass(f2ptr), i2, i1, descr=f2_calldescr) - finish(i3, descr=fdescr2) + finish(i3, i0, descr=fdescr2) ''' bridge = self.attach_bridge(ops, loop, -2) - assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2) + assert clt.frame_depth == max(orgdepth, self.expected_frame_depth(2, 2)) + assert loop.operations[-2].getdescr()._x86_bridge_frame_depth == \ + self.expected_frame_depth(2, 2) self.run(loop, 4, 7) assert self.getint(0) == 5*7 @@ -676,10 +696,14 @@ [i0, i1] i2 = call(ConstClass(f2ptr), i0, i1, descr=f2_calldescr) guard_value(i2, 0, descr=fdescr1) [i2] - finish(i1) + finish(i2) ''' loop = self.interpret(ops, [4, 7]) assert self.getint(0) == 4*7 + clt = loop._jitcelltoken.compiled_loop_token + orgdepth = clt.frame_depth + assert orgdepth == self.expected_frame_depth(2) + ops = ''' [i2] i3 = call(ConstClass(f1ptr), i2, descr=f1_calldescr) @@ -687,7 +711,9 @@ ''' bridge = self.attach_bridge(ops, loop, -2) - assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2) + assert clt.frame_depth == max(orgdepth, self.expected_frame_depth(1)) + assert 
loop.operations[-2].getdescr()._x86_bridge_frame_depth == \ + self.expected_frame_depth(1) self.run(loop, 4, 7) assert self.getint(0) == 29 diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -371,7 +371,7 @@ operations = [ ResOperation(rop.LABEL, [i0], None, descr=targettoken), - ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("hello"), 0], None), + ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("hello"), 0, 0], None), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), @@ -390,7 +390,7 @@ bridge = [ ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), - ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("bye"), 0], None), + ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("bye"), 0, 0], None), ResOperation(rop.JUMP, [i1b], None, descr=targettoken), ] bridge[1].setfailargs([i1b]) @@ -531,12 +531,12 @@ loop = """ [i0] label(i0, descr=preambletoken) - debug_merge_point('xyz', 0) + debug_merge_point('xyz', 0, 0) i1 = int_add(i0, 1) i2 = int_ge(i1, 10) guard_false(i2) [] label(i1, descr=targettoken) - debug_merge_point('xyz', 0) + debug_merge_point('xyz', 0, 0) i11 = int_add(i1, 1) i12 = int_ge(i11, 10) guard_false(i12) [] @@ -569,7 +569,7 @@ loop = """ [i0] label(i0, descr=targettoken) - debug_merge_point('xyz', 0) + debug_merge_point('xyz', 0, 0) i1 = int_add(i0, 1) i2 = int_ge(i1, 10) guard_false(i2) [] diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -1379,7 +1379,8 @@ elif opnum == rop.GUARD_NO_OVERFLOW: # Produced by int_xxx_ovf(). The pc is just after the opcode. # We get here because it did not used to overflow, but now it does. 
- return get_llexception(self.cpu, OverflowError()) + if not dont_change_position: + return get_llexception(self.cpu, OverflowError()) # elif opnum == rop.GUARD_OVERFLOW: # Produced by int_xxx_ovf(). The pc is just after the opcode. diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -289,8 +289,21 @@ assert isinstance(token, TargetToken) assert token.original_jitcell_token is None token.original_jitcell_token = trace.original_jitcell_token - - + + +def do_compile_loop(metainterp_sd, inputargs, operations, looptoken, + log=True, name=''): + metainterp_sd.logger_ops.log_loop(inputargs, operations, -2, + 'compiling', name=name) + return metainterp_sd.cpu.compile_loop(inputargs, operations, looptoken, + log=log, name=name) + +def do_compile_bridge(metainterp_sd, faildescr, inputargs, operations, + original_loop_token, log=True): + metainterp_sd.logger_ops.log_bridge(inputargs, operations, -2) + return metainterp_sd.cpu.compile_bridge(faildescr, inputargs, operations, + original_loop_token, log=log) + def send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, type): vinfo = jitdriver_sd.virtualizable_info if vinfo is not None: @@ -319,9 +332,9 @@ metainterp_sd.profiler.start_backend() debug_start("jit-backend") try: - asminfo = metainterp_sd.cpu.compile_loop(loop.inputargs, operations, - original_jitcell_token, - name=loopname) + asminfo = do_compile_loop(metainterp_sd, loop.inputargs, + operations, original_jitcell_token, + name=loopname) finally: debug_stop("jit-backend") metainterp_sd.profiler.end_backend() @@ -333,7 +346,6 @@ metainterp_sd.stats.compiled() metainterp_sd.log("compiled new " + type) # - loopname = jitdriver_sd.warmstate.get_location_str(greenkey) if asminfo is not None: ops_offset = asminfo.ops_offset else: @@ -365,9 +377,9 @@ metainterp_sd.profiler.start_backend() debug_start("jit-backend") try: - asminfo = 
metainterp_sd.cpu.compile_bridge(faildescr, inputargs, - operations, - original_loop_token) + asminfo = do_compile_bridge(metainterp_sd, faildescr, inputargs, + operations, + original_loop_token) finally: debug_stop("jit-backend") metainterp_sd.profiler.end_backend() diff --git a/pypy/jit/metainterp/graphpage.py b/pypy/jit/metainterp/graphpage.py --- a/pypy/jit/metainterp/graphpage.py +++ b/pypy/jit/metainterp/graphpage.py @@ -169,9 +169,9 @@ if op.getopnum() == rop.DEBUG_MERGE_POINT: jd_sd = self.metainterp_sd.jitdrivers_sd[op.getarg(0).getint()] if jd_sd._get_printable_location_ptr: - s = jd_sd.warmstate.get_location_str(op.getarglist()[2:]) + s = jd_sd.warmstate.get_location_str(op.getarglist()[3:]) s = s.replace(',', '.') # we use comma for argument splitting - op_repr = "debug_merge_point(%d, '%s')" % (op.getarg(1).getint(), s) + op_repr = "debug_merge_point(%d, %d, '%s')" % (op.getarg(1).getint(), op.getarg(2).getint(), s) lines.append(op_repr) if is_interesting_guard(op): tgt = op.getdescr()._debug_suboperations[0] diff --git a/pypy/jit/metainterp/logger.py b/pypy/jit/metainterp/logger.py --- a/pypy/jit/metainterp/logger.py +++ b/pypy/jit/metainterp/logger.py @@ -18,6 +18,10 @@ debug_start("jit-log-noopt-loop") logops = self._log_operations(inputargs, operations, ops_offset) debug_stop("jit-log-noopt-loop") + elif number == -2: + debug_start("jit-log-compiling-loop") + logops = self._log_operations(inputargs, operations, ops_offset) + debug_stop("jit-log-compiling-loop") else: debug_start("jit-log-opt-loop") debug_print("# Loop", number, '(%s)' % name , ":", type, @@ -31,6 +35,10 @@ debug_start("jit-log-noopt-bridge") logops = self._log_operations(inputargs, operations, ops_offset) debug_stop("jit-log-noopt-bridge") + elif number == -2: + debug_start("jit-log-compiling-bridge") + logops = self._log_operations(inputargs, operations, ops_offset) + debug_stop("jit-log-compiling-bridge") else: debug_start("jit-log-opt-bridge") debug_print("# bridge out of 
Guard", number, @@ -102,9 +110,9 @@ def repr_of_resop(self, op, ops_offset=None): if op.getopnum() == rop.DEBUG_MERGE_POINT: jd_sd = self.metainterp_sd.jitdrivers_sd[op.getarg(0).getint()] - s = jd_sd.warmstate.get_location_str(op.getarglist()[2:]) + s = jd_sd.warmstate.get_location_str(op.getarglist()[3:]) s = s.replace(',', '.') # we use comma for argument splitting - return "debug_merge_point(%d, '%s')" % (op.getarg(1).getint(), s) + return "debug_merge_point(%d, %d, '%s')" % (op.getarg(1).getint(), op.getarg(2).getint(), s) if ops_offset is None: offset = -1 else: @@ -141,7 +149,7 @@ if target_token.exported_state: for op in target_token.exported_state.inputarg_setup_ops: debug_print(' ' + self.repr_of_resop(op)) - + def _log_operations(self, inputargs, operations, ops_offset): if not have_debug_prints(): return diff --git a/pypy/jit/metainterp/optimizeopt/__init__.py b/pypy/jit/metainterp/optimizeopt/__init__.py --- a/pypy/jit/metainterp/optimizeopt/__init__.py +++ b/pypy/jit/metainterp/optimizeopt/__init__.py @@ -9,7 +9,7 @@ from pypy.jit.metainterp.optimizeopt.simplify import OptSimplify from pypy.jit.metainterp.optimizeopt.pure import OptPure from pypy.jit.metainterp.optimizeopt.earlyforce import OptEarlyForce -from pypy.rlib.jit import PARAMETERS +from pypy.rlib.jit import PARAMETERS, ENABLE_ALL_OPTS from pypy.rlib.unroll import unrolling_iterable from pypy.rlib.debug import debug_start, debug_stop, debug_print @@ -30,6 +30,9 @@ ALL_OPTS_LIST = [name for name, _ in ALL_OPTS] ALL_OPTS_NAMES = ':'.join([name for name, _ in ALL_OPTS]) +assert ENABLE_ALL_OPTS == ALL_OPTS_NAMES, ( + 'please fix rlib/jit.py to say ENABLE_ALL_OPTS = %r' % (ALL_OPTS_NAMES,)) + def build_opt_chain(metainterp_sd, enable_opts): config = metainterp_sd.config optimizations = [] diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py --- a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py +++ 
b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -398,6 +398,40 @@ with raises(InvalidLoop): self.optimize_loop(ops, ops) + def test_issue1045(self): + ops = """ + [i55] + i73 = int_mod(i55, 2) + i75 = int_rshift(i73, 63) + i76 = int_and(2, i75) + i77 = int_add(i73, i76) + i81 = int_eq(i77, 1) + i0 = int_ge(i55, 1) + guard_true(i0) [] + label(i55) + i3 = int_mod(i55, 2) + i5 = int_rshift(i3, 63) + i6 = int_and(2, i5) + i7 = int_add(i3, i6) + i8 = int_eq(i7, 1) + escape(i8) + jump(i55) + """ + expected = """ + [i55] + i73 = int_mod(i55, 2) + i75 = int_rshift(i73, 63) + i76 = int_and(2, i75) + i77 = int_add(i73, i76) + i81 = int_eq(i77, 1) + i0 = int_ge(i55, 1) + guard_true(i0) [] + label(i55, i81) + escape(i81) + jump(i55, i81) + """ + self.optimize_loop(ops, expected) + class OptRenameStrlen(Optimization): def propagate_forward(self, op): dispatch_opt(self, op) @@ -423,7 +457,7 @@ metainterp_sd = FakeMetaInterpStaticData(self.cpu) optimize_unroll(metainterp_sd, loop, [OptRenameStrlen(), OptPure()], True) - def test_optimizer_renaming_boxes(self): + def test_optimizer_renaming_boxes1(self): ops = """ [p1] i1 = strlen(p1) @@ -457,7 +491,6 @@ jump(p1, i11) """ self.optimize_loop(ops, expected) - class TestLLtype(OptimizeoptTestMultiLabel, LLtypeMixin): diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -9,7 +9,6 @@ from pypy.jit.metainterp.inliner import Inliner from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.metainterp.resume import Snapshot -from pypy.rlib.debug import debug_print import sys, os # FIXME: Introduce some VirtualOptimizer super class instead @@ -121,9 +120,9 @@ limit = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.retrace_limit if cell_token.retraced_count < limit: cell_token.retraced_count += 1 - debug_print('Retracing (%d/%d)' % 
(cell_token.retraced_count, limit)) + #debug_print('Retracing (%d/%d)' % (cell_token.retraced_count, limit)) else: - debug_print("Retrace count reached, jumping to preamble") + #debug_print("Retrace count reached, jumping to preamble") assert cell_token.target_tokens[0].virtual_state is None jumpop.setdescr(cell_token.target_tokens[0]) self.optimizer.send_extra_operation(jumpop) @@ -260,7 +259,7 @@ if op and op.result: preamble_value = exported_state.exported_values[op.result] value = self.optimizer.getvalue(op.result) - if not value.is_virtual(): + if not value.is_virtual() and not value.is_constant(): imp = ValueImporter(self, preamble_value, op) self.optimizer.importable_values[value] = imp newvalue = self.optimizer.getvalue(op.result) @@ -268,12 +267,14 @@ # note that emitting here SAME_AS should not happen, but # in case it does, we would prefer to be suboptimal in asm # to a fatal RPython exception. - if newresult is not op.result and not newvalue.is_constant(): + if newresult is not op.result and \ + not self.short_boxes.has_producer(newresult) and \ + not newvalue.is_constant(): op = ResOperation(rop.SAME_AS, [op.result], newresult) self.optimizer._newoperations.append(op) - if self.optimizer.loop.logops: - debug_print(' Falling back to add extra: ' + - self.optimizer.loop.logops.repr_of_resop(op)) + #if self.optimizer.loop.logops: + # debug_print(' Falling back to add extra: ' + + # self.optimizer.loop.logops.repr_of_resop(op)) self.optimizer.flush() self.optimizer.emitting_dissabled = False @@ -339,8 +340,8 @@ if i == len(newoperations): while j < len(jumpargs): a = jumpargs[j] - if self.optimizer.loop.logops: - debug_print('J: ' + self.optimizer.loop.logops.repr_of_arg(a)) + #if self.optimizer.loop.logops: + # debug_print('J: ' + self.optimizer.loop.logops.repr_of_arg(a)) self.import_box(a, inputargs, short_jumpargs, jumpargs) j += 1 else: @@ -351,11 +352,11 @@ if op.is_guard(): args = args + op.getfailargs() - if self.optimizer.loop.logops: - 
debug_print('OP: ' + self.optimizer.loop.logops.repr_of_resop(op)) + #if self.optimizer.loop.logops: + # debug_print('OP: ' + self.optimizer.loop.logops.repr_of_resop(op)) for a in args: - if self.optimizer.loop.logops: - debug_print('A: ' + self.optimizer.loop.logops.repr_of_arg(a)) + #if self.optimizer.loop.logops: + # debug_print('A: ' + self.optimizer.loop.logops.repr_of_arg(a)) self.import_box(a, inputargs, short_jumpargs, jumpargs) i += 1 newoperations = self.optimizer.get_newoperations() @@ -368,18 +369,18 @@ # that is compatible with the virtual state at the start of the loop modifier = VirtualStateAdder(self.optimizer) final_virtual_state = modifier.get_virtual_state(original_jumpargs) - debug_start('jit-log-virtualstate') - virtual_state.debug_print('Closed loop with ') + #debug_start('jit-log-virtualstate') + #virtual_state.debug_print('Closed loop with ') bad = {} if not virtual_state.generalization_of(final_virtual_state, bad): # We ended up with a virtual state that is not compatible # and we are thus unable to jump to the start of the loop - final_virtual_state.debug_print("Bad virtual state at end of loop, ", - bad) - debug_stop('jit-log-virtualstate') + #final_virtual_state.debug_print("Bad virtual state at end of loop, ", + # bad) + #debug_stop('jit-log-virtualstate') raise InvalidLoop - debug_stop('jit-log-virtualstate') + #debug_stop('jit-log-virtualstate') maxguards = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.max_retrace_guards if self.optimizer.emitted_guards > maxguards: @@ -442,9 +443,9 @@ self.ensure_short_op_emitted(self.short_boxes.producer(a), optimizer, seen) - if self.optimizer.loop.logops: - debug_print(' Emitting short op: ' + - self.optimizer.loop.logops.repr_of_resop(op)) + #if self.optimizer.loop.logops: + # debug_print(' Emitting short op: ' + + # self.optimizer.loop.logops.repr_of_resop(op)) optimizer.send_extra_operation(op) seen[op.result] = True @@ -525,8 +526,8 @@ args = jumpop.getarglist() modifier = 
VirtualStateAdder(self.optimizer) virtual_state = modifier.get_virtual_state(args) - debug_start('jit-log-virtualstate') - virtual_state.debug_print("Looking for ") + #debug_start('jit-log-virtualstate') + #virtual_state.debug_print("Looking for ") for target in cell_token.target_tokens: if not target.virtual_state: @@ -535,10 +536,10 @@ extra_guards = [] bad = {} - debugmsg = 'Did not match ' + #debugmsg = 'Did not match ' if target.virtual_state.generalization_of(virtual_state, bad): ok = True - debugmsg = 'Matched ' + #debugmsg = 'Matched ' else: try: cpu = self.optimizer.cpu @@ -547,13 +548,13 @@ extra_guards) ok = True - debugmsg = 'Guarded to match ' + #debugmsg = 'Guarded to match ' except InvalidLoop: pass - target.virtual_state.debug_print(debugmsg, bad) + #target.virtual_state.debug_print(debugmsg, bad) if ok: - debug_stop('jit-log-virtualstate') + #debug_stop('jit-log-virtualstate') values = [self.getvalue(arg) for arg in jumpop.getarglist()] @@ -574,13 +575,13 @@ newop = inliner.inline_op(shop) self.optimizer.send_extra_operation(newop) except InvalidLoop: - debug_print("Inlining failed unexpectedly", - "jumping to preamble instead") + #debug_print("Inlining failed unexpectedly", + # "jumping to preamble instead") assert cell_token.target_tokens[0].virtual_state is None jumpop.setdescr(cell_token.target_tokens[0]) self.optimizer.send_extra_operation(jumpop) return True - debug_stop('jit-log-virtualstate') + #debug_stop('jit-log-virtualstate') return False class ValueImporter(object): diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py --- a/pypy/jit/metainterp/optimizeopt/virtualstate.py +++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py @@ -681,13 +681,14 @@ self.synthetic[op] = True def debug_print(self, logops): - debug_start('jit-short-boxes') - for box, op in self.short_boxes.items(): - if op: - debug_print(logops.repr_of_arg(box) + ': ' + logops.repr_of_resop(op)) - else: - 
debug_print(logops.repr_of_arg(box) + ': None') - debug_stop('jit-short-boxes') + if 0: + debug_start('jit-short-boxes') + for box, op in self.short_boxes.items(): + if op: + debug_print(logops.repr_of_arg(box) + ': ' + logops.repr_of_resop(op)) + else: + debug_print(logops.repr_of_arg(box) + ': None') + debug_stop('jit-short-boxes') def operations(self): if not we_are_translated(): # For tests diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -974,9 +974,11 @@ any_operation = len(self.metainterp.history.operations) > 0 jitdriver_sd = self.metainterp.staticdata.jitdrivers_sd[jdindex] self.verify_green_args(jitdriver_sd, greenboxes) - self.debug_merge_point(jitdriver_sd, jdindex, self.metainterp.portal_call_depth, + self.debug_merge_point(jitdriver_sd, jdindex, + self.metainterp.portal_call_depth, + self.metainterp.call_ids[-1], greenboxes) - + if self.metainterp.seen_loop_header_for_jdindex < 0: if not any_operation: return @@ -1028,11 +1030,11 @@ assembler_call=True) raise ChangeFrame - def debug_merge_point(self, jitdriver_sd, jd_index, portal_call_depth, greenkey): + def debug_merge_point(self, jitdriver_sd, jd_index, portal_call_depth, current_call_id, greenkey): # debugging: produce a DEBUG_MERGE_POINT operation loc = jitdriver_sd.warmstate.get_location_str(greenkey) debug_print(loc) - args = [ConstInt(jd_index), ConstInt(portal_call_depth)] + greenkey + args = [ConstInt(jd_index), ConstInt(portal_call_depth), ConstInt(current_call_id)] + greenkey self.metainterp.history.record(rop.DEBUG_MERGE_POINT, args, None) @arguments("box", "label") @@ -1574,11 +1576,14 @@ self.call_pure_results = args_dict_box() self.heapcache = HeapCache() + self.call_ids = [] + self.current_call_id = 0 + def retrace_needed(self, trace): self.partial_trace = trace self.retracing_from = len(self.history.operations) - 1 self.heapcache.reset() - + def perform_call(self, jitcode, boxes, 
greenkey=None): # causes the metainterp to enter the given subfunction @@ -1592,6 +1597,8 @@ def newframe(self, jitcode, greenkey=None): if jitcode.is_portal: self.portal_call_depth += 1 + self.call_ids.append(self.current_call_id) + self.current_call_id += 1 if greenkey is not None and self.is_main_jitcode(jitcode): self.portal_trace_positions.append( (greenkey, len(self.history.operations))) @@ -1608,6 +1615,7 @@ jitcode = frame.jitcode if jitcode.is_portal: self.portal_call_depth -= 1 + self.call_ids.pop() if frame.greenkey is not None and self.is_main_jitcode(jitcode): self.portal_trace_positions.append( (None, len(self.history.operations))) @@ -1976,7 +1984,7 @@ # Found! Compile it as a loop. # raises in case it works -- which is the common case if self.partial_trace: - if start != self.retracing_from: + if start != self.retracing_from: raise SwitchToBlackhole(ABORT_BAD_LOOP) # For now self.compile_loop(original_boxes, live_arg_boxes, start, resumedescr) # creation of the loop was cancelled! @@ -2064,11 +2072,12 @@ pass # XXX we want to do something special in resume descr, # but not now elif opnum == rop.GUARD_NO_OVERFLOW: # an overflow now detected - self.execute_raised(OverflowError(), constant=True) - try: - self.finishframe_exception() - except ChangeFrame: - pass + if not dont_change_position: + self.execute_raised(OverflowError(), constant=True) + try: + self.finishframe_exception() + except ChangeFrame: + pass elif opnum == rop.GUARD_OVERFLOW: # no longer overflowing self.clear_exception() else: @@ -2084,7 +2093,7 @@ if not token.target_tokens: return None return token - + def compile_loop(self, original_boxes, live_arg_boxes, start, resume_at_jump_descr): num_green_args = self.jitdriver_sd.num_green_args greenkey = original_boxes[:num_green_args] @@ -2349,7 +2358,7 @@ # warmstate.py. 
virtualizable_box = self.virtualizable_boxes[-1] virtualizable = vinfo.unwrap_virtualizable_box(virtualizable_box) - assert not vinfo.gettoken(virtualizable) + assert not vinfo.is_token_nonnull_gcref(virtualizable) # fill the virtualizable with the local boxes self.synchronize_virtualizable() # diff --git a/pypy/jit/metainterp/resume.py b/pypy/jit/metainterp/resume.py --- a/pypy/jit/metainterp/resume.py +++ b/pypy/jit/metainterp/resume.py @@ -1101,14 +1101,14 @@ virtualizable = self.decode_ref(numb.nums[index]) if self.resume_after_guard_not_forced == 1: # in the middle of handle_async_forcing() - assert vinfo.gettoken(virtualizable) - vinfo.settoken(virtualizable, vinfo.TOKEN_NONE) + assert vinfo.is_token_nonnull_gcref(virtualizable) + vinfo.reset_token_gcref(virtualizable) else: # just jumped away from assembler (case 4 in the comment in # virtualizable.py) into tracing (case 2); check that vable_token # is and stays 0. Note the call to reset_vable_token() in # warmstate.py. - assert not vinfo.gettoken(virtualizable) + assert not vinfo.is_token_nonnull_gcref(virtualizable) return vinfo.write_from_resume_data_partial(virtualizable, self, numb) def load_value_of_type(self, TYPE, tagged): diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -144,7 +144,7 @@ 'int_mul': 1, 'guard_true': 2, 'int_sub': 2}) - def test_loop_invariant_mul_ovf(self): + def test_loop_invariant_mul_ovf1(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) def f(x, y): res = 0 @@ -235,6 +235,65 @@ 'guard_true': 4, 'int_sub': 4, 'jump': 3, 'int_mul': 3, 'int_add': 4}) + def test_loop_invariant_mul_ovf2(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) + def f(x, y): + res = 0 + while y > 0: + myjitdriver.can_enter_jit(x=x, y=y, res=res) + myjitdriver.jit_merge_point(x=x, y=y, res=res) + b = y * 2 + try: + res += ovfcheck(x * x) + 
b + except OverflowError: + res += 1 + y -= 1 + return res + res = self.meta_interp(f, [sys.maxint, 7]) + assert res == f(sys.maxint, 7) + self.check_trace_count(1) + res = self.meta_interp(f, [6, 7]) + assert res == 308 + + def test_loop_invariant_mul_bridge_ovf1(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x1', 'x2']) + def f(x1, x2, y): + res = 0 + while y > 0: + myjitdriver.can_enter_jit(x1=x1, x2=x2, y=y, res=res) + myjitdriver.jit_merge_point(x1=x1, x2=x2, y=y, res=res) + try: + res += ovfcheck(x1 * x1) + except OverflowError: + res += 1 + if y<32 and (y>>2)&1==0: + x1, x2 = x2, x1 + y -= 1 + return res + res = self.meta_interp(f, [6, sys.maxint, 48]) + assert res == f(6, sys.maxint, 48) + + def test_loop_invariant_mul_bridge_ovf2(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x1', 'x2', 'n']) + def f(x1, x2, n, y): + res = 0 + while y > 0: + myjitdriver.can_enter_jit(x1=x1, x2=x2, y=y, res=res, n=n) + myjitdriver.jit_merge_point(x1=x1, x2=x2, y=y, res=res, n=n) + try: + res += ovfcheck(x1 * x1) + except OverflowError: + res += 1 + y -= 1 + if y&4 == 0: + x1, x2 = x2, x1 + return res + res = self.meta_interp(f, [6, sys.maxint, 32, 48]) + assert res == f(6, sys.maxint, 32, 48) + res = self.meta_interp(f, [sys.maxint, 6, 32, 48]) + assert res == f(sys.maxint, 6, 32, 48) + + def test_loop_invariant_intbox(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) class I: diff --git a/pypy/jit/metainterp/test/test_compile.py b/pypy/jit/metainterp/test/test_compile.py --- a/pypy/jit/metainterp/test/test_compile.py +++ b/pypy/jit/metainterp/test/test_compile.py @@ -14,7 +14,7 @@ ts = typesystem.llhelper def __init__(self): self.seen = [] - def compile_loop(self, inputargs, operations, token, name=''): + def compile_loop(self, inputargs, operations, token, log=True, name=''): self.seen.append((inputargs, operations, token)) class FakeLogger(object): diff --git a/pypy/jit/metainterp/test/test_logger.py 
b/pypy/jit/metainterp/test/test_logger.py --- a/pypy/jit/metainterp/test/test_logger.py +++ b/pypy/jit/metainterp/test/test_logger.py @@ -54,7 +54,7 @@ class FakeJitDriver(object): class warmstate(object): get_location_str = staticmethod(lambda args: "dupa") - + class FakeMetaInterpSd: cpu = AbstractCPU() cpu.ts = self.ts @@ -77,7 +77,7 @@ equaloplists(loop.operations, oloop.operations) assert oloop.inputargs == loop.inputargs return logger, loop, oloop - + def test_simple(self): inp = ''' [i0, i1, i2, p3, p4, p5] @@ -116,12 +116,13 @@ def test_debug_merge_point(self): inp = ''' [] - debug_merge_point(0, 0) + debug_merge_point(0, 0, 0) ''' _, loop, oloop = self.reparse(inp, check_equal=False) assert loop.operations[0].getarg(1).getint() == 0 - assert oloop.operations[0].getarg(1)._get_str() == "dupa" - + assert loop.operations[0].getarg(2).getint() == 0 + assert oloop.operations[0].getarg(2)._get_str() == "dupa" + def test_floats(self): inp = ''' [f0] @@ -142,7 +143,7 @@ output = logger.log_loop(loop) assert output.splitlines()[-1] == "jump(i0, descr=)" pure_parse(output) - + def test_guard_descr(self): namespace = {'fdescr': BasicFailDescr()} inp = ''' @@ -154,7 +155,7 @@ output = logger.log_loop(loop) assert output.splitlines()[-1] == "guard_true(i0, descr=) [i0]" pure_parse(output) - + logger = Logger(self.make_metainterp_sd(), guard_number=False) output = logger.log_loop(loop) lastline = output.splitlines()[-1] diff --git a/pypy/jit/metainterp/test/test_quasiimmut.py b/pypy/jit/metainterp/test/test_quasiimmut.py --- a/pypy/jit/metainterp/test/test_quasiimmut.py +++ b/pypy/jit/metainterp/test/test_quasiimmut.py @@ -8,7 +8,7 @@ from pypy.jit.metainterp.quasiimmut import get_current_qmut_instance from pypy.jit.metainterp.test.support import LLJitMixin from pypy.jit.codewriter.policy import StopAtXPolicy -from pypy.rlib.jit import JitDriver, dont_look_inside +from pypy.rlib.jit import JitDriver, dont_look_inside, unroll_safe def test_get_current_qmut_instance(): @@ 
-480,6 +480,32 @@ assert res == 1 self.check_jitcell_token_count(2) + def test_for_loop_array(self): + myjitdriver = JitDriver(greens=[], reds=["n", "i"]) + class Foo(object): + _immutable_fields_ = ["x?[*]"] + def __init__(self, x): + self.x = x + f = Foo([1, 3, 5, 6]) + @unroll_safe + def g(v): + for x in f.x: + if x & 1 == 0: + v += 1 + return v + def main(n): + i = 0 + while i < n: + myjitdriver.jit_merge_point(n=n, i=i) + i = g(i) + return i + res = self.meta_interp(main, [10]) + assert res == 10 + self.check_resops({ + "int_add": 2, "int_lt": 2, "jump": 1, "guard_true": 2, + "guard_not_invalidated": 2 + }) + class TestLLtypeGreenFieldsTests(QuasiImmutTests, LLJitMixin): pass diff --git a/pypy/jit/metainterp/test/test_warmspot.py b/pypy/jit/metainterp/test/test_warmspot.py --- a/pypy/jit/metainterp/test/test_warmspot.py +++ b/pypy/jit/metainterp/test/test_warmspot.py @@ -13,7 +13,7 @@ class WarmspotTests(object): - + def test_basic(self): mydriver = JitDriver(reds=['a'], greens=['i']) @@ -77,16 +77,16 @@ self.meta_interp(f, [123, 10]) assert len(get_stats().locations) >= 4 for loc in get_stats().locations: - assert loc == (0, 123) + assert loc == (0, 0, 123) def test_set_param_enable_opts(self): from pypy.rpython.annlowlevel import llstr, hlstr - + myjitdriver = JitDriver(greens = [], reds = ['n']) class A(object): def m(self, n): return n-1 - + def g(n): while n > 0: myjitdriver.can_enter_jit(n=n) @@ -332,7 +332,7 @@ ts = llhelper translate_support_code = False stats = "stats" - + def get_fail_descr_number(self, d): return -1 @@ -352,7 +352,7 @@ return "not callable" driver = JitDriver(reds = ['red'], greens = ['green']) - + def f(green): red = 0 while red < 10: diff --git a/pypy/jit/metainterp/virtualizable.py b/pypy/jit/metainterp/virtualizable.py --- a/pypy/jit/metainterp/virtualizable.py +++ b/pypy/jit/metainterp/virtualizable.py @@ -262,15 +262,15 @@ force_now._dont_inline_ = True self.force_now = force_now - def gettoken(virtualizable): + def 
is_token_nonnull_gcref(virtualizable): virtualizable = cast_gcref_to_vtype(virtualizable) - return virtualizable.vable_token - self.gettoken = gettoken + return bool(virtualizable.vable_token) + self.is_token_nonnull_gcref = is_token_nonnull_gcref - def settoken(virtualizable, token): + def reset_token_gcref(virtualizable): virtualizable = cast_gcref_to_vtype(virtualizable) - virtualizable.vable_token = token - self.settoken = settoken + virtualizable.vable_token = VirtualizableInfo.TOKEN_NONE + self.reset_token_gcref = reset_token_gcref def _freeze_(self): return True diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -100,7 +100,7 @@ if not kwds.get('translate_support_code', False): warmrunnerdesc.metainterp_sd.profiler.finish() warmrunnerdesc.metainterp_sd.cpu.finish_once() - print '~~~ return value:', res + print '~~~ return value:', repr(res) while repeat > 1: print '~' * 79 res1 = interp.eval_graph(graph, args) diff --git a/pypy/jit/tool/test/test_oparser.py b/pypy/jit/tool/test/test_oparser.py --- a/pypy/jit/tool/test/test_oparser.py +++ b/pypy/jit/tool/test/test_oparser.py @@ -146,16 +146,18 @@ def test_debug_merge_point(self): x = ''' [] - debug_merge_point(0, "info") - debug_merge_point(0, 'info') - debug_merge_point(1, ' info') - debug_merge_point(0, '(stuff) #1') + debug_merge_point(0, 0, "info") + debug_merge_point(0, 0, 'info') + debug_merge_point(1, 1, ' info') + debug_merge_point(0, 0, '(stuff) #1') ''' loop = self.parse(x) - assert loop.operations[0].getarg(1)._get_str() == 'info' - assert loop.operations[1].getarg(1)._get_str() == 'info' - assert loop.operations[2].getarg(1)._get_str() == " info" - assert loop.operations[3].getarg(1)._get_str() == "(stuff) #1" + assert loop.operations[0].getarg(2)._get_str() == 'info' + assert loop.operations[0].getarg(1).value == 0 + assert loop.operations[1].getarg(2)._get_str() == 'info' + assert 
loop.operations[2].getarg(2)._get_str() == " info" + assert loop.operations[2].getarg(1).value == 1 + assert loop.operations[3].getarg(2)._get_str() == "(stuff) #1" def test_descr_with_obj_print(self): diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -5,14 +5,13 @@ from pypy.rlib import streamio from pypy.rlib.rarithmetic import r_longlong from pypy.rlib.rstring import StringBuilder -from pypy.module._file.interp_stream import (W_AbstractStream, StreamErrors, - wrap_streamerror, wrap_oserror_as_ioerror) +from pypy.module._file.interp_stream import W_AbstractStream, StreamErrors from pypy.module.posix.interp_posix import dispatch_filename from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.typedef import (TypeDef, GetSetProperty, interp_attrproperty, make_weakref_descr, interp_attrproperty_w) from pypy.interpreter.gateway import interp2app, unwrap_spec - +from pypy.interpreter.streamutil import wrap_streamerror, wrap_oserror_as_ioerror class W_File(W_AbstractStream): """An interp-level file object. 
This implements the same interface than diff --git a/pypy/module/_file/interp_stream.py b/pypy/module/_file/interp_stream.py --- a/pypy/module/_file/interp_stream.py +++ b/pypy/module/_file/interp_stream.py @@ -2,27 +2,13 @@ from pypy.rlib import streamio from pypy.rlib.streamio import StreamErrors -from pypy.interpreter.error import OperationError, wrap_oserror2 +from pypy.interpreter.error import OperationError from pypy.interpreter.baseobjspace import ObjSpace, Wrappable from pypy.interpreter.typedef import TypeDef from pypy.interpreter.gateway import interp2app +from pypy.interpreter.streamutil import wrap_streamerror, wrap_oserror_as_ioerror -def wrap_streamerror(space, e, w_filename=None): - if isinstance(e, streamio.StreamError): - return OperationError(space.w_ValueError, - space.wrap(e.message)) - elif isinstance(e, OSError): - return wrap_oserror_as_ioerror(space, e, w_filename) - else: - # should not happen: wrap_streamerror() is only called when - # StreamErrors = (OSError, StreamError) are raised - return OperationError(space.w_IOError, space.w_None) - -def wrap_oserror_as_ioerror(space, e, w_filename=None): - return wrap_oserror2(space, e, w_filename, - w_exception_class=space.w_IOError) - class W_AbstractStream(Wrappable): """Base class for interp-level objects that expose streams to app-level""" slock = None diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py --- a/pypy/module/_io/interp_iobase.py +++ b/pypy/module/_io/interp_iobase.py @@ -326,8 +326,11 @@ try: space.call_method(w_iobase, 'flush') except OperationError, e: - # if it's an IOError, ignore it - if not e.match(space, space.w_IOError): + # if it's an IOError or ValueError, ignore it (ValueError is + # raised if by chance we are trying to flush a file which has + # already been closed) + if not (e.match(space, space.w_IOError) or + e.match(space, space.w_ValueError)): raise diff --git a/pypy/module/_io/test/test_fileio.py b/pypy/module/_io/test/test_fileio.py 
--- a/pypy/module/_io/test/test_fileio.py +++ b/pypy/module/_io/test/test_fileio.py @@ -178,7 +178,7 @@ space.finish() assert tmpfile.read() == '42' -def test_flush_at_exit_IOError(): +def test_flush_at_exit_IOError_and_ValueError(): from pypy import conftest from pypy.tool.option import make_config, make_objspace @@ -190,7 +190,12 @@ def flush(self): raise IOError + class MyStream2(io.IOBase): + def flush(self): + raise ValueError + s = MyStream() + s2 = MyStream2() import sys; sys._keepalivesomewhereobscure = s """) space.finish() # the IOError has been ignored diff --git a/pypy/module/_md5/test/test_md5.py b/pypy/module/_md5/test/test_md5.py --- a/pypy/module/_md5/test/test_md5.py +++ b/pypy/module/_md5/test/test_md5.py @@ -28,7 +28,7 @@ assert self.md5.digest_size == 16 #assert self.md5.digestsize == 16 -- not on CPython assert self.md5.md5().digest_size == 16 - if sys.version >= (2, 5): + if sys.version_info >= (2, 5): assert self.md5.blocksize == 1 assert self.md5.md5().digestsize == 16 diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -11,6 +11,7 @@ from pypy.objspace.std.register_all import register_all from pypy.rlib.rarithmetic import ovfcheck from pypy.rlib.unroll import unrolling_iterable +from pypy.rlib.objectmodel import specialize from pypy.rpython.lltypesystem import lltype, rffi @@ -159,13 +160,15 @@ def make_array(mytype): + W_ArrayBase = globals()['W_ArrayBase'] + class W_Array(W_ArrayBase): itemsize = mytype.bytes typecode = mytype.typecode @staticmethod def register(typeorder): - typeorder[W_Array] = [] + typeorder[W_Array] = [(W_ArrayBase, None)] def __init__(self, space): self.space = space @@ -583,13 +586,29 @@ raise OperationError(space.w_ValueError, space.wrap(msg)) # Compare methods - def cmp__Array_ANY(space, self, other): - if isinstance(other, W_ArrayBase): - w_lst1 = array_tolist__Array(space, self) - w_lst2 = 
space.call_method(other, 'tolist') - return space.cmp(w_lst1, w_lst2) - else: - return space.w_NotImplemented + @specialize.arg(3) + def _cmp_impl(space, self, other, space_fn): + w_lst1 = array_tolist__Array(space, self) + w_lst2 = space.call_method(other, 'tolist') + return space_fn(w_lst1, w_lst2) + + def eq__Array_ArrayBase(space, self, other): + return _cmp_impl(space, self, other, space.eq) + + def ne__Array_ArrayBase(space, self, other): + return _cmp_impl(space, self, other, space.ne) + + def lt__Array_ArrayBase(space, self, other): + return _cmp_impl(space, self, other, space.lt) + + def le__Array_ArrayBase(space, self, other): + return _cmp_impl(space, self, other, space.le) + + def gt__Array_ArrayBase(space, self, other): + return _cmp_impl(space, self, other, space.gt) + + def ge__Array_ArrayBase(space, self, other): + return _cmp_impl(space, self, other, space.ge) # Misc methods diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -536,12 +536,6 @@ assert (a >= c) is False assert (c >= a) is True - assert cmp(a, a) == 0 - assert cmp(a, b) == 0 - assert cmp(a, c) < 0 - assert cmp(b, a) == 0 - assert cmp(c, a) > 0 - def test_reduce(self): import pickle a = self.array('i', [1, 2, 3]) @@ -851,8 +845,11 @@ cls.maxint = sys.maxint class AppTestArray(BaseArrayTests): + OPTIONS = {} + def setup_class(cls): - cls.space = gettestobjspace(usemodules=('array', 'struct', '_rawffi')) + cls.space = gettestobjspace(usemodules=('array', 'struct', '_rawffi'), + **cls.OPTIONS) cls.w_array = cls.space.appexec([], """(): import array return array.array @@ -874,3 +871,7 @@ a = self.array('b', range(4)) a[::-1] = a assert a == self.array('b', [3, 2, 1, 0]) + + +class AppTestArrayBuiltinShortcut(AppTestArray): + OPTIONS = {'objspace.std.builtinshortcut': True} diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h --- 
a/pypy/module/cpyext/include/object.h +++ b/pypy/module/cpyext/include/object.h @@ -56,6 +56,8 @@ #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) +#define _Py_ForgetReference(ob) /* nothing */ + #define Py_None (&_Py_NoneStruct) /* diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -1,10 +1,11 @@ from pypy.module.imp import importing from pypy.module._file.interp_file import W_File from pypy.rlib import streamio +from pypy.rlib.streamio import StreamErrors from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.module import Module from pypy.interpreter.gateway import unwrap_spec -from pypy.module._file.interp_stream import StreamErrors, wrap_streamerror +from pypy.interpreter.streamutil import wrap_streamerror def get_suffixes(space): diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -357,7 +357,7 @@ def test_cannot_write_pyc(self): import sys, os - p = os.path.join(sys.path[-1], 'readonly') + p = os.path.join(sys.path[0], 'readonly') try: os.chmod(p, 0555) except: diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py --- a/pypy/module/pypyjit/interp_resop.py +++ b/pypy/module/pypyjit/interp_resop.py @@ -72,7 +72,7 @@ Set a compiling hook that will be called each time a loop is optimized, but before assembler compilation. This allows to add additional optimizations on Python level. 
- + The hook will be called with the following signature: hook(jitdriver_name, loop_type, greenkey or guard_number, operations) @@ -121,13 +121,14 @@ ofs = ops_offset.get(op, 0) if op.opnum == rop.DEBUG_MERGE_POINT: jd_sd = jitdrivers_sd[op.getarg(0).getint()] - greenkey = op.getarglist()[2:] + greenkey = op.getarglist()[3:] repr = jd_sd.warmstate.get_location_str(greenkey) w_greenkey = wrap_greenkey(space, jd_sd.jitdriver, greenkey, repr) l_w.append(DebugMergePoint(space, jit_hooks._cast_to_gcref(op), logops.repr_of_resop(op), jd_sd.jitdriver.name, op.getarg(1).getint(), + op.getarg(2).getint(), w_greenkey)) else: l_w.append(WrappedOp(jit_hooks._cast_to_gcref(op), ofs, @@ -164,14 +165,16 @@ llres = res.llbox return WrappedOp(jit_hooks.resop_new(num, args, llres), offset, repr) - at unwrap_spec(repr=str, jd_name=str, call_depth=int) -def descr_new_dmp(space, w_tp, w_args, repr, jd_name, call_depth, w_greenkey): + at unwrap_spec(repr=str, jd_name=str, call_depth=int, call_id=int) +def descr_new_dmp(space, w_tp, w_args, repr, jd_name, call_depth, call_id, + w_greenkey): + args = [space.interp_w(WrappedBox, w_arg).llbox for w_arg in space.listview(w_args)] num = rop.DEBUG_MERGE_POINT return DebugMergePoint(space, jit_hooks.resop_new(num, args, jit_hooks.emptyval()), - repr, jd_name, call_depth, w_greenkey) + repr, jd_name, call_depth, call_id, w_greenkey) class WrappedOp(Wrappable): """ A class representing a single ResOperation, wrapped nicely @@ -206,10 +209,13 @@ jit_hooks.resop_setresult(self.op, box.llbox) class DebugMergePoint(WrappedOp): - def __init__(self, space, op, repr_of_resop, jd_name, call_depth, w_greenkey): + def __init__(self, space, op, repr_of_resop, jd_name, call_depth, call_id, + w_greenkey): + WrappedOp.__init__(self, op, -1, repr_of_resop) self.jd_name = jd_name self.call_depth = call_depth + self.call_id = call_id self.w_greenkey = w_greenkey def get_pycode(self, space): @@ -246,6 +252,7 @@ pycode = GetSetProperty(DebugMergePoint.get_pycode), 
bytecode_no = GetSetProperty(DebugMergePoint.get_bytecode_no), call_depth = interp_attrproperty("call_depth", cls=DebugMergePoint), + call_id = interp_attrproperty("call_id", cls=DebugMergePoint), jitdriver_name = GetSetProperty(DebugMergePoint.get_jitdriver_name), ) DebugMergePoint.acceptable_as_base_class = False diff --git a/pypy/module/pypyjit/test/test_jit_hook.py b/pypy/module/pypyjit/test/test_jit_hook.py --- a/pypy/module/pypyjit/test/test_jit_hook.py +++ b/pypy/module/pypyjit/test/test_jit_hook.py @@ -54,7 +54,7 @@ oplist = parse(""" [i1, i2, p2] i3 = int_add(i1, i2) - debug_merge_point(0, 0, 0, 0, ConstPtr(ptr0)) + debug_merge_point(0, 0, 0, 0, 0, ConstPtr(ptr0)) guard_nonnull(p2) [] guard_true(i3) [] """, namespace={'ptr0': code_gcref}).operations @@ -87,7 +87,7 @@ def interp_on_abort(): pypy_hooks.on_abort(ABORT_TOO_LONG, pypyjitdriver, greenkey, 'blah') - + cls.w_on_compile = space.wrap(interp2app(interp_on_compile)) cls.w_on_compile_bridge = space.wrap(interp2app(interp_on_compile_bridge)) cls.w_on_abort = space.wrap(interp2app(interp_on_abort)) @@ -105,7 +105,7 @@ def hook(name, looptype, tuple_or_guard_no, ops, asmstart, asmlen): all.append((name, looptype, tuple_or_guard_no, ops)) - + self.on_compile() pypyjit.set_compile_hook(hook) assert not all @@ -123,6 +123,7 @@ assert dmp.pycode is self.f.func_code assert dmp.greenkey == (self.f.func_code, 0, False) assert dmp.call_depth == 0 + assert dmp.call_id == 0 assert int_add.name == 'int_add' assert int_add.num == self.int_add_num self.on_compile_bridge() @@ -151,18 +152,18 @@ def test_non_reentrant(self): import pypyjit l = [] - + def hook(*args): l.append(None) self.on_compile() self.on_compile_bridge() - + pypyjit.set_compile_hook(hook) self.on_compile() assert len(l) == 1 # and did not crash self.on_compile_bridge() assert len(l) == 2 # and did not crash - + def test_on_compile_types(self): import pypyjit l = [] @@ -182,7 +183,7 @@ def hook(jitdriver_name, greenkey, reason): 
l.append((jitdriver_name, reason)) - + pypyjit.set_abort_hook(hook) self.on_abort() assert l == [('pypyjit', 'ABORT_TOO_LONG')] @@ -224,13 +225,14 @@ def f(): pass - op = DebugMergePoint([Box(0)], 'repr', 'pypyjit', 2, (f.func_code, 0, 0)) + op = DebugMergePoint([Box(0)], 'repr', 'pypyjit', 2, 3, (f.func_code, 0, 0)) assert op.bytecode_no == 0 assert op.pycode is f.func_code assert repr(op) == 'repr' assert op.jitdriver_name == 'pypyjit' assert op.num == self.dmp_num assert op.call_depth == 2 - op = DebugMergePoint([Box(0)], 'repr', 'notmain', 5, ('str',)) + assert op.call_id == 3 + op = DebugMergePoint([Box(0)], 'repr', 'notmain', 5, 4, ('str',)) raises(AttributeError, 'op.pycode') assert op.call_depth == 5 diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -60,6 +60,9 @@ stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = pipe.communicate() + if getattr(pipe, 'returncode', 0) < 0: + raise IOError("subprocess was killed by signal %d" % ( + pipe.returncode,)) if stderr.startswith('SKIP:'): py.test.skip(stderr) if stderr.startswith('debug_alloc.h:'): # lldebug builds diff --git a/pypy/module/pypyjit/test_pypy_c/test_alloc.py b/pypy/module/pypyjit/test_pypy_c/test_alloc.py new file mode 100644 --- /dev/null +++ b/pypy/module/pypyjit/test_pypy_c/test_alloc.py @@ -0,0 +1,26 @@ +import py, sys +from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC + +class TestAlloc(BaseTestPyPyC): + + SIZES = dict.fromkeys([2 ** n for n in range(26)] + # up to 32MB + [2 ** n - 1 for n in range(26)]) + + def test_newstr_constant_size(self): + for size in TestAlloc.SIZES: + yield self.newstr_constant_size, size + + def newstr_constant_size(self, size): + src = """if 1: + N = %(size)d + part_a = 'a' * N + part_b = 'b' * N + for i in xrange(20): + ao = '%%s%%s' %% (part_a, part_b) + def 
main(): + return 42 +""" % {'size': size} + log = self.run(src, [], threshold=10) + assert log.result == 42 + loop, = log.loops_by_filename(self.filepath) + # assert did not crash diff --git a/pypy/module/select/test/test_ztranslation.py b/pypy/module/select/test/test_ztranslation.py new file mode 100644 --- /dev/null +++ b/pypy/module/select/test/test_ztranslation.py @@ -0,0 +1,5 @@ + +from pypy.objspace.fake.checkmodule import checkmodule + +def test_select_translates(): + checkmodule('select') diff --git a/pypy/module/test_lib_pypy/test_collections.py b/pypy/module/test_lib_pypy/test_collections.py --- a/pypy/module/test_lib_pypy/test_collections.py +++ b/pypy/module/test_lib_pypy/test_collections.py @@ -6,7 +6,7 @@ from pypy.conftest import gettestobjspace -class AppTestcStringIO: +class AppTestCollections: def test_copy(self): import _collections def f(): diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -103,6 +103,7 @@ 'terminator', '_version_tag?', 'name?', + 'mro_w?[*]', ] # for config.objspace.std.getattributeshortcut diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -392,6 +392,9 @@ class JitHintError(Exception): """Inconsistency in the JIT hints.""" +ENABLE_ALL_OPTS = ( + 'intbounds:rewrite:virtualize:string:earlyforce:pure:heap:ffi:unroll') + PARAMETER_DOCS = { 'threshold': 'number of times a loop has to run for it to become hot', 'function_threshold': 'number of times a function must run for it to become traced from start', @@ -402,7 +405,8 @@ 'retrace_limit': 'how many times we can try retracing before giving up', 'max_retrace_guards': 'number of extra guards a retrace can cause', 'max_unroll_loops': 'number of extra unrollings a loop can cause', - 'enable_opts': 'optimizations to enable or all, INTERNAL USE ONLY' + 'enable_opts': 'INTERNAL USE ONLY: optimizations to enable, or all = %s' % + 
ENABLE_ALL_OPTS, } PARAMETERS = {'threshold': 1039, # just above 1024, prime diff --git a/pypy/rpython/lltypesystem/rlist.py b/pypy/rpython/lltypesystem/rlist.py --- a/pypy/rpython/lltypesystem/rlist.py +++ b/pypy/rpython/lltypesystem/rlist.py @@ -392,7 +392,11 @@ ('list', r_list.lowleveltype), ('index', Signed))) self.ll_listiter = ll_listiter - self.ll_listnext = ll_listnext + if (isinstance(r_list, FixedSizeListRepr) + and not r_list.listitem.mutated): + self.ll_listnext = ll_listnext_foldable + else: + self.ll_listnext = ll_listnext self.ll_getnextindex = ll_getnextindex def ll_listiter(ITERPTR, lst): @@ -409,5 +413,14 @@ iter.index = index + 1 # cannot overflow because index < l.length return l.ll_getitem_fast(index) +def ll_listnext_foldable(iter): + from pypy.rpython.rlist import ll_getitem_foldable_nonneg + l = iter.list + index = iter.index + if index >= l.ll_length(): + raise StopIteration + iter.index = index + 1 # cannot overflow because index < l.length + return ll_getitem_foldable_nonneg(l, index) + def ll_getnextindex(iter): return iter.index diff --git a/pypy/rpython/lltypesystem/rstr.py b/pypy/rpython/lltypesystem/rstr.py --- a/pypy/rpython/lltypesystem/rstr.py +++ b/pypy/rpython/lltypesystem/rstr.py @@ -62,6 +62,14 @@ @jit.oopspec('stroruni.copy_contents(src, dst, srcstart, dststart, length)') @enforceargs(None, None, int, int, int) def copy_string_contents(src, dst, srcstart, dststart, length): + """Copies 'length' characters from the 'src' string to the 'dst' + string, starting at position 'srcstart' and 'dststart'.""" + # xxx Warning: don't try to do this at home. It relies on a lot + # of details to be sure that it works correctly in all cases. + # Notably: no GC operation at all from the first cast_ptr_to_adr() + # because it might move the strings. The keepalive_until_here() + # are obscurely essential to make sure that the strings stay alive + # longer than the raw_memcopy(). 
assert srcstart >= 0 assert dststart >= 0 assert length >= 0 diff --git a/pypy/rpython/memory/gc/minimark.py b/pypy/rpython/memory/gc/minimark.py --- a/pypy/rpython/memory/gc/minimark.py +++ b/pypy/rpython/memory/gc/minimark.py @@ -608,6 +608,11 @@ specified as 0 if the object is not varsized. The returned object is fully initialized and zero-filled.""" # + # Here we really need a valid 'typeid', not 0 (as the JIT might + # try to send us if there is still a bug). + ll_assert(bool(self.combine(typeid, 0)), + "external_malloc: typeid == 0") + # # Compute the total size, carefully checking for overflows. size_gc_header = self.gcheaderbuilder.size_gc_header nonvarsize = size_gc_header + self.fixed_size(typeid) diff --git a/pypy/rpython/memory/gctransform/asmgcroot.py b/pypy/rpython/memory/gctransform/asmgcroot.py --- a/pypy/rpython/memory/gctransform/asmgcroot.py +++ b/pypy/rpython/memory/gctransform/asmgcroot.py @@ -442,6 +442,8 @@ ll_assert(location >= 0, "negative location") kind = location & LOC_MASK offset = location & ~ LOC_MASK + if IS_64_BITS: + offset <<= 1 if kind == LOC_REG: # register if location == LOC_NOWHERE: return llmemory.NULL diff --git a/pypy/rpython/rclass.py b/pypy/rpython/rclass.py --- a/pypy/rpython/rclass.py +++ b/pypy/rpython/rclass.py @@ -364,6 +364,8 @@ def get_ll_hash_function(self): return ll_inst_hash + get_ll_fasthash_function = get_ll_hash_function + def rtype_type(self, hop): raise NotImplementedError diff --git a/pypy/rpython/test/test_rdict.py b/pypy/rpython/test/test_rdict.py --- a/pypy/rpython/test/test_rdict.py +++ b/pypy/rpython/test/test_rdict.py @@ -449,6 +449,21 @@ assert r_AB_dic.lowleveltype == r_BA_dic.lowleveltype + def test_identity_hash_is_fast(self): + class A(object): + pass + + def f(): + return {A(): 1} + + t = TranslationContext() + s = t.buildannotator().build_types(f, []) + rtyper = t.buildrtyper() + rtyper.specialize() + + r_dict = rtyper.getrepr(s) + assert not hasattr(r_dict.lowleveltype.TO.entries.TO.OF, 
"f_hash") + def test_tuple_dict(self): def f(i): d = {} diff --git a/pypy/rpython/test/test_rlist.py b/pypy/rpython/test/test_rlist.py --- a/pypy/rpython/test/test_rlist.py +++ b/pypy/rpython/test/test_rlist.py @@ -8,6 +8,7 @@ from pypy.rpython.rlist import * from pypy.rpython.lltypesystem.rlist import ListRepr, FixedSizeListRepr, ll_newlist, ll_fixed_newlist from pypy.rpython.lltypesystem import rlist as ll_rlist +from pypy.rpython.llinterp import LLException from pypy.rpython.ootypesystem import rlist as oo_rlist from pypy.rpython.rint import signed_repr from pypy.objspace.flow.model import Constant, Variable @@ -1477,6 +1478,80 @@ assert func1.oopspec == 'list.getitem_foldable(l, index)' assert not hasattr(func2, 'oopspec') + def test_iterate_over_immutable_list(self): + from pypy.rpython import rlist + class MyException(Exception): + pass + lst = list('abcdef') + def dummyfn(): + total = 0 + for c in lst: + total += ord(c) + return total + # + prev = rlist.ll_getitem_foldable_nonneg + try: + def seen_ok(l, index): + if index == 5: + raise KeyError # expected case + return prev(l, index) + rlist.ll_getitem_foldable_nonneg = seen_ok + e = raises(LLException, self.interpret, dummyfn, []) + assert 'KeyError' in str(e.value) + finally: + rlist.ll_getitem_foldable_nonneg = prev + + def test_iterate_over_immutable_list_quasiimmut_attr(self): + from pypy.rpython import rlist + class MyException(Exception): + pass + class Foo: + _immutable_fields_ = ['lst?[*]'] + lst = list('abcdef') + foo = Foo() + def dummyfn(): + total = 0 + for c in foo.lst: + total += ord(c) + return total + # + prev = rlist.ll_getitem_foldable_nonneg + try: + def seen_ok(l, index): + if index == 5: + raise KeyError # expected case + return prev(l, index) + rlist.ll_getitem_foldable_nonneg = seen_ok + e = raises(LLException, self.interpret, dummyfn, []) + assert 'KeyError' in str(e.value) + finally: + rlist.ll_getitem_foldable_nonneg = prev + + def test_iterate_over_mutable_list(self): + from 
pypy.rpython import rlist + class MyException(Exception): + pass + lst = list('abcdef') + def dummyfn(): + total = 0 + for c in lst: + total += ord(c) + lst[0] = 'x' + return total + # + prev = rlist.ll_getitem_foldable_nonneg + try: + def seen_ok(l, index): + if index == 5: + raise KeyError # expected case + return prev(l, index) + rlist.ll_getitem_foldable_nonneg = seen_ok + res = self.interpret(dummyfn, []) + assert res == sum(map(ord, 'abcdef')) + finally: + rlist.ll_getitem_foldable_nonneg = prev + + class TestOOtype(BaseTestRlist, OORtypeMixin): rlist = oo_rlist type_system = 'ootype' diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -93,7 +93,7 @@ end_index += 1 op.asm = '\n'.join([asm[i][1] for i in range(asm_index, end_index)]) return loop - + def _asm_disassemble(self, d, origin_addr, tp): from pypy.jit.backend.x86.tool.viewcode import machine_code_dump return list(machine_code_dump(d, tp, origin_addr)) @@ -109,7 +109,7 @@ if not argspec.strip(): return [], None if opname == 'debug_merge_point': - return argspec.split(", ", 1), None + return argspec.split(", ", 2), None else: args = argspec.split(', ') descr = None @@ -159,7 +159,7 @@ for op in operations: if op.name == 'debug_merge_point': self.inline_level = int(op.args[0]) - self.parse_code_data(op.args[1][1:-1]) + self.parse_code_data(op.args[2][1:-1]) break else: self.inline_level = 0 @@ -417,7 +417,7 @@ part.descr = descrs[i] part.comment = trace.comment parts.append(part) - + return parts def parse_log_counts(input, loops): diff --git a/pypy/tool/jitlogparser/test/test_parser.py b/pypy/tool/jitlogparser/test/test_parser.py --- a/pypy/tool/jitlogparser/test/test_parser.py +++ b/pypy/tool/jitlogparser/test/test_parser.py @@ -29,7 +29,7 @@ def test_parse_non_code(): ops = parse(''' [] - debug_merge_point(0, "SomeRandomStuff") + debug_merge_point(0, 0, "SomeRandomStuff") ''') res = 
Function.from_operations(ops.operations, LoopStorage()) assert len(res.chunks) == 1 @@ -39,10 +39,10 @@ ops = parse(''' [i0] label() - debug_merge_point(0, " #10 ADD") - debug_merge_point(0, " #11 SUB") + debug_merge_point(0, 0, " #10 ADD") + debug_merge_point(0, 0, " #11 SUB") i1 = int_add(i0, 1) - debug_merge_point(0, " #11 SUB") + debug_merge_point(0, 0, " #11 SUB") i2 = int_add(i1, 1) ''') res = Function.from_operations(ops.operations, LoopStorage(), loopname='') @@ -57,12 +57,12 @@ def test_inlined_call(): ops = parse(""" [] - debug_merge_point(0, ' #28 CALL_FUNCTION') + debug_merge_point(0, 0, ' #28 CALL_FUNCTION') i18 = getfield_gc(p0, descr=) - debug_merge_point(1, ' #0 LOAD_FAST') - debug_merge_point(1, ' #3 LOAD_CONST') - debug_merge_point(1, ' #7 RETURN_VALUE') - debug_merge_point(0, ' #31 STORE_FAST') + debug_merge_point(1, 1, ' #0 LOAD_FAST') + debug_merge_point(1, 1, ' #3 LOAD_CONST') + debug_merge_point(1, 1, ' #7 RETURN_VALUE') + debug_merge_point(0, 0, ' #31 STORE_FAST') """) res = Function.from_operations(ops.operations, LoopStorage()) assert len(res.chunks) == 3 # two chunks + inlined call @@ -75,10 +75,10 @@ def test_name(): ops = parse(''' [i0] - debug_merge_point(0, " #10 ADD") - debug_merge_point(0, " #11 SUB") + debug_merge_point(0, 0, " #10 ADD") + debug_merge_point(0, 0, " #11 SUB") i1 = int_add(i0, 1) - debug_merge_point(0, " #11 SUB") + debug_merge_point(0, 0, " #11 SUB") i2 = int_add(i1, 1) ''') res = Function.from_operations(ops.operations, LoopStorage()) @@ -92,10 +92,10 @@ ops = parse(''' [i0] i3 = int_add(i0, 1) - debug_merge_point(0, " #10 ADD") - debug_merge_point(0, " #11 SUB") + debug_merge_point(0, 0, " #10 ADD") + debug_merge_point(0, 0, " #11 SUB") i1 = int_add(i0, 1) - debug_merge_point(0, " #11 SUB") + debug_merge_point(0, 0, " #11 SUB") i2 = int_add(i1, 1) ''') res = Function.from_operations(ops.operations, LoopStorage()) @@ -105,10 +105,10 @@ fname = str(py.path.local(__file__).join('..', 'x.py')) ops = parse(''' [i0, i1] 
- debug_merge_point(0, " #0 LOAD_FAST") - debug_merge_point(0, " #3 LOAD_FAST") - debug_merge_point(0, " #6 BINARY_ADD") - debug_merge_point(0, " #7 RETURN_VALUE") + debug_merge_point(0, 0, " #0 LOAD_FAST") + debug_merge_point(0, 0, " #3 LOAD_FAST") + debug_merge_point(0, 0, " #6 BINARY_ADD") + debug_merge_point(0, 0, " #7 RETURN_VALUE") ''' % locals()) res = Function.from_operations(ops.operations, LoopStorage()) assert res.chunks[1].lineno == 3 @@ -119,11 +119,11 @@ fname = str(py.path.local(__file__).join('..', 'x.py')) ops = parse(''' [i0, i1] - debug_merge_point(0, " #9 LOAD_FAST") - debug_merge_point(0, " #12 LOAD_CONST") - debug_merge_point(0, " #22 LOAD_CONST") - debug_merge_point(0, " #28 LOAD_CONST") - debug_merge_point(0, " #6 SETUP_LOOP") + debug_merge_point(0, 0, " #9 LOAD_FAST") + debug_merge_point(0, 0, " #12 LOAD_CONST") + debug_merge_point(0, 0, " #22 LOAD_CONST") + debug_merge_point(0, 0, " #28 LOAD_CONST") + debug_merge_point(0, 0, " #6 SETUP_LOOP") ''' % locals()) res = Function.from_operations(ops.operations, LoopStorage()) assert res.linerange == (7, 9) @@ -135,7 +135,7 @@ fname = str(py.path.local(__file__).join('..', 'x.py')) ops = parse(""" [p6, p1] - debug_merge_point(0, ' #17 FOR_ITER') + debug_merge_point(0, 0, ' #17 FOR_ITER') guard_class(p6, 144264192, descr=) p12 = getfield_gc(p6, descr=) """ % locals()) @@ -181,7 +181,7 @@ def test_parsing_strliteral(): loop = parse(""" - debug_merge_point(0, 'StrLiteralSearch at 11/51 [17, 8, 3, 1, 1, 1, 1, 51, 0, 19, 51, 1]') + debug_merge_point(0, 0, 'StrLiteralSearch at 11/51 [17, 8, 3, 1, 1, 1, 1, 51, 0, 19, 51, 1]') """) ops = Function.from_operations(loop.operations, LoopStorage()) chunk = ops.chunks[0] @@ -193,12 +193,12 @@ loop = parse(""" # Loop 0 : loop with 19 ops [p0, p1, p2, p3, i4] - debug_merge_point(0, ' #15 COMPARE_OP') + debug_merge_point(0, 0, ' #15 COMPARE_OP') +166: i6 = int_lt(i4, 10000) guard_true(i6, descr=) [p1, p0, p2, p3, i4] - debug_merge_point(0, ' #27 INPLACE_ADD') + 
debug_merge_point(0, 0, ' #27 INPLACE_ADD') +179: i8 = int_add(i4, 1) - debug_merge_point(0, ' #31 JUMP_ABSOLUTE') + debug_merge_point(0, 0, ' #31 JUMP_ABSOLUTE') +183: i10 = getfield_raw(40564608, descr=) +191: i12 = int_sub(i10, 1) +195: setfield_raw(40564608, i12, descr=) @@ -287,8 +287,8 @@ def test_parse_nonpython(): loop = parse(""" [] - debug_merge_point(0, 'random') - debug_merge_point(0, ' #15 COMPARE_OP') + debug_merge_point(0, 0, 'random') + debug_merge_point(0, 0, ' #15 COMPARE_OP') """) f = Function.from_operations(loop.operations, LoopStorage()) assert f.chunks[-1].filename == 'x.py' diff --git a/pypy/translator/c/gcc/instruction.py b/pypy/translator/c/gcc/instruction.py --- a/pypy/translator/c/gcc/instruction.py +++ b/pypy/translator/c/gcc/instruction.py @@ -13,13 +13,17 @@ ARGUMENT_REGISTERS_64 = ('%rdi', '%rsi', '%rdx', '%rcx', '%r8', '%r9') -def frameloc_esp(offset): +def frameloc_esp(offset, wordsize): assert offset >= 0 - assert offset % 4 == 0 + assert offset % wordsize == 0 + if wordsize == 8: # in this case, there are 3 null bits, but we + offset >>= 1 # only need 2 of them return LOC_ESP_PLUS | offset -def frameloc_ebp(offset): - assert offset % 4 == 0 +def frameloc_ebp(offset, wordsize): + assert offset % wordsize == 0 + if wordsize == 8: # in this case, there are 3 null bits, but we + offset >>= 1 # only need 2 of them if offset >= 0: return LOC_EBP_PLUS | offset else: @@ -57,12 +61,12 @@ # try to use esp-relative addressing ofs_from_esp = framesize + self.ofs_from_frame_end if ofs_from_esp % 2 == 0: - return frameloc_esp(ofs_from_esp) + return frameloc_esp(ofs_from_esp, wordsize) # we can get an odd value if the framesize is marked as bogus # by visit_andl() assert uses_frame_pointer ofs_from_ebp = self.ofs_from_frame_end + wordsize - return frameloc_ebp(ofs_from_ebp) + return frameloc_ebp(ofs_from_ebp, wordsize) class Insn(object): diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- 
a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -78,9 +78,9 @@ if self.is_stack_bottom: retaddr = LOC_NOWHERE # end marker for asmgcroot.py elif self.uses_frame_pointer: - retaddr = frameloc_ebp(self.WORD) + retaddr = frameloc_ebp(self.WORD, self.WORD) else: - retaddr = frameloc_esp(insn.framesize) + retaddr = frameloc_esp(insn.framesize, self.WORD) shape = [retaddr] # the first gcroots are always the ones corresponding to # the callee-saved registers @@ -894,6 +894,8 @@ return '%' + cls.CALLEE_SAVE_REGISTERS[reg].replace("%", "") else: offset = loc & ~ LOC_MASK + if cls.WORD == 8: + offset <<= 1 if kind == LOC_EBP_PLUS: result = '(%' + cls.EBP.replace("%", "") + ')' elif kind == LOC_EBP_MINUS: diff --git a/pypy/translator/c/src/libffi_msvc/ffi.c b/pypy/translator/c/src/libffi_msvc/ffi.c --- a/pypy/translator/c/src/libffi_msvc/ffi.c +++ b/pypy/translator/c/src/libffi_msvc/ffi.c @@ -71,31 +71,31 @@ switch ((*p_arg)->type) { case FFI_TYPE_SINT8: - *(signed int *) argp = (signed int)*(SINT8 *)(* p_argv); + *(signed int *) argp = (signed int)*(ffi_SINT8 *)(* p_argv); break; case FFI_TYPE_UINT8: - *(unsigned int *) argp = (unsigned int)*(UINT8 *)(* p_argv); + *(unsigned int *) argp = (unsigned int)*(ffi_UINT8 *)(* p_argv); break; case FFI_TYPE_SINT16: - *(signed int *) argp = (signed int)*(SINT16 *)(* p_argv); + *(signed int *) argp = (signed int)*(ffi_SINT16 *)(* p_argv); break; case FFI_TYPE_UINT16: - *(unsigned int *) argp = (unsigned int)*(UINT16 *)(* p_argv); + *(unsigned int *) argp = (unsigned int)*(ffi_UINT16 *)(* p_argv); break; case FFI_TYPE_SINT32: - *(signed int *) argp = (signed int)*(SINT32 *)(* p_argv); + *(signed int *) argp = (signed int)*(ffi_SINT32 *)(* p_argv); break; case FFI_TYPE_UINT32: - *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + *(unsigned int *) argp = (unsigned int)*(ffi_UINT32 *)(* p_argv); break; case FFI_TYPE_STRUCT: - *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + 
*(unsigned int *) argp = (unsigned int)*(ffi_UINT32 *)(* p_argv); break; default: diff --git a/pypy/translator/c/src/libffi_msvc/ffi_common.h b/pypy/translator/c/src/libffi_msvc/ffi_common.h --- a/pypy/translator/c/src/libffi_msvc/ffi_common.h +++ b/pypy/translator/c/src/libffi_msvc/ffi_common.h @@ -56,16 +56,18 @@ } extended_cif; /* Terse sized type definitions. */ -typedef unsigned int UINT8 __attribute__((__mode__(__QI__))); -typedef signed int SINT8 __attribute__((__mode__(__QI__))); -typedef unsigned int UINT16 __attribute__((__mode__(__HI__))); -typedef signed int SINT16 __attribute__((__mode__(__HI__))); -typedef unsigned int UINT32 __attribute__((__mode__(__SI__))); -typedef signed int SINT32 __attribute__((__mode__(__SI__))); -typedef unsigned int UINT64 __attribute__((__mode__(__DI__))); -typedef signed int SINT64 __attribute__((__mode__(__DI__))); +/* Fix for PyPy: these names are fine, but are bound to conflict with + * some other name from somewhere else :-( Added a 'ffi_' prefix. 
*/ +typedef unsigned int ffi_UINT8 __attribute__((__mode__(__QI__))); +typedef signed int ffi_SINT8 __attribute__((__mode__(__QI__))); +typedef unsigned int ffi_UINT16 __attribute__((__mode__(__HI__))); +typedef signed int ffi_SINT16 __attribute__((__mode__(__HI__))); +typedef unsigned int ffi_UINT32 __attribute__((__mode__(__SI__))); +typedef signed int ffi_SINT32 __attribute__((__mode__(__SI__))); +typedef unsigned int ffi_UINT64 __attribute__((__mode__(__DI__))); +typedef signed int ffi_SINT64 __attribute__((__mode__(__DI__))); -typedef float FLOAT32; +typedef float ffi_FLOAT32; #ifdef __cplusplus diff --git a/pypy/translator/goal/app_main.py b/pypy/translator/goal/app_main.py --- a/pypy/translator/goal/app_main.py +++ b/pypy/translator/goal/app_main.py @@ -130,30 +130,46 @@ sys.executable,) print __doc__.rstrip() if 'pypyjit' in sys.builtin_module_names: - _print_jit_help() + print " --jit OPTIONS advanced JIT options: try 'off' or 'help'" print raise SystemExit def _print_jit_help(): - import pypyjit + try: + import pypyjit + except ImportError: + print >> sys.stderr, "No jit support in %s" % (sys.executable,) + return items = pypyjit.defaults.items() items.sort() + print 'Advanced JIT options: a comma-separated list of OPTION=VALUE:' for key, value in items: - prefix = ' --jit %s=N %s' % (key, ' '*(18-len(key))) + print + print ' %s=N' % (key,) doc = '%s (default %s)' % (pypyjit.PARAMETER_DOCS[key], value) - while len(doc) > 51: - i = doc[:51].rfind(' ') - print prefix + doc[:i] + while len(doc) > 72: + i = doc[:74].rfind(' ') + if i < 0: + i = doc.find(' ') + if i < 0: + i = len(doc) + print ' ' + doc[:i] doc = doc[i+1:] - prefix = ' '*len(prefix) - print prefix + doc - print ' --jit off turn off the JIT' + print ' ' + doc + print + print ' off' + print ' turn off the JIT' + print ' help' + print ' print this page' def print_version(*args): print >> sys.stderr, "Python", sys.version raise SystemExit def set_jit_option(options, jitparam, *args): + if jitparam 
== 'help': + _print_jit_help() + raise SystemExit if 'pypyjit' not in sys.builtin_module_names: print >> sys.stderr, ("Warning: No jit support in %s" % (sys.executable,)) From noreply at buildbot.pypy.org Sun Mar 4 19:35:46 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 4 Mar 2012 19:35:46 +0100 (CET) Subject: [pypy-commit] pypy default: allow inlining into rctime Message-ID: <20120304183546.C69C382008@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r53186:f038c6e11906 Date: 2012-03-04 13:35 -0500 http://bitbucket.org/pypy/pypy/changeset/f038c6e11906/ Log: allow inlining into rctime diff --git a/pypy/module/pypyjit/policy.py b/pypy/module/pypyjit/policy.py --- a/pypy/module/pypyjit/policy.py +++ b/pypy/module/pypyjit/policy.py @@ -127,7 +127,7 @@ 'imp', 'sys', 'array', '_ffi', 'itertools', 'operator', 'posix', '_socket', '_sre', '_lsprof', '_weakref', '__pypy__', 'cStringIO', '_collections', 'struct', - 'mmap', 'marshal', '_codecs']: + 'mmap', 'marshal', '_codecs', 'rctime']: if modname == 'pypyjit' and 'interp_resop' in rest: return False return True diff --git a/pypy/module/pypyjit/test/test_policy.py b/pypy/module/pypyjit/test/test_policy.py --- a/pypy/module/pypyjit/test/test_policy.py +++ b/pypy/module/pypyjit/test/test_policy.py @@ -38,6 +38,10 @@ assert pypypolicy.look_inside_function(Local.getdict.im_func) assert pypypolicy.look_inside_function(get_ident) +def test_time(): + from pypy.module.rctime.interp_time import time + assert pypypolicy.look_inside_function(time) + def test_pypy_module(): from pypy.module._collections.interp_deque import W_Deque from pypy.module._random.interp_random import W_Random From noreply at buildbot.pypy.org Sun Mar 4 20:04:34 2012 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Mar 2012 20:04:34 +0100 (CET) Subject: [pypy-commit] pypy.org extradoc: Bah. 
Message-ID: <20120304190434.1D84D82008@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r335:784b62e9358e Date: 2012-03-04 20:04 +0100 http://bitbucket.org/pypy/pypy.org/changeset/784b62e9358e/ Log: Bah. diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -76,8 +76,10 @@
  • Linux binary (32bit) (openssl0.9.8 notes)
  • Linux binary (64bit) (openssl0.9.8 notes)
  • Mac OS/X binary (64bit)
  • -
  • Windows binary (32bit)
  • -

    If your CPU is really old, it may not have SSE2. In this case, you need +

  • Windows binary (32bit) (you need the VS 2010 runtime libraries) +note: the zip file contains the wrong version, msvcrt90.dll :-(
  • + +

    If your CPU is really old, it may not have SSE2. In this case, you need to translate yourself with the option --jit-backend=x86-without-sse2.

    diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -45,13 +45,14 @@ * `Linux binary (32bit)`__ (`openssl0.9.8 notes`_) * `Linux binary (64bit)`__ (`openssl0.9.8 notes`_) * `Mac OS/X binary (64bit)`__ -* `Windows binary (32bit)`__ +* `Windows binary (32bit)`__ (you need the `VS 2010 runtime libraries`_) + *note: the zip file contains the wrong version, msvcrt90.dll :-(* .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-1.8-linux.tar.bz2 .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-1.8-linux64.tar.bz2 .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-1.8-osx64.tar.bz2 .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-1.8-win32.zip -.. VS 2010 runtime libraries: http://www.microsoft.com/downloads/en/details.aspx?familyid=A7B7A05E-6DE6-4D3A-A423-37BF0912DB84 +.. _`VS 2010 runtime libraries`: http://www.microsoft.com/download/en/details.aspx?displaylang=en&id=5555 If your CPU is really old, it may not have SSE2. In this case, you need to translate_ yourself with the option ``--jit-backend=x86-without-sse2``. From noreply at buildbot.pypy.org Sun Mar 4 20:05:54 2012 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Mar 2012 20:05:54 +0100 (CET) Subject: [pypy-commit] pypy default: Bah. Message-ID: <20120304190554.4608782008@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r53187:932572b67805 Date: 2012-03-04 20:05 +0100 http://bitbucket.org/pypy/pypy/changeset/932572b67805/ Log: Bah. 
diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -60,7 +60,7 @@ if sys.platform == 'win32': # Can't rename a DLL: it is always called 'libpypy-c.dll' for extra in ['libpypy-c.dll', - 'libexpat.dll', 'sqlite3.dll', 'msvcr90.dll', + 'libexpat.dll', 'sqlite3.dll', 'msvcr100.dll', 'libeay32.dll', 'ssleay32.dll']: p = pypy_c.dirpath().join(extra) if not p.check(): From noreply at buildbot.pypy.org Sun Mar 4 20:10:23 2012 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Mar 2012 20:10:23 +0100 (CET) Subject: [pypy-commit] pypy.org extradoc: A minor comment. Message-ID: <20120304191023.35E8982008@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r336:9b03eb857c1d Date: 2012-03-04 20:10 +0100 http://bitbucket.org/pypy/pypy.org/changeset/9b03eb857c1d/ Log: A minor comment. diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -89,7 +89,8 @@
  • The most up-to-date nightly build with a JIT, if the official release is too old for what you want to do.
  • No JIT: A version without the JIT. Consumes a bit less memory -and may be faster on short-running scripts.
  • +and may be faster on short-running scripts. (Note that a similar +effect can be obtained by running pypy --jit off.)
  • Sandboxing: A special safe version. Read the docs about sandboxing. (It is also possible to translate a version that includes both sandboxing and the JIT compiler, although as the JIT is relatively diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -68,7 +68,8 @@ release is too old for what you want to do. * No JIT: A version without the JIT. Consumes a bit less memory - and may be faster on short-running scripts. + and may be faster on short-running scripts. (Note that a similar + effect can be obtained by running ``pypy --jit off``.) * Sandboxing: A special safe version. Read the docs about sandboxing_. (It is also possible to translate_ a version that includes both From noreply at buildbot.pypy.org Sun Mar 4 20:39:20 2012 From: noreply at buildbot.pypy.org (oberstet) Date: Sun, 4 Mar 2012 20:39:20 +0100 (CET) Subject: [pypy-commit] pypy kqueue: Merging trunk and resolving conflicts. Message-ID: <20120304193920.2F97682008@wyvern.cs.uni-duesseldorf.de> Author: Tobias Oberstein Branch: kqueue Changeset: r53188:b12df2b6eaa8 Date: 2012-03-02 12:45 +0100 http://bitbucket.org/pypy/pypy/changeset/b12df2b6eaa8/ Log: Merging trunk and resolving conflicts. 
diff too long, truncating to 10000 out of 981602 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -1,10 +1,16 @@ syntax: glob *.py[co] *~ +.*.swp +.idea +.project +.pydevproject syntax: regexp ^testresult$ ^site-packages$ +^site-packages/.*$ +^site-packages/.*$ ^bin$ ^pypy/bin/pypy-c ^pypy/module/cpyext/src/.+\.o$ @@ -15,12 +21,15 @@ ^pypy/module/cpyext/test/.+\.manifest$ ^pypy/module/test_lib_pypy/ctypes_tests/.+\.o$ ^pypy/doc/.+\.html$ +^pypy/doc/config/.+\.rst$ ^pypy/doc/basicblock\.asc$ ^pypy/doc/.+\.svninfo$ ^pypy/translator/c/src/libffi_msvc/.+\.obj$ ^pypy/translator/c/src/libffi_msvc/.+\.dll$ ^pypy/translator/c/src/libffi_msvc/.+\.lib$ ^pypy/translator/c/src/libffi_msvc/.+\.exp$ +^pypy/translator/c/src/cjkcodecs/.+\.o$ +^pypy/translator/c/src/cjkcodecs/.+\.obj$ ^pypy/translator/jvm/\.project$ ^pypy/translator/jvm/\.classpath$ ^pypy/translator/jvm/eclipse-bin$ @@ -33,12 +42,12 @@ ^pypy/translator/benchmark/shootout_benchmarks$ ^pypy/translator/goal/pypy-translation-snapshot$ ^pypy/translator/goal/pypy-c +^pypy/translator/goal/pypy-jvm +^pypy/translator/goal/pypy-jvm.jar ^pypy/translator/goal/.+\.exe$ ^pypy/translator/goal/.+\.dll$ ^pypy/translator/goal/target.+-c$ ^pypy/_cache$ -^site-packages/.+\.egg$ -^site-packages/.+\.pth$ ^pypy/doc/statistic/.+\.html$ ^pypy/doc/statistic/.+\.eps$ ^pypy/doc/statistic/.+\.pdf$ @@ -61,6 +70,7 @@ ^pypy/doc/image/lattice3\.png$ ^pypy/doc/image/stackless_informal\.png$ ^pypy/doc/image/parsing_example.+\.png$ +^pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test\.o$ ^compiled ^.git/ ^release/ diff --git a/.hgtags b/.hgtags new file mode 100644 --- /dev/null +++ b/.hgtags @@ -0,0 +1,4 @@ +b590cf6de4190623aad9aa698694c22e614d67b9 release-1.5 +b48df0bf4e75b81d98f19ce89d4a7dc3e1dab5e5 benchmarked +d8ac7d23d3ec5f9a0fa1264972f74a010dbfd07f release-1.6 +ff4af8f318821f7f5ca998613a60fca09aa137da release-1.7 diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -27,7 +27,7 @@ DEALINGS IN THE SOFTWARE. 
-PyPy Copyright holders 2003-2011 +PyPy Copyright holders 2003-2012 ----------------------------------- Except when otherwise stated (look for LICENSE files or information at @@ -37,78 +37,176 @@ Armin Rigo Maciej Fijalkowski Carl Friedrich Bolz + Amaury Forgeot d'Arc + Antonio Cuni Samuele Pedroni - Antonio Cuni Michael Hudson + Holger Krekel + Alex Gaynor Christian Tismer - Holger Krekel + Hakan Ardo + Benjamin Peterson + David Schneider Eric van Riet Paap + Anders Chrigstrom Richard Emslie - Anders Chrigstrom - Amaury Forgeot d Arc + Dan Villiom Podlaski Christiansen + Alexander Schremmer + Lukas Diekmann Aurelien Campeas Anders Lehmann + Camillo Bruni Niklaus Haldimann + Sven Hager + Leonardo Santagada + Toon Verwaest Seo Sanghyeon - Leonardo Santagada + Justin Peel Lawrence Oluyede + Bartosz Skowron Jakub Gustak Guido Wesdorp - Benjamin Peterson - Alexander Schremmer + Daniel Roberts + Laura Creighton + Adrien Di Mascio + Ludovic Aubry Niko Matsakis - Ludovic Aubry + Wim Lavrijsen + Matti Picus + Jason Creighton + Jacob Hallen Alex Martelli - Toon Verwaest + Anders Hammarquist + Jan de Mooij Stephan Diehl - Adrien Di Mascio + Michael Foord Stefan Schwarzer Tomek Meka Patrick Maupin - Jacob Hallen - Laura Creighton Bob Ippolito - Camillo Bruni - Simon Burton Bruno Gola Alexandre Fayolle Marius Gedminas + Simon Burton + David Edelsohn + Jean-Paul Calderone + John Witulski + Timo Paulssen + holger krekel + Dario Bertini + Mark Pearse + Andreas Stührk + Jean-Philippe St. 
Pierre Guido van Rossum + Pavel Vinogradov Valentino Volonghi + Paul deGrandis + Ilya Osadchiy + Ronny Pfannschmidt Adrian Kuhn - Paul deGrandis + tav + Georg Brandl + Philip Jenvey Gerald Klix Wanja Saatkamp - Anders Hammarquist + Boris Feigin Oscar Nierstrasz + David Malcolm Eugene Oden + Henry Mason + Jeff Terrace Lukas Renggli Guenter Jantzen + Ned Batchelder + Bert Freudenberg + Amit Regmi + Ben Young + Nicolas Chauvat + Andrew Durdin + Michael Schneider + Nicholas Riley + Rocco Moretti + Gintautas Miliauskas + Michael Twomey + Igor Trindade Oliveira + Lucian Branescu Mihaila + Olivier Dormond + Jared Grubb + Karl Bartel + Gabriel Lavoie + Victor Stinner + Brian Dorsey + Stuart Williams + Toby Watson + Antoine Pitrou + Justas Sadzevicius + Neil Shepperd + Mikael Schönenberg + Gasper Zejn + Jonathan David Riehl + Elmo Mäntynen + Anders Qvist + Beatrice During + Alexander Sedov + Corbin Simpson + Vincent Legoll + Romain Guillebert + Alan McIntyre + Alex Perry + Jens-Uwe Mager + Simon Cross + Dan Stromberg + Guillebert Romain + Carl Meyer + Pieter Zieschang + Alejandro J. 
Cura + Sylvain Thenault + Christoph Gerum + Travis Francis Athougies + Henrik Vendelbo + Lutz Paelike + Jacob Oscarson + Martin Blais + Lucio Torre + Lene Wagner + Miguel de Val Borro + Artur Lisiecki + Bruno Gola + Ignas Mikalajunas + Stefano Rivera + Joshua Gilbert + Godefroid Chappelle + Yusei Tahara + Christopher Armstrong + Stephan Busemann + Gustavo Niemeyer + William Leslie + Akira Li + Kristjan Valur Jonsson + Bobby Impollonia + Michael Hudson-Doyle + Laurence Tratt + Yasir Suhail + Andrew Thompson + Anders Sigfridsson + Floris Bruynooghe + Jacek Generowicz + Dan Colish + Zooko Wilcox-O Hearn + Dan Loewenherz + Chris Lambacher Dinu Gherman - Bartosz Skowron - Georg Brandl - Ben Young - Jean-Paul Calderone - Nicolas Chauvat - Rocco Moretti - Michael Twomey - boria - Jared Grubb - Olivier Dormond - Stuart Williams - Jens-Uwe Mager - Justas Sadzevicius - Mikael Schönenberg - Brian Dorsey - Jonathan David Riehl - Beatrice During - Elmo Mäntynen - Andreas Friedge - Alex Gaynor - Anders Qvist - Alan McIntyre - Bert Freudenberg - Tav + Brett Cannon + Daniel Neuhäuser + Michael Chermside + Konrad Delong + Anna Ravencroft + Greg Price + Armin Ronacher + Christian Muirhead + Jim Baker + Rodrigo Araújo + Romain Guillebert Heinrich-Heine University, Germany Open End AB (formerly AB Strakt), Sweden @@ -119,13 +217,16 @@ Impara, Germany Change Maker, Sweden +The PyPy Logo as used by http://speed.pypy.org and others was created +by Samuel Reis and is distributed on terms of Creative Commons Share Alike +License. 
-License for 'lib-python/2.5.2' and 'lib-python/2.5.2-modified' +License for 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' ============================================================== Except when otherwise stated (look for LICENSE files or copyright/license information at the beginning of each file) the files -in the 'lib-python/2.5.2' and 'lib-python/2.5.2-modified' directories +in the 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' directories are all copyrighted by the Python Software Foundation and licensed under the Python Software License of which you can find a copy here: http://www.python.org/doc/Copyright.html @@ -158,21 +259,12 @@ ====================================== The following files are from the website of The Unicode Consortium -at http://www.unicode.org/. For the terms of use of these files, see -http://www.unicode.org/terms_of_use.html +at http://www.unicode.org/. For the terms of use of these files, see +http://www.unicode.org/terms_of_use.html . Or they are derived from +files from the above website, and the same terms of use apply. - CompositionExclusions-3.2.0.txt - CompositionExclusions-4.1.0.txt - CompositionExclusions-5.0.0.txt - EastAsianWidth-3.2.0.txt - EastAsianWidth-4.1.0.txt - EastAsianWidth-5.0.0.txt - UnicodeData-3.2.0.txt - UnicodeData-4.1.0.txt - UnicodeData-5.0.0.txt - -The following files are derived from files from the above website. The same -terms of use apply. 
- UnihanNumeric-3.2.0.txt - UnihanNumeric-4.1.0.txt - UnihanNumeric-5.0.0.txt + CompositionExclusions-*.txt + EastAsianWidth-*.txt + LineBreak-*.txt + UnicodeData-*.txt + UnihanNumeric-*.txt diff --git a/README b/README --- a/README +++ b/README @@ -15,10 +15,10 @@ The getting-started document will help guide you: - http://codespeak.net/pypy/dist/pypy/doc/getting-started.html + http://doc.pypy.org/en/latest/getting-started.html It will also point you to the rest of the documentation which is generated from files in the pypy/doc directory within the source repositories. Enjoy and send us feedback! - the pypy-dev team + the pypy-dev team diff --git a/_pytest/__init__.py b/_pytest/__init__.py --- a/_pytest/__init__.py +++ b/_pytest/__init__.py @@ -1,2 +1,2 @@ # -__version__ = '2.0.3.dev3' +__version__ = '2.1.0.dev4' diff --git a/_pytest/assertion.py b/_pytest/assertion.py deleted file mode 100644 --- a/_pytest/assertion.py +++ /dev/null @@ -1,179 +0,0 @@ -""" -support for presented detailed information in failing assertions. -""" -import py -import sys -from _pytest.monkeypatch import monkeypatch - -def pytest_addoption(parser): - group = parser.getgroup("debugconfig") - group._addoption('--no-assert', action="store_true", default=False, - dest="noassert", - help="disable python assert expression reinterpretation."), - -def pytest_configure(config): - # The _reprcompare attribute on the py.code module is used by - # py._code._assertionnew to detect this plugin was loaded and in - # turn call the hooks defined here as part of the - # DebugInterpreter. 
- config._monkeypatch = m = monkeypatch() - warn_about_missing_assertion() - if not config.getvalue("noassert") and not config.getvalue("nomagic"): - def callbinrepr(op, left, right): - hook_result = config.hook.pytest_assertrepr_compare( - config=config, op=op, left=left, right=right) - for new_expl in hook_result: - if new_expl: - return '\n~'.join(new_expl) - m.setattr(py.builtin.builtins, - 'AssertionError', py.code._AssertionError) - m.setattr(py.code, '_reprcompare', callbinrepr) - -def pytest_unconfigure(config): - config._monkeypatch.undo() - -def warn_about_missing_assertion(): - try: - assert False - except AssertionError: - pass - else: - sys.stderr.write("WARNING: failing tests may report as passing because " - "assertions are turned off! (are you using python -O?)\n") - -# Provide basestring in python3 -try: - basestring = basestring -except NameError: - basestring = str - - -def pytest_assertrepr_compare(op, left, right): - """return specialised explanations for some operators/operands""" - width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op - left_repr = py.io.saferepr(left, maxsize=int(width/2)) - right_repr = py.io.saferepr(right, maxsize=width-len(left_repr)) - summary = '%s %s %s' % (left_repr, op, right_repr) - - issequence = lambda x: isinstance(x, (list, tuple)) - istext = lambda x: isinstance(x, basestring) - isdict = lambda x: isinstance(x, dict) - isset = lambda x: isinstance(x, set) - - explanation = None - try: - if op == '==': - if istext(left) and istext(right): - explanation = _diff_text(left, right) - elif issequence(left) and issequence(right): - explanation = _compare_eq_sequence(left, right) - elif isset(left) and isset(right): - explanation = _compare_eq_set(left, right) - elif isdict(left) and isdict(right): - explanation = _diff_text(py.std.pprint.pformat(left), - py.std.pprint.pformat(right)) - elif op == 'not in': - if istext(left) and istext(right): - explanation = _notin_text(left, right) - except 
py.builtin._sysex: - raise - except: - excinfo = py.code.ExceptionInfo() - explanation = ['(pytest_assertion plugin: representation of ' - 'details failed. Probably an object has a faulty __repr__.)', - str(excinfo) - ] - - - if not explanation: - return None - - # Don't include pageloads of data, should be configurable - if len(''.join(explanation)) > 80*8: - explanation = ['Detailed information too verbose, truncated'] - - return [summary] + explanation - - -def _diff_text(left, right): - """Return the explanation for the diff between text - - This will skip leading and trailing characters which are - identical to keep the diff minimal. - """ - explanation = [] - i = 0 # just in case left or right has zero length - for i in range(min(len(left), len(right))): - if left[i] != right[i]: - break - if i > 42: - i -= 10 # Provide some context - explanation = ['Skipping %s identical ' - 'leading characters in diff' % i] - left = left[i:] - right = right[i:] - if len(left) == len(right): - for i in range(len(left)): - if left[-i] != right[-i]: - break - if i > 42: - i -= 10 # Provide some context - explanation += ['Skipping %s identical ' - 'trailing characters in diff' % i] - left = left[:-i] - right = right[:-i] - explanation += [line.strip('\n') - for line in py.std.difflib.ndiff(left.splitlines(), - right.splitlines())] - return explanation - - -def _compare_eq_sequence(left, right): - explanation = [] - for i in range(min(len(left), len(right))): - if left[i] != right[i]: - explanation += ['At index %s diff: %r != %r' % - (i, left[i], right[i])] - break - if len(left) > len(right): - explanation += ['Left contains more items, ' - 'first extra item: %s' % py.io.saferepr(left[len(right)],)] - elif len(left) < len(right): - explanation += ['Right contains more items, ' - 'first extra item: %s' % py.io.saferepr(right[len(left)],)] - return explanation # + _diff_text(py.std.pprint.pformat(left), - # py.std.pprint.pformat(right)) - - -def _compare_eq_set(left, right): - 
explanation = [] - diff_left = left - right - diff_right = right - left - if diff_left: - explanation.append('Extra items in the left set:') - for item in diff_left: - explanation.append(py.io.saferepr(item)) - if diff_right: - explanation.append('Extra items in the right set:') - for item in diff_right: - explanation.append(py.io.saferepr(item)) - return explanation - - -def _notin_text(term, text): - index = text.find(term) - head = text[:index] - tail = text[index+len(term):] - correct_text = head + tail - diff = _diff_text(correct_text, text) - newdiff = ['%s is contained here:' % py.io.saferepr(term, maxsize=42)] - for line in diff: - if line.startswith('Skipping'): - continue - if line.startswith('- '): - continue - if line.startswith('+ '): - newdiff.append(' ' + line[2:]) - else: - newdiff.append(line) - return newdiff diff --git a/_pytest/assertion/__init__.py b/_pytest/assertion/__init__.py new file mode 100644 --- /dev/null +++ b/_pytest/assertion/__init__.py @@ -0,0 +1,128 @@ +""" +support for presenting detailed information in failing assertions. +""" +import py +import imp +import marshal +import struct +import sys +import pytest +from _pytest.monkeypatch import monkeypatch +from _pytest.assertion import reinterpret, util + +try: + from _pytest.assertion.rewrite import rewrite_asserts +except ImportError: + rewrite_asserts = None +else: + import ast + +def pytest_addoption(parser): + group = parser.getgroup("debugconfig") + group.addoption('--assertmode', action="store", dest="assertmode", + choices=("on", "old", "off", "default"), default="default", + metavar="on|old|off", + help="""control assertion debugging tools. +'off' performs no assertion debugging. +'old' reinterprets the expressions in asserts to glean information. 
+'on' (the default) rewrites the assert statements in test modules to provide +sub-expression results.""") + group.addoption('--no-assert', action="store_true", default=False, + dest="noassert", help="DEPRECATED equivalent to --assertmode=off") + group.addoption('--nomagic', action="store_true", default=False, + dest="nomagic", help="DEPRECATED equivalent to --assertmode=off") + +class AssertionState: + """State for the assertion plugin.""" + + def __init__(self, config, mode): + self.mode = mode + self.trace = config.trace.root.get("assertion") + +def pytest_configure(config): + warn_about_missing_assertion() + mode = config.getvalue("assertmode") + if config.getvalue("noassert") or config.getvalue("nomagic"): + if mode not in ("off", "default"): + raise pytest.UsageError("assertion options conflict") + mode = "off" + elif mode == "default": + mode = "on" + if mode != "off": + def callbinrepr(op, left, right): + hook_result = config.hook.pytest_assertrepr_compare( + config=config, op=op, left=left, right=right) + for new_expl in hook_result: + if new_expl: + return '\n~'.join(new_expl) + m = monkeypatch() + config._cleanup.append(m.undo) + m.setattr(py.builtin.builtins, 'AssertionError', + reinterpret.AssertionError) + m.setattr(util, '_reprcompare', callbinrepr) + if mode == "on" and rewrite_asserts is None: + mode = "old" + config._assertstate = AssertionState(config, mode) + config._assertstate.trace("configured with mode set to %r" % (mode,)) + +def _write_pyc(co, source_path): + if hasattr(imp, "cache_from_source"): + # Handle PEP 3147 pycs. 
+ pyc = py.path.local(imp.cache_from_source(str(source_path))) + pyc.ensure() + else: + pyc = source_path + "c" + mtime = int(source_path.mtime()) + fp = pyc.open("wb") + try: + fp.write(imp.get_magic()) + fp.write(struct.pack(">", + ast.Add : "+", + ast.Sub : "-", + ast.Mult : "*", + ast.Div : "/", + ast.FloorDiv : "//", + ast.Mod : "%", + ast.Eq : "==", + ast.NotEq : "!=", + ast.Lt : "<", + ast.LtE : "<=", + ast.Gt : ">", + ast.GtE : ">=", + ast.Pow : "**", + ast.Is : "is", + ast.IsNot : "is not", + ast.In : "in", + ast.NotIn : "not in" +} + +unary_map = { + ast.Not : "not %s", + ast.Invert : "~%s", + ast.USub : "-%s", + ast.UAdd : "+%s" +} + + +class DebugInterpreter(ast.NodeVisitor): + """Interpret AST nodes to gleam useful debugging information. """ + + def __init__(self, frame): + self.frame = frame + + def generic_visit(self, node): + # Fallback when we don't have a special implementation. + if _is_ast_expr(node): + mod = ast.Expression(node) + co = self._compile(mod) + try: + result = self.frame.eval(co) + except Exception: + raise Failure() + explanation = self.frame.repr(result) + return explanation, result + elif _is_ast_stmt(node): + mod = ast.Module([node]) + co = self._compile(mod, "exec") + try: + self.frame.exec_(co) + except Exception: + raise Failure() + return None, None + else: + raise AssertionError("can't handle %s" %(node,)) + + def _compile(self, source, mode="eval"): + return compile(source, "", mode) + + def visit_Expr(self, expr): + return self.visit(expr.value) + + def visit_Module(self, mod): + for stmt in mod.body: + self.visit(stmt) + + def visit_Name(self, name): + explanation, result = self.generic_visit(name) + # See if the name is local. 
+ source = "%r in locals() is not globals()" % (name.id,) + co = self._compile(source) + try: + local = self.frame.eval(co) + except Exception: + # have to assume it isn't + local = None + if local is None or not self.frame.is_true(local): + return name.id, result + return explanation, result + + def visit_Compare(self, comp): + left = comp.left + left_explanation, left_result = self.visit(left) + for op, next_op in zip(comp.ops, comp.comparators): + next_explanation, next_result = self.visit(next_op) + op_symbol = operator_map[op.__class__] + explanation = "%s %s %s" % (left_explanation, op_symbol, + next_explanation) + source = "__exprinfo_left %s __exprinfo_right" % (op_symbol,) + co = self._compile(source) + try: + result = self.frame.eval(co, __exprinfo_left=left_result, + __exprinfo_right=next_result) + except Exception: + raise Failure(explanation) + try: + if not self.frame.is_true(result): + break + except KeyboardInterrupt: + raise + except: + break + left_explanation, left_result = next_explanation, next_result + + if util._reprcompare is not None: + res = util._reprcompare(op_symbol, left_result, next_result) + if res: + explanation = res + return explanation, result + + def visit_BoolOp(self, boolop): + is_or = isinstance(boolop.op, ast.Or) + explanations = [] + for operand in boolop.values: + explanation, result = self.visit(operand) + explanations.append(explanation) + if result == is_or: + break + name = is_or and " or " or " and " + explanation = "(" + name.join(explanations) + ")" + return explanation, result + + def visit_UnaryOp(self, unary): + pattern = unary_map[unary.op.__class__] + operand_explanation, operand_result = self.visit(unary.operand) + explanation = pattern % (operand_explanation,) + co = self._compile(pattern % ("__exprinfo_expr",)) + try: + result = self.frame.eval(co, __exprinfo_expr=operand_result) + except Exception: + raise Failure(explanation) + return explanation, result + + def visit_BinOp(self, binop): + 
left_explanation, left_result = self.visit(binop.left) + right_explanation, right_result = self.visit(binop.right) + symbol = operator_map[binop.op.__class__] + explanation = "(%s %s %s)" % (left_explanation, symbol, + right_explanation) + source = "__exprinfo_left %s __exprinfo_right" % (symbol,) + co = self._compile(source) + try: + result = self.frame.eval(co, __exprinfo_left=left_result, + __exprinfo_right=right_result) + except Exception: + raise Failure(explanation) + return explanation, result + + def visit_Call(self, call): + func_explanation, func = self.visit(call.func) + arg_explanations = [] + ns = {"__exprinfo_func" : func} + arguments = [] + for arg in call.args: + arg_explanation, arg_result = self.visit(arg) + arg_name = "__exprinfo_%s" % (len(ns),) + ns[arg_name] = arg_result + arguments.append(arg_name) + arg_explanations.append(arg_explanation) + for keyword in call.keywords: + arg_explanation, arg_result = self.visit(keyword.value) + arg_name = "__exprinfo_%s" % (len(ns),) + ns[arg_name] = arg_result + keyword_source = "%s=%%s" % (keyword.arg) + arguments.append(keyword_source % (arg_name,)) + arg_explanations.append(keyword_source % (arg_explanation,)) + if call.starargs: + arg_explanation, arg_result = self.visit(call.starargs) + arg_name = "__exprinfo_star" + ns[arg_name] = arg_result + arguments.append("*%s" % (arg_name,)) + arg_explanations.append("*%s" % (arg_explanation,)) + if call.kwargs: + arg_explanation, arg_result = self.visit(call.kwargs) + arg_name = "__exprinfo_kwds" + ns[arg_name] = arg_result + arguments.append("**%s" % (arg_name,)) + arg_explanations.append("**%s" % (arg_explanation,)) + args_explained = ", ".join(arg_explanations) + explanation = "%s(%s)" % (func_explanation, args_explained) + args = ", ".join(arguments) + source = "__exprinfo_func(%s)" % (args,) + co = self._compile(source) + try: + result = self.frame.eval(co, **ns) + except Exception: + raise Failure(explanation) + pattern = "%s\n{%s = %s\n}" + rep = 
self.frame.repr(result) + explanation = pattern % (rep, rep, explanation) + return explanation, result + + def _is_builtin_name(self, name): + pattern = "%r not in globals() and %r not in locals()" + source = pattern % (name.id, name.id) + co = self._compile(source) + try: + return self.frame.eval(co) + except Exception: + return False + + def visit_Attribute(self, attr): + if not isinstance(attr.ctx, ast.Load): + return self.generic_visit(attr) + source_explanation, source_result = self.visit(attr.value) + explanation = "%s.%s" % (source_explanation, attr.attr) + source = "__exprinfo_expr.%s" % (attr.attr,) + co = self._compile(source) + try: + result = self.frame.eval(co, __exprinfo_expr=source_result) + except Exception: + raise Failure(explanation) + explanation = "%s\n{%s = %s.%s\n}" % (self.frame.repr(result), + self.frame.repr(result), + source_explanation, attr.attr) + # Check if the attr is from an instance. + source = "%r in getattr(__exprinfo_expr, '__dict__', {})" + source = source % (attr.attr,) + co = self._compile(source) + try: + from_instance = self.frame.eval(co, __exprinfo_expr=source_result) + except Exception: + from_instance = None + if from_instance is None or self.frame.is_true(from_instance): + rep = self.frame.repr(result) + pattern = "%s\n{%s = %s\n}" + explanation = pattern % (rep, rep, explanation) + return explanation, result + + def visit_Assert(self, assrt): + test_explanation, test_result = self.visit(assrt.test) + explanation = "assert %s" % (test_explanation,) + if not self.frame.is_true(test_result): + try: + raise BuiltinAssertionError + except Exception: + raise Failure(explanation) + return explanation, test_result + + def visit_Assign(self, assign): + value_explanation, value_result = self.visit(assign.value) + explanation = "... 
= %s" % (value_explanation,) + name = ast.Name("__exprinfo_expr", ast.Load(), + lineno=assign.value.lineno, + col_offset=assign.value.col_offset) + new_assign = ast.Assign(assign.targets, name, lineno=assign.lineno, + col_offset=assign.col_offset) + mod = ast.Module([new_assign]) + co = self._compile(mod, "exec") + try: + self.frame.exec_(co, __exprinfo_expr=value_result) + except Exception: + raise Failure(explanation) + return explanation, value_result diff --git a/_pytest/assertion/oldinterpret.py b/_pytest/assertion/oldinterpret.py new file mode 100644 --- /dev/null +++ b/_pytest/assertion/oldinterpret.py @@ -0,0 +1,552 @@ +import py +import sys, inspect +from compiler import parse, ast, pycodegen +from _pytest.assertion.util import format_explanation +from _pytest.assertion.reinterpret import BuiltinAssertionError + +passthroughex = py.builtin._sysex + +class Failure: + def __init__(self, node): + self.exc, self.value, self.tb = sys.exc_info() + self.node = node + +class View(object): + """View base class. + + If C is a subclass of View, then C(x) creates a proxy object around + the object x. The actual class of the proxy is not C in general, + but a *subclass* of C determined by the rules below. To avoid confusion + we call view class the class of the proxy (a subclass of C, so of View) + and object class the class of x. + + Attributes and methods not found in the proxy are automatically read on x. + Other operations like setting attributes are performed on the proxy, as + determined by its view class. The object x is available from the proxy + as its __obj__ attribute. + + The view class selection is determined by the __view__ tuples and the + optional __viewkey__ method. By default, the selected view class is the + most specific subclass of C whose __view__ mentions the class of x. + If no such subclass is found, the search proceeds with the parent + object classes. For example, C(True) will first look for a subclass + of C with __view__ = (..., bool, ...) 
and only if it doesn't find any + look for one with __view__ = (..., int, ...), and then ..., object,... + If everything fails the class C itself is considered to be the default. + + Alternatively, the view class selection can be driven by another aspect + of the object x, instead of the class of x, by overriding __viewkey__. + See last example at the end of this module. + """ + + _viewcache = {} + __view__ = () + + def __new__(rootclass, obj, *args, **kwds): + self = object.__new__(rootclass) + self.__obj__ = obj + self.__rootclass__ = rootclass + key = self.__viewkey__() + try: + self.__class__ = self._viewcache[key] + except KeyError: + self.__class__ = self._selectsubclass(key) + return self + + def __getattr__(self, attr): + # attributes not found in the normal hierarchy rooted on View + # are looked up in the object's real class + return getattr(self.__obj__, attr) + + def __viewkey__(self): + return self.__obj__.__class__ + + def __matchkey__(self, key, subclasses): + if inspect.isclass(key): + keys = inspect.getmro(key) + else: + keys = [key] + for key in keys: + result = [C for C in subclasses if key in C.__view__] + if result: + return result + return [] + + def _selectsubclass(self, key): + subclasses = list(enumsubclasses(self.__rootclass__)) + for C in subclasses: + if not isinstance(C.__view__, tuple): + C.__view__ = (C.__view__,) + choices = self.__matchkey__(key, subclasses) + if not choices: + return self.__rootclass__ + elif len(choices) == 1: + return choices[0] + else: + # combine the multiple choices + return type('?', tuple(choices), {}) + + def __repr__(self): + return '%s(%r)' % (self.__rootclass__.__name__, self.__obj__) + + +def enumsubclasses(cls): + for subcls in cls.__subclasses__(): + for subsubclass in enumsubclasses(subcls): + yield subsubclass + yield cls + + +class Interpretable(View): + """A parse tree node with a few extra methods.""" + explanation = None + + def is_builtin(self, frame): + return False + + def eval(self, frame): 
+ # fall-back for unknown expression nodes + try: + expr = ast.Expression(self.__obj__) + expr.filename = '' + self.__obj__.filename = '' + co = pycodegen.ExpressionCodeGenerator(expr).getCode() + result = frame.eval(co) + except passthroughex: + raise + except: + raise Failure(self) + self.result = result + self.explanation = self.explanation or frame.repr(self.result) + + def run(self, frame): + # fall-back for unknown statement nodes + try: + expr = ast.Module(None, ast.Stmt([self.__obj__])) + expr.filename = '' + co = pycodegen.ModuleCodeGenerator(expr).getCode() + frame.exec_(co) + except passthroughex: + raise + except: + raise Failure(self) + + def nice_explanation(self): + return format_explanation(self.explanation) + + +class Name(Interpretable): + __view__ = ast.Name + + def is_local(self, frame): + source = '%r in locals() is not globals()' % self.name + try: + return frame.is_true(frame.eval(source)) + except passthroughex: + raise + except: + return False + + def is_global(self, frame): + source = '%r in globals()' % self.name + try: + return frame.is_true(frame.eval(source)) + except passthroughex: + raise + except: + return False + + def is_builtin(self, frame): + source = '%r not in locals() and %r not in globals()' % ( + self.name, self.name) + try: + return frame.is_true(frame.eval(source)) + except passthroughex: + raise + except: + return False + + def eval(self, frame): + super(Name, self).eval(frame) + if not self.is_local(frame): + self.explanation = self.name + +class Compare(Interpretable): + __view__ = ast.Compare + + def eval(self, frame): + expr = Interpretable(self.expr) + expr.eval(frame) + for operation, expr2 in self.ops: + if hasattr(self, 'result'): + # shortcutting in chained expressions + if not frame.is_true(self.result): + break + expr2 = Interpretable(expr2) + expr2.eval(frame) + self.explanation = "%s %s %s" % ( + expr.explanation, operation, expr2.explanation) + source = "__exprinfo_left %s __exprinfo_right" % operation + 
try: + self.result = frame.eval(source, + __exprinfo_left=expr.result, + __exprinfo_right=expr2.result) + except passthroughex: + raise + except: + raise Failure(self) + expr = expr2 + +class And(Interpretable): + __view__ = ast.And + + def eval(self, frame): + explanations = [] + for expr in self.nodes: + expr = Interpretable(expr) + expr.eval(frame) + explanations.append(expr.explanation) + self.result = expr.result + if not frame.is_true(expr.result): + break + self.explanation = '(' + ' and '.join(explanations) + ')' + +class Or(Interpretable): + __view__ = ast.Or + + def eval(self, frame): + explanations = [] + for expr in self.nodes: + expr = Interpretable(expr) + expr.eval(frame) + explanations.append(expr.explanation) + self.result = expr.result + if frame.is_true(expr.result): + break + self.explanation = '(' + ' or '.join(explanations) + ')' + + +# == Unary operations == +keepalive = [] +for astclass, astpattern in { + ast.Not : 'not __exprinfo_expr', + ast.Invert : '(~__exprinfo_expr)', + }.items(): + + class UnaryArith(Interpretable): + __view__ = astclass + + def eval(self, frame, astpattern=astpattern): + expr = Interpretable(self.expr) + expr.eval(frame) + self.explanation = astpattern.replace('__exprinfo_expr', + expr.explanation) + try: + self.result = frame.eval(astpattern, + __exprinfo_expr=expr.result) + except passthroughex: + raise + except: + raise Failure(self) + + keepalive.append(UnaryArith) + +# == Binary operations == +for astclass, astpattern in { + ast.Add : '(__exprinfo_left + __exprinfo_right)', + ast.Sub : '(__exprinfo_left - __exprinfo_right)', + ast.Mul : '(__exprinfo_left * __exprinfo_right)', + ast.Div : '(__exprinfo_left / __exprinfo_right)', + ast.Mod : '(__exprinfo_left % __exprinfo_right)', + ast.Power : '(__exprinfo_left ** __exprinfo_right)', + }.items(): + + class BinaryArith(Interpretable): + __view__ = astclass + + def eval(self, frame, astpattern=astpattern): + left = Interpretable(self.left) + left.eval(frame) + right 
= Interpretable(self.right) + right.eval(frame) + self.explanation = (astpattern + .replace('__exprinfo_left', left .explanation) + .replace('__exprinfo_right', right.explanation)) + try: + self.result = frame.eval(astpattern, + __exprinfo_left=left.result, + __exprinfo_right=right.result) + except passthroughex: + raise + except: + raise Failure(self) + + keepalive.append(BinaryArith) + + +class CallFunc(Interpretable): + __view__ = ast.CallFunc + + def is_bool(self, frame): + source = 'isinstance(__exprinfo_value, bool)' + try: + return frame.is_true(frame.eval(source, + __exprinfo_value=self.result)) + except passthroughex: + raise + except: + return False + + def eval(self, frame): + node = Interpretable(self.node) + node.eval(frame) + explanations = [] + vars = {'__exprinfo_fn': node.result} + source = '__exprinfo_fn(' + for a in self.args: + if isinstance(a, ast.Keyword): + keyword = a.name + a = a.expr + else: + keyword = None + a = Interpretable(a) + a.eval(frame) + argname = '__exprinfo_%d' % len(vars) + vars[argname] = a.result + if keyword is None: + source += argname + ',' + explanations.append(a.explanation) + else: + source += '%s=%s,' % (keyword, argname) + explanations.append('%s=%s' % (keyword, a.explanation)) + if self.star_args: + star_args = Interpretable(self.star_args) + star_args.eval(frame) + argname = '__exprinfo_star' + vars[argname] = star_args.result + source += '*' + argname + ',' + explanations.append('*' + star_args.explanation) + if self.dstar_args: + dstar_args = Interpretable(self.dstar_args) + dstar_args.eval(frame) + argname = '__exprinfo_kwds' + vars[argname] = dstar_args.result + source += '**' + argname + ',' + explanations.append('**' + dstar_args.explanation) + self.explanation = "%s(%s)" % ( + node.explanation, ', '.join(explanations)) + if source.endswith(','): + source = source[:-1] + source += ')' + try: + self.result = frame.eval(source, **vars) + except passthroughex: + raise + except: + raise Failure(self) + if not 
node.is_builtin(frame) or not self.is_bool(frame): + r = frame.repr(self.result) + self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation) + +class Getattr(Interpretable): + __view__ = ast.Getattr + + def eval(self, frame): + expr = Interpretable(self.expr) + expr.eval(frame) + source = '__exprinfo_expr.%s' % self.attrname + try: + self.result = frame.eval(source, __exprinfo_expr=expr.result) + except passthroughex: + raise + except: + raise Failure(self) + self.explanation = '%s.%s' % (expr.explanation, self.attrname) + # if the attribute comes from the instance, its value is interesting + source = ('hasattr(__exprinfo_expr, "__dict__") and ' + '%r in __exprinfo_expr.__dict__' % self.attrname) + try: + from_instance = frame.is_true( + frame.eval(source, __exprinfo_expr=expr.result)) + except passthroughex: + raise + except: + from_instance = True + if from_instance: + r = frame.repr(self.result) + self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation) + +# == Re-interpretation of full statements == + +class Assert(Interpretable): + __view__ = ast.Assert + + def run(self, frame): + test = Interpretable(self.test) + test.eval(frame) + # print the result as 'assert ' + self.result = test.result + self.explanation = 'assert ' + test.explanation + if not frame.is_true(test.result): + try: + raise BuiltinAssertionError + except passthroughex: + raise + except: + raise Failure(self) + +class Assign(Interpretable): + __view__ = ast.Assign + + def run(self, frame): + expr = Interpretable(self.expr) + expr.eval(frame) + self.result = expr.result + self.explanation = '... 
= ' + expr.explanation + # fall-back-run the rest of the assignment + ass = ast.Assign(self.nodes, ast.Name('__exprinfo_expr')) + mod = ast.Module(None, ast.Stmt([ass])) + mod.filename = '' + co = pycodegen.ModuleCodeGenerator(mod).getCode() + try: + frame.exec_(co, __exprinfo_expr=expr.result) + except passthroughex: + raise + except: + raise Failure(self) + +class Discard(Interpretable): + __view__ = ast.Discard + + def run(self, frame): + expr = Interpretable(self.expr) + expr.eval(frame) + self.result = expr.result + self.explanation = expr.explanation + +class Stmt(Interpretable): + __view__ = ast.Stmt + + def run(self, frame): + for stmt in self.nodes: + stmt = Interpretable(stmt) + stmt.run(frame) + + +def report_failure(e): + explanation = e.node.nice_explanation() + if explanation: + explanation = ", in: " + explanation + else: + explanation = "" + sys.stdout.write("%s: %s%s\n" % (e.exc.__name__, e.value, explanation)) + +def check(s, frame=None): + if frame is None: + frame = sys._getframe(1) + frame = py.code.Frame(frame) + expr = parse(s, 'eval') + assert isinstance(expr, ast.Expression) + node = Interpretable(expr.node) + try: + node.eval(frame) + except passthroughex: + raise + except Failure: + e = sys.exc_info()[1] + report_failure(e) + else: + if not frame.is_true(node.result): + sys.stderr.write("assertion failed: %s\n" % node.nice_explanation()) + + +########################################################### +# API / Entry points +# ######################################################### + +def interpret(source, frame, should_fail=False): + module = Interpretable(parse(source, 'exec').node) + #print "got module", module + if isinstance(frame, py.std.types.FrameType): + frame = py.code.Frame(frame) + try: + module.run(frame) + except Failure: + e = sys.exc_info()[1] + return getfailure(e) + except passthroughex: + raise + except: + import traceback + traceback.print_exc() + if should_fail: + return ("(assertion failed, but when it was re-run 
for " + "printing intermediate values, it did not fail. Suggestions: " + "compute assert expression before the assert or use --nomagic)") + else: + return None + +def getmsg(excinfo): + if isinstance(excinfo, tuple): + excinfo = py.code.ExceptionInfo(excinfo) + #frame, line = gettbline(tb) + #frame = py.code.Frame(frame) + #return interpret(line, frame) + + tb = excinfo.traceback[-1] + source = str(tb.statement).strip() + x = interpret(source, tb.frame, should_fail=True) + if not isinstance(x, str): + raise TypeError("interpret returned non-string %r" % (x,)) + return x + +def getfailure(e): + explanation = e.node.nice_explanation() + if str(e.value): + lines = explanation.split('\n') + lines[0] += " << %s" % (e.value,) + explanation = '\n'.join(lines) + text = "%s: %s" % (e.exc.__name__, explanation) + if text.startswith('AssertionError: assert '): + text = text[16:] + return text + +def run(s, frame=None): + if frame is None: + frame = sys._getframe(1) + frame = py.code.Frame(frame) + module = Interpretable(parse(s, 'exec').node) + try: + module.run(frame) + except Failure: + e = sys.exc_info()[1] + report_failure(e) + + +if __name__ == '__main__': + # example: + def f(): + return 5 + def g(): + return 3 + def h(x): + return 'never' + check("f() * g() == 5") + check("not f()") + check("not (f() and g() or 0)") + check("f() == g()") + i = 4 + check("i == f()") + check("len(f()) == 0") + check("isinstance(2+3+4, float)") + + run("x = i") + check("x == 5") + + run("assert not f(), 'oops'") + run("a, b, c = 1, 2") + run("a, b, c = f()") + + check("max([f(),g()]) == 4") + check("'hello'[g()] == 'h'") + run("'guk%d' % h(f())") diff --git a/_pytest/assertion/reinterpret.py b/_pytest/assertion/reinterpret.py new file mode 100644 --- /dev/null +++ b/_pytest/assertion/reinterpret.py @@ -0,0 +1,48 @@ +import sys +import py + +BuiltinAssertionError = py.builtin.builtins.AssertionError + +class AssertionError(BuiltinAssertionError): + def __init__(self, *args): + 
BuiltinAssertionError.__init__(self, *args) + if args: + try: + self.msg = str(args[0]) + except py.builtin._sysex: + raise + except: + self.msg = "<[broken __repr__] %s at %0xd>" %( + args[0].__class__, id(args[0])) + else: + f = py.code.Frame(sys._getframe(1)) + try: + source = f.code.fullsource + if source is not None: + try: + source = source.getstatement(f.lineno, assertion=True) + except IndexError: + source = None + else: + source = str(source.deindent()).strip() + except py.error.ENOENT: + source = None + # this can also occur during reinterpretation, when the + # co_filename is set to "". + if source: + self.msg = reinterpret(source, f, should_fail=True) + else: + self.msg = "" + if not self.args: + self.args = (self.msg,) + +if sys.version_info > (3, 0): + AssertionError.__module__ = "builtins" + reinterpret_old = "old reinterpretation not available for py3" +else: + from _pytest.assertion.oldinterpret import interpret as reinterpret_old +if sys.version_info >= (2, 6) or (sys.platform.startswith("java")): + from _pytest.assertion.newinterpret import interpret as reinterpret +else: + reinterpret = reinterpret_old + diff --git a/_pytest/assertion/rewrite.py b/_pytest/assertion/rewrite.py new file mode 100644 --- /dev/null +++ b/_pytest/assertion/rewrite.py @@ -0,0 +1,340 @@ +"""Rewrite assertion AST to produce nice error messages""" + +import ast +import collections +import itertools +import sys + +import py +from _pytest.assertion import util + + +def rewrite_asserts(mod): + """Rewrite the assert statements in mod.""" + AssertionRewriter().run(mod) + + +_saferepr = py.io.saferepr +from _pytest.assertion.util import format_explanation as _format_explanation + +def _format_boolop(operands, explanations, is_or): + show_explanations = [] + for operand, expl in zip(operands, explanations): + show_explanations.append(expl) + if operand == is_or: + break + return "(" + (is_or and " or " or " and ").join(show_explanations) + ")" + +def _call_reprcompare(ops, 
results, expls, each_obj): + for i, res, expl in zip(range(len(ops)), results, expls): + try: + done = not res + except Exception: + done = True + if done: + break + if util._reprcompare is not None: + custom = util._reprcompare(ops[i], each_obj[i], each_obj[i + 1]) + if custom is not None: + return custom + return expl + + +unary_map = { + ast.Not : "not %s", + ast.Invert : "~%s", + ast.USub : "-%s", + ast.UAdd : "+%s" +} + +binop_map = { + ast.BitOr : "|", + ast.BitXor : "^", + ast.BitAnd : "&", + ast.LShift : "<<", + ast.RShift : ">>", + ast.Add : "+", + ast.Sub : "-", + ast.Mult : "*", + ast.Div : "/", + ast.FloorDiv : "//", + ast.Mod : "%", + ast.Eq : "==", + ast.NotEq : "!=", + ast.Lt : "<", + ast.LtE : "<=", + ast.Gt : ">", + ast.GtE : ">=", + ast.Pow : "**", + ast.Is : "is", + ast.IsNot : "is not", + ast.In : "in", + ast.NotIn : "not in" +} + + +def set_location(node, lineno, col_offset): + """Set node location information recursively.""" + def _fix(node, lineno, col_offset): + if "lineno" in node._attributes: + node.lineno = lineno + if "col_offset" in node._attributes: + node.col_offset = col_offset + for child in ast.iter_child_nodes(node): + _fix(child, lineno, col_offset) + _fix(node, lineno, col_offset) + return node + + +class AssertionRewriter(ast.NodeVisitor): + + def run(self, mod): + """Find all assert statements in *mod* and rewrite them.""" + if not mod.body: + # Nothing to do. + return + # Insert some special imports at the top of the module but after any + # docstrings and __future__ imports. + aliases = [ast.alias(py.builtin.builtins.__name__, "@py_builtins"), + ast.alias("_pytest.assertion.rewrite", "@pytest_ar")] + expect_docstring = True + pos = 0 + lineno = 0 + for item in mod.body: + if (expect_docstring and isinstance(item, ast.Expr) and + isinstance(item.value, ast.Str)): + doc = item.value.s + if "PYTEST_DONT_REWRITE" in doc: + # The module has disabled assertion rewriting. 
+ return + lineno += len(doc) - 1 + expect_docstring = False + elif (not isinstance(item, ast.ImportFrom) or item.level > 0 and + item.identifier != "__future__"): + lineno = item.lineno + break + pos += 1 + imports = [ast.Import([alias], lineno=lineno, col_offset=0) + for alias in aliases] + mod.body[pos:pos] = imports + # Collect asserts. + nodes = collections.deque([mod]) + while nodes: + node = nodes.popleft() + for name, field in ast.iter_fields(node): + if isinstance(field, list): + new = [] + for i, child in enumerate(field): + if isinstance(child, ast.Assert): + # Transform assert. + new.extend(self.visit(child)) + else: + new.append(child) + if isinstance(child, ast.AST): + nodes.append(child) + setattr(node, name, new) + elif (isinstance(field, ast.AST) and + # Don't recurse into expressions as they can't contain + # asserts. + not isinstance(field, ast.expr)): + nodes.append(field) + + def variable(self): + """Get a new variable.""" + # Use a character invalid in python identifiers to avoid clashing. 
+ name = "@py_assert" + str(next(self.variable_counter)) + self.variables.add(name) + return name + + def assign(self, expr): + """Give *expr* a name.""" + name = self.variable() + self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr)) + return ast.Name(name, ast.Load()) + + def display(self, expr): + """Call py.io.saferepr on the expression.""" + return self.helper("saferepr", expr) + + def helper(self, name, *args): + """Call a helper in this module.""" + py_name = ast.Name("@pytest_ar", ast.Load()) + attr = ast.Attribute(py_name, "_" + name, ast.Load()) + return ast.Call(attr, list(args), [], None, None) + + def builtin(self, name): + """Return the builtin called *name*.""" + builtin_name = ast.Name("@py_builtins", ast.Load()) + return ast.Attribute(builtin_name, name, ast.Load()) + + def explanation_param(self, expr): + specifier = "py" + str(next(self.variable_counter)) + self.explanation_specifiers[specifier] = expr + return "%(" + specifier + ")s" + + def push_format_context(self): + self.explanation_specifiers = {} + self.stack.append(self.explanation_specifiers) + + def pop_format_context(self, expl_expr): + current = self.stack.pop() + if self.stack: + self.explanation_specifiers = self.stack[-1] + keys = [ast.Str(key) for key in current.keys()] + format_dict = ast.Dict(keys, list(current.values())) + form = ast.BinOp(expl_expr, ast.Mod(), format_dict) + name = "@py_format" + str(next(self.variable_counter)) + self.on_failure.append(ast.Assign([ast.Name(name, ast.Store())], form)) + return ast.Name(name, ast.Load()) + + def generic_visit(self, node): + """Handle expressions we don't have custom code for.""" + assert isinstance(node, ast.expr) + res = self.assign(node) + return res, self.explanation_param(self.display(res)) + + def visit_Assert(self, assert_): + if assert_.msg: + # There's already a message. Don't mess with it. 
+ return [assert_] + self.statements = [] + self.variables = set() + self.variable_counter = itertools.count() + self.stack = [] + self.on_failure = [] + self.push_format_context() + # Rewrite assert into a bunch of statements. + top_condition, explanation = self.visit(assert_.test) + # Create failure message. + body = self.on_failure + negation = ast.UnaryOp(ast.Not(), top_condition) + self.statements.append(ast.If(negation, body, [])) + explanation = "assert " + explanation + template = ast.Str(explanation) + msg = self.pop_format_context(template) + fmt = self.helper("format_explanation", msg) + err_name = ast.Name("AssertionError", ast.Load()) + exc = ast.Call(err_name, [fmt], [], None, None) + if sys.version_info[0] >= 3: + raise_ = ast.Raise(exc, None) + else: + raise_ = ast.Raise(exc, None, None) + body.append(raise_) + # Delete temporary variables. + names = [ast.Name(name, ast.Del()) for name in self.variables] + if names: + delete = ast.Delete(names) + self.statements.append(delete) + # Fix line numbers. + for stmt in self.statements: + set_location(stmt, assert_.lineno, assert_.col_offset) + return self.statements + + def visit_Name(self, name): + # Check if the name is local or not. 
+ locs = ast.Call(self.builtin("locals"), [], [], None, None) + globs = ast.Call(self.builtin("globals"), [], [], None, None) + ops = [ast.In(), ast.IsNot()] + test = ast.Compare(ast.Str(name.id), ops, [locs, globs]) + expr = ast.IfExp(test, self.display(name), ast.Str(name.id)) + return name, self.explanation_param(expr) + + def visit_BoolOp(self, boolop): + operands = [] + explanations = [] + self.push_format_context() + for operand in boolop.values: + res, explanation = self.visit(operand) + operands.append(res) + explanations.append(explanation) + expls = ast.Tuple([ast.Str(expl) for expl in explanations], ast.Load()) + is_or = ast.Num(isinstance(boolop.op, ast.Or)) + expl_template = self.helper("format_boolop", + ast.Tuple(operands, ast.Load()), expls, + is_or) + expl = self.pop_format_context(expl_template) + res = self.assign(ast.BoolOp(boolop.op, operands)) + return res, self.explanation_param(expl) + + def visit_UnaryOp(self, unary): + pattern = unary_map[unary.op.__class__] + operand_res, operand_expl = self.visit(unary.operand) + res = self.assign(ast.UnaryOp(unary.op, operand_res)) + return res, pattern % (operand_expl,) + + def visit_BinOp(self, binop): + symbol = binop_map[binop.op.__class__] + left_expr, left_expl = self.visit(binop.left) + right_expr, right_expl = self.visit(binop.right) + explanation = "(%s %s %s)" % (left_expl, symbol, right_expl) + res = self.assign(ast.BinOp(left_expr, binop.op, right_expr)) + return res, explanation + + def visit_Call(self, call): + new_func, func_expl = self.visit(call.func) + arg_expls = [] + new_args = [] + new_kwargs = [] + new_star = new_kwarg = None + for arg in call.args: + res, expl = self.visit(arg) + new_args.append(res) + arg_expls.append(expl) + for keyword in call.keywords: + res, expl = self.visit(keyword.value) + new_kwargs.append(ast.keyword(keyword.arg, res)) + arg_expls.append(keyword.arg + "=" + expl) + if call.starargs: + new_star, expl = self.visit(call.starargs) + arg_expls.append("*" + 
expl) + if call.kwargs: + new_kwarg, expl = self.visit(call.kwarg) + arg_expls.append("**" + expl) + expl = "%s(%s)" % (func_expl, ', '.join(arg_expls)) + new_call = ast.Call(new_func, new_args, new_kwargs, new_star, new_kwarg) + res = self.assign(new_call) + res_expl = self.explanation_param(self.display(res)) + outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl) + return res, outer_expl + + def visit_Attribute(self, attr): + if not isinstance(attr.ctx, ast.Load): + return self.generic_visit(attr) + value, value_expl = self.visit(attr.value) + res = self.assign(ast.Attribute(value, attr.attr, ast.Load())) + res_expl = self.explanation_param(self.display(res)) + pat = "%s\n{%s = %s.%s\n}" + expl = pat % (res_expl, res_expl, value_expl, attr.attr) + return res, expl + + def visit_Compare(self, comp): + self.push_format_context() + left_res, left_expl = self.visit(comp.left) + res_variables = [self.variable() for i in range(len(comp.ops))] + load_names = [ast.Name(v, ast.Load()) for v in res_variables] + store_names = [ast.Name(v, ast.Store()) for v in res_variables] + it = zip(range(len(comp.ops)), comp.ops, comp.comparators) + expls = [] + syms = [] + results = [left_res] + for i, op, next_operand in it: + next_res, next_expl = self.visit(next_operand) + results.append(next_res) + sym = binop_map[op.__class__] + syms.append(ast.Str(sym)) + expl = "%s %s %s" % (left_expl, sym, next_expl) + expls.append(ast.Str(expl)) + res_expr = ast.Compare(left_res, [op], [next_res]) + self.statements.append(ast.Assign([store_names[i]], res_expr)) + left_res, left_expl = next_res, next_expl + # Use py.code._reprcompare if that's available. 
+ expl_call = self.helper("call_reprcompare", + ast.Tuple(syms, ast.Load()), + ast.Tuple(load_names, ast.Load()), + ast.Tuple(expls, ast.Load()), + ast.Tuple(results, ast.Load())) + if len(comp.ops) > 1: + res = ast.BoolOp(ast.And(), load_names) + else: + res = load_names[0] + return res, self.explanation_param(self.pop_format_context(expl_call)) diff --git a/_pytest/assertion/util.py b/_pytest/assertion/util.py new file mode 100644 --- /dev/null +++ b/_pytest/assertion/util.py @@ -0,0 +1,213 @@ +"""Utilities for assertion debugging""" + +import py + + +# The _reprcompare attribute on the util module is used by the new assertion +# interpretation code and assertion rewriter to detect this plugin was +# loaded and in turn call the hooks defined here as part of the +# DebugInterpreter. +_reprcompare = None + +def format_explanation(explanation): + """This formats an explanation + + Normally all embedded newlines are escaped, however there are + three exceptions: \n{, \n} and \n~. The first two are intended + cover nested explanations, see function and attribute explanations + for examples (.visit_Call(), visit_Attribute()). The last one is + for when one explanation needs to span multiple lines, e.g. when + displaying diffs. + """ + # simplify 'assert False where False = ...' 
+ where = 0 + while True: + start = where = explanation.find("False\n{False = ", where) + if where == -1: + break + level = 0 + for i, c in enumerate(explanation[start:]): + if c == "{": + level += 1 + elif c == "}": + level -= 1 + if not level: + break + else: + raise AssertionError("unbalanced braces: %r" % (explanation,)) + end = start + i + where = end + if explanation[end - 1] == '\n': + explanation = (explanation[:start] + explanation[start+15:end-1] + + explanation[end+1:]) + where -= 17 + raw_lines = (explanation or '').split('\n') + # escape newlines not followed by {, } and ~ + lines = [raw_lines[0]] + for l in raw_lines[1:]: + if l.startswith('{') or l.startswith('}') or l.startswith('~'): + lines.append(l) + else: + lines[-1] += '\\n' + l + + result = lines[:1] + stack = [0] + stackcnt = [0] + for line in lines[1:]: + if line.startswith('{'): + if stackcnt[-1]: + s = 'and ' + else: + s = 'where ' + stack.append(len(result)) + stackcnt[-1] += 1 + stackcnt.append(0) + result.append(' +' + ' '*(len(stack)-1) + s + line[1:]) + elif line.startswith('}'): + assert line.startswith('}') + stack.pop() + stackcnt.pop() + result[stack[-1]] += line[1:] + else: + assert line.startswith('~') + result.append(' '*len(stack) + line[1:]) + assert len(stack) == 1 + return '\n'.join(result) + + +# Provide basestring in python3 +try: + basestring = basestring +except NameError: + basestring = str + + +def assertrepr_compare(op, left, right): + """return specialised explanations for some operators/operands""" + width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op + left_repr = py.io.saferepr(left, maxsize=int(width/2)) + right_repr = py.io.saferepr(right, maxsize=width-len(left_repr)) + summary = '%s %s %s' % (left_repr, op, right_repr) + + issequence = lambda x: isinstance(x, (list, tuple)) + istext = lambda x: isinstance(x, basestring) + isdict = lambda x: isinstance(x, dict) + isset = lambda x: isinstance(x, set) + + explanation = None + try: + if op == 
'==': + if istext(left) and istext(right): + explanation = _diff_text(left, right) + elif issequence(left) and issequence(right): + explanation = _compare_eq_sequence(left, right) + elif isset(left) and isset(right): + explanation = _compare_eq_set(left, right) + elif isdict(left) and isdict(right): + explanation = _diff_text(py.std.pprint.pformat(left), + py.std.pprint.pformat(right)) + elif op == 'not in': + if istext(left) and istext(right): + explanation = _notin_text(left, right) + except py.builtin._sysex: + raise + except: + excinfo = py.code.ExceptionInfo() + explanation = ['(pytest_assertion plugin: representation of ' + 'details failed. Probably an object has a faulty __repr__.)', + str(excinfo) + ] + + + if not explanation: + return None + + # Don't include pageloads of data, should be configurable + if len(''.join(explanation)) > 80*8: + explanation = ['Detailed information too verbose, truncated'] + + return [summary] + explanation + + +def _diff_text(left, right): + """Return the explanation for the diff between text + + This will skip leading and trailing characters which are + identical to keep the diff minimal. 
+ """ + explanation = [] + i = 0 # just in case left or right has zero length + for i in range(min(len(left), len(right))): + if left[i] != right[i]: + break + if i > 42: + i -= 10 # Provide some context + explanation = ['Skipping %s identical ' + 'leading characters in diff' % i] + left = left[i:] + right = right[i:] + if len(left) == len(right): + for i in range(len(left)): + if left[-i] != right[-i]: + break + if i > 42: + i -= 10 # Provide some context + explanation += ['Skipping %s identical ' + 'trailing characters in diff' % i] + left = left[:-i] + right = right[:-i] + explanation += [line.strip('\n') + for line in py.std.difflib.ndiff(left.splitlines(), + right.splitlines())] + return explanation + + +def _compare_eq_sequence(left, right): + explanation = [] + for i in range(min(len(left), len(right))): + if left[i] != right[i]: + explanation += ['At index %s diff: %r != %r' % + (i, left[i], right[i])] + break + if len(left) > len(right): + explanation += ['Left contains more items, ' + 'first extra item: %s' % py.io.saferepr(left[len(right)],)] + elif len(left) < len(right): + explanation += ['Right contains more items, ' + 'first extra item: %s' % py.io.saferepr(right[len(left)],)] + return explanation # + _diff_text(py.std.pprint.pformat(left), + # py.std.pprint.pformat(right)) + + +def _compare_eq_set(left, right): + explanation = [] + diff_left = left - right + diff_right = right - left + if diff_left: + explanation.append('Extra items in the left set:') + for item in diff_left: + explanation.append(py.io.saferepr(item)) + if diff_right: + explanation.append('Extra items in the right set:') + for item in diff_right: + explanation.append(py.io.saferepr(item)) + return explanation + + +def _notin_text(term, text): + index = text.find(term) + head = text[:index] + tail = text[index+len(term):] + correct_text = head + tail + diff = _diff_text(correct_text, text) + newdiff = ['%s is contained here:' % py.io.saferepr(term, maxsize=42)] + for line in diff: + 
if line.startswith('Skipping'): + continue + if line.startswith('- '): + continue + if line.startswith('+ '): + newdiff.append(' ' + line[2:]) + else: + newdiff.append(line) + return newdiff diff --git a/_pytest/config.py b/_pytest/config.py --- a/_pytest/config.py +++ b/_pytest/config.py @@ -12,6 +12,10 @@ config.trace.root.setwriter(sys.stderr.write) return config +def pytest_unconfigure(config): + for func in config._cleanup: + func() + class Parser: """ Parser for command line arguments. """ @@ -251,7 +255,8 @@ self._conftest = Conftest(onimport=self._onimportconftest) self.hook = self.pluginmanager.hook self._inicache = {} - + self._cleanup = [] + @classmethod def fromdictargs(cls, option_dict, args): """ constructor useable for subprocesses. """ diff --git a/_pytest/core.py b/_pytest/core.py --- a/_pytest/core.py +++ b/_pytest/core.py @@ -265,8 +265,15 @@ config.hook.pytest_unconfigure(config=config) config.pluginmanager.unregister(self) - def notify_exception(self, excinfo): - excrepr = excinfo.getrepr(funcargs=True, showlocals=True) + def notify_exception(self, excinfo, option=None): + if option and option.fulltrace: + style = "long" + else: + style = "native" + excrepr = excinfo.getrepr(funcargs=True, + showlocals=getattr(option, 'showlocals', False), + style=style, + ) res = self.hook.pytest_internalerror(excrepr=excrepr) if not py.builtin.any(res): for line in str(excrepr).split("\n"): diff --git a/_pytest/doctest.py b/_pytest/doctest.py --- a/_pytest/doctest.py +++ b/_pytest/doctest.py @@ -59,7 +59,7 @@ inner_excinfo = py.code.ExceptionInfo(excinfo.value.exc_info) lines += ["UNEXPECTED EXCEPTION: %s" % repr(inner_excinfo.value)] - + lines += py.std.traceback.format_exception(*excinfo.value.exc_info) return ReprFailDoctest(reprlocation, lines) else: return super(DoctestItem, self).repr_failure(excinfo) diff --git a/_pytest/helpconfig.py b/_pytest/helpconfig.py --- a/_pytest/helpconfig.py +++ b/_pytest/helpconfig.py @@ -16,9 +16,6 @@ 
group.addoption('--traceconfig', action="store_true", dest="traceconfig", default=False, help="trace considerations of conftest.py files."), - group._addoption('--nomagic', - action="store_true", dest="nomagic", default=False, - help="don't reinterpret asserts, no traceback cutting. ") group.addoption('--debug', action="store_true", dest="debug", default=False, help="generate and show internal debugging information.") diff --git a/_pytest/junitxml.py b/_pytest/junitxml.py --- a/_pytest/junitxml.py +++ b/_pytest/junitxml.py @@ -5,8 +5,42 @@ import py import os +import re +import sys import time + +# Python 2.X and 3.X compatibility +try: + unichr(65) +except NameError: + unichr = chr +try: + unicode('A') +except NameError: + unicode = str +try: + long(1) +except NameError: + long = int + + +# We need to get the subset of the invalid unicode ranges according to +# XML 1.0 which are valid in this python build. Hence we calculate +# this dynamically instead of hardcoding it. The spec range of valid +# chars is: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] +# | [#x10000-#x10FFFF] +_illegal_unichrs = [(0x00, 0x08), (0x0B, 0x0C), (0x0E, 0x19), + (0xD800, 0xDFFF), (0xFDD0, 0xFFFF)] +_illegal_ranges = [unicode("%s-%s") % (unichr(low), unichr(high)) + for (low, high) in _illegal_unichrs + if low < sys.maxunicode] +illegal_xml_re = re.compile(unicode('[%s]') % + unicode('').join(_illegal_ranges)) +del _illegal_unichrs +del _illegal_ranges + + def pytest_addoption(parser): group = parser.getgroup("terminal reporting") group.addoption('--junitxml', action="store", dest="xmlpath", @@ -28,9 +62,11 @@ del config._xml config.pluginmanager.unregister(xml) + class LogXML(object): def __init__(self, logfile, prefix): - self.logfile = logfile + logfile = os.path.expanduser(os.path.expandvars(logfile)) + self.logfile = os.path.normpath(logfile) self.prefix = prefix self.test_logs = [] self.passed = self.skipped = 0 @@ -41,7 +77,7 @@ names = report.nodeid.split("::") 
names[0] = names[0].replace("/", '.') names = tuple(names) - d = {'time': self._durations.pop(names, "0")} + d = {'time': self._durations.pop(report.nodeid, "0")} names = [x.replace(".py", "") for x in names if x != "()"] classnames = names[:-1] if self.prefix: @@ -55,7 +91,14 @@ self.test_logs.append("") def appendlog(self, fmt, *args): - args = tuple([py.xml.escape(arg) for arg in args]) + def repl(matchobj): + i = ord(matchobj.group()) + if i <= 0xFF: + return unicode('#x%02X') % i + else: + return unicode('#x%04X') % i + args = tuple([illegal_xml_re.sub(repl, py.xml.escape(arg)) + for arg in args]) self.test_logs.append(fmt % args) def append_pass(self, report): @@ -128,12 +171,11 @@ self.append_skipped(report) def pytest_runtest_call(self, item, __multicall__): - names = tuple(item.listnames()) start = time.time() try: return __multicall__.execute() finally: - self._durations[names] = time.time() - start + self._durations[item.nodeid] = time.time() - start def pytest_collectreport(self, report): if not report.passed: diff --git a/_pytest/main.py b/_pytest/main.py --- a/_pytest/main.py +++ b/_pytest/main.py @@ -46,23 +46,25 @@ def pytest_namespace(): - return dict(collect=dict(Item=Item, Collector=Collector, File=File)) + collect = dict(Item=Item, Collector=Collector, File=File, Session=Session) + return dict(collect=collect) def pytest_configure(config): py.test.config = config # compatibiltiy if config.option.exitfirst: config.option.maxfail = 1 -def pytest_cmdline_main(config): - """ default command line protocol for initialization, session, - running tests and reporting. 
""" +def wrap_session(config, doit): + """Skeleton command line program""" session = Session(config) session.exitstatus = EXIT_OK + initstate = 0 try: config.pluginmanager.do_configure(config) + initstate = 1 config.hook.pytest_sessionstart(session=session) - config.hook.pytest_collection(session=session) - config.hook.pytest_runtestloop(session=session) + initstate = 2 + doit(config, session) except pytest.UsageError: raise except KeyboardInterrupt: @@ -71,24 +73,30 @@ session.exitstatus = EXIT_INTERRUPTED except: excinfo = py.code.ExceptionInfo() - config.pluginmanager.notify_exception(excinfo) + config.pluginmanager.notify_exception(excinfo, config.option) session.exitstatus = EXIT_INTERNALERROR if excinfo.errisinstance(SystemExit): sys.stderr.write("mainloop: caught Spurious SystemExit!\n") if not session.exitstatus and session._testsfailed: session.exitstatus = EXIT_TESTSFAILED - config.hook.pytest_sessionfinish(session=session, - exitstatus=session.exitstatus) - config.pluginmanager.do_unconfigure(config) + if initstate >= 2: + config.hook.pytest_sessionfinish(session=session, + exitstatus=session.exitstatus) + if initstate >= 1: + config.pluginmanager.do_unconfigure(config) return session.exitstatus +def pytest_cmdline_main(config): + return wrap_session(config, _main) + +def _main(config, session): + """ default command line protocol for initialization, session, + running tests and reporting. 
""" + config.hook.pytest_collection(session=session) + config.hook.pytest_runtestloop(session=session) + def pytest_collection(session): - session.perform_collect() - hook = session.config.hook - hook.pytest_collection_modifyitems(session=session, - config=session.config, items=session.items) - hook.pytest_collection_finish(session=session) - return True + return session.perform_collect() def pytest_runtestloop(session): if session.config.option.collectonly: @@ -374,6 +382,16 @@ return HookProxy(fspath, self.config) def perform_collect(self, args=None, genitems=True): + hook = self.config.hook + try: + items = self._perform_collect(args, genitems) + hook.pytest_collection_modifyitems(session=self, + config=self.config, items=items) + finally: + hook.pytest_collection_finish(session=self) + return items + + def _perform_collect(self, args, genitems): if args is None: args = self.config.args self.trace("perform_collect", self, args) diff --git a/_pytest/mark.py b/_pytest/mark.py --- a/_pytest/mark.py +++ b/_pytest/mark.py @@ -153,7 +153,7 @@ def __repr__(self): return "" % ( - self._name, self.args, self.kwargs) + self.name, self.args, self.kwargs) def pytest_itemcollected(item): if not isinstance(item, pytest.Function): diff --git a/_pytest/pytester.py b/_pytest/pytester.py --- a/_pytest/pytester.py +++ b/_pytest/pytester.py @@ -6,7 +6,7 @@ import inspect import time from fnmatch import fnmatch -from _pytest.main import Session +from _pytest.main import Session, EXIT_OK from py.builtin import print_ from _pytest.core import HookRelay @@ -236,13 +236,14 @@ def _makefile(self, ext, args, kwargs): items = list(kwargs.items()) if args: - source = "\n".join(map(str, args)) + "\n" + source = py.builtin._totext("\n").join( + map(py.builtin._totext, args)) + py.builtin._totext("\n") basename = self.request.function.__name__ items.insert(0, (basename, source)) ret = None for name, value in items: p = self.tmpdir.join(name).new(ext=ext) - source = 
str(py.code.Source(value)).lstrip() + source = py.builtin._totext(py.code.Source(value)).lstrip() p.write(source.encode("utf-8"), "wb") if ret is None: ret = p @@ -291,13 +292,19 @@ assert '::' not in str(arg) p = py.path.local(arg) x = session.fspath.bestrelpath(p) - return session.perform_collect([x], genitems=False)[0] + config.hook.pytest_sessionstart(session=session) + res = session.perform_collect([x], genitems=False)[0] + config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK) + return res def getpathnode(self, path): - config = self.parseconfig(path) + config = self.parseconfigure(path) session = Session(config) x = session.fspath.bestrelpath(path) - return session.perform_collect([x], genitems=False)[0] + config.hook.pytest_sessionstart(session=session) + res = session.perform_collect([x], genitems=False)[0] + config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK) + return res def genitems(self, colitems): session = colitems[0].session @@ -311,7 +318,9 @@ config = self.parseconfigure(*args) rec = self.getreportrecorder(config) session = Session(config) + config.hook.pytest_sessionstart(session=session) session.perform_collect() + config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK) return session.items, rec def runitem(self, source): @@ -381,6 +390,8 @@ c.basetemp = py.path.local.make_numbered_dir(prefix="reparse", keep=0, rootdir=self.tmpdir, lock_timeout=None) c.parse(args) + c.pluginmanager.do_configure(c) + self.request.addfinalizer(lambda: c.pluginmanager.do_unconfigure(c)) return c finally: py.test.config = oldconfig diff --git a/_pytest/python.py b/_pytest/python.py --- a/_pytest/python.py +++ b/_pytest/python.py @@ -226,8 +226,13 @@ def _importtestmodule(self): # we assume we are only called once per module + from _pytest import assertion + assertion.before_module_import(self) try: - mod = self.fspath.pyimport(ensuresyspath=True) + try: + mod = self.fspath.pyimport(ensuresyspath=True) + finally: + 
assertion.after_module_import(self) except SyntaxError: excinfo = py.code.ExceptionInfo() raise self.CollectError(excinfo.getrepr(style="short")) @@ -374,7 +379,7 @@ # test generators are seen as collectors but they also # invoke setup/teardown on popular request # (induced by the common "test_*" naming shared with normal tests) - self.config._setupstate.prepare(self) + self.session._setupstate.prepare(self) # see FunctionMixin.setup and test_setupstate_is_preserved_134 self._preservedparent = self.parent.obj l = [] @@ -721,7 +726,7 @@ def _addfinalizer(self, finalizer, scope): colitem = self._getscopeitem(scope) - self.config._setupstate.addfinalizer( + self._pyfuncitem.session._setupstate.addfinalizer( finalizer=finalizer, colitem=colitem) def __repr__(self): @@ -742,8 +747,10 @@ raise self.LookupError(msg) def showfuncargs(config): - from _pytest.main import Session - session = Session(config) + from _pytest.main import wrap_session + return wrap_session(config, _showfuncargs_main) + +def _showfuncargs_main(config, session): session.perform_collect() if session.items: plugins = session.items[0].getplugins() diff --git a/_pytest/resultlog.py b/_pytest/resultlog.py --- a/_pytest/resultlog.py +++ b/_pytest/resultlog.py @@ -74,7 +74,7 @@ elif report.failed: longrepr = str(report.longrepr) elif report.skipped: - longrepr = str(report.longrepr) + longrepr = str(report.longrepr[2]) self.log_outcome(report, code, longrepr) def pytest_collectreport(self, report): diff --git a/_pytest/runner.py b/_pytest/runner.py --- a/_pytest/runner.py +++ b/_pytest/runner.py @@ -14,17 +14,15 @@ # # pytest plugin hooks -# XXX move to pytest_sessionstart and fix py.test owns tests -def pytest_configure(config): - config._setupstate = SetupState() +def pytest_sessionstart(session): + session._setupstate = SetupState() def pytest_sessionfinish(session, exitstatus): - if hasattr(session.config, '_setupstate'): - hook = session.config.hook - rep = hook.pytest__teardown_final(session=session) 
- if rep: - hook.pytest__teardown_final_logerror(session=session, report=rep) - session.exitstatus = 1 + hook = session.config.hook + rep = hook.pytest__teardown_final(session=session) + if rep: + hook.pytest__teardown_final_logerror(session=session, report=rep) + session.exitstatus = 1 class NodeInfo: def __init__(self, location): @@ -46,16 +44,16 @@ return reports def pytest_runtest_setup(item): - item.config._setupstate.prepare(item) + item.session._setupstate.prepare(item) def pytest_runtest_call(item): item.runtest() def pytest_runtest_teardown(item): - item.config._setupstate.teardown_exact(item) + item.session._setupstate.teardown_exact(item) def pytest__teardown_final(session): - call = CallInfo(session.config._setupstate.teardown_all, when="teardown") + call = CallInfo(session._setupstate.teardown_all, when="teardown") if call.excinfo: ntraceback = call.excinfo.traceback .cut(excludepath=py._pydir) call.excinfo.traceback = ntraceback.filter() diff --git a/_pytest/tmpdir.py b/_pytest/tmpdir.py --- a/_pytest/tmpdir.py +++ b/_pytest/tmpdir.py @@ -48,15 +48,12 @@ self.trace("finish") def pytest_configure(config): - config._mp = mp = monkeypatch() + mp = monkeypatch() t = TempdirHandler(config) + config._cleanup.extend([mp.undo, t.finish]) mp.setattr(config, '_tmpdirhandler', t, raising=False) mp.setattr(pytest, 'ensuretemp', t.ensuretemp, raising=False) -def pytest_unconfigure(config): - config._tmpdirhandler.finish() - config._mp.undo() - def pytest_funcarg__tmpdir(request): """return a temporary directory path object which is unique to each test function invocation, diff --git a/ctypes_configure/cbuild.py b/ctypes_configure/cbuild.py --- a/ctypes_configure/cbuild.py +++ b/ctypes_configure/cbuild.py @@ -206,8 +206,9 @@ cfiles += eci.separate_module_files include_dirs = list(eci.include_dirs) library_dirs = list(eci.library_dirs) - if sys.platform == 'darwin': # support Fink & Darwinports - for s in ('/sw/', '/opt/local/'): + if (sys.platform == 'darwin' or # 
support Fink & Darwinports + sys.platform.startswith('freebsd')): + for s in ('/sw/', '/opt/local/', '/usr/local/'): if s + 'include' not in include_dirs and \ os.path.exists(s + 'include'): include_dirs.append(s + 'include') @@ -380,9 +381,9 @@ self.link_extra += ['-pthread'] if sys.platform == 'win32': self.link_extra += ['/DEBUG'] # generate .pdb file - if sys.platform == 'darwin': - # support Fink & Darwinports - for s in ('/sw/', '/opt/local/'): + if (sys.platform == 'darwin' or # support Fink & Darwinports + sys.platform.startswith('freebsd')): + for s in ('/sw/', '/opt/local/', '/usr/local/'): if s + 'include' not in self.include_dirs and \ os.path.exists(s + 'include'): self.include_dirs.append(s + 'include') @@ -395,7 +396,6 @@ self.outputfilename = py.path.local(cfilenames[0]).new(ext=ext) else: self.outputfilename = py.path.local(outputfilename) - self.eci = eci def build(self, noerr=False): basename = self.outputfilename.new(ext='') @@ -436,7 +436,7 @@ old = cfile.dirpath().chdir() try: res = compiler.compile([cfile.basename], - include_dirs=self.eci.include_dirs, + include_dirs=self.include_dirs, extra_preargs=self.compile_extra) assert len(res) == 1 cobjfile = py.path.local(res[0]) @@ -445,9 +445,9 @@ finally: old.chdir() compiler.link_executable(objects, str(self.outputfilename), - libraries=self.eci.libraries, + libraries=self.libraries, extra_preargs=self.link_extra, - library_dirs=self.eci.library_dirs) + library_dirs=self.library_dirs) def build_executable(*args, **kwds): noerr = kwds.pop('noerr', False) diff --git a/ctypes_configure/configure.py b/ctypes_configure/configure.py --- a/ctypes_configure/configure.py +++ b/ctypes_configure/configure.py @@ -559,7 +559,9 @@ C_HEADER = """ #include #include /* for offsetof() */ -#include /* FreeBSD: for uint64_t */ +#ifndef _WIN32 +# include /* FreeBSD: for uint64_t */ +#endif void dump(char* key, int value) { printf("%s: %d\\n", key, value); diff --git a/ctypes_configure/stdoutcapture.py 
b/ctypes_configure/stdoutcapture.py --- a/ctypes_configure/stdoutcapture.py +++ b/ctypes_configure/stdoutcapture.py @@ -15,6 +15,15 @@ not hasattr(os, 'fdopen')): self.dummy = 1 else: + try: + self.tmpout = os.tmpfile() + if mixed_out_err: + self.tmperr = self.tmpout + else: + self.tmperr = os.tmpfile() + except OSError: # bah? on at least one Windows box + self.dummy = 1 + return self.dummy = 0 # make new stdout/stderr files if needed self.localoutfd = os.dup(1) @@ -29,11 +38,6 @@ sys.stderr = os.fdopen(self.localerrfd, 'w', 0) else: self.saved_stderr = None - self.tmpout = os.tmpfile() - if mixed_out_err: - self.tmperr = self.tmpout - else: - self.tmperr = os.tmpfile() os.dup2(self.tmpout.fileno(), 1) os.dup2(self.tmperr.fileno(), 2) diff --git a/dotviewer/graphparse.py b/dotviewer/graphparse.py --- a/dotviewer/graphparse.py +++ b/dotviewer/graphparse.py @@ -36,48 +36,45 @@ print >> sys.stderr, "Warning: could not guess file type, using 'dot'" return 'unknown' -def dot2plain(content, contenttype, use_codespeak=False): - if contenttype == 'plain': - # already a .plain file - return content +def dot2plain_graphviz(content, contenttype, use_codespeak=False): + if contenttype != 'neato': + cmdline = 'dot -Tplain' + else: + cmdline = 'neato -Tplain' + #print >> sys.stderr, '* running:', cmdline + close_fds = sys.platform != 'win32' + p = subprocess.Popen(cmdline, shell=True, close_fds=close_fds, + stdin=subprocess.PIPE, stdout=subprocess.PIPE) + (child_in, child_out) = (p.stdin, p.stdout) + try: + import thread + except ImportError: + bkgndwrite(child_in, content) + else: + thread.start_new_thread(bkgndwrite, (child_in, content)) + plaincontent = child_out.read() + child_out.close() + if not plaincontent: # 'dot' is likely not installed + raise PlainParseError("no result from running 'dot'") + return plaincontent - if not use_codespeak: - if contenttype != 'neato': - cmdline = 'dot -Tplain' - else: - cmdline = 'neato -Tplain' - #print >> sys.stderr, '* running:', 
cmdline - close_fds = sys.platform != 'win32' - p = subprocess.Popen(cmdline, shell=True, close_fds=close_fds, - stdin=subprocess.PIPE, stdout=subprocess.PIPE) - (child_in, child_out) = (p.stdin, p.stdout) - try: - import thread - except ImportError: - bkgndwrite(child_in, content) - else: - thread.start_new_thread(bkgndwrite, (child_in, content)) - plaincontent = child_out.read() - child_out.close() - if not plaincontent: # 'dot' is likely not installed - raise PlainParseError("no result from running 'dot'") - else: - import urllib - request = urllib.urlencode({'dot': content}) - url = 'http://codespeak.net/pypy/convertdot.cgi' - print >> sys.stderr, '* posting:', url - g = urllib.urlopen(url, data=request) - result = [] - while True: - data = g.read(16384) - if not data: - break - result.append(data) - g.close() - plaincontent = ''.join(result) - # very simple-minded way to give a somewhat better error message - if plaincontent.startswith('> sys.stderr, '* posting:', url + g = urllib.urlopen(url, data=request) + result = [] + while True: + data = g.read(16384) + if not data: + break + result.append(data) + g.close() + plaincontent = ''.join(result) + # very simple-minded way to give a somewhat better error message + if plaincontent.startswith(' H. Frystyk Nielsen -# Expires September 8, 1995 March 8, 1995 -# -# URL: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt -# -# and -# -# Network Working Group R. Fielding -# Request for Comments: 2616 et al -# Obsoletes: 2068 June 1999 -# Category: Standards Track -# -# URL: http://www.faqs.org/rfcs/rfc2616.html - -# Log files -# --------- -# -# Here's a quote from the NCSA httpd docs about log file format. -# -# | The logfile format is as follows. Each line consists of: -# | -# | host rfc931 authuser [DD/Mon/YYYY:hh:mm:ss] "request" ddd bbbb -# | -# | host: Either the DNS name or the IP number of the remote client -# | rfc931: Any information returned by identd for this person, -# | - otherwise. 
-# | authuser: If user sent a userid for authentication, the user name, -# | - otherwise. -# | DD: Day -# | Mon: Month (calendar name) -# | YYYY: Year -# | hh: hour (24-hour format, the machine's timezone) -# | mm: minutes -# | ss: seconds -# | request: The first line of the HTTP request as sent by the client. -# | ddd: the status code returned by the server, - if not available. -# | bbbb: the total number of bytes sent, -# | *not including the HTTP/1.0 header*, - if not available -# | -# | You can determine the name of the file accessed through request. -# -# (Actually, the latter is only true if you know the server configuration -# at the time the request was made!) - -__version__ = "0.3" - -__all__ = ["HTTPServer", "BaseHTTPRequestHandler"] - -import sys -import time -import socket # For gethostbyaddr() -import mimetools -import SocketServer - -# Default error message -DEFAULT_ERROR_MESSAGE = """\ - -Error response - - -

    Error response

    -

    Error code %(code)d. -

    Message: %(message)s. -

    Error code explanation: %(code)s = %(explain)s. - -""" - -def _quote_html(html): - return html.replace("&", "&").replace("<", "<").replace(">", ">") - -class HTTPServer(SocketServer.TCPServer): - - allow_reuse_address = 1 # Seems to make sense in testing environment - - def server_bind(self): - """Override server_bind to store the server name.""" - SocketServer.TCPServer.server_bind(self) - host, port = self.socket.getsockname()[:2] - self.server_name = socket.getfqdn(host) - self.server_port = port - - -class BaseHTTPRequestHandler(SocketServer.StreamRequestHandler): - - """HTTP request handler base class. - - The following explanation of HTTP serves to guide you through the - code as well as to expose any misunderstandings I may have about - HTTP (so you don't need to read the code to figure out I'm wrong - :-). - - HTTP (HyperText Transfer Protocol) is an extensible protocol on - top of a reliable stream transport (e.g. TCP/IP). The protocol - recognizes three parts to a request: - - 1. One line identifying the request type and path - 2. An optional set of RFC-822-style headers - 3. An optional data part - - The headers and data are separated by a blank line. - - The first line of the request has the form - - - - where is a (case-sensitive) keyword such as GET or POST, - is a string containing path information for the request, - and should be the string "HTTP/1.0" or "HTTP/1.1". - is encoded using the URL encoding scheme (using %xx to signify - the ASCII character with hex code xx). - - The specification specifies that lines are separated by CRLF but - for compatibility with the widest range of clients recommends - servers also handle LF. Similarly, whitespace in the request line - is treated sensibly (allowing multiple spaces between components - and allowing trailing whitespace). - - Similarly, for output, lines ought to be separated by CRLF pairs - but most clients grok LF characters just fine. 
- - If the first line of the request has the form - - - - (i.e. is left out) then this is assumed to be an HTTP - 0.9 request; this form has no optional headers and data part and - the reply consists of just the data. - - The reply form of the HTTP 1.x protocol again has three parts: - - 1. One line giving the response code - 2. An optional set of RFC-822-style headers - 3. The data - - Again, the headers and data are separated by a blank line. - - The response code line has the form - - - - where is the protocol version ("HTTP/1.0" or "HTTP/1.1"), - is a 3-digit response code indicating success or - failure of the request, and is an optional - human-readable string explaining what the response code means. - - This server parses the request and the headers, and then calls a - function specific to the request type (). Specifically, - a request SPAM will be handled by a method do_SPAM(). If no - such method exists the server sends an error response to the - client. If it exists, it is called with no arguments: - - do_SPAM() - - Note that the request name is case sensitive (i.e. SPAM and spam - are different requests). - - The various request details are stored in instance variables: - - - client_address is the client IP address in the form (host, - port); - - - command, path and version are the broken-down request line; - - - headers is an instance of mimetools.Message (or a derived - class) containing the header information; - - - rfile is a file object open for reading positioned at the - start of the optional input data part; - - - wfile is a file object open for writing. - - IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING! - - The first thing to be written must be the response line. Then - follow 0 or more header lines, then a blank line, and then the - actual data (if any). 
The meaning of the header lines depends on - the command executed by the server; in most cases, when data is - returned, there should be at least one header line of the form - - Content-type: / - - where and should be registered MIME types, - e.g. "text/html" or "text/plain". - - """ - - # The Python system version, truncated to its first component. - sys_version = "Python/" + sys.version.split()[0] - - # The server software version. You may want to override this. - # The format is multiple whitespace-separated strings, - # where each string is of the form name[/version]. - server_version = "BaseHTTP/" + __version__ - - def parse_request(self): - """Parse a request (internal). - - The request should be stored in self.raw_requestline; the results - are in self.command, self.path, self.request_version and - self.headers. - - Return True for success, False for failure; on failure, an - error is sent back. - - """ - self.command = None # set in case of error on the first line - self.request_version = version = "HTTP/0.9" # Default - self.close_connection = 1 - requestline = self.raw_requestline - if requestline[-2:] == '\r\n': - requestline = requestline[:-2] - elif requestline[-1:] == '\n': - requestline = requestline[:-1] - self.requestline = requestline - words = requestline.split() - if len(words) == 3: - [command, path, version] = words - if version[:5] != 'HTTP/': - self.send_error(400, "Bad request version (%r)" % version) - return False - try: - base_version_number = version.split('/', 1)[1] - version_number = base_version_number.split(".") - # RFC 2145 section 3.1 says there can be only one "." and - # - major and minor numbers MUST be treated as - # separate integers; - # - HTTP/2.4 is a lower version than HTTP/2.13, which in - # turn is lower than HTTP/12.3; - # - Leading zeros MUST be ignored by recipients. 
- if len(version_number) != 2: - raise ValueError - version_number = int(version_number[0]), int(version_number[1]) - except (ValueError, IndexError): - self.send_error(400, "Bad request version (%r)" % version) - return False - if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1": - self.close_connection = 0 - if version_number >= (2, 0): - self.send_error(505, - "Invalid HTTP Version (%s)" % base_version_number) - return False - elif len(words) == 2: - [command, path] = words - self.close_connection = 1 - if command != 'GET': - self.send_error(400, - "Bad HTTP/0.9 request type (%r)" % command) - return False - elif not words: - return False - else: - self.send_error(400, "Bad request syntax (%r)" % requestline) - return False - self.command, self.path, self.request_version = command, path, version - - # Examine the headers and look for a Connection directive - self.headers = self.MessageClass(self.rfile, 0) - - conntype = self.headers.get('Connection', "") - if conntype.lower() == 'close': - self.close_connection = 1 - elif (conntype.lower() == 'keep-alive' and - self.protocol_version >= "HTTP/1.1"): - self.close_connection = 0 - return True - - def handle_one_request(self): - """Handle a single HTTP request. - - You normally don't need to override this method; see the class - __doc__ string for information on how to handle specific HTTP - commands such as GET and POST. 
- - """ - self.raw_requestline = self.rfile.readline() - if not self.raw_requestline: - self.close_connection = 1 - return - if not self.parse_request(): # An error code has been sent, just exit - return - mname = 'do_' + self.command - if not hasattr(self, mname): - self.send_error(501, "Unsupported method (%r)" % self.command) - return - method = getattr(self, mname) - method() - - def handle(self): - """Handle multiple requests if necessary.""" - self.close_connection = 1 - - self.handle_one_request() - while not self.close_connection: - self.handle_one_request() - - def send_error(self, code, message=None): - """Send and log an error reply. - - Arguments are the error code, and a detailed message. - The detailed message defaults to the short entry matching the - response code. - - This sends an error response (so it must be called before any - output has been generated), logs the error, and finally sends - a piece of HTML explaining the error to the user. - - """ - - try: - short, long = self.responses[code] - except KeyError: - short, long = '???', '???' - if message is None: - message = short - explain = long - self.log_error("code %d, message %s", code, message) - # using _quote_html to prevent Cross Site Scripting attacks (see bug #1100201) - content = (self.error_message_format % - {'code': code, 'message': _quote_html(message), 'explain': explain}) - self.send_response(code, message) - self.send_header("Content-Type", "text/html") - self.send_header('Connection', 'close') - self.end_headers() - if self.command != 'HEAD' and code >= 200 and code not in (204, 304): - self.wfile.write(content) - - error_message_format = DEFAULT_ERROR_MESSAGE - - def send_response(self, code, message=None): - """Send the response header and log the response code. - - Also send two standard headers with the server software - version and the current date. 
- - """ - self.log_request(code) - if message is None: - if code in self.responses: - message = self.responses[code][0] - else: - message = '' - if self.request_version != 'HTTP/0.9': - self.wfile.write("%s %d %s\r\n" % - (self.protocol_version, code, message)) - # print (self.protocol_version, code, message) - self.send_header('Server', self.version_string()) - self.send_header('Date', self.date_time_string()) - - def send_header(self, keyword, value): - """Send a MIME header.""" - if self.request_version != 'HTTP/0.9': - self.wfile.write("%s: %s\r\n" % (keyword, value)) - - if keyword.lower() == 'connection': - if value.lower() == 'close': - self.close_connection = 1 - elif value.lower() == 'keep-alive': - self.close_connection = 0 - - def end_headers(self): - """Send the blank line ending the MIME headers.""" - if self.request_version != 'HTTP/0.9': - self.wfile.write("\r\n") - - def log_request(self, code='-', size='-'): - """Log an accepted request. - - This is called by send_response(). - - """ - - self.log_message('"%s" %s %s', - self.requestline, str(code), str(size)) - - def log_error(self, *args): - """Log an error. - - This is called when a request cannot be fulfilled. By - default it passes the message on to log_message(). - - Arguments are the same as for log_message(). - - XXX This should go to the separate error log. - - """ - - self.log_message(*args) - - def log_message(self, format, *args): - """Log an arbitrary message. - - This is used by all other logging functions. Override - it if you have specific logging wishes. - - The first argument, FORMAT, is a format string for the - message to be logged. If the format string contains - any % escapes requiring parameters, they should be - specified as subsequent arguments (it's just like - printf!). - - The client host and current date/time are prefixed to - every message. 
- - """ - - sys.stderr.write("%s - - [%s] %s\n" % - (self.address_string(), - self.log_date_time_string(), - format%args)) - - def version_string(self): - """Return the server software version string.""" - return self.server_version + ' ' + self.sys_version - - def date_time_string(self, timestamp=None): - """Return the current date and time formatted for a message header.""" - if timestamp is None: - timestamp = time.time() - year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp) - s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % ( - self.weekdayname[wd], - day, self.monthname[month], year, - hh, mm, ss) - return s - - def log_date_time_string(self): - """Return the current time formatted for logging.""" - now = time.time() - year, month, day, hh, mm, ss, x, y, z = time.localtime(now) - s = "%02d/%3s/%04d %02d:%02d:%02d" % ( - day, self.monthname[month], year, hh, mm, ss) - return s - - weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] - - monthname = [None, - 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', - 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] - - def address_string(self): - """Return the client address formatted for logging. - - This version looks up the full hostname using gethostbyaddr(), - and tries to find a name that contains at least one dot. - - """ - - host, port = self.client_address[:2] - return socket.getfqdn(host) - - # Essentially static class variables - - # The version of the HTTP protocol we support. - # Set this to HTTP/1.1 to enable automatic keepalive - protocol_version = "HTTP/1.0" - - # The Message-like class used to parse headers - MessageClass = mimetools.Message - - # Table mapping response codes to messages; entries have the - # form {code: (shortmessage, longmessage)}. - # See RFC 2616. 
- responses = { - 100: ('Continue', 'Request received, please continue'), - 101: ('Switching Protocols', - 'Switching to new protocol; obey Upgrade header'), - - 200: ('OK', 'Request fulfilled, document follows'), - 201: ('Created', 'Document created, URL follows'), - 202: ('Accepted', - 'Request accepted, processing continues off-line'), - 203: ('Non-Authoritative Information', 'Request fulfilled from cache'), - 204: ('No Content', 'Request fulfilled, nothing follows'), - 205: ('Reset Content', 'Clear input form for further input.'), - 206: ('Partial Content', 'Partial content follows.'), - - 300: ('Multiple Choices', - 'Object has several resources -- see URI list'), - 301: ('Moved Permanently', 'Object moved permanently -- see URI list'), - 302: ('Found', 'Object moved temporarily -- see URI list'), - 303: ('See Other', 'Object moved -- see Method and URL list'), - 304: ('Not Modified', - 'Document has not changed since given time'), - 305: ('Use Proxy', - 'You must use proxy specified in Location to access this ' - 'resource.'), - 307: ('Temporary Redirect', - 'Object moved temporarily -- see URI list'), - - 400: ('Bad Request', - 'Bad request syntax or unsupported method'), - 401: ('Unauthorized', - 'No permission -- see authorization schemes'), - 402: ('Payment Required', - 'No payment -- see charging schemes'), - 403: ('Forbidden', - 'Request forbidden -- authorization will not help'), - 404: ('Not Found', 'Nothing matches the given URI'), - 405: ('Method Not Allowed', - 'Specified method is invalid for this server.'), - 406: ('Not Acceptable', 'URI not available in preferred format.'), - 407: ('Proxy Authentication Required', 'You must authenticate with ' - 'this proxy before proceeding.'), - 408: ('Request Timeout', 'Request timed out; try again later.'), - 409: ('Conflict', 'Request conflict.'), - 410: ('Gone', - 'URI no longer exists and has been permanently removed.'), - 411: ('Length Required', 'Client must specify Content-Length.'), - 412: 
('Precondition Failed', 'Precondition in headers is false.'), - 413: ('Request Entity Too Large', 'Entity is too large.'), - 414: ('Request-URI Too Long', 'URI is too long.'), - 415: ('Unsupported Media Type', 'Entity body in unsupported format.'), - 416: ('Requested Range Not Satisfiable', - 'Cannot satisfy request range.'), - 417: ('Expectation Failed', - 'Expect condition could not be satisfied.'), - - 500: ('Internal Server Error', 'Server got itself in trouble'), - 501: ('Not Implemented', - 'Server does not support this operation'), - 502: ('Bad Gateway', 'Invalid responses from another server/proxy.'), - 503: ('Service Unavailable', - 'The server cannot process the request due to a high load'), - 504: ('Gateway Timeout', - 'The gateway server did not receive a timely response'), - 505: ('HTTP Version Not Supported', 'Cannot fulfill request.'), - } - - -def test(HandlerClass = BaseHTTPRequestHandler, - ServerClass = HTTPServer, protocol="HTTP/1.0"): - """Test the HTTP request handler class. - - This runs an HTTP server on port 8000 (or the first command line - argument). - - """ - - if sys.argv[1:]: - port = int(sys.argv[1]) - else: - port = 8000 - server_address = ('', port) - - HandlerClass.protocol_version = protocol - httpd = ServerClass(server_address, HandlerClass) - - sa = httpd.socket.getsockname() - print "Serving HTTP on", sa[0], "port", sa[1], "..." - httpd.serve_forever() - - -if __name__ == '__main__': - test() diff --git a/lib-python/2.5.2/Bastion.py b/lib-python/2.5.2/Bastion.py deleted file mode 100644 --- a/lib-python/2.5.2/Bastion.py +++ /dev/null @@ -1,177 +0,0 @@ -"""Bastionification utility. - -A bastion (for another object -- the 'original') is an object that has -the same methods as the original but does not give access to its -instance variables. Bastions have a number of uses, but the most -obvious one is to provide code executing in restricted mode with a -safe interface to an object implemented in unrestricted mode. 
- -The bastionification routine has an optional second argument which is -a filter function. Only those methods for which the filter method -(called with the method name as argument) returns true are accessible. -The default filter method returns true unless the method name begins -with an underscore. - -There are a number of possible implementations of bastions. We use a -'lazy' approach where the bastion's __getattr__() discipline does all -the work for a particular method the first time it is used. This is -usually fastest, especially if the user doesn't call all available -methods. The retrieved methods are stored as instance variables of -the bastion, so the overhead is only occurred on the first use of each -method. - -Detail: the bastion class has a __repr__() discipline which includes -the repr() of the original object. This is precomputed when the -bastion is created. - -""" - -__all__ = ["BastionClass", "Bastion"] - -from types import MethodType - - -class BastionClass: - - """Helper class used by the Bastion() function. - - You could subclass this and pass the subclass as the bastionclass - argument to the Bastion() function, as long as the constructor has - the same signature (a get() function and a name for the object). - - """ - - def __init__(self, get, name): - """Constructor. - - Arguments: - - get - a function that gets the attribute value (by name) - name - a human-readable name for the original object - (suggestion: use repr(object)) - - """ - self._get_ = get - self._name_ = name - - def __repr__(self): - """Return a representation string. - - This includes the name passed in to the constructor, so that - if you print the bastion during debugging, at least you have - some idea of what it is. - - """ - return "" % self._name_ - - def __getattr__(self, name): - """Get an as-yet undefined attribute value. - - This calls the get() function that was passed to the - constructor. 
The result is stored as an instance variable so - that the next time the same attribute is requested, - __getattr__() won't be invoked. - - If the get() function raises an exception, this is simply - passed on -- exceptions are not cached. - - """ - attribute = self._get_(name) - self.__dict__[name] = attribute - return attribute - - -def Bastion(object, filter = lambda name: name[:1] != '_', - name=None, bastionclass=BastionClass): - """Create a bastion for an object, using an optional filter. - - See the Bastion module's documentation for background. - - Arguments: - - object - the original object - filter - a predicate that decides whether a function name is OK; - by default all names are OK that don't start with '_' - name - the name of the object; default repr(object) - bastionclass - class used to create the bastion; default BastionClass - - """ - - raise RuntimeError, "This code is not secure in Python 2.2 and later" - - # Note: we define *two* ad-hoc functions here, get1 and get2. - # Both are intended to be called in the same way: get(name). - # It is clear that the real work (getting the attribute - # from the object and calling the filter) is done in get1. - # Why can't we pass get1 to the bastion? Because the user - # would be able to override the filter argument! With get2, - # overriding the default argument is no security loophole: - # all it does is call it. - # Also notice that we can't place the object and filter as - # instance variables on the bastion object itself, since - # the user has full access to all instance variables! - - def get1(name, object=object, filter=filter): - """Internal function for Bastion(). See source comments.""" - if filter(name): - attribute = getattr(object, name) - if type(attribute) == MethodType: - return attribute - raise AttributeError, name - - def get2(name, get1=get1): - """Internal function for Bastion(). 
See source comments.""" - return get1(name) - - if name is None: - name = repr(object) - return bastionclass(get2, name) - - -def _test(): - """Test the Bastion() function.""" - class Original: - def __init__(self): - self.sum = 0 - def add(self, n): - self._add(n) - def _add(self, n): - self.sum = self.sum + n - def total(self): - return self.sum - o = Original() - b = Bastion(o) - testcode = """if 1: - b.add(81) - b.add(18) - print "b.total() =", b.total() - try: - print "b.sum =", b.sum, - except: - print "inaccessible" - else: - print "accessible" - try: - print "b._add =", b._add, - except: - print "inaccessible" - else: - print "accessible" - try: - print "b._get_.func_defaults =", map(type, b._get_.func_defaults), - except: - print "inaccessible" - else: - print "accessible" - \n""" - exec testcode - print '='*20, "Using rexec:", '='*20 - import rexec - r = rexec.RExec() - m = r.add_module('__main__') - m.b = b - r.r_exec(testcode) - - -if __name__ == '__main__': - _test() diff --git a/lib-python/2.5.2/CGIHTTPServer.py b/lib-python/2.5.2/CGIHTTPServer.py deleted file mode 100644 --- a/lib-python/2.5.2/CGIHTTPServer.py +++ /dev/null @@ -1,362 +0,0 @@ -"""CGI-savvy HTTP Server. - -This module builds on SimpleHTTPServer by implementing GET and POST -requests to cgi-bin scripts. - -If the os.fork() function is not present (e.g. on Windows), -os.popen2() is used as a fallback, with slightly altered semantics; if -that function is not present either (e.g. on Macintosh), only Python -scripts are supported, and they are executed by the current process. - -In all cases, the implementation is intentionally naive -- all -requests are executed sychronously. - -SECURITY WARNING: DON'T USE THIS CODE UNLESS YOU ARE INSIDE A FIREWALL --- it may execute arbitrary Python code or external programs. - -Note that status code 200 is sent prior to execution of a CGI script, so -scripts cannot send other status codes such as 302 (redirect). 
-""" - - -__version__ = "0.4" - -__all__ = ["CGIHTTPRequestHandler"] - -import os -import sys -import urllib -import BaseHTTPServer -import SimpleHTTPServer -import select - - -class CGIHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): - - """Complete HTTP server with GET, HEAD and POST commands. - - GET and HEAD also support running CGI scripts. - - The POST command is *only* implemented for CGI scripts. - - """ - - # Determine platform specifics - have_fork = hasattr(os, 'fork') - have_popen2 = hasattr(os, 'popen2') - have_popen3 = hasattr(os, 'popen3') - - # Make rfile unbuffered -- we need to read one line and then pass - # the rest to a subprocess, so we can't use buffered input. - rbufsize = 0 - - def do_POST(self): - """Serve a POST request. - - This is only implemented for CGI scripts. - - """ - - if self.is_cgi(): - self.run_cgi() - else: - self.send_error(501, "Can only POST to CGI scripts") - - def send_head(self): - """Version of send_head that support CGI scripts""" - if self.is_cgi(): - return self.run_cgi() - else: - return SimpleHTTPServer.SimpleHTTPRequestHandler.send_head(self) - - def is_cgi(self): - """Test whether self.path corresponds to a CGI script. - - Return a tuple (dir, rest) if self.path requires running a - CGI script, None if not. Note that rest begins with a - slash if it is not empty. - - The default implementation tests whether the path - begins with one of the strings in the list - self.cgi_directories (and the next character is a '/' - or the end of the string). 
- - """ - - path = self.path - - for x in self.cgi_directories: - i = len(x) - if path[:i] == x and (not path[i:] or path[i] == '/'): - self.cgi_info = path[:i], path[i+1:] - return True - return False - - cgi_directories = ['/cgi-bin', '/htbin'] - - def is_executable(self, path): - """Test whether argument path is an executable file.""" - return executable(path) - - def is_python(self, path): - """Test whether argument path is a Python script.""" - head, tail = os.path.splitext(path) - return tail.lower() in (".py", ".pyw") - - def run_cgi(self): - """Execute a CGI script.""" - path = self.path - dir, rest = self.cgi_info - - i = path.find('/', len(dir) + 1) - while i >= 0: - nextdir = path[:i] - nextrest = path[i+1:] - - scriptdir = self.translate_path(nextdir) - if os.path.isdir(scriptdir): - dir, rest = nextdir, nextrest - i = path.find('/', len(dir) + 1) - else: - break - - # find an explicit query string, if present. - i = rest.rfind('?') - if i >= 0: - rest, query = rest[:i], rest[i+1:] - else: - query = '' - - # dissect the part after the directory name into a script name & - # a possible additional path, to be stored in PATH_INFO. 
- i = rest.find('/') - if i >= 0: - script, rest = rest[:i], rest[i:] - else: - script, rest = rest, '' - - scriptname = dir + '/' + script - scriptfile = self.translate_path(scriptname) - if not os.path.exists(scriptfile): - self.send_error(404, "No such CGI script (%r)" % scriptname) - return - if not os.path.isfile(scriptfile): - self.send_error(403, "CGI script is not a plain file (%r)" % - scriptname) - return - ispy = self.is_python(scriptname) - if not ispy: - if not (self.have_fork or self.have_popen2 or self.have_popen3): - self.send_error(403, "CGI script is not a Python script (%r)" % - scriptname) - return - if not self.is_executable(scriptfile): - self.send_error(403, "CGI script is not executable (%r)" % - scriptname) - return - - # Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html - # XXX Much of the following could be prepared ahead of time! - env = {} - env['SERVER_SOFTWARE'] = self.version_string() - env['SERVER_NAME'] = self.server.server_name - env['GATEWAY_INTERFACE'] = 'CGI/1.1' - env['SERVER_PROTOCOL'] = self.protocol_version - env['SERVER_PORT'] = str(self.server.server_port) - env['REQUEST_METHOD'] = self.command - uqrest = urllib.unquote(rest) - env['PATH_INFO'] = uqrest - env['PATH_TRANSLATED'] = self.translate_path(uqrest) - env['SCRIPT_NAME'] = scriptname - if query: - env['QUERY_STRING'] = query - host = self.address_string() - if host != self.client_address[0]: - env['REMOTE_HOST'] = host - env['REMOTE_ADDR'] = self.client_address[0] - authorization = self.headers.getheader("authorization") - if authorization: - authorization = authorization.split() - if len(authorization) == 2: - import base64, binascii - env['AUTH_TYPE'] = authorization[0] - if authorization[0].lower() == "basic": - try: - authorization = base64.decodestring(authorization[1]) - except binascii.Error: - pass - else: - authorization = authorization.split(':') - if len(authorization) == 2: - env['REMOTE_USER'] = authorization[0] - # XXX REMOTE_IDENT - if 
self.headers.typeheader is None: - env['CONTENT_TYPE'] = self.headers.type - else: - env['CONTENT_TYPE'] = self.headers.typeheader - length = self.headers.getheader('content-length') - if length: - env['CONTENT_LENGTH'] = length - accept = [] - for line in self.headers.getallmatchingheaders('accept'): - if line[:1] in "\t\n\r ": - accept.append(line.strip()) - else: - accept = accept + line[7:].split(',') - env['HTTP_ACCEPT'] = ','.join(accept) - ua = self.headers.getheader('user-agent') - if ua: - env['HTTP_USER_AGENT'] = ua - co = filter(None, self.headers.getheaders('cookie')) - if co: - env['HTTP_COOKIE'] = ', '.join(co) - # XXX Other HTTP_* headers - # Since we're setting the env in the parent, provide empty - # values to override previously set values - for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH', - 'HTTP_USER_AGENT', 'HTTP_COOKIE'): - env.setdefault(k, "") - os.environ.update(env) - - self.send_response(200, "Script output follows") - - decoded_query = query.replace('+', ' ') - - if self.have_fork: - # Unix -- fork as we should - args = [script] - if '=' not in decoded_query: - args.append(decoded_query) - nobody = nobody_uid() - self.wfile.flush() # Always flush before forking - pid = os.fork() - if pid != 0: - # Parent - pid, sts = os.waitpid(pid, 0) - # throw away additional data [see bug #427345] - while select.select([self.rfile], [], [], 0)[0]: - if not self.rfile.read(1): - break - if sts: - self.log_error("CGI script exit status %#x", sts) - return - # Child - try: - try: - os.setuid(nobody) - except os.error: - pass - os.dup2(self.rfile.fileno(), 0) - os.dup2(self.wfile.fileno(), 1) - os.execve(scriptfile, args, os.environ) - except: - self.server.handle_error(self.request, self.client_address) - os._exit(127) - - elif self.have_popen2 or self.have_popen3: - # Windows -- use popen2 or popen3 to create a subprocess - import shutil - if self.have_popen3: - popenx = os.popen3 - else: - popenx = os.popen2 - cmdline = scriptfile - if 
self.is_python(scriptfile): - interp = sys.executable - if interp.lower().endswith("w.exe"): - # On Windows, use python.exe, not pythonw.exe - interp = interp[:-5] + interp[-4:] - cmdline = "%s -u %s" % (interp, cmdline) - if '=' not in query and '"' not in query: - cmdline = '%s "%s"' % (cmdline, query) - self.log_message("command: %s", cmdline) - try: - nbytes = int(length) - except (TypeError, ValueError): - nbytes = 0 - files = popenx(cmdline, 'b') - fi = files[0] - fo = files[1] - if self.have_popen3: - fe = files[2] - if self.command.lower() == "post" and nbytes > 0: - data = self.rfile.read(nbytes) - fi.write(data) - # throw away additional data [see bug #427345] - while select.select([self.rfile._sock], [], [], 0)[0]: - if not self.rfile._sock.recv(1): - break - fi.close() - shutil.copyfileobj(fo, self.wfile) - if self.have_popen3: - errors = fe.read() - fe.close() - if errors: - self.log_error('%s', errors) - sts = fo.close() - if sts: - self.log_error("CGI script exit status %#x", sts) - else: - self.log_message("CGI script exited OK") - - else: - # Other O.S. 
-- execute script in this process - save_argv = sys.argv - save_stdin = sys.stdin - save_stdout = sys.stdout - save_stderr = sys.stderr - try: - save_cwd = os.getcwd() - try: - sys.argv = [scriptfile] - if '=' not in decoded_query: - sys.argv.append(decoded_query) - sys.stdout = self.wfile - sys.stdin = self.rfile - execfile(scriptfile, {"__name__": "__main__"}) - finally: - sys.argv = save_argv - sys.stdin = save_stdin - sys.stdout = save_stdout - sys.stderr = save_stderr - os.chdir(save_cwd) - except SystemExit, sts: - self.log_error("CGI script exit status %s", str(sts)) - else: - self.log_message("CGI script exited OK") - - -nobody = None - -def nobody_uid(): - """Internal routine to get nobody's uid""" - global nobody - if nobody: - return nobody - try: - import pwd - except ImportError: - return -1 - try: - nobody = pwd.getpwnam('nobody')[2] - except KeyError: - nobody = 1 + max(map(lambda x: x[2], pwd.getpwall())) - return nobody - - -def executable(path): - """Test for executable file.""" - try: - st = os.stat(path) - except os.error: - return False - return st.st_mode & 0111 != 0 - - -def test(HandlerClass = CGIHTTPRequestHandler, - ServerClass = BaseHTTPServer.HTTPServer): - SimpleHTTPServer.test(HandlerClass, ServerClass) - - -if __name__ == '__main__': - test() diff --git a/lib-python/2.5.2/ConfigParser.py b/lib-python/2.5.2/ConfigParser.py deleted file mode 100644 --- a/lib-python/2.5.2/ConfigParser.py +++ /dev/null @@ -1,640 +0,0 @@ -"""Configuration file parser. - -A setup file consists of sections, lead by a "[section]" header, -and followed by "name: value" entries, with continuations and such in -the style of RFC 822. - -The option values can contain format strings which refer to other values in -the same section, or values in a special [DEFAULT] section. - -For example: - - something: %(dir)s/whatever - -would resolve the "%(dir)s" to the value of dir. All reference -expansions are done late, on demand. 
- -Intrinsic defaults can be specified by passing them into the -ConfigParser constructor as a dictionary. - -class: - -ConfigParser -- responsible for parsing a list of - configuration files, and managing the parsed database. - - methods: - - __init__(defaults=None) - create the parser and specify a dictionary of intrinsic defaults. The - keys must be strings, the values must be appropriate for %()s string - interpolation. Note that `__name__' is always an intrinsic default; - its value is the section's name. - - sections() - return all the configuration section names, sans DEFAULT - - has_section(section) - return whether the given section exists - - has_option(section, option) - return whether the given option exists in the given section - - options(section) - return list of configuration options for the named section - - read(filenames) - read and parse the list of named configuration files, given by - name. A single filename is also allowed. Non-existing files - are ignored. Return list of successfully read files. - - readfp(fp, filename=None) - read and parse one configuration file, given as a file object. - The filename defaults to fp.name; it is only used in error - messages (if fp has no `name' attribute, the string `' is used). - - get(section, option, raw=False, vars=None) - return a string value for the named option. All % interpolations are - expanded in the return values, based on the defaults passed into the - constructor and the DEFAULT section. Additional substitutions may be - provided using the `vars' argument, which must be a dictionary whose - contents override any pre-existing defaults. - - getint(section, options) - like get(), but convert value to an integer - - getfloat(section, options) - like get(), but convert value to a float - - getboolean(section, options) - like get(), but convert value to a boolean (currently case - insensitively defined as 0, false, no, off for False, and 1, true, - yes, on for True). Returns False or True. 
- - items(section, raw=False, vars=None) - return a list of tuples with (name, value) for each option - in the section. - - remove_section(section) - remove the given file section and all its options - - remove_option(section, option) - remove the given option from the given section - - set(section, option, value) - set the given option - - write(fp) - write the configuration state in .ini format -""" - -import re - -__all__ = ["NoSectionError", "DuplicateSectionError", "NoOptionError", - "InterpolationError", "InterpolationDepthError", - "InterpolationSyntaxError", "ParsingError", - "MissingSectionHeaderError", - "ConfigParser", "SafeConfigParser", "RawConfigParser", - "DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"] - -DEFAULTSECT = "DEFAULT" - -MAX_INTERPOLATION_DEPTH = 10 - - - -# exception classes -class Error(Exception): - """Base class for ConfigParser exceptions.""" - - def __init__(self, msg=''): - self.message = msg - Exception.__init__(self, msg) - - def __repr__(self): - return self.message - - __str__ = __repr__ - -class NoSectionError(Error): - """Raised when no section matches a requested option.""" - - def __init__(self, section): - Error.__init__(self, 'No section: %r' % (section,)) - self.section = section - -class DuplicateSectionError(Error): - """Raised when a section is multiply-created.""" - - def __init__(self, section): - Error.__init__(self, "Section %r already exists" % section) - self.section = section - -class NoOptionError(Error): - """A requested option was not found.""" - - def __init__(self, option, section): - Error.__init__(self, "No option %r in section: %r" % - (option, section)) - self.option = option - self.section = section - -class InterpolationError(Error): - """Base class for interpolation-related exceptions.""" - - def __init__(self, option, section, msg): - Error.__init__(self, msg) - self.option = option - self.section = section - -class InterpolationMissingOptionError(InterpolationError): - """A string substitution required a 
setting which was not available.""" - - def __init__(self, option, section, rawval, reference): - msg = ("Bad value substitution:\n" - "\tsection: [%s]\n" - "\toption : %s\n" - "\tkey : %s\n" - "\trawval : %s\n" - % (section, option, reference, rawval)) - InterpolationError.__init__(self, option, section, msg) - self.reference = reference - -class InterpolationSyntaxError(InterpolationError): - """Raised when the source text into which substitutions are made - does not conform to the required syntax.""" - -class InterpolationDepthError(InterpolationError): - """Raised when substitutions are nested too deeply.""" - - def __init__(self, option, section, rawval): - msg = ("Value interpolation too deeply recursive:\n" - "\tsection: [%s]\n" - "\toption : %s\n" - "\trawval : %s\n" - % (section, option, rawval)) - InterpolationError.__init__(self, option, section, msg) - -class ParsingError(Error): - """Raised when a configuration file does not follow legal syntax.""" - - def __init__(self, filename): - Error.__init__(self, 'File contains parsing errors: %s' % filename) - self.filename = filename - self.errors = [] - - def append(self, lineno, line): - self.errors.append((lineno, line)) - self.message += '\n\t[line %2d]: %s' % (lineno, line) - -class MissingSectionHeaderError(ParsingError): - """Raised when a key-value pair is found before any section header.""" - - def __init__(self, filename, lineno, line): - Error.__init__( - self, - 'File contains no section headers.\nfile: %s, line: %d\n%r' % - (filename, lineno, line)) - self.filename = filename - self.lineno = lineno - self.line = line - - - -class RawConfigParser: - def __init__(self, defaults=None): - self._sections = {} - self._defaults = {} - if defaults: - for key, value in defaults.items(): - self._defaults[self.optionxform(key)] = value - - def defaults(self): - return self._defaults - - def sections(self): - """Return a list of section names, excluding [DEFAULT]""" - # self._sections will never have 
[DEFAULT] in it - return self._sections.keys() - - def add_section(self, section): - """Create a new section in the configuration. - - Raise DuplicateSectionError if a section by the specified name - already exists. - """ - if section in self._sections: - raise DuplicateSectionError(section) - self._sections[section] = {} - - def has_section(self, section): - """Indicate whether the named section is present in the configuration. - - The DEFAULT section is not acknowledged. - """ - return section in self._sections - - def options(self, section): - """Return a list of option names for the given section name.""" - try: - opts = self._sections[section].copy() - except KeyError: - raise NoSectionError(section) - opts.update(self._defaults) - if '__name__' in opts: - del opts['__name__'] - return opts.keys() - - def read(self, filenames): - """Read and parse a filename or a list of filenames. - - Files that cannot be opened are silently ignored; this is - designed so that you can specify a list of potential - configuration file locations (e.g. current directory, user's - home directory, systemwide directory), and all existing - configuration files in the list will be read. A single - filename may also be given. - - Return list of successfully read files. - """ - if isinstance(filenames, basestring): - filenames = [filenames] - read_ok = [] - for filename in filenames: - try: - fp = open(filename) - except IOError: - continue - self._read(fp, filename) - fp.close() - read_ok.append(filename) - return read_ok - - def readfp(self, fp, filename=None): - """Like read() but the argument must be a file-like object. - - The `fp' argument must have a `readline' method. Optional - second argument is the `filename', which if not given, is - taken from fp.name. If fp has no `name' attribute, `' is - used. 
- - """ - if filename is None: - try: - filename = fp.name - except AttributeError: - filename = '' - self._read(fp, filename) - - def get(self, section, option): - opt = self.optionxform(option) - if section not in self._sections: - if section != DEFAULTSECT: - raise NoSectionError(section) - if opt in self._defaults: - return self._defaults[opt] - else: - raise NoOptionError(option, section) - elif opt in self._sections[section]: - return self._sections[section][opt] - elif opt in self._defaults: - return self._defaults[opt] - else: - raise NoOptionError(option, section) - - def items(self, section): - try: - d2 = self._sections[section] - except KeyError: - if section != DEFAULTSECT: - raise NoSectionError(section) - d2 = {} - d = self._defaults.copy() - d.update(d2) - if "__name__" in d: - del d["__name__"] - return d.items() - - def _get(self, section, conv, option): - return conv(self.get(section, option)) - - def getint(self, section, option): - return self._get(section, int, option) - - def getfloat(self, section, option): - return self._get(section, float, option) - - _boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True, - '0': False, 'no': False, 'false': False, 'off': False} - - def getboolean(self, section, option): - v = self.get(section, option) - if v.lower() not in self._boolean_states: - raise ValueError, 'Not a boolean: %s' % v - return self._boolean_states[v.lower()] - - def optionxform(self, optionstr): - return optionstr.lower() - - def has_option(self, section, option): - """Check for the existence of a given option in a given section.""" - if not section or section == DEFAULTSECT: - option = self.optionxform(option) - return option in self._defaults - elif section not in self._sections: - return False - else: - option = self.optionxform(option) - return (option in self._sections[section] - or option in self._defaults) - - def set(self, section, option, value): - """Set an option.""" - if not section or section == DEFAULTSECT: - 
sectdict = self._defaults - else: - try: - sectdict = self._sections[section] - except KeyError: - raise NoSectionError(section) - sectdict[self.optionxform(option)] = value - - def write(self, fp): - """Write an .ini-format representation of the configuration state.""" - if self._defaults: - fp.write("[%s]\n" % DEFAULTSECT) - for (key, value) in self._defaults.items(): - fp.write("%s = %s\n" % (key, str(value).replace('\n', '\n\t'))) - fp.write("\n") - for section in self._sections: - fp.write("[%s]\n" % section) - for (key, value) in self._sections[section].items(): - if key != "__name__": - fp.write("%s = %s\n" % - (key, str(value).replace('\n', '\n\t'))) - fp.write("\n") - - def remove_option(self, section, option): - """Remove an option.""" - if not section or section == DEFAULTSECT: - sectdict = self._defaults - else: - try: - sectdict = self._sections[section] - except KeyError: - raise NoSectionError(section) - option = self.optionxform(option) - existed = option in sectdict - if existed: - del sectdict[option] - return existed - - def remove_section(self, section): - """Remove a file section.""" - existed = section in self._sections - if existed: - del self._sections[section] - return existed - - # - # Regular expressions for parsing section headers and options. - # - SECTCRE = re.compile( - r'\[' # [ - r'(?P

    [^]]+)' # very permissive! - r'\]' # ] - ) - OPTCRE = re.compile( - r'(?P

  • - +
    diff --git a/compat.html b/compat.html --- a/compat.html +++ b/compat.html @@ -38,7 +38,7 @@

    - +
    diff --git a/contact.html b/contact.html --- a/contact.html +++ b/contact.html @@ -38,7 +38,7 @@

    - +
    diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -1,5 +1,6 @@

    - +
    diff --git a/features.html b/features.html --- a/features.html +++ b/features.html @@ -38,7 +38,7 @@

    - +
    diff --git a/howtohelp.html b/howtohelp.html --- a/howtohelp.html +++ b/howtohelp.html @@ -38,7 +38,7 @@

    - +
    diff --git a/index.html b/index.html --- a/index.html +++ b/index.html @@ -38,7 +38,7 @@

    - +
    diff --git a/js/script2.js b/js/script2.js --- a/js/script2.js +++ b/js/script2.js @@ -5,6 +5,12 @@ }); } +function stm_donate() { + $.get("don4.html#", function (html) { + $("#sidebar").html(html); + }); +} + function general_donate() { $.get("don2.html#", function (html) { $("#sidebar").html(html); @@ -18,5 +24,5 @@ } $(document).ready(function() { - numpy_donate(); + stm_donate(); }); \ No newline at end of file diff --git a/numpydonate.html b/numpydonate.html --- a/numpydonate.html +++ b/numpydonate.html @@ -38,7 +38,7 @@

    - +
    diff --git a/people.html b/people.html --- a/people.html +++ b/people.html @@ -38,7 +38,7 @@

    - +
    diff --git a/performance.html b/performance.html --- a/performance.html +++ b/performance.html @@ -38,7 +38,7 @@

    - +
    diff --git a/py3donate.html b/py3donate.html --- a/py3donate.html +++ b/py3donate.html @@ -38,7 +38,7 @@

    - +
    diff --git a/source/_layouts/site.genshi b/source/_layouts/site.genshi --- a/source/_layouts/site.genshi +++ b/source/_layouts/site.genshi @@ -15,6 +15,7 @@ ('Contact', 'contact.html'), ('Py3k donations', 'py3donate.html'), ('NumPy donations', 'numpydonate.html'), + ('STM donations', 'tmdonate.html'), ], } diff --git a/source/tmdonate.txt b/source/tmdonate.txt new file mode 100644 --- /dev/null +++ b/source/tmdonate.txt @@ -0,0 +1,389 @@ +--- +layout: page +title: Call for donations - Transactional Memory in PyPy +--- + +==================== +Transactional Memory +==================== + + +Introduction +============ + +In the presence of today's machines with multiple processors, Python +progress is lagging behind: on any CPU-constrained program, developers +have a difficult choice to make. They can use in-process solutions that +do not offer multi-CPU usage. In this respect, the natural choice +nowadays is to use Twisted or other event-based paradigms, or systems +that hide events in the control flow, like Stackless; or alternatively, +they can use the existing ``threading`` module, with its associated GIL +and the complexities of real multi-threaded programming (locks, +deadlocks, races, etc.), which make this solution less attractive. The +big alternative is for them to rely on one of various multi-process +solutions that are outside the scope of the core language; all of them +in some way or another are hacks that require extra knowledge and time +to use and that have an impact on the structure of the whole program. + +This proposal is about researching and implementing Transactional Memory +in PyPy. This is a technique that recently came to the front of the +multi-core scene. 
It promises to offer multi-core CPU usage without +requiring to fall back to the multi-process solutions described above, +and also without using the ``threading`` module --- just as a small, +local extension of the programming language that would be used only in +the core of the event loops. + +(Jump directly to `What Python interface will I use?`_ for practical +details.) + + +In more details +=============== + +This is a call for financial help in researching and implementing a +version of PyPy able to use multiple processors in a single process. +This will give a GIL-less Python, i.e. a Python that runs without the +infamous Global Interpreter Lock. + +The basic ideas to do it have been discussed on pypy-dev `[1]`__ `[2]`__ +and on a blog post `[3]`__. +The goal is to adapt Transactional Memory --- currently only available +as software, but `soon available as hardware`_ --- to the task of running +sections of Python code in parallel on multiple processors, while giving +the programmer the illusion of serial execution. It is called below +"PyPy-TM". + +.. __: http://mail.python.org/pipermail/pypy-dev/2011-August/008153.html +.. __: http://mail.python.org/pipermail/pypy-dev/2012-January/009034.html +.. __: http://morepypy.blogspot.com/2012/01/transactional-memory-ii.html + +The main developer will be Armin Rigo. +This is a "researchy" goal in the sense of us not being quite sure of +the performance of the result. We currently estimate the one-year +performance goal at 2x-to-5x slower than regular PyPy in fully serial +applications. We feel confident that it can work, though, in the +following sense: the performance of PyPy-TM running suited applications +should scale linearly or close-to-linearly with the number of processors. +This means that you just need a machine with at least 4 or 8 processors, +which is already common today and will be even more so in one or two +years. + +You will find below a sketch of the `work plan`_. 
If more money than +requested is collected, then the excess will be entered into the general +PyPy pot, used for example to finance sprint travel costs to students. + +**Note** For donations higher than $1,000, we can arrange for an invoice +and a different payment method to avoid the high Paypal fees. Please +contact pypy at sfconservancy.org if you want to know details on how +to donate via other means. + + +What is the Global Interpreter Lock? +------------------------------------ + +The GIL, or Global Interpreter Lock, is a single lock in both CPython +and (so far) PyPy, that all threads must acquire in order to execute +Python bytecodes. This means that so far, in Python, even when using +threads we do not gain any benefit in term of multicore performance. + + +What is Transactional Memory? +----------------------------- + +`Transactional Memory`_ (TM) is a technique imported from databases: every +time we want to do a change to the processors' main memory, we do it in +a "transaction". Multiple transactions can be executed in parallel by +multiple cores. When a transaction is complete, we try to commit it. +This might either succeed, or (if another transaction committed +incompatible changes) fail. If it fails, which is hopefully rare, we +need to restart the transaction from scratch. + +.. _`Transactional Memory`: http://en.wikipedia.org/wiki/Transactional_memory + + +Why hasn't the idea been implemented for CPython already? +--------------------------------------------------------- + +Because of the additional complexity required, and mostly, because of +performance issues. There have been some experiments already with +CPython on *Hardware* Transactional Memory: + +* `Riley and Zilles (2006)`__ +* `Tabba (2010)`__ + +.. __: http://sabi.net/nriley/pubs/dls6-riley.pdf +.. 
__: http://www.cs.auckland.ac.nz/~fuad/parpycan.pdf + +The motivation for using PyPy instead of CPython is the extra +flexibility of the general approach, as well as the ability to utilize +the JIT in order to remove part of the overhead that comes with +*Software* Transactional Memory. + + +A GIL-less Python is impossible. +-------------------------------- + +This is a classic criticism of research-oriented projects. We believe +that the `work plan`_ plan below can make a serious impact on considering +possible a GIL-less Python. We believe we can do it, but at the +very least, even if this work generates a negative result, the negative +result will document the challenges faced should someone else want to +reattempt the idea in the future. + +Nevertheless other projects, such as Psyco (also by Armin) and PyPy +itself, were called impossible before they were attempted, and have +hitherto been very successful. + + +Why do it with PyPy instead of CPython? +--------------------------------------- + +Because PyPy is designed to be open to this kind of research. This will +require no work in the Python interpreter part of PyPy, and instead we +can focus on e.g. the concurrent garbage collection and the JIT issues. +`Riley and Zilles`__ have also experimented with Hardware Transactional +Memory using PyPy. By contrast, for example, CPython is stuck with +reference counting, which is known to be an issue for Transactional +Memory (`Tabba`__ proposes to give up and use Boehm_, which is a bad +idea in our experience, particularly because of scalability issues). + +.. __: http://sabi.net/nriley/pubs/dls6-riley.pdf +.. __: http://www.cs.auckland.ac.nz/~fuad/parpycan.pdf +.. _Boehm: http://www.hpl.hp.com/personal/Hans_Boehm/gc/ + + +What Python interface will I use? +--------------------------------- + +Previous attempts on Hardware +Transactional Memory focused on parallelizing existing programs written +using the ``thread`` or ``threading`` modules. 
However, as argued +here__, this may not be the most practical way to achieve real +multithreading; it seems that better alternatives would offer good +scalability too. Notably, TM could benefit any event-based system that +is written to dispatch events serially (Twisted-based, most GUI toolkit, +Stackless, gevent, and so on). The events would internally be processed +in parallel, while maintaining the illusion of serial execution, with +all the corresponding benefits of safety. This should be possible with minimal +changes to the event dispatchers. This approach has been described by the +`Automatic Mutual Exclusion`_ work at Microsoft Research, but not been +implemented anywhere (to the best of our knowledge). + +.. _`Automatic Mutual Exclusion`: http://research.microsoft.com/en-us/projects/ame/default.aspx + +Note that, yes, this gives you both sides of the coin: you keep using +your non-thread-based program (without worrying about locks and their +drawbacks like deadlocks, races, and friends), *and* your programs +benefit from all your cores. + +.. __: http://mail.python.org/pipermail/pypy-dev/2012-January/009044.html + +In more details, a low-level built-in module will provide the basics to +start transactions in parallel; but this module will be only used +internally in a tweaked version of, say, a Twisted reactor. Using this +reactor will be enough for your existing Twisted-based programs to +actually run on multiple cores. You --- as a developer of the +Twisted-based program --- have only to care about improving the +parallelizability of your program (e.g. by splitting time-consuming +transactions into several parts; the exact rules will be published in +detail once they are known). But the point is that your program is +always correct. + + +Speed +----- + +We estimate the one-year performance target to be 2x-to-5x slower than +the performance of the regular PyPy in fully serial applications. 
(Of +course, the regular PyPy will not disappear; for the foreseeable future, +PyPy-TM will be an alternative executable.) + +The performance of PyPy-TM running suited applications should scale +linearly or close-to-linearly with the number of processor. This means +that in order to see the benefits, you just need a machine with at least +4 or 8 processors --- which is already common today and will be even +more so in one or two years. + + +.. _`soon available as hardware`: + +Hardware Transactional Memory +----------------------------- + +The performance of PyPy-TM running on Hardware Transactional Memory is +unknown so far, but should ideally be close to the performance of a +regular PyPy. + +In more details: This proposal is for work based entirely on *Software* +Transactional Memory. However, in the future the ideas and most of the +code should map directly to Hardware Transactional Memory (HTM). We +expect HTM to reduce a lot the cost of some of the issues that we face, +but not completely remove it. For example, `AMD's old proposal`_ was +that there would be two ways to emit memory-referencing +instructions: one that goes via the HTM mechanisms, and one (the regular +one) which doesn't. Choosing the best one on a +case-by-case basis in the JIT makes a difference in +performance (although of course not as great as with Software +Transactional Memory). + +`Intel's current proposal`_ on Haswell_ processors does not have this +distinction, which means transactions are likely to quickly overflow the +internal buffers. As a result, the first generation HTM-capable +processors may not be suited for the approach described here. But +this depends on details like the capacity of the hardware buffers that +are still secret at this point. + +(Note also that HTM does not solve some of the issues for implementing +Transactional Memory in CPython, notably the issue with reference +counting. 
We will have to wait for a real CPython experiment before +we can settle this question. Also, this would "just" remove the GIL +but not offer a multi-core version of non-thread-based programs.) + +.. _`AMD's old proposal`: http://developer.amd.com/tools/ASF/Pages/default.aspx +.. _`Intel's current proposal`: http://software.intel.com/en-us/avx/ +.. _Haswell: http://en.wikipedia.org/wiki/Haswell_%28microarchitecture%29 + + +Alternatives +------------ + +PyPy-TM will be slower than judicious usage of existing alternatives, +based on multiple processes that communicate with each other in one way +or another. The counter-argument is that TM is not only a cleaner +solution: there are cases in which it is not doable to organize (or +retrofit) an existing program into the particular format needed for the +alternatives. In particular, small quickly-written programs don't need +the additional baggage of cross-process communication, and large +programs can sometimes be almost impossible to turn into multi-process +versions. By constrast, we believe that TM can fit naturally into most +programs, because it only requires local changes to some dispatcher; the +rest of the program should work without changes. + + +More readings +------------- + +* `Original blog post`__ +* pypy-dev mails `[1]`__ `[2]`__ +* `The most recent blog post`__ + +.. __: http://morepypy.blogspot.com/2011/08/we-need-software-transactional-memory.html +.. __: http://mail.python.org/pipermail/pypy-dev/2011-August/008153.html +.. __: http://mail.python.org/pipermail/pypy-dev/2012-January/009034.html +.. __: http://morepypy.blogspot.com/2012/01/transactional-memory-ii.html + + +Work plan +========= + +This is an very rough estimate of the amount of work it would take to +complete the steps for an experienced developer who is already familiar +with the PyPy codebase. 
As this is a research proposal, we cannot +guarantee the time estimates here, but we do agree to report regularly to +the community, so our progress can be followed publicly. + +Paid work will be at $60/hour, but at least one developer who will work on +the project --- Armin Rigo --- has committed to 2 hours +of volunteer work per paid hour (so +the total amount of money that we ask is divided by three). A 5% general +donation will go to the `Software Freedom Conservancy`_ itself, the +non-profit organization of which the PyPy project is a member and which +manages all the issues related to donations, payments, and tax-exempt +status. + +.. _`Software Freedom Conservancy`: http://sfconservancy.org/ +.. _rstm: http://www.cs.rochester.edu/research/synchronization/rstm/ + + +* **STM Library**: + + This part covers adapting an existing STM library for PyPy. It is + already mostly done (based on rstm_), but additional tweaks may be + required. + +* **Basic tweaks of the translation process**: + + This part covers tweaks needed during the translation process in + order to generate an STM-aware version of the RPython programs, + including PyPy itself. It is partly done, but not finished. + Estimate: 1 month. + +* **Garbage collection**: + + We need a different garbage collector that is able to cope at least + with concurrent allocations. From there, improving the situation is + its own open-ended subproject: we can add for example various kinds of + parallel collection, synchronized or unsynchronized root tracing, + etc. Estimate for the basic part: 2 months. Estimate for the rest: + 4 extra months. + +* **User interface**: + + This is the problem of designing and implementing some interface or + interfaces for the Python programmer. We put it in its own category + because of its "end-user" importance. Estimate: 2 months. + +* **JIT integration**: + + The above would give us a (probably very slow) version of PyPy-TM. 
+ This final part is to integrate it with the JIT compiler generator. + The main issue we foresee is integration with the new GC, detecting + with object flags or JIT optimizations which objects need + transactional memory status or not. We think that with enough such + optimizations we can seriously lower the overhead of PyPy-TM, maybe + down to 2x slower than a regular PyPy or better. Estimate: unknown; + at least 4 months. + +* **Longer term**: + + In the longer term, we might need to refine the TM processing done + above, for example to better support I/O (e.g. we can queue the writes + done to a log file) or to add some special fine-grained support + (e.g. two transactions that each do ``samelist.append()`` do not need + to conflict in the simple case). This part is not included + in the estimates. + +Total: 5 months for the initial version; at least 8 additional months +for the fast version. We will go with a total estimate of 15 months, +corresponding to USD$151200. The amount sought by this fundraising +campaign, considering the 2 volunteer hours per paid hour is thus USD$50400. + + +Benefits of This Work to the Python Community and the General Public +==================================================================== + +Python has become one of the most popular dynamic programming languages in +the world. Web developers, educators, and scientific programmers alike +all value Python because Python code is often more readable and because +Python often increases programmer productivity. + +Traditionally, languages like Python ran more slowly than static, compiled +languages; Python developers chose to sacrifice execution speed for ease +of programming. The PyPy project created a substantially improved Python +language implementation, including a fast Just-in-time (JIT) compiler. 
+The increased execution speed that PyPy provides has attracted many users, +who now find their Python code runs up to four times faster under PyPy +than under the reference implementation written in C. + +However, in the presence of today's machines with multiple processors, +Python progress lags behind. The issue has been described in the +introduction: developers that really need to use multiple CPUs are +constrained to select and use one of the multi-process solutions that +are all in some way or another hacks requiring extra knowledge and +efforts to use. The focus of the work described in this proposal is to +offer an alternative in the core of the Python language --- an +alternative that can naturally integrate with the rest of the program. +This alternative will be implemented in PyPy. + +PyPy's developers make all PyPy software available to the public without +charge, under PyPy's Open Source copyright license, the permissive MIT +License. PyPy's license assures that PyPy is equally available to +everyone freely on terms that allow both non-commercial and commercial +activity. This license allows for academics, for-profit software +developers, volunteers and enthusiasts alike to collaborate together to +make a better Python implementation for everyone. + +PyPy-TM will be available under the same license. Being licensed freely +to the general public means that opportunities to use, improve and learn +about how Transactional Memory works itself will be generally available +to everyone. diff --git a/sponsor.html b/sponsor.html --- a/sponsor.html +++ b/sponsor.html @@ -38,7 +38,7 @@

    - +
    diff --git a/success.html b/success.html --- a/success.html +++ b/success.html @@ -38,7 +38,7 @@

    - +
    diff --git a/tmdonate.html b/tmdonate.html new file mode 100644 --- /dev/null +++ b/tmdonate.html @@ -0,0 +1,355 @@ + + + + PyPy :: Call for donations - Transactional Memory in PyPy + + + + + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Call for donations - Transactional Memory in PyPy

    +
    +

    Introduction

    +

    In the presence of today's machines with multiple processors, Python +progress is lagging behind: on any CPU-constrained program, developers +have a difficult choice to make. They can use in-process solutions that +do not offer multi-CPU usage. In this respect, the natural choice +nowadays is to use Twisted or other event-based paradigms, or systems +that hide events in the control flow, like Stackless; or alternatively, +they can use the existing threading module, with its associated GIL +and the complexities of real multi-threaded programming (locks, +deadlocks, races, etc.), which make this solution less attractive. The +big alternative is for them to rely on one of various multi-process +solutions that are outside the scope of the core language; all of them +in some way or another are hacks that require extra knowledge and time +to use and that have an impact on the structure of the whole program.

    +

    This proposal is about researching and implementing Transactional Memory +in PyPy. This is a technique that recently came to the front of the +multi-core scene. It promises to offer multi-core CPU usage without +requiring to fall back to the multi-process solutions described above, +and also without using the threading module – just as a small, +local extension of the programming language that would be used only in +the core of the event loops.

    +

    (Jump directly to What Python interface will I use? for practical +details.)

    +
    +
    +

    In more details

    +

    This is a call for financial help in researching and implementing a +version of PyPy able to use multiple processors in a single process. +This will give a GIL-less Python, i.e. a Python that runs without the +infamous Global Interpreter Lock.

    +

    The basic ideas to do it have been discussed on pypy-dev [1] [2] +and on a blog post [3]. +The goal is to adapt Transactional Memory – currently only available +as software, but soon available as hardware – to the task of running +sections of Python code in parallel on multiple processors, while giving +the programmer the illusion of serial execution. It is called below +“PyPy-TM”.

    +

    The main developer will be Armin Rigo. +This is a “researchy” goal in the sense of us not being quite sure of +the performance of the result. We currently estimate the one-year +performance goal at 2x-to-5x slower than regular PyPy in fully serial +applications. We feel confident that it can work, though, in the +following sense: the performance of PyPy-TM running suited applications +should scale linearly or close-to-linearly with the number of processors. +This means that you just need a machine with at least 4 or 8 processors, +which is already common today and will be even more so in one or two +years.

    +

    You will find below a sketch of the work plan. If more money than +requested is collected, then the excess will be entered into the general +PyPy pot, used for example to finance sprint travel costs to students.

    +

    Note For donations higher than $1,000, we can arrange for an invoice +and a different payment method to avoid the high Paypal fees. Please +contact pypy at sfconservancy.org if you want to know details on how +to donate via other means.

    +
    +

    What is the Global Interpreter Lock?

    +

    The GIL, or Global Interpreter Lock, is a single lock in both CPython +and (so far) PyPy, that all threads must acquire in order to execute +Python bytecodes. This means that so far, in Python, even when using +threads we do not gain any benefit in terms of multicore performance.

    +
    +
    +

    What is Transactional Memory?

    +

    Transactional Memory (TM) is a technique imported from databases: every +time we want to do a change to the processors' main memory, we do it in +a “transaction”. Multiple transactions can be executed in parallel by +multiple cores. When a transaction is complete, we try to commit it. +This might either succeed, or (if another transaction committed +incompatible changes) fail. If it fails, which is hopefully rare, we +need to restart the transaction from scratch.

    +
    +
    +

    Why hasn't the idea been implemented for CPython already?

    +

    Because of the additional complexity required, and mostly, because of +performance issues. There have been some experiments already with +CPython on Hardware Transactional Memory:

    + +

    The motivation for using PyPy instead of CPython is the extra +flexibility of the general approach, as well as the ability to utilize +the JIT in order to remove part of the overhead that comes with +Software Transactional Memory.

    +
    +
    +

    A GIL-less Python is impossible.

    +

    This is a classic criticism of research-oriented projects. We believe +that the work plan below can make a serious impact on considering +possible a GIL-less Python. We believe we can do it, but at the +very least, even if this work generates a negative result, the negative +result will document the challenges faced should someone else want to +reattempt the idea in the future.

    +

    Nevertheless other projects, such as Psyco (also by Armin) and PyPy +itself, were called impossible before they were attempted, and have +hitherto been very successful.

    +
    +
    +

    Why do it with PyPy instead of CPython?

    +

    Because PyPy is designed to be open to this kind of research. This will +require no work in the Python interpreter part of PyPy, and instead we +can focus on e.g. the concurrent garbage collection and the JIT issues. +Riley and Zilles have also experimented with Hardware Transactional +Memory using PyPy. By contrast, for example, CPython is stuck with +reference counting, which is known to be an issue for Transactional +Memory (Tabba proposes to give up and use Boehm, which is a bad +idea in our experience, particularly because of scalability issues).

    +
    +
    +

    What Python interface will I use?

    +

    Previous attempts on Hardware +Transactional Memory focused on parallelizing existing programs written +using the thread or threading modules. However, as argued +here, this may not be the most practical way to achieve real +multithreading; it seems that better alternatives would offer good +scalability too. Notably, TM could benefit any event-based system that +is written to dispatch events serially (Twisted-based, most GUI toolkits, +Stackless, gevent, and so on). The events would internally be processed +in parallel, while maintaining the illusion of serial execution, with +all the corresponding benefits of safety. This should be possible with minimal +changes to the event dispatchers. This approach has been described by the +Automatic Mutual Exclusion work at Microsoft Research, but has not been +implemented anywhere (to the best of our knowledge).

    +

    Note that, yes, this gives you both sides of the coin: you keep using +your non-thread-based program (without worrying about locks and their +drawbacks like deadlocks, races, and friends), and your programs +benefit from all your cores.

    +

    In more details, a low-level built-in module will provide the basics to +start transactions in parallel; but this module will be only used +internally in a tweaked version of, say, a Twisted reactor. Using this +reactor will be enough for your existing Twisted-based programs to +actually run on multiple cores. You – as a developer of the +Twisted-based program – have only to care about improving the +parallelizability of your program (e.g. by splitting time-consuming +transactions into several parts; the exact rules will be published in +detail once they are known). But the point is that your program is +always correct.

    +
    +
    +

    Speed

    +

    We estimate the one-year performance target to be 2x-to-5x slower than +the performance of the regular PyPy in fully serial applications. (Of +course, the regular PyPy will not disappear; for the foreseeable future, +PyPy-TM will be an alternative executable.)

    +

    The performance of PyPy-TM running suited applications should scale +linearly or close-to-linearly with the number of processors. This means +that in order to see the benefits, you just need a machine with at least +4 or 8 processors – which is already common today and will be even +more so in one or two years.

    +
    +
    +

    Hardware Transactional Memory

    +

    The performance of PyPy-TM running on Hardware Transactional Memory is +unknown so far, but should ideally be close to the performance of a +regular PyPy.

    +

    In more details: This proposal is for work based entirely on Software +Transactional Memory. However, in the future the ideas and most of the +code should map directly to Hardware Transactional Memory (HTM). We +expect HTM to reduce a lot the cost of some of the issues that we face, +but not completely remove it. For example, AMD's old proposal was +that there would be two ways to emit memory-referencing +instructions: one that goes via the HTM mechanisms, and one (the regular +one) which doesn't. Choosing the best one on a +case-by-case basis in the JIT makes a difference in +performance (although of course not as great as with Software +Transactional Memory).

    +

    Intel's current proposal on Haswell processors does not have this +distinction, which means transactions are likely to quickly overflow the +internal buffers. As a result, the first generation HTM-capable +processors may not be suited for the approach described here. But +this depends on details like the capacity of the hardware buffers that +are still secret at this point.

    +

    (Note also that HTM does not solve some of the issues for implementing +Transactional Memory in CPython, notably the issue with reference +counting. We will have to wait for a real CPython experiment before +we can settle this question. Also, this would “just” remove the GIL +but not offer a multi-core version of non-thread-based programs.)

    +
    +
    +

    Alternatives

    +

    PyPy-TM will be slower than judicious usage of existing alternatives, +based on multiple processes that communicate with each other in one way +or another. The counter-argument is that TM is not only a cleaner +solution: there are cases in which it is not doable to organize (or +retrofit) an existing program into the particular format needed for the +alternatives. In particular, small quickly-written programs don't need +the additional baggage of cross-process communication, and large +programs can sometimes be almost impossible to turn into multi-process +versions. By contrast, we believe that TM can fit naturally into most +programs, because it only requires local changes to some dispatcher; the +rest of the program should work without changes.

    +
    +
    +

    More readings

    + +
    +
    +
    +

    Work plan

    +

    This is a very rough estimate of the amount of work it would take to +complete the steps for an experienced developer who is already familiar +with the PyPy codebase. As this is a research proposal, we cannot +guarantee the time estimates here, but we do agree to report regularly to +the community, so our progress can be followed publicly.

    +

    Paid work will be at $60/hour, but at least one developer who will work on +the project – Armin Rigo – has committed to 2 hours +of volunteer work per paid hour (so +the total amount of money that we ask is divided by three). A 5% general +donation will go to the Software Freedom Conservancy itself, the +non-profit organization of which the PyPy project is a member and which +manages all the issues related to donations, payments, and tax-exempt +status.

    +
      +
    • STM Library:

      +

      This part covers adapting an existing STM library for PyPy. It is +already mostly done (based on rstm), but additional tweaks may be +required.

      +
    • +
    • Basic tweaks of the translation process:

      +

      This part covers tweaks needed during the translation process in +order to generate an STM-aware version of the RPython programs, +including PyPy itself. It is partly done, but not finished. +Estimate: 1 month.

      +
    • +
    • Garbage collection:

      +

      We need a different garbage collector that is able to cope at least +with concurrent allocations. From there, improving the situation is +its own open-ended subproject: we can add for example various kinds of +parallel collection, synchronized or unsynchronized root tracing, +etc. Estimate for the basic part: 2 months. Estimate for the rest: +4 extra months.

      +
    • +
    • User interface:

      +

      This is the problem of designing and implementing some interface or +interfaces for the Python programmer. We put it in its own category +because of its “end-user” importance. Estimate: 2 months.

      +
    • +
    • JIT integration:

      +

      The above would give us a (probably very slow) version of PyPy-TM. +This final part is to integrate it with the JIT compiler generator. +The main issue we foresee is integration with the new GC, detecting +with object flags or JIT optimizations which objects need +transactional memory status or not. We think that with enough such +optimizations we can seriously lower the overhead of PyPy-TM, maybe +down to 2x slower than a regular PyPy or better. Estimate: unknown; +at least 4 months.

      +
    • +
    • Longer term:

      +

      In the longer term, we might need to refine the TM processing done +above, for example to better support I/O (e.g. we can queue the writes +done to a log file) or to add some special fine-grained support +(e.g. two transactions that each do samelist.append() do not need +to conflict in the simple case). This part is not included +in the estimates.

      +
    • +
    +

    Total: 5 months for the initial version; at least 8 additional months +for the fast version. We will go with a total estimate of 15 months, +corresponding to USD$151200. The amount sought by this fundraising +campaign, considering the 2 volunteer hours per paid hour is thus USD$50400.

    +
    +
    +

    Benefits of This Work to the Python Community and the General Public

    +

    Python has become one of the most popular dynamic programming languages in +the world. Web developers, educators, and scientific programmers alike +all value Python because Python code is often more readable and because +Python often increases programmer productivity.

    +

    Traditionally, languages like Python ran more slowly than static, compiled +languages; Python developers chose to sacrifice execution speed for ease +of programming. The PyPy project created a substantially improved Python +language implementation, including a fast Just-in-time (JIT) compiler. +The increased execution speed that PyPy provides has attracted many users, +who now find their Python code runs up to four times faster under PyPy +than under the reference implementation written in C.

    +

    However, in the presence of today's machines with multiple processors, +Python progress lags behind. The issue has been described in the +introduction: developers that really need to use multiple CPUs are +constrained to select and use one of the multi-process solutions that +are all in some way or another hacks requiring extra knowledge and +efforts to use. The focus of the work described in this proposal is to +offer an alternative in the core of the Python language – an +alternative that can naturally integrate with the rest of the program. +This alternative will be implemented in PyPy.

    +

    PyPy's developers make all PyPy software available to the public without +charge, under PyPy's Open Source copyright license, the permissive MIT +License. PyPy's license assures that PyPy is equally available to +everyone freely on terms that allow both non-commercial and commercial +activity. This license allows for academics, for-profit software +developers, volunteers and enthusiasts alike to collaborate together to +make a better Python implementation for everyone.

    +

    PyPy-TM will be available under the same license. Being licensed freely +to the general public means that opportunities to use, improve and learn +about how Transactional Memory works itself will be generally available +to everyone.

    +
    +
    + +
    +
    +
    + + \ No newline at end of file From noreply at buildbot.pypy.org Mon Mar 5 23:42:48 2012 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 5 Mar 2012 23:42:48 +0100 (CET) Subject: [pypy-commit] pypy.org extradoc: merge Message-ID: <20120305224248.4D2C382AAC@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r338:513ca7f1c095 Date: 2012-03-05 14:42 -0800 http://bitbucket.org/pypy/pypy.org/changeset/513ca7f1c095/ Log: merge diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -76,8 +76,10 @@
  • Linux binary (32bit) (openssl0.9.8 notes)
  • Linux binary (64bit) (openssl0.9.8 notes)
  • Mac OS/X binary (64bit)
  • -
  • Windows binary (32bit)
  • -

    If your CPU is really old, it may not have SSE2. In this case, you need +

  • Windows binary (32bit) (you need the VS 2010 runtime libraries) +note: the zip file contains the wrong version, msvcrt90.dll :-(
  • + +

    If your CPU is really old, it may not have SSE2. In this case, you need to translate yourself with the option --jit-backend=x86-without-sse2.

    @@ -87,7 +89,8 @@
  • The most up-to-date nightly build with a JIT, if the official release is too old for what you want to do.
  • No JIT: A version without the JIT. Consumes a bit less memory -and may be faster on short-running scripts.
  • +and may be faster on short-running scripts. (Note that a similar +effect can be obtained by running pypy --jit off.)
  • Sandboxing: A special safe version. Read the docs about sandboxing. (It is also possible to translate a version that includes both sandboxing and the JIT compiler, although as the JIT is relatively diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -45,13 +45,14 @@ * `Linux binary (32bit)`__ (`openssl0.9.8 notes`_) * `Linux binary (64bit)`__ (`openssl0.9.8 notes`_) * `Mac OS/X binary (64bit)`__ -* `Windows binary (32bit)`__ +* `Windows binary (32bit)`__ (you need the `VS 2010 runtime libraries`_) + *note: the zip file contains the wrong version, msvcrt90.dll :-(* .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-1.8-linux.tar.bz2 .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-1.8-linux64.tar.bz2 .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-1.8-osx64.tar.bz2 .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-1.8-win32.zip -.. VS 2010 runtime libraries: http://www.microsoft.com/downloads/en/details.aspx?familyid=A7B7A05E-6DE6-4D3A-A423-37BF0912DB84 +.. _`VS 2010 runtime libraries`: http://www.microsoft.com/download/en/details.aspx?displaylang=en&id=5555 If your CPU is really old, it may not have SSE2. In this case, you need to translate_ yourself with the option ``--jit-backend=x86-without-sse2``. @@ -67,7 +68,8 @@ release is too old for what you want to do. * No JIT: A version without the JIT. Consumes a bit less memory - and may be faster on short-running scripts. + and may be faster on short-running scripts. (Note that a similar + effect can be obtained by running ``pypy --jit off``.) * Sandboxing: A special safe version. Read the docs about sandboxing_. 
(It is also possible to translate_ a version that includes both From noreply at buildbot.pypy.org Mon Mar 5 23:44:16 2012 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 5 Mar 2012 23:44:16 +0100 (CET) Subject: [pypy-commit] pypy.org extradoc: oops Message-ID: <20120305224416.E028E82AAC@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r339:3e5779b833dc Date: 2012-03-05 14:44 -0800 http://bitbucket.org/pypy/pypy.org/changeset/3e5779b833dc/ Log: oops diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -1,9 +1,9 @@
    • - Donate towards STM in pypy
      + Donate towards STM in pypy
      Donate towards py3k in pypy
      Donate towards general pypy progress
      - Donate towards NumPy in pypy
      + Donate towards NumPy in pypy
    • - $40035 of $105000 (38.0%) + $43438 of $105000 (41.4%)
      diff --git a/don3.html b/don3.html --- a/don3.html +++ b/don3.html @@ -9,12 +9,12 @@ - $43280 of $60000 (72.1%) + $44099 of $60000 (73.5%)
      diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -9,12 +9,12 @@ - $0 of $50400 + $1925 of $50400 (3.8%)
      From noreply at buildbot.pypy.org Tue Mar 20 09:06:29 2012 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 20 Mar 2012 09:06:29 +0100 (CET) Subject: [pypy-commit] pypy.org extradoc: merge Message-ID: <20120320080629.CC7AA8236B@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r349:2d84bb2dfdf1 Date: 2012-03-20 10:06 +0200 http://bitbucket.org/pypy/pypy.org/changeset/2d84bb2dfdf1/ Log: merge diff --git a/archive.html b/archive.html --- a/archive.html +++ b/archive.html @@ -38,7 +38,7 @@

  • - +
    diff --git a/compat.html b/compat.html --- a/compat.html +++ b/compat.html @@ -38,7 +38,7 @@

    - +
    diff --git a/contact.html b/contact.html --- a/contact.html +++ b/contact.html @@ -38,7 +38,7 @@

    - +
    @@ -50,6 +50,7 @@
  • mailing list: pypy-dev at python.org
  • the bug tracker
  • more on our dev site.
  • +
  • code on bitbucket.

  • - +
    diff --git a/features.html b/features.html --- a/features.html +++ b/features.html @@ -38,7 +38,7 @@

    - +
    diff --git a/howtohelp.html b/howtohelp.html --- a/howtohelp.html +++ b/howtohelp.html @@ -38,7 +38,7 @@

    - +
    diff --git a/index.html b/index.html --- a/index.html +++ b/index.html @@ -38,7 +38,7 @@

    - +
    diff --git a/numpydonate.html b/numpydonate.html --- a/numpydonate.html +++ b/numpydonate.html @@ -38,7 +38,7 @@

    - +
    @@ -65,7 +65,7 @@ at the latest, we will try our best to make PyPy support NumPy anyway. We however reserve the right to shift any unused funds to other PyPy activities when that date is reached. Of course, since the Conservancy is a -501(c)(3) charitable organization incorporated in NY, USA, all funds will, +501©(3) charitable organization incorporated in NY, USA, all funds will, regardless of their use, be spent in a way that benefits the general public, the advancement of Open Source and Free Software, and in particular the PyPy community and the PyPy codebase.

    diff --git a/people.html b/people.html --- a/people.html +++ b/people.html @@ -38,7 +38,7 @@

    - +
    diff --git a/performance.html b/performance.html --- a/performance.html +++ b/performance.html @@ -38,7 +38,7 @@

    - +
    diff --git a/py3donate.html b/py3donate.html --- a/py3donate.html +++ b/py3donate.html @@ -38,7 +38,7 @@

    - +
    @@ -74,7 +74,7 @@ at the latest, we will try our best to make PyPy support Python 3 anyway. We however reserve the right to shift any unused funds to other PyPy activities when that date is reached. Of course, since the Conservancy is a -501(c)(3) charitable organization incorporated in NY, USA, all funds will, +501©(3) charitable organization incorporated in NY, USA, all funds will, regardless of their use, be spent in a way that benefits the general public, the advancement of Open Source and Free Software, and in particular the PyPy community and the PyPy codebase.

    diff --git a/source/_layouts/site.genshi b/source/_layouts/site.genshi --- a/source/_layouts/site.genshi +++ b/source/_layouts/site.genshi @@ -15,7 +15,7 @@ ('Contact', 'contact.html'), ('Py3k donations', 'py3donate.html'), ('NumPy donations', 'numpydonate.html'), - ('STM donations', 'tmdonate.html'), + ('STM/AME donations', 'tmdonate.html'), ], } diff --git a/source/contact.txt b/source/contact.txt --- a/source/contact.txt +++ b/source/contact.txt @@ -14,6 +14,9 @@ * more on our `dev site`_. +* code on `bitbucket`_. + .. __: http://python.org/mailman/listinfo/pypy-dev .. _`bug tracker`: https://bugs.pypy.org .. _`dev site`: http://doc.pypy.org +.. _`bitbucket`: https://bitbucket.org/pypy/pypy/overview diff --git a/source/tmdonate.txt b/source/tmdonate.txt --- a/source/tmdonate.txt +++ b/source/tmdonate.txt @@ -1,11 +1,11 @@ --- layout: page -title: Call for donations - Transactional Memory in PyPy +title: Call for donations - Transactional Memory / Automatic Mutual Exclusion in PyPy --- -==================== -Transactional Memory -==================== +================================================= +Transactional Memory / Automatic Mutual Exclusion +================================================= Introduction @@ -27,7 +27,7 @@ This proposal is about researching and implementing Transactional Memory in PyPy. This is a technique that recently came to the front of the -multi-core scene. It promizes to offer multi-core CPU usage without +multi-core scene. It promises to offer multi-core CPU usage without requiring to fall back to the multi-process solutions described above, and also without using the ``threading`` module --- just as a small, local extension of the programming language that would be used only in @@ -90,7 +90,7 @@ What is Transactional Memory? 
----------------------------- -`Transactional Memory`_ (TM) is a technique imported from databases: every +`Transactional Memory`_ --- TM --- is a technique imported from databases: every time we want to do a change to the processors' main memory, we do it in a "transaction". Multiple transactions can be executed in parallel by multiple cores. When a transaction is complete, we try to commit it. @@ -124,7 +124,7 @@ -------------------------------- This is a classic criticism of research-oriented projects. We believe -that the `work plan`_ plan below can make a serious impact on considering +that the `work plan`_ below can make a serious impact on considering possible a GIL-less Python. We believe we can do it, but at the very least, even if this work generates a negative result, the negative result will document the challenges faced should someone else want to @@ -255,7 +255,7 @@ alternatives. In particular, small quickly-written programs don't need the additional baggage of cross-process communication, and large programs can sometimes be almost impossible to turn into multi-process -versions. By constrast, we believe that TM can fit naturally into most +versions. By contrast, we believe that TM can fit naturally into most programs, because it only requires local changes to some dispatcher; the rest of the program should work without changes. diff --git a/sponsor.html b/sponsor.html --- a/sponsor.html +++ b/sponsor.html @@ -38,7 +38,7 @@

    - +
    diff --git a/success.html b/success.html --- a/success.html +++ b/success.html @@ -38,7 +38,7 @@

    - +
    diff --git a/tmdonate.html b/tmdonate.html --- a/tmdonate.html +++ b/tmdonate.html @@ -1,7 +1,7 @@ - PyPy :: Call for donations - Transactional Memory in PyPy + PyPy :: Call for donations - Transactional Memory / Automatic Mutual Exclusion in PyPy @@ -38,13 +38,13 @@

    - +
    -

    Call for donations - Transactional Memory in PyPy

    +

    Call for donations - Transactional Memory / Automatic Mutual Exclusion in PyPy

    Introduction

    In the presence of today's machines with multiple processors, Python @@ -62,7 +62,7 @@ to use and that have an impact on the structure of the whole program.

    This proposal is about researching and implementing Transactional Memory in PyPy. This is a technique that recently came to the front of the -multi-core scene. It promizes to offer multi-core CPU usage without +multi-core scene. It promises to offer multi-core CPU usage without requiring to fall back to the multi-process solutions described above, and also without using the threading module – just as a small, local extension of the programming language that would be used only in @@ -109,7 +109,7 @@

    What is Transactional Memory?

    -

    Transactional Memory ™ is a technique imported from databases: every +

    Transactional Memory – TM – is a technique imported from databases: every time we want to do a change to the processors' main memory, we do it in a “transaction”. Multiple transactions can be executed in parallel by multiple cores. When a transaction is complete, we try to commit it. @@ -134,7 +134,7 @@

    A GIL-less Python is impossible.

    This is a classic criticism of research-oriented projects. We believe -that the work plan plan below can make a serious impact on considering +that the work plan below can make a serious impact on considering possible a GIL-less Python. We believe we can do it, but at the very least, even if this work generates a negative result, the negative result will document the challenges faced should someone else want to @@ -234,7 +234,7 @@ alternatives. In particular, small quickly-written programs don't need the additional baggage of cross-process communication, and large programs can sometimes be almost impossible to turn into multi-process -versions. By constrast, we believe that TM can fit naturally into most +versions. By contrast, we believe that TM can fit naturally into most programs, because it only requires local changes to some dispatcher; the rest of the program should work without changes.

    From noreply at buildbot.pypy.org Tue Mar 20 14:00:13 2012 From: noreply at buildbot.pypy.org (RonnyPfannschmidt) Date: Tue, 20 Mar 2012 14:00:13 +0100 (CET) Subject: [pypy-commit] pypy default: kill the run task and related code Message-ID: <20120320130013.65E818236A@wyvern.cs.uni-duesseldorf.de> Author: Ronny Pfannschmidt Branch: Changeset: r53829:0e5b140cf620 Date: 2012-03-20 13:58 +0100 http://bitbucket.org/pypy/pypy/changeset/0e5b140cf620/ Log: kill the run task and related code diff --git a/pypy/translator/driver.py b/pypy/translator/driver.py --- a/pypy/translator/driver.py +++ b/pypy/translator/driver.py @@ -585,22 +585,6 @@ # task_compile_c = taskdef(task_compile_c, ['source_c'], "Compiling c source") - def backend_run(self, backend): - c_entryp = self.c_entryp - standalone = self.standalone - if standalone: - os.system(c_entryp) - else: - runner = self.extra.get('run', lambda f: f()) - runner(c_entryp) - - def task_run_c(self): - self.backend_run('c') - # - task_run_c = taskdef(task_run_c, ['compile_c'], - "Running compiled c source", - idemp=True) - def task_llinterpret_lltype(self): from pypy.rpython.llinterp import LLInterpreter py.log.setconsumer("llinterp operation", None) @@ -710,11 +694,6 @@ shutil.copy(main_exe, '.') self.log.info("Copied to %s" % os.path.join(os.getcwd(), dllname)) - def task_run_cli(self): - pass - task_run_cli = taskdef(task_run_cli, ['compile_cli'], - 'XXX') - def task_source_jvm(self): from pypy.translator.jvm.genjvm import GenJvm from pypy.translator.jvm.node import EntryPoint diff --git a/pypy/translator/goal/translate.py b/pypy/translator/goal/translate.py --- a/pypy/translator/goal/translate.py +++ b/pypy/translator/goal/translate.py @@ -31,7 +31,6 @@ ("backendopt", "do backend optimizations", "--backendopt", ""), ("source", "create source", "-s --source", ""), ("compile", "compile", "-c --compile", " (default goal)"), - ("run", "run the resulting binary", "--run", ""), ("llinterpret", "interpret the rtyped flow 
graphs", "--llinterpret", ""), ] def goal_options(): @@ -78,7 +77,7 @@ defaultfactory=list), # xxx default goals ['annotate', 'rtype', 'backendopt', 'source', 'compile'] ArbitraryOption("skipped_goals", "XXX", - defaultfactory=lambda: ['run']), + defaultfactory=list), OptionDescription("goal_options", "Goals that should be reached during translation", goal_options()), From noreply at buildbot.pypy.org Tue Mar 20 15:47:29 2012 From: noreply at buildbot.pypy.org (RonnyPfannschmidt) Date: Tue, 20 Mar 2012 15:47:29 +0100 (CET) Subject: [pypy-commit] pyrepl py3ksupport: undo misstake Message-ID: <20120320144729.0EA998236A@wyvern.cs.uni-duesseldorf.de> Author: Ronny Pfannschmidt Branch: py3ksupport Changeset: r164:0ceb86767590 Date: 2012-03-20 15:46 +0100 http://bitbucket.org/pypy/pyrepl/changeset/0ceb86767590/ Log: undo misstake diff --git a/pyrepl/keymap.py b/pyrepl/keymap.py --- a/pyrepl/keymap.py +++ b/pyrepl/keymap.py @@ -172,9 +172,8 @@ def compile_keymap(keymap, empty=b''): r = {} - import pprint for key, value in keymap.items(): - r.setdefault(key[:1], {})[key[1:]] = value + r.setdefault(key[0], {})[key[1:]] = value for key, value in r.items(): if empty in value: if len(value) != 1: diff --git a/testing/test_keymap.py b/testing/test_keymap.py --- a/testing/test_keymap.py +++ b/testing/test_keymap.py @@ -1,6 +1,8 @@ +import pytest from pyrepl.keymap import compile_keymap + at pytest.mark.skip('completely wrong') def test_compile_keymap(): k = compile_keymap({ b'a': 'test', From noreply at buildbot.pypy.org Tue Mar 20 15:47:30 2012 From: noreply at buildbot.pypy.org (RonnyPfannschmidt) Date: Tue, 20 Mar 2012 15:47:30 +0100 (CET) Subject: [pypy-commit] pyrepl py3ksupport: merge Message-ID: <20120320144730.315DD8236A@wyvern.cs.uni-duesseldorf.de> Author: Ronny Pfannschmidt Branch: py3ksupport Changeset: r165:1cd29805192b Date: 2012-03-20 15:46 +0100 http://bitbucket.org/pypy/pyrepl/changeset/1cd29805192b/ Log: merge diff --git a/encopyright.py b/encopyright.py --- 
a/encopyright.py +++ b/encopyright.py @@ -20,11 +20,10 @@ # CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. import os, time, sys -import bzrlib.branch -import bzrlib.log +import py header_template = """\ -# Copyright 2000-%s Michael Hudson-Doyle %s +# Copyright 2000-%(lastyear)s Michael Hudson-Doyle %(others)s # # All Rights Reserved # @@ -46,64 +45,69 @@ author_template = "\n#%s%%s"%(' '*(header_template.index("Michael")+1),) -branch, path = bzrlib.branch.Branch.open_containing(sys.argv[0]) -rev_tree = branch.basis_tree() -branch.lock_read() -def process(thing): - if os.path.isdir(thing): - for subthing in os.listdir(thing): - process(os.path.join(thing, subthing)) - elif os.path.isfile(thing): - if thing[-3:] == '.py': - process_file(thing) - else: - print "W `%s' not file or directory"%(thing,) author_map = { u'mwh': None, + u'micahel': None, u'Michael Hudson ': None, u'arigo': u"Armin Rigo", u'antocuni': u'Antonio Cuni', + u'anto': u'Antonio Cuni', u'bob': u'Bob Ippolito', u'fijal': u'Maciek Fijalkowski', u'agaynor': u'Alex Gaynor', u'hpk': u'Holger Krekel', + u'Ronny': u'Ronny Pfannschmidt', + u'amauryfa': u"Amaury Forgeot d'Arc", } -def process_file(file): - ilines = open(file).readlines() - file_id = rev_tree.path2id(file) - rev_ids = [rev_id for (revno, rev_id, what) - in bzrlib.log.find_touching_revisions(branch, file_id)] - revs = branch.repository.get_revisions(rev_ids) - revs = sorted(revs, key=lambda x:x.timestamp) - modified_year = None - for rev in reversed(revs): - if 'encopyright' not in rev.message: - modified_year = time.gmtime(rev.timestamp)[0] - break + +def author_revs(path): + proc = py.std.subprocess.Popen([ + 'hg','log', str(path), + '--template', '{author|user} {date}\n', + '-r', 'not keyword("encopyright")', + ], stdout=py.std.subprocess.PIPE) + output, _ = proc.communicate() + lines = output.splitlines() + for line in lines: + try: + name, date = line.split(None, 1) + except ValueError: + pass + else: + if '-' in date: + date = 
date.split('-')[0] + yield name, float(date) + + +def process(path): + ilines = path.readlines() + revs = sorted(author_revs(path), key=lambda x:x[1]) + modified_year = time.gmtime(revs[-1][1])[0] if not modified_year: - print 'E: no sensible modified_year found for %s' % file, + print 'E: no sensible modified_year found for', path modified_year = time.gmtime(time.time())[0] - authors = set() - for rev in revs: - authors.update(rev.get_apparent_authors()) extra_authors = [] + authors = set(rev[0] for rev in revs) for a in authors: if a not in author_map: - print 'E: need real name for %r' % a + print 'E: need real name for', a ea = author_map.get(a) if ea: extra_authors.append(ea) extra_authors.sort() - header = header_template % (modified_year, ''.join([author_template%ea for ea in extra_authors])) + header = header_template % { + 'lastyear': modified_year, + 'others': ''.join([author_template%ea for ea in extra_authors]) + } header_lines = header.splitlines() prelines = [] old_copyright = [] if not ilines: - print "W ignoring empty file `%s'"%(file,) + print "W ignoring empty file", path return i = 0 @@ -123,8 +127,8 @@ if abs(len(old_copyright) - len(header_lines)) < 2 + len(extra_authors): for x, y in zip(old_copyright, header_lines): if x[:-1] != y: - print "C change needed in", file - ofile = open(file, "w") + print "C change needed in", path + ofile = path.open("w") for l in prelines: ofile.write(l) ofile.write(header + "\n") @@ -133,17 +137,21 @@ ofile.close() break else: - print "M no change needed in", file + print "M no change needed in", path else: print "A no (c) in", file - ofile = open(file, "w") - for l in prelines: - ofile.write(l) - ofile.write(header + "\n\n") - for l in ilines[len(prelines):]: - ofile.write(l) - ofile.close() - + with path.open("w") as ofile: + for l in prelines: + ofile.write(l) + ofile.write(header + "\n\n") + for l in ilines[len(prelines):]: + ofile.write(l) + for thing in sys.argv[1:]: - process(thing) + path = 
py.path.local(thing) + if path.check(dir=1): + for item in path.visit('*.py'): + process(item) + elif path.check(file=1, ext='py'): + process(path) From noreply at buildbot.pypy.org Tue Mar 20 15:52:33 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 20 Mar 2012 15:52:33 +0100 (CET) Subject: [pypy-commit] pypy py3k: revert 6c0f46ca2071 and 0c0fd7170ad3, and move the logic inside str.__str__, also improving test_call_unicode. Now bot test_call_unicode and test_returns_subclass work Message-ID: <20120320145233.0C5CE8236A@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53830:5f4d5bfbc2d3 Date: 2012-03-20 11:03 +0100 http://bitbucket.org/pypy/pypy/changeset/5f4d5bfbc2d3/ Log: revert 6c0f46ca2071 and 0c0fd7170ad3, and move the logic inside str.__str__, also improving test_call_unicode. Now bot test_call_unicode and test_returns_subclass work diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py --- a/pypy/objspace/std/test/test_unicodeobject.py +++ b/pypy/objspace/std/test/test_unicodeobject.py @@ -297,6 +297,7 @@ class U(str): pass assert str(U()).__class__ is str + assert U().__str__().__class__ is str assert U('test') == 'test' assert U('test').__class__ is U diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -94,7 +94,11 @@ return ''.join(result) def str__Unicode(space, w_uni): - return w_uni + if space.is_w(space.type(w_uni), space.w_unicode): + return w_uni + else: + # Subtype -- return genuine unicode string with the same value. 
+ return space.wrap(space.unicode_w(w_uni)) def eq__Unicode_Unicode(space, w_left, w_right): return space.newbool(w_left._value == w_right._value) diff --git a/pypy/objspace/std/unicodetype.py b/pypy/objspace/std/unicodetype.py --- a/pypy/objspace/std/unicodetype.py +++ b/pypy/objspace/std/unicodetype.py @@ -294,12 +294,7 @@ typename = space.type(w_res).getname(space) msg = "__str__ returned non-string (type %s)" % typename raise OperationError(space.w_TypeError, space.wrap(msg)) - - if space.is_w(space.type(w_res), space.w_unicode): - return w_res - else: - # Subtype -- return genuine unicode string with the same value. - return space.wrap(space.unicode_w(w_res)) + return w_res def descr_new_(space, w_unicodetype, w_object=u'', w_encoding=None, w_errors=None): # NB. the default value of w_obj is really a *wrapped* empty string: From noreply at buildbot.pypy.org Tue Mar 20 15:52:35 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 20 Mar 2012 15:52:35 +0100 (CET) Subject: [pypy-commit] pypy py3k: kill test about marshaling longs, they are no longer there Message-ID: <20120320145235.2D7648236A@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53831:ba41eb7d7483 Date: 2012-03-20 11:10 +0100 http://bitbucket.org/pypy/pypy/changeset/ba41eb7d7483/ Log: kill test about marshaling longs, they are no longer there diff --git a/pypy/module/marshal/test/test_marshalimpl.py b/pypy/module/marshal/test/test_marshalimpl.py --- a/pypy/module/marshal/test/test_marshalimpl.py +++ b/pypy/module/marshal/test/test_marshalimpl.py @@ -9,13 +9,6 @@ space = gettestobjspace(usemodules=('array',)) cls.space = space - def test_long_0(self): - import marshal - z = 0L - z1 = marshal.loads(marshal.dumps(z)) - assert z == z1 - assert type(z1) is long - def test_unmarshal_int64(self): # test that we can unmarshal 64-bit ints on 32-bit platforms # (of course we only test that if we're running on such a From noreply at buildbot.pypy.org Tue Mar 20 15:52:37 2012 
From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 20 Mar 2012 15:52:37 +0100 (CET) Subject: [pypy-commit] pypy py3k: marshal.loads expects bytes now Message-ID: <20120320145237.AC4548236A@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53832:726448e6b52c Date: 2012-03-20 11:38 +0100 http://bitbucket.org/pypy/pypy/changeset/726448e6b52c/ Log: marshal.loads expects bytes now diff --git a/pypy/module/marshal/test/test_marshalimpl.py b/pypy/module/marshal/test/test_marshalimpl.py --- a/pypy/module/marshal/test/test_marshalimpl.py +++ b/pypy/module/marshal/test/test_marshalimpl.py @@ -14,18 +14,18 @@ # (of course we only test that if we're running on such a # platform :-) import marshal - z = marshal.loads('I\x00\xe4\x0bT\x02\x00\x00\x00') + z = marshal.loads(b'I\x00\xe4\x0bT\x02\x00\x00\x00') assert z == 10000000000 - z = marshal.loads('I\x00\x1c\xf4\xab\xfd\xff\xff\xff') + z = marshal.loads(b'I\x00\x1c\xf4\xab\xfd\xff\xff\xff') assert z == -10000000000 - z = marshal.loads('I\x88\x87\x86\x85\x84\x83\x82\x01') + z = marshal.loads(b'I\x88\x87\x86\x85\x84\x83\x82\x01') assert z == 108793946209421192 - z = marshal.loads('I\xd8\xd8\xd9\xda\xdb\xdc\xcd\xfe') + z = marshal.loads(b'I\xd8\xd8\xd9\xda\xdb\xdc\xcd\xfe') assert z == -0x0132232425262728 def test_buffer(self): import marshal - z = marshal.loads(buffer('i\x02\x00\x00\x00???')) + z = marshal.loads(buffer(b'i\x02\x00\x00\x00???')) assert z == 2 def test_marshal_buffer_object(self): @@ -42,10 +42,10 @@ def test_unmarshal_evil_long(self): import marshal - raises(ValueError, marshal.loads, 'l\x02\x00\x00\x00\x00\x00\x00\x00') - z = marshal.loads('I\x00\xe4\x0bT\x02\x00\x00\x00') + raises(ValueError, marshal.loads, b'l\x02\x00\x00\x00\x00\x00\x00\x00') + z = marshal.loads(b'I\x00\xe4\x0bT\x02\x00\x00\x00') assert z == 10000000000 - z = marshal.loads('I\x00\x1c\xf4\xab\xfd\xff\xff\xff') + z = marshal.loads(b'I\x00\x1c\xf4\xab\xfd\xff\xff\xff') assert z == -10000000000 def 
test_marshal_code_object(self): From noreply at buildbot.pypy.org Tue Mar 20 15:52:40 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 20 Mar 2012 15:52:40 +0100 (CET) Subject: [pypy-commit] pypy py3k: buffer no longer exists in py3k, and 'c' is no longer a valid typecode for array.array Message-ID: <20120320145240.3C4648236A@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53833:34b65bdb71d8 Date: 2012-03-20 15:08 +0100 http://bitbucket.org/pypy/pypy/changeset/34b65bdb71d8/ Log: buffer no longer exists in py3k, and 'c' is no longer a valid typecode for array.array diff --git a/pypy/module/marshal/test/test_marshalimpl.py b/pypy/module/marshal/test/test_marshalimpl.py --- a/pypy/module/marshal/test/test_marshalimpl.py +++ b/pypy/module/marshal/test/test_marshalimpl.py @@ -23,22 +23,11 @@ z = marshal.loads(b'I\xd8\xd8\xd9\xda\xdb\xdc\xcd\xfe') assert z == -0x0132232425262728 - def test_buffer(self): - import marshal - z = marshal.loads(buffer(b'i\x02\x00\x00\x00???')) - assert z == 2 - - def test_marshal_buffer_object(self): - import marshal - s = marshal.dumps(buffer('foobar')) - t = marshal.loads(s) - assert type(t) is str and t == 'foobar' - def test_marshal_bufferlike_object(self): import marshal, array - s = marshal.dumps(array.array('c', 'asd')) + s = marshal.dumps(array.array('b', b'asd')) t = marshal.loads(s) - assert type(t) is str and t == 'asd' + assert type(t) is bytes and t == b'asd' def test_unmarshal_evil_long(self): import marshal From noreply at buildbot.pypy.org Tue Mar 20 15:52:42 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 20 Mar 2012 15:52:42 +0100 (CET) Subject: [pypy-commit] pypy default: test_marshal.py was automatically generated, but then was manually modified Message-ID: <20120320145242.CA7FB8236A@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r53834:2fa3fe4007a5 Date: 2012-03-20 15:45 +0100 http://bitbucket.org/pypy/pypy/changeset/2fa3fe4007a5/ Log: 
test_marshal.py was automatically generated, but then was manually modified since 2b1dc35d6317 ("I have no clue why it was written in that strange way." :-)). Kill the original generator, as it's outdated now diff --git a/pypy/module/marshal/test/make_test_marshal.py b/pypy/module/marshal/test/make_test_marshal.py deleted file mode 100644 --- a/pypy/module/marshal/test/make_test_marshal.py +++ /dev/null @@ -1,78 +0,0 @@ - -TESTCASES = """\ - None - False - True - StopIteration - Ellipsis - 42 - -17 - sys.maxint - -1.25 - -1.25 #2 - 2+5j - 2+5j #2 - 42L - -1234567890123456789012345678901234567890L - hello # not interned - "hello" - () - (1, 2) - [] - [3, 4] - {} - {5: 6, 7: 8} - func.func_code - scopefunc.func_code - u'hello' - set() - set([1, 2]) - frozenset() - frozenset([3, 4]) -""".strip().split('\n') - -def readable(s): - for c, repl in ( - ("'", '_quote_'), ('"', '_Quote_'), (':', '_colon_'), ('.', '_dot_'), - ('[', '_list_'), (']', '_tsil_'), ('{', '_dict_'), ('}', '_tcid_'), - ('-', '_minus_'), ('+', '_plus_'), - (',', '_comma_'), ('(', '_brace_'), (')', '_ecarb_') ): - s = s.replace(c, repl) - lis = list(s) - for i, c in enumerate(lis): - if c.isalnum() or c == '_': - continue - lis[i] = '_' - return ''.join(lis) - -print """class AppTestMarshal: -""" -for line in TESTCASES: - line = line.strip() - name = readable(line) - version = '' - extra = '' - if line.endswith('#2'): - version = ', 2' - extra = '; assert len(s) in (9, 17)' - src = '''\ - def test_%(name)s(self): - import sys - hello = "he" - hello += "llo" - def func(x): - return lambda y: x+y - scopefunc = func(42) - import marshal, StringIO - case = %(line)s - print "case: %%-30s func=%(name)s" %% (case, ) - s = marshal.dumps(case%(version)s)%(extra)s - x = marshal.loads(s) - assert x == case - f = StringIO.StringIO() - marshal.dump(case, f) - f.seek(0) - x = marshal.load(f) - assert x == case -''' % {'name': name, 'line': line, 'version' : version, 'extra': extra} - print src From noreply at 
buildbot.pypy.org Tue Mar 20 15:52:45 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 20 Mar 2012 15:52:45 +0100 (CET) Subject: [pypy-commit] pypy py3k: use bytes, not str with marshal.loads Message-ID: <20120320145245.57A5C8236A@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53835:55b648c8bad1 Date: 2012-03-20 15:52 +0100 http://bitbucket.org/pypy/pypy/changeset/55b648c8bad1/ Log: use bytes, not str with marshal.loads diff --git a/pypy/module/marshal/test/test_marshal.py b/pypy/module/marshal/test/test_marshal.py --- a/pypy/module/marshal/test/test_marshal.py +++ b/pypy/module/marshal/test/test_marshal.py @@ -176,7 +176,7 @@ def test_bad_typecode(self): import marshal - exc = raises(ValueError, marshal.loads, chr(1)) + exc = raises(ValueError, marshal.loads, b'\x01') assert r"'\x01'" in exc.value.message From noreply at buildbot.pypy.org Tue Mar 20 17:05:12 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 20 Mar 2012 17:05:12 +0100 (CET) Subject: [pypy-commit] pypy default: fix test for 32 bits Message-ID: <20120320160512.45A1F8236A@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r53836:3ac6a9c5d380 Date: 2012-03-20 12:01 -0400 http://bitbucket.org/pypy/pypy/changeset/3ac6a9c5d380/ Log: fix test for 32 bits diff --git a/pypy/jit/codewriter/test/test_flatten.py b/pypy/jit/codewriter/test/test_flatten.py --- a/pypy/jit/codewriter/test/test_flatten.py +++ b/pypy/jit/codewriter/test/test_flatten.py @@ -972,10 +972,16 @@ from pypy.rlib.longlong2float import float2longlong def f(x): return float2longlong(x) + if longlong.is_64_bit: + result_var = "%i0" + return_op = "int_return" + else: + result_var = "%f1" + return_op = "float_return" self.encoding_test(f, [25.0], """ - convert_float_bytes_to_longlong %f0 -> %i0 - int_return %i0 - """) + convert_float_bytes_to_longlong %%f0 -> %(result_var)s + %(return_op)s %(result_var)s + """ % {"result_var": result_var, "return_op": return_op}) def 
check_force_cast(FROM, TO, operations, value): From noreply at buildbot.pypy.org Tue Mar 20 17:05:13 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 20 Mar 2012 17:05:13 +0100 (CET) Subject: [pypy-commit] pypy default: merged upstream Message-ID: <20120320160513.B66058236A@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r53837:345b9dede8f3 Date: 2012-03-20 12:04 -0400 http://bitbucket.org/pypy/pypy/changeset/345b9dede8f3/ Log: merged upstream diff --git a/pypy/module/marshal/test/make_test_marshal.py b/pypy/module/marshal/test/make_test_marshal.py deleted file mode 100644 --- a/pypy/module/marshal/test/make_test_marshal.py +++ /dev/null @@ -1,78 +0,0 @@ - -TESTCASES = """\ - None - False - True - StopIteration - Ellipsis - 42 - -17 - sys.maxint - -1.25 - -1.25 #2 - 2+5j - 2+5j #2 - 42L - -1234567890123456789012345678901234567890L - hello # not interned - "hello" - () - (1, 2) - [] - [3, 4] - {} - {5: 6, 7: 8} - func.func_code - scopefunc.func_code - u'hello' - set() - set([1, 2]) - frozenset() - frozenset([3, 4]) -""".strip().split('\n') - -def readable(s): - for c, repl in ( - ("'", '_quote_'), ('"', '_Quote_'), (':', '_colon_'), ('.', '_dot_'), - ('[', '_list_'), (']', '_tsil_'), ('{', '_dict_'), ('}', '_tcid_'), - ('-', '_minus_'), ('+', '_plus_'), - (',', '_comma_'), ('(', '_brace_'), (')', '_ecarb_') ): - s = s.replace(c, repl) - lis = list(s) - for i, c in enumerate(lis): - if c.isalnum() or c == '_': - continue - lis[i] = '_' - return ''.join(lis) - -print """class AppTestMarshal: -""" -for line in TESTCASES: - line = line.strip() - name = readable(line) - version = '' - extra = '' - if line.endswith('#2'): - version = ', 2' - extra = '; assert len(s) in (9, 17)' - src = '''\ - def test_%(name)s(self): - import sys - hello = "he" - hello += "llo" - def func(x): - return lambda y: x+y - scopefunc = func(42) - import marshal, StringIO - case = %(line)s - print "case: %%-30s func=%(name)s" %% (case, ) - s = 
marshal.dumps(case%(version)s)%(extra)s - x = marshal.loads(s) - assert x == case - f = StringIO.StringIO() - marshal.dump(case, f) - f.seek(0) - x = marshal.load(f) - assert x == case -''' % {'name': name, 'line': line, 'version' : version, 'extra': extra} - print src diff --git a/pypy/translator/driver.py b/pypy/translator/driver.py --- a/pypy/translator/driver.py +++ b/pypy/translator/driver.py @@ -585,22 +585,6 @@ # task_compile_c = taskdef(task_compile_c, ['source_c'], "Compiling c source") - def backend_run(self, backend): - c_entryp = self.c_entryp - standalone = self.standalone - if standalone: - os.system(c_entryp) - else: - runner = self.extra.get('run', lambda f: f()) - runner(c_entryp) - - def task_run_c(self): - self.backend_run('c') - # - task_run_c = taskdef(task_run_c, ['compile_c'], - "Running compiled c source", - idemp=True) - def task_llinterpret_lltype(self): from pypy.rpython.llinterp import LLInterpreter py.log.setconsumer("llinterp operation", None) @@ -710,11 +694,6 @@ shutil.copy(main_exe, '.') self.log.info("Copied to %s" % os.path.join(os.getcwd(), dllname)) - def task_run_cli(self): - pass - task_run_cli = taskdef(task_run_cli, ['compile_cli'], - 'XXX') - def task_source_jvm(self): from pypy.translator.jvm.genjvm import GenJvm from pypy.translator.jvm.node import EntryPoint diff --git a/pypy/translator/goal/translate.py b/pypy/translator/goal/translate.py --- a/pypy/translator/goal/translate.py +++ b/pypy/translator/goal/translate.py @@ -31,7 +31,6 @@ ("backendopt", "do backend optimizations", "--backendopt", ""), ("source", "create source", "-s --source", ""), ("compile", "compile", "-c --compile", " (default goal)"), - ("run", "run the resulting binary", "--run", ""), ("llinterpret", "interpret the rtyped flow graphs", "--llinterpret", ""), ] def goal_options(): @@ -78,7 +77,7 @@ defaultfactory=list), # xxx default goals ['annotate', 'rtype', 'backendopt', 'source', 'compile'] ArbitraryOption("skipped_goals", "XXX", - 
defaultfactory=lambda: ['run']), + defaultfactory=list), OptionDescription("goal_options", "Goals that should be reached during translation", goal_options()), From noreply at buildbot.pypy.org Tue Mar 20 17:25:14 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 20 Mar 2012 17:25:14 +0100 (CET) Subject: [pypy-commit] pypy default: add the new op to llinterp, fixes tests. Message-ID: <20120320162514.6DC428236A@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r53838:3976bd326a08 Date: 2012-03-20 16:23 +0000 http://bitbucket.org/pypy/pypy/changeset/3976bd326a08/ Log: add the new op to llinterp, fixes tests. diff --git a/pypy/rpython/llinterp.py b/pypy/rpython/llinterp.py --- a/pypy/rpython/llinterp.py +++ b/pypy/rpython/llinterp.py @@ -770,6 +770,10 @@ checkadr(adr) return llmemory.cast_adr_to_int(adr, mode) + def op_convert_float_bytes_to_longlong(self, f): + from pypy.rlib import longlong2float + return longlong2float.float2longlong(f) + def op_weakref_create(self, v_obj): def objgetter(): # special support for gcwrapper.py return self.getval(v_obj) diff --git a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py --- a/pypy/rpython/lltypesystem/lloperation.py +++ b/pypy/rpython/lltypesystem/lloperation.py @@ -349,6 +349,7 @@ 'cast_float_to_ulonglong':LLOp(canfold=True), 'truncate_longlong_to_int':LLOp(canfold=True), 'force_cast': LLOp(sideeffects=False), # only for rffi.cast() + 'convert_float_bytes_to_longlong': LLOp(canfold=True), # __________ pointer operations __________ From noreply at buildbot.pypy.org Tue Mar 20 18:13:59 2012 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Mar 2012 18:13:59 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Abstract for the STM talk. 
Message-ID: <20120320171359.411788236A@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r4156:2998cba304fd Date: 2012-03-20 18:13 +0100 http://bitbucket.org/pypy/extradoc/changeset/2998cba304fd/ Log: Abstract for the STM talk. diff --git a/talk/ep2012/stm/abstract.rst b/talk/ep2012/stm/abstract.rst new file mode 100644 --- /dev/null +++ b/talk/ep2012/stm/abstract.rst @@ -0,0 +1,27 @@ + +Kill the GIL..? +=============== + +The GIL, or Global Interpreter Lock, is a well-known issue for Python +programmers that want to have a single program using the multiple cores +of today's machines. + +This talk is *not* about writing a GIL-less Python interpreter; although +hard, this has been done before, notably in Jython. The real issue is +that writing each and every multi-threaded Python programs is hard too. +The ``threading`` module offers locks in several variants, conditions, +events, semaphores... But using them correctly without missing one case +is difficult, impossible to seriously test, often impossible to retrofit +into existing programs, and arguably doesn't scale. (Other solutions +like the ``multiprocessing`` module are at best workarounds, suffering +some of the same issues plus their own ones.) + +Instead, this talk is about an alternate solution: a minimal thread-less +API that lets programs use multiple cores, without worrying about races. +This may sound impossible, but is in fact similar to the API +simplification of using a garbage collected language over an explicitly +managed one --- what is not minimal is "just" the internal +implementation of that API. I will explain how it can actually be done +using Automatic Mutual Exclusion, a technique based on Transactional +Memory. I will give preliminary results on a modified version of the +PyPy Python interpreter that show that it can actually work. 
From noreply at buildbot.pypy.org Tue Mar 20 18:20:47 2012 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Mar 2012 18:20:47 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Add a sentence Message-ID: <20120320172047.E90838236A@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r4157:eddece109518 Date: 2012-03-20 18:20 +0100 http://bitbucket.org/pypy/extradoc/changeset/eddece109518/ Log: Add a sentence diff --git a/talk/ep2012/stm/abstract.rst b/talk/ep2012/stm/abstract.rst --- a/talk/ep2012/stm/abstract.rst +++ b/talk/ep2012/stm/abstract.rst @@ -24,4 +24,7 @@ implementation of that API. I will explain how it can actually be done using Automatic Mutual Exclusion, a technique based on Transactional Memory. I will give preliminary results on a modified version of the -PyPy Python interpreter that show that it can actually work. +PyPy Python interpreter that show that it can actually work. I will +also explain how the API is used, e.g. in a modified Twisted reactor +that gives multi-core capability to any existing, non-thread-based +Twisted program. From noreply at buildbot.pypy.org Tue Mar 20 18:40:44 2012 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Mar 2012 18:40:44 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Update as a two-parts talk. Message-ID: <20120320174044.B1D568236A@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r4158:5aa830ab48b8 Date: 2012-03-20 18:39 +0100 http://bitbucket.org/pypy/extradoc/changeset/5aa830ab48b8/ Log: Update as a two-parts talk. diff --git a/talk/ep2012/stm/abstract.rst b/talk/ep2012/stm/abstract.rst --- a/talk/ep2012/stm/abstract.rst +++ b/talk/ep2012/stm/abstract.rst @@ -1,30 +1,38 @@ -Kill the GIL..? 
-=============== +PyPy: status and GIL-less future +================================ -The GIL, or Global Interpreter Lock, is a well-known issue for Python -programmers that want to have a single program using the multiple cores -of today's machines. +In the first part of the talk we will present the current status and +speed of PyPy, the Python interpreter written in Python. -This talk is *not* about writing a GIL-less Python interpreter; although -hard, this has been done before, notably in Jython. The real issue is -that writing each and every multi-threaded Python programs is hard too. -The ``threading`` module offers locks in several variants, conditions, -events, semaphores... But using them correctly without missing one case -is difficult, impossible to seriously test, often impossible to retrofit -into existing programs, and arguably doesn't scale. (Other solutions -like the ``multiprocessing`` module are at best workarounds, suffering -some of the same issues plus their own ones.) +The second part of the talk is about one particular feature whose +development is in progress in PyPy: Automatic Mutual Exclusion. +What it is needs some explanation: -Instead, this talk is about an alternate solution: a minimal thread-less -API that lets programs use multiple cores, without worrying about races. -This may sound impossible, but is in fact similar to the API -simplification of using a garbage collected language over an explicitly -managed one --- what is not minimal is "just" the internal -implementation of that API. I will explain how it can actually be done -using Automatic Mutual Exclusion, a technique based on Transactional -Memory. I will give preliminary results on a modified version of the -PyPy Python interpreter that show that it can actually work. I will -also explain how the API is used, e.g. in a modified Twisted reactor -that gives multi-core capability to any existing, non-thread-based -Twisted program. 
+ The GIL, or Global Interpreter Lock, is a well-known issue for Python + programmers that want to have a single program using the multiple + cores of today's machines. + + This talk is *not* about writing a GIL-less Python interpreter; + although hard, this has been done before, notably in Jython. The real + issue is that writing each and every multi-threaded Python programs is + hard too. The ``threading`` module offers locks in several variants, + conditions, events, semaphores... But using them correctly without + missing one case is difficult, impossible to seriously test, often + impossible to retrofit into existing programs, and arguably doesn't + scale. (Other solutions like the ``multiprocessing`` module are at + best workarounds, suffering some of the same issues plus their own + ones.) + + Instead, this talk is about an alternate solution: a minimal + thread-less API that lets programs use multiple cores, without + worrying about races. This may sound impossible, but is in fact + similar to the API simplification of using a garbage collected + language over an explicitly managed one --- what is not minimal is + "just" the internal implementation of that API. I will explain how it + can actually be done using Automatic Mutual Exclusion, a technique + based on Transactional Memory. I will give preliminary results on a + modified version of the PyPy Python interpreter that show that it can + actually work. I will also explain how the API is used, e.g. in a + modified Twisted reactor that gives multi-core capability to any + existing, non-thread-based Twisted program. 
From noreply at buildbot.pypy.org Tue Mar 20 18:47:22 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 20 Mar 2012 18:47:22 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: tweak the title and expand the first sentence Message-ID: <20120320174722.EB01C8236A@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: extradoc Changeset: r4159:9902577fb8a3 Date: 2012-03-20 18:47 +0100 http://bitbucket.org/pypy/extradoc/changeset/9902577fb8a3/ Log: tweak the title and expand the first sentence diff --git a/talk/ep2012/stm/abstract.rst b/talk/ep2012/stm/abstract.rst --- a/talk/ep2012/stm/abstract.rst +++ b/talk/ep2012/stm/abstract.rst @@ -1,9 +1,10 @@ -PyPy: status and GIL-less future -================================ +PyPy: current status and GIL-less future +========================================= -In the first part of the talk we will present the current status and -speed of PyPy, the Python interpreter written in Python. +In the first part of the talk we will present what the current status of PyPy, +with a particular focus on what happened in the last year. We will give a +brief overview of the current speed and the on-going development efforts. The second part of the talk is about one particular feature whose development is in progress in PyPy: Automatic Mutual Exclusion. From noreply at buildbot.pypy.org Tue Mar 20 19:08:41 2012 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Mar 2012 19:08:41 +0100 (CET) Subject: [pypy-commit] pypy default: Add a comment. Message-ID: <20120320180841.1C8518236A@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r53839:52418dfbc2d5 Date: 2012-03-20 19:07 +0100 http://bitbucket.org/pypy/pypy/changeset/52418dfbc2d5/ Log: Add a comment. 
diff --git a/pypy/module/__builtin__/interp_memoryview.py b/pypy/module/__builtin__/interp_memoryview.py --- a/pypy/module/__builtin__/interp_memoryview.py +++ b/pypy/module/__builtin__/interp_memoryview.py @@ -69,6 +69,10 @@ return W_MemoryView(buf) def descr_buffer(self, space): + """Note that memoryview() objects in PyPy support buffer(), whereas + not in CPython; but CPython supports passing memoryview() to most + built-in functions that accept buffers, with the notable exception + of the buffer() built-in.""" return space.wrap(self.buf) def descr_tobytes(self, space): From noreply at buildbot.pypy.org Tue Mar 20 19:08:56 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 20 Mar 2012 19:08:56 +0100 (CET) Subject: [pypy-commit] pypy py3k: skip rope marshal tests: ropes are broken in py3k Message-ID: <20120320180856.454B88236A@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53840:affdf51671da Date: 2012-03-20 18:07 +0100 http://bitbucket.org/pypy/pypy/changeset/affdf51671da/ Log: skip rope marshal tests: ropes are broken in py3k diff --git a/pypy/module/marshal/test/test_marshal.py b/pypy/module/marshal/test/test_marshal.py --- a/pypy/module/marshal/test/test_marshal.py +++ b/pypy/module/marshal/test/test_marshal.py @@ -182,6 +182,9 @@ class AppTestRope(AppTestMarshal): def setup_class(cls): + import py + # space.bytes_w(w_some_rope) doesn't work + py.test.skip('rope does not work') from pypy.conftest import gettestobjspace cls.space = gettestobjspace(**{"objspace.std.withrope": True}) AppTestMarshal.setup_class.im_func(cls) From noreply at buildbot.pypy.org Tue Mar 20 19:28:04 2012 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Mar 2012 19:28:04 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: typo Message-ID: <20120320182804.998EE8236A@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r4160:8b07dd7f5f21 Date: 2012-03-20 19:27 +0100 
http://bitbucket.org/pypy/extradoc/changeset/8b07dd7f5f21/ Log: typo diff --git a/talk/ep2012/stm/abstract.rst b/talk/ep2012/stm/abstract.rst --- a/talk/ep2012/stm/abstract.rst +++ b/talk/ep2012/stm/abstract.rst @@ -2,7 +2,7 @@ PyPy: current status and GIL-less future ========================================= -In the first part of the talk we will present what the current status of PyPy, +In the first part of the talk we will present the current status of PyPy, with a particular focus on what happened in the last year. We will give a brief overview of the current speed and the on-going development efforts. From noreply at buildbot.pypy.org Tue Mar 20 20:25:48 2012 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Mar 2012 20:25:48 +0100 (CET) Subject: [pypy-commit] pypy default: Pre-import a few built-in modules, because some programs actually rely Message-ID: <20120320192548.CA4B28236A@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r53841:1c6dc3e6e70c Date: 2012-03-20 20:25 +0100 http://bitbucket.org/pypy/pypy/changeset/1c6dc3e6e70c/ Log: Pre-import a few built-in modules, because some programs actually rely on them to be in sys.modules :-( diff --git a/lib-python/modified-2.7/site.py b/lib-python/modified-2.7/site.py --- a/lib-python/modified-2.7/site.py +++ b/lib-python/modified-2.7/site.py @@ -550,9 +550,18 @@ "'import usercustomize' failed; use -v for traceback" +def import_builtin_stuff(): + """PyPy specific: pre-import a few built-in modules, because + some programs actually rely on them to be in sys.modules :-(""" + import exceptions + if 'zipimport' in sys.builtin_module_names: + import zipimport + + def main(): global ENABLE_USER_SITE + import_builtin_stuff() abs__file__() known_paths = removeduppaths() if (os.name == "posix" and sys.path and From noreply at buildbot.pypy.org Tue Mar 20 23:37:08 2012 From: noreply at buildbot.pypy.org (wlav) Date: Tue, 20 Mar 2012 23:37:08 +0100 (CET) Subject: [pypy-commit] pypy reflex-support: 
two loops for overload handling; one fast, one for collecting errors, if any Message-ID: <20120320223708.E58E68236A@wyvern.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r53842:b7b5987d12f7 Date: 2012-03-20 10:08 -0700 http://bitbucket.org/pypy/pypy/changeset/b7b5987d12f7/ Log: two loops for overload handling; one fast, one for collecting errors, if any diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -265,9 +265,26 @@ cppthis = capi.C_NULL_OBJECT assert lltype.typeOf(cppthis) == capi.C_OBJECT - space = self.space + # The following code tries out each of the functions in order. If + # argument conversion fails (or simply if the number of arguments do + # not match, that will lead to an exception, The JIT will snip out + # those (always) failing paths, but only if they have no side-effects. + # A second loop gathers all exceptions in the case all methods fail + # (the exception gathering would otherwise be a side-effect as far as + # the JIT is concerned). + # + # TODO: figure out what happens if a callback into from the C++ call + # raises a Python exception. + jit.promote(self) + for i in range(len(self.functions)): + cppyyfunc = self.functions[i] + try: + return cppyyfunc.call(cppthis, args_w) + except Exception: + pass + + # only get here if all overloads failed ... 
errmsg = 'None of the overloads matched:' - jit.promote(self) for i in range(len(self.functions)): cppyyfunc = self.functions[i] try: @@ -275,7 +292,7 @@ except Exception, e: errmsg += '\n\t'+str(e) - raise OperationError(space.w_TypeError, space.wrap(errmsg)) + raise OperationError(self.space.w_TypeError, self.space.wrap(errmsg)) def __repr__(self): return "W_CPPOverload(%s, %s)" % (self.func_name, self.functions) From noreply at buildbot.pypy.org Tue Mar 20 23:37:10 2012 From: noreply at buildbot.pypy.org (wlav) Date: Tue, 20 Mar 2012 23:37:10 +0100 (CET) Subject: [pypy-commit] pypy reflex-support: enable hsimple.py in its full glory (note that it is now no longer completely CPU-bound, so less useful as a true benchmark, but fun nevertheless) Message-ID: <20120320223710.9B5D28236A@wyvern.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r53843:54eff2de135e Date: 2012-03-20 14:34 -0700 http://bitbucket.org/pypy/pypy/changeset/54eff2de135e/ Log: enable hsimple.py in its full glory (note that it is now no longer completely CPU-bound, so less useful as a true benchmark, but fun nevertheless) diff --git a/pypy/module/cppyy/bench/hsimple.C b/pypy/module/cppyy/bench/hsimple.C old mode 100755 new mode 100644 --- a/pypy/module/cppyy/bench/hsimple.C +++ b/pypy/module/cppyy/bench/hsimple.C @@ -6,15 +6,12 @@ #include #include #include -#include +#include #include #include -#include - TFile *hsimple(Int_t get=0) { - gROOT->SetBatch(); // This program creates : // - a one dimensional histogram // - a two dimensional histogram @@ -27,14 +24,11 @@ // The file "hsimple.root" is created in $ROOTSYS/tutorials if the caller has // write access to this directory, otherwise the file is created in $PWD -/* TString filename = "hsimple.root"; TString dir = gSystem->UnixPathName(gInterpreter->GetCurrentMacroName()); dir.ReplaceAll("hsimple.C",""); dir.ReplaceAll("/./","/"); - TFile *hfile = 0; - if (get) { // if the argument get =1 return the file 
"hsimple.root" // if the file does not exist, it is created @@ -60,13 +54,10 @@ return 0; } hfile = (TFile*)gROOT->FindObject(filename); if (hfile) hfile->Close(); -*/ -// hfile = new TFile(filename,"RECREATE","Demo ROOT file with histograms"); + hfile = new TFile(filename,"RECREATE","Demo ROOT file with histograms"); // Create some histograms, a profile histogram and an ntuple TH1F *hpx = new TH1F("hpx","This is the px distribution",100,-4,4); - hpx->Print(); -/* hpx->SetFillColor(48); TH2F *hpxpy = new TH2F("hpxpy","py vs px",40,-4,4,40,-4,4); TProfile *hprof = new TProfile("hprof","Profile of pz versus px",100,-4,4,0,20); @@ -81,20 +72,21 @@ c1->GetFrame()->SetBorderSize(6); c1->GetFrame()->SetBorderMode(-1); -*/ + // Fill histograms randomly - gRandom->SetSeed(); - Float_t px, py, pt; + TRandom3 random; + Float_t px, py, pz; const Int_t kUPDATE = 1000; - for (Int_t i = 0; i < 2500000; i++) { - gRandom->Rannor(px,py); - pt = sqrt(px*px + py*py); - // Float_t random = gRandom->Rndm(1); - hpx->Fill(pt); -/* + for (Int_t i = 0; i < 50000; i++) { + // random.Rannor(px,py); + px = random.Gaus(0, 1); + py = random.Gaus(0, 1); + pz = px*px + py*py; + Float_t rnd = random.Rndm(1); + hpx->Fill(px); hpxpy->Fill(px,py); hprof->Fill(px,pz); - ntuple->Fill(px,py,pz,random,i); + ntuple->Fill(px,py,pz,rnd,i); if (i && (i%kUPDATE) == 0) { if (i == kUPDATE) hpx->Draw(); c1->Modified(); @@ -102,9 +94,7 @@ if (gSystem->ProcessEvents()) break; } -*/ } -/* gBenchmark->Show("hsimple"); // Save all objects in this file @@ -112,9 +102,7 @@ hfile->Write(); hpx->SetFillColor(48); c1->Modified(); -*/ - hpx->Print(); - return 0;//hfile; + return hfile; // Note that the file is automatically close when application terminates // or when the file destructor is called. 
diff --git a/pypy/module/cppyy/bench/hsimple.py b/pypy/module/cppyy/bench/hsimple.py --- a/pypy/module/cppyy/bench/hsimple.py +++ b/pypy/module/cppyy/bench/hsimple.py @@ -11,110 +11,87 @@ #*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-* try: - import warnings - warnings.simplefilter("ignore") + import cppyy, random - import cppyy, random - cppyy.load_reflection_info('bench02Dict_reflex.so') - - app = cppyy.gbl.Bench02RootApp() TCanvas = cppyy.gbl.TCanvas TFile = cppyy.gbl.TFile TProfile = cppyy.gbl.TProfile TNtuple = cppyy.gbl.TNtuple TH1F = cppyy.gbl.TH1F TH2F = cppyy.gbl.TH2F - TRandom = cppyy.gbl.TRandom + TRandom3 = cppyy.gbl.TRandom3 + + gROOT = cppyy.gbl.gROOT + gBenchmark = cppyy.gbl.TBenchmark() + gSystem = cppyy.gbl.gSystem + except ImportError: - from ROOT import TCanvas, TFile, TProfile, TNtuple, TH1F, TH2F, TRandom + from ROOT import TCanvas, TFile, TProfile, TNtuple, TH1F, TH2F, TRandom3 + from ROOT import gROOT, gBenchmark, gSystem import random -import math - -#gROOT = cppyy.gbl.gROOT -#gBenchmark = cppyy.gbl.gBenchmark -#gRandom = cppyy.gbl.gRandom -#gSystem = cppyy.gbl.gSystem - -#gROOT.Reset() - -# Create a new canvas, and customize it. -#c1 = TCanvas( 'c1', 'Dynamic Filling Example', 200, 10, 700, 500 ) -#c1.SetFillColor( 42 ) -#c1.GetFrame().SetFillColor( 21 ) -#c1.GetFrame().SetBorderSize( 6 ) -#c1.GetFrame().SetBorderMode( -1 ) - # Create a new ROOT binary machine independent file. # Note that this file may contain any kind of ROOT objects, histograms, # pictures, graphics objects, detector geometries, tracks, events, etc.. # This file is now becoming the current directory. 
-#hfile = gROOT.FindObject( 'hsimple.root' ) -#if hfile: -# hfile.Close() -#hfile = TFile( 'hsimple.root', 'RECREATE', 'Demo ROOT file with histograms' ) +hfile = gROOT.FindObject('hsimple.root') +if hfile: + hfile.Close() +hfile = TFile('hsimple.root', 'RECREATE', 'Demo ROOT file with histograms' ) # Create some histograms, a profile histogram and an ntuple hpx = TH1F('hpx', 'This is the px distribution', 100, -4, 4) -hpx.Print() -#hpxpy = TH2F( 'hpxpy', 'py vs px', 40, -4, 4, 40, -4, 4 ) -#hprof = TProfile( 'hprof', 'Profile of pz versus px', 100, -4, 4, 0, 20 ) -#ntuple = TNtuple( 'ntuple', 'Demo ntuple', 'px:py:pz:random:i' ) +hpx.SetFillColor(48) +hpxpy = TH2F('hpxpy', 'py vs px', 40, -4, 4, 40, -4, 4) +hprof = TProfile('hprof', 'Profile of pz versus px', 100, -4, 4, 0, 20) +ntuple = TNtuple('ntuple', 'Demo ntuple', 'px:py:pz:random:i') -# Set canvas/frame attributes. -#hpx.SetFillColor( 48 ) +gBenchmark.Start('hsimple') -#gBenchmark.Start( 'hsimple' ) - -# Initialize random number generator. -#gRandom.SetSeed() -#rannor, rndm = gRandom.Rannor, gRandom.Rndm - -random = TRandom() -random.SetSeed(0) +# Create a new canvas, and customize it. +c1 = TCanvas('c1', 'Dynamic Filling Example', 200, 10, 700, 500) +c1.SetFillColor(42) +c1.GetFrame().SetFillColor(21) +c1.GetFrame().SetBorderSize(6) +c1.GetFrame().SetBorderMode(-1) # Fill histograms randomly. -#px, py = Double(), Double() +random = TRandom3() kUPDATE = 1000 -for i in xrange(2500000): - # Generate random values. -# px, py = random.gauss(0, 1), random.gauss(0, 1) - px, py = random.Gaus(0, 1), random.Gaus(0, 1) -# pt = (px*px + py*py)**0.5 - pt = math.sqrt(px*px + py*py) -# pt = (px*px + py*py) -# random = rndm(1) +for i in xrange(50000): + # Generate random numbers +# px, py = random.gauss(0, 1), random.gauss(0, 1) + px, py = random.Gaus(0, 1), random.Gaus(0, 1) + pz = px*px + py*py +# rnd = random.random() + rnd = random.Rndm(1) - # Fill histograms. 
- hpx.Fill(pt) -# hpxpyFill( px, py ) -# hprofFill( px, pz ) -# ntupleFill( px, py, pz, random, i ) + # Fill histograms + hpx.Fill(px) + hpxpy.Fill(px, py) + hprof.Fill(px, pz) + ntuple.Fill(px, py, pz, rnd, i) - # Update display every kUPDATE events. -# if i and i%kUPDATE == 0: -# if i == kUPDATE: -# hpx.Draw() + # Update display every kUPDATE events + if i and i%kUPDATE == 0: + if i == kUPDATE: + hpx.Draw() -# c1.Modified() -# c1.Update() + c1.Modified() + c1.Update() -# if gSystem.ProcessEvents(): # allow user interrupt -# break + if gSystem.ProcessEvents(): # allow user interrupt + break -#gBenchmark.Show( 'hsimple' ) +gBenchmark.Show( 'hsimple' ) -hpx.Print() - -# Save all objects in this file. -#hpx.SetFillColor( 0 ) -#hfile.Write() -#hfile.Close() -#hpx.SetFillColor( 48 ) -#c1.Modified() -#c1.Update() -#c1.Draw() +# Save all objects in this file +hpx.SetFillColor(0) +hfile.Write() +hpx.SetFillColor(48) +c1.Modified() +c1.Update() # Note that the file is automatically closed when application terminates # or when the file destructor is called. diff --git a/pypy/module/cppyy/bench/hsimple_rflx.py b/pypy/module/cppyy/bench/hsimple_rflx.py new file mode 100755 --- /dev/null +++ b/pypy/module/cppyy/bench/hsimple_rflx.py @@ -0,0 +1,120 @@ +#*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-* +#*-* +#*-* This program creates : +#*-* - a one dimensional histogram +#*-* - a two dimensional histogram +#*-* - a profile histogram +#*-* - a memory-resident ntuple +#*-* +#*-* These objects are filled with some random numbers and saved on a file. 
+#*-* +#*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-* + +try: + import warnings + warnings.simplefilter("ignore") + + import cppyy, random + cppyy.load_reflection_info('bench02Dict_reflex.so') + + app = cppyy.gbl.Bench02RootApp() + TCanvas = cppyy.gbl.TCanvas + TFile = cppyy.gbl.TFile + TProfile = cppyy.gbl.TProfile + TNtuple = cppyy.gbl.TNtuple + TH1F = cppyy.gbl.TH1F + TH2F = cppyy.gbl.TH2F + TRandom = cppyy.gbl.TRandom +except ImportError: + from ROOT import TCanvas, TFile, TProfile, TNtuple, TH1F, TH2F, TRandom + import random + +import math + +#gROOT = cppyy.gbl.gROOT +#gBenchmark = cppyy.gbl.gBenchmark +#gRandom = cppyy.gbl.gRandom +#gSystem = cppyy.gbl.gSystem + +#gROOT.Reset() + +# Create a new canvas, and customize it. +#c1 = TCanvas( 'c1', 'Dynamic Filling Example', 200, 10, 700, 500 ) +#c1.SetFillColor( 42 ) +#c1.GetFrame().SetFillColor( 21 ) +#c1.GetFrame().SetBorderSize( 6 ) +#c1.GetFrame().SetBorderMode( -1 ) + +# Create a new ROOT binary machine independent file. +# Note that this file may contain any kind of ROOT objects, histograms, +# pictures, graphics objects, detector geometries, tracks, events, etc.. +# This file is now becoming the current directory. + +#hfile = gROOT.FindObject( 'hsimple.root' ) +#if hfile: +# hfile.Close() +#hfile = TFile( 'hsimple.root', 'RECREATE', 'Demo ROOT file with histograms' ) + +# Create some histograms, a profile histogram and an ntuple +hpx = TH1F('hpx', 'This is the px distribution', 100, -4, 4) +hpx.Print() +#hpxpy = TH2F( 'hpxpy', 'py vs px', 40, -4, 4, 40, -4, 4 ) +#hprof = TProfile( 'hprof', 'Profile of pz versus px', 100, -4, 4, 0, 20 ) +#ntuple = TNtuple( 'ntuple', 'Demo ntuple', 'px:py:pz:random:i' ) + +# Set canvas/frame attributes. +#hpx.SetFillColor( 48 ) + +#gBenchmark.Start( 'hsimple' ) + +# Initialize random number generator. +#gRandom.SetSeed() +#rannor, rndm = gRandom.Rannor, gRandom.Rndm + +random = TRandom() +random.SetSeed(0) + +# Fill histograms randomly. 
+#px, py = Double(), Double() +kUPDATE = 1000 +for i in xrange(2500000): + # Generate random values. +# px, py = random.gauss(0, 1), random.gauss(0, 1) + px, py = random.Gaus(0, 1), random.Gaus(0, 1) +# pt = (px*px + py*py)**0.5 + pt = math.sqrt(px*px + py*py) +# pt = (px*px + py*py) +# random = rndm(1) + + # Fill histograms. + hpx.Fill(pt) +# hpxpyFill( px, py ) +# hprofFill( px, pz ) +# ntupleFill( px, py, pz, random, i ) + + # Update display every kUPDATE events. +# if i and i%kUPDATE == 0: +# if i == kUPDATE: +# hpx.Draw() + +# c1.Modified() +# c1.Update() + +# if gSystem.ProcessEvents(): # allow user interrupt +# break + +#gBenchmark.Show( 'hsimple' ) + +hpx.Print() + +# Save all objects in this file. +#hpx.SetFillColor( 0 ) +#hfile.Write() +#hfile.Close() +#hpx.SetFillColor( 48 ) +#c1.Modified() +#c1.Update() +#c1.Draw() + +# Note that the file is automatically closed when application terminates +# or when the file destructor is called. From noreply at buildbot.pypy.org Tue Mar 20 23:37:11 2012 From: noreply at buildbot.pypy.org (wlav) Date: Tue, 20 Mar 2012 23:37:11 +0100 (CET) Subject: [pypy-commit] pypy reflex-support: simplification Message-ID: <20120320223711.CFE048236A@wyvern.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r53844:83c693bf6d3e Date: 2012-03-20 15:36 -0700 http://bitbucket.org/pypy/pypy/changeset/83c693bf6d3e/ Log: simplification diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py --- a/pypy/module/cppyy/capi/cint_capi.py +++ b/pypy/module/cppyy/capi/cint_capi.py @@ -28,34 +28,13 @@ _cintdll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) with rffi.scoped_str2charp('libCore.so') as ll_libname: _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) -with rffi.scoped_str2charp('libMathCore.so') as ll_libname: - _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) - -with 
rffi.scoped_str2charp('libRIO.so') as ll_libname: - _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) -with rffi.scoped_str2charp('libHist.so') as ll_libname: - _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) -with rffi.scoped_str2charp('libGraf.so') as ll_libname: - _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) -with rffi.scoped_str2charp('libGraf3d.so') as ll_libname: - _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) -with rffi.scoped_str2charp('libGpad.so') as ll_libname: - _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) -with rffi.scoped_str2charp('libTree.so') as ll_libname: - _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) -with rffi.scoped_str2charp('libMatrix.so') as ll_libname: - _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) -with rffi.scoped_str2charp('libNet.so') as ll_libname: - _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) -with rffi.scoped_str2charp('libThread.so') as ll_libname: - _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) eci = ExternalCompilationInfo( separate_module_files=[srcpath.join("cintcwrapper.cxx")], include_dirs=[incpath] + rootincpath, includes=["cintcwrapper.h"], library_dirs=rootlibpath, - link_extra=["-lMathCore", "-lCore", "-lCint"], + link_extra=["-lCore", "-lCint"], use_cpp_linker=True, ) diff --git a/pypy/module/cppyy/src/cintcwrapper.cxx b/pypy/module/cppyy/src/cintcwrapper.cxx --- a/pypy/module/cppyy/src/cintcwrapper.cxx +++ b/pypy/module/cppyy/src/cintcwrapper.cxx @@ -73,10 +73,16 @@ TCppyyApplication(const char* acn, Int_t* argc, char** argv, Bool_t do_load = kTRUE) : TApplication(acn, argc, argv) { + // Explicitly load libMathCore as CINT will not auto load it when using one + // of its globals. 
Once moved to Cling, which should work correctly, we + // can remove this statement. + gSystem->Load("libMathCore"); + if (do_load) { // follow TRint to minimize differences with CINT ProcessLine("#include ", kTRUE); ProcessLine("#include <_string>", kTRUE); // for std::string iostream. + ProcessLine("#include ", kTRUE);// Defined R__EXTERN ProcessLine("#include ", kTRUE); // needed because they're used within the ProcessLine("#include ", kTRUE); // core ROOT dicts and CINT won't be able // to properly unload these files From noreply at buildbot.pypy.org Wed Mar 21 00:02:45 2012 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 21 Mar 2012 00:02:45 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: mingw compatability Message-ID: <20120320230245.6FBFF8236A@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: win64-stage1 Changeset: r53845:53ed3ee420f2 Date: 2012-03-20 00:56 +0200 http://bitbucket.org/pypy/pypy/changeset/53ed3ee420f2/ Log: mingw compatability diff --git a/pypy/rlib/_rsocket_rffi.py b/pypy/rlib/_rsocket_rffi.py --- a/pypy/rlib/_rsocket_rffi.py +++ b/pypy/rlib/_rsocket_rffi.py @@ -58,12 +58,12 @@ header_lines = [ '#include ', '#include ', + '#include ', # winsock2 defines AF_UNIX, but not sockaddr_un '#undef AF_UNIX', ] if _MSVC: header_lines.extend([ - '#include ', # these types do not exist on microsoft compilers 'typedef int ssize_t;', 'typedef unsigned __int16 uint16_t;', @@ -71,6 +71,7 @@ ]) else: # MINGW includes = ('stdint.h',) + """ header_lines.extend([ '''\ #ifndef _WIN32_WINNT @@ -88,6 +89,7 @@ u_long keepaliveinterval; };''' ]) + """ HEADER = '\n'.join(header_lines) COND_HEADER = '' constants = {} From noreply at buildbot.pypy.org Wed Mar 21 00:02:46 2012 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 21 Mar 2012 00:02:46 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: merge Message-ID: <20120320230246.B71D78236A@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: win64-stage1 Changeset: r53846:350196b36b4c Date: 
2012-03-20 23:24 +0200 http://bitbucket.org/pypy/pypy/changeset/350196b36b4c/ Log: merge diff --git a/pypy/rlib/_rsocket_rffi.py b/pypy/rlib/_rsocket_rffi.py --- a/pypy/rlib/_rsocket_rffi.py +++ b/pypy/rlib/_rsocket_rffi.py @@ -58,12 +58,12 @@ header_lines = [ '#include ', '#include ', + '#include ', # winsock2 defines AF_UNIX, but not sockaddr_un '#undef AF_UNIX', ] if _MSVC: header_lines.extend([ - '#include ', # these types do not exist on microsoft compilers 'typedef int ssize_t;', 'typedef unsigned __int16 uint16_t;', @@ -71,6 +71,7 @@ ]) else: # MINGW includes = ('stdint.h',) + """ header_lines.extend([ '''\ #ifndef _WIN32_WINNT @@ -88,6 +89,7 @@ u_long keepaliveinterval; };''' ]) + """ HEADER = '\n'.join(header_lines) COND_HEADER = '' constants = {} From notifications-noreply at bitbucket.org Wed Mar 21 04:05:13 2012 From: notifications-noreply at bitbucket.org (Bitbucket) Date: Wed, 21 Mar 2012 03:05:13 -0000 Subject: [pypy-commit] Notification: pypy Message-ID: <20120321030513.17350.99958@bitbucket01.managed.contegix.com> You have received a notification from uiappstore. Hi, I forked pypy. My fork is at https://bitbucket.org/uiappstore/pypy. -- Disable notifications at https://bitbucket.org/account/notifications/ From noreply at buildbot.pypy.org Wed Mar 21 05:18:43 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 21 Mar 2012 05:18:43 +0100 (CET) Subject: [pypy-commit] pypy default: properly map the new llop to the right JVM method. Message-ID: <20120321041843.1520B8236A@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r53847:da48d764ccab Date: 2012-03-21 04:17 +0000 http://bitbucket.org/pypy/pypy/changeset/da48d764ccab/ Log: properly map the new llop to the right JVM method. 
diff --git a/pypy/translator/jvm/opcodes.py b/pypy/translator/jvm/opcodes.py --- a/pypy/translator/jvm/opcodes.py +++ b/pypy/translator/jvm/opcodes.py @@ -241,4 +241,6 @@ 'cast_ulonglong_to_float': jvm.PYPYULONGTODOUBLE, 'cast_primitive': [PushAllArgs, CastPrimitive, StoreResult], 'force_cast': [PushAllArgs, CastPrimitive, StoreResult], + + 'convert_float_bytes_to_longlong': jvm.PYPYDOUBLEBYTESTOLONG, }) diff --git a/pypy/translator/jvm/typesystem.py b/pypy/translator/jvm/typesystem.py --- a/pypy/translator/jvm/typesystem.py +++ b/pypy/translator/jvm/typesystem.py @@ -941,6 +941,7 @@ PYPYDOUBLETOULONG = Method.s(jPyPy, 'double_to_ulong', (jDouble,), jLong) PYPYULONGTODOUBLE = Method.s(jPyPy, 'ulong_to_double', (jLong,), jDouble) PYPYLONGBITWISENEGATE = Method.v(jPyPy, 'long_bitwise_negate', (jLong,), jLong) +PYPYDOUBLEBYTESTOLONG = Method.v(jPyPy, 'pypy__float2longlong', (jDouble,), jLong) PYPYSTRTOINT = Method.v(jPyPy, 'str_to_int', (jString,), jInt) PYPYSTRTOUINT = Method.v(jPyPy, 'str_to_uint', (jString,), jInt) PYPYSTRTOLONG = Method.v(jPyPy, 'str_to_long', (jString,), jLong) From noreply at buildbot.pypy.org Wed Mar 21 07:35:57 2012 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 21 Mar 2012 07:35:57 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: allow CC environment variable to specify compiler Message-ID: <20120321063557.CC63E82112@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: win64-stage1 Changeset: r53848:73a6f66d46a5 Date: 2012-03-21 08:35 +0200 http://bitbucket.org/pypy/pypy/changeset/73a6f66d46a5/ Log: allow CC environment variable to specify compiler diff --git a/pypy/translator/platform/windows.py b/pypy/translator/platform/windows.py --- a/pypy/translator/platform/windows.py +++ b/pypy/translator/platform/windows.py @@ -7,10 +7,12 @@ from pypy.translator.platform import log, _run_subprocess from pypy.translator.platform import Platform, posix -def Windows(cc=None): +def _get_compiler_type(cc, x64_flag): import subprocess if not 
cc: - return MsvcPlatform(cc=cc, x64=False) + cc = os.environ['CC'] + if not cc: + return MsvcPlatform(cc=cc, x64=x64_flag) elif cc.startswith('mingw'): return MingwPlatform(cc) try: @@ -20,18 +22,11 @@ return None return MingwPlatform(cc) +def Windows(cc=None): + return _get_compiler_type(cc, False) + def Windows_x64(cc=None): - import subprocess - if not cc: - return MsvcPlatform(cc=cc, x64=True) - elif cc.startswith('mingw'): - return MingwPlatform(cc) - try: - subprocess.check_output([cc, '--version']) - except: - log.error("Unknown cc option '%s'"%cc) - return None - return MingwPlatform(cc) + return _get_compiler_type(cc, True) def _get_msvc_env(vsver, x64flag): try: From pullrequests-noreply at bitbucket.org Wed Mar 21 08:16:58 2012 From: pullrequests-noreply at bitbucket.org (Michael Blume) Date: Wed, 21 Mar 2012 07:16:58 -0000 Subject: [pypy-commit] [pypy/pypy] add ndmin param to numpypy.array (pull request #64) In-Reply-To: <9d47b22e90a590324f6a58b11a33a0e7@bitbucket.org> References: <9d47b22e90a590324f6a58b11a33a0e7@bitbucket.org> Message-ID: <20120321071658.27598.25017@bitbucket15.managed.contegix.com> New comment on pull request: https://bitbucket.org/pypy/pypy/pull-request/64/add-ndmin-param-to-numpypyarray#comment-4173 Michael Blume (MichaelBlume) said: reopening of https://bitbucket.org/pypy/pypy/pull-request/63/add-ndmin-param-to-numpyarray -- This is a pull request comment notification from bitbucket.org. You are receiving this either because you are participating in a pull request, or you are following it. 
From noreply at buildbot.pypy.org Wed Mar 21 08:22:14 2012 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 21 Mar 2012 08:22:14 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: whoops Message-ID: <20120321072214.E8BBF82112@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: win64-stage1 Changeset: r53849:7e56ab425f21 Date: 2012-03-21 09:21 +0200 http://bitbucket.org/pypy/pypy/changeset/7e56ab425f21/ Log: whoops diff --git a/pypy/translator/platform/windows.py b/pypy/translator/platform/windows.py --- a/pypy/translator/platform/windows.py +++ b/pypy/translator/platform/windows.py @@ -10,7 +10,7 @@ def _get_compiler_type(cc, x64_flag): import subprocess if not cc: - cc = os.environ['CC'] + cc = os.environ.get('CC','') if not cc: return MsvcPlatform(cc=cc, x64=x64_flag) elif cc.startswith('mingw'): From noreply at buildbot.pypy.org Wed Mar 21 09:55:08 2012 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 21 Mar 2012 09:55:08 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: better error message, fix for mingw Message-ID: <20120321085508.3481482112@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: win64-stage1 Changeset: r53850:5e93d0d1d2c2 Date: 2012-03-21 10:54 +0200 http://bitbucket.org/pypy/pypy/changeset/5e93d0d1d2c2/ Log: better error message, fix for mingw diff --git a/pypy/rlib/rwin32.py b/pypy/rlib/rwin32.py --- a/pypy/rlib/rwin32.py +++ b/pypy/rlib/rwin32.py @@ -141,6 +141,10 @@ cfile = udir.join('dosmaperr.c') cfile.write(r''' #include + #include + #ifdef __GNUC__ + #define _dosmaperr mingw_dosmaperr + #endif int main() { int i; diff --git a/pypy/translator/platform/windows.py b/pypy/translator/platform/windows.py --- a/pypy/translator/platform/windows.py +++ b/pypy/translator/platform/windows.py @@ -18,8 +18,8 @@ try: subprocess.check_output([cc, '--version']) except: - log.error("Unknown cc option '%s'"%cc) - return None + raise ValueError,"Could not find compiler specified by cc option" + \ + " '%s', it must be a valid exe file 
on your path"%cc return MingwPlatform(cc) def Windows(cc=None): From noreply at buildbot.pypy.org Wed Mar 21 11:24:27 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 21 Mar 2012 11:24:27 +0100 (CET) Subject: [pypy-commit] pypy py3k: kill the buffer() builtin; the internal implementation is still used by memoryview. Also, refactor the buffer tests to use memoryview when possible. test_buffer passes with -A Message-ID: <20120321102427.D683F82112@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53851:6ba48a9e546e Date: 2012-03-21 10:54 +0100 http://bitbucket.org/pypy/pypy/changeset/6ba48a9e546e/ Log: kill the buffer() builtin; the internal implementation is still used by memoryview. Also, refactor the buffer tests to use memoryview when possible. test_buffer passes with -A diff --git a/pypy/module/__builtin__/__init__.py b/pypy/module/__builtin__/__init__.py --- a/pypy/module/__builtin__/__init__.py +++ b/pypy/module/__builtin__/__init__.py @@ -38,7 +38,6 @@ '__debug__' : '(space.w_True)', # XXX 'type' : '(space.w_type)', 'object' : '(space.w_object)', - 'buffer' : 'interp_memoryview.W_Buffer', 'memoryview' : 'interp_memoryview.W_MemoryView', 'open' : 'state.get(space).w_open', diff --git a/pypy/module/__builtin__/interp_memoryview.py b/pypy/module/__builtin__/interp_memoryview.py --- a/pypy/module/__builtin__/interp_memoryview.py +++ b/pypy/module/__builtin__/interp_memoryview.py @@ -8,9 +8,6 @@ from pypy.interpreter.error import OperationError import operator -W_Buffer = buffer.Buffer # actually implemented in pypy.interpreter.buffer - - class W_MemoryView(Wrappable): """Implement the built-in 'memoryview' type as a thin wrapper around an interp-level buffer. 
diff --git a/pypy/module/__builtin__/test/test_buffer.py b/pypy/module/__builtin__/test/test_buffer.py --- a/pypy/module/__builtin__/test/test_buffer.py +++ b/pypy/module/__builtin__/test/test_buffer.py @@ -3,188 +3,64 @@ import autopath from pypy.conftest import gettestobjspace -class AppTestBuffer: +class AppTestMemoryView: + spaceconfig = dict(usemodules=['array']) - def test_unicode_buffer(self): - import sys - b = buffer(u"ab") - if sys.maxunicode == 65535: # UCS2 build - assert len(b) == 4 - if sys.byteorder == "big": - assert b[0:4] == b"\x00a\x00b" - else: - assert b[0:4] == b"a\x00b\x00" - else: # UCS4 build - assert len(b) == 8 - if sys.byteorder == "big": - assert b[0:8] == b"\x00\x00\x00a\x00\x00\x00b" - else: - assert b[0:8] == b"a\x00\x00\x00b\x00\x00\x00" - - def test_array_buffer(self): - import array - b = buffer(array.array("B", [1, 2, 3])) - assert len(b) == 3 - assert b[0:3] == b"\x01\x02\x03" - - def test_nonzero(self): - assert buffer('\x00') - assert not buffer('') - import array - assert buffer(array.array("B", [0])) - assert not buffer(array.array("B", [])) - - def test_str(self): - assert str(buffer(b'hello')) == 'hello' - - def test_repr(self): - # from 2.5.2 lib tests - assert repr(buffer(b'hello')).startswith(' buffer(b'ab')) - assert buffer(b'ab') >= buffer(b'ab') - assert buffer(b'ab') != buffer(b'abc') - assert buffer(b'ab') < buffer(b'abc') - assert buffer(b'ab') <= buffer(b'ab') - assert buffer(b'ab') > buffer(b'aa') - assert buffer(b'ab') >= buffer(b'ab') - - def test_hash(self): - assert hash(buffer(b'hello')) == hash(b'hello') - - def test_mul(self): - assert buffer(b'ab') * 5 == b'ababababab' - assert buffer(b'ab') * (-2) == b'' - assert 5 * buffer(b'ab') == b'ababababab' - assert (-2) * buffer(b'ab') == b'' - - def test_offset_size(self): - b = buffer(b'hello world', 6) - assert len(b) == 5 - assert b[0] == b'w' - assert b[:] == b'world' - raises(IndexError, 'b[5]') - b = buffer(b, 2) - assert len(b) == 3 - assert b[0] == b'r' 
- assert b[:] == b'rld' - raises(IndexError, 'b[3]') - b = buffer(b'hello world', 1, 8) - assert len(b) == 8 - assert b[0] == b'e' - assert b[:] == b'ello wor' - raises(IndexError, 'b[8]') - b = buffer(b, 2, 3) - assert len(b) == 3 - assert b[2] == b' ' - assert b[:] == b'lo ' - raises(IndexError, 'b[3]') - b = buffer('hello world', 55) - assert len(b) == 0 - assert b[:] == b'' - b = buffer(b'hello world', 6, 999) - assert len(b) == 5 - assert b[:] == b'world' - - raises(ValueError, buffer, "abc", -1) - raises(ValueError, buffer, "abc", 0, -2) - - def test_rw_offset_size(self): - import array - - a = array.array("b", b'hello world') - b = buffer(a, 6) - assert len(b) == 5 - assert b[0] == b'w' - assert b[:] == b'world' - raises(IndexError, 'b[5]') - b[0] = b'W' - assert str(b) == b'World' - assert a.tostring() == b'hello World' - b[:] = b'12345' - assert a.tostring() == b'hello 12345' - raises(IndexError, 'b[5] = "."') - - b = buffer(b, 2) - assert len(b) == 3 - assert b[0] == '3' - assert b[:] == '345' - raises(IndexError, 'b[3]') - b[1] = 'X' - assert a.tostring() == 'hello 123X5' - raises(IndexError, 'b[3] = "."') - - a = array.array("c", 'hello world') - b = buffer(a, 1, 8) - assert len(b) == 8 - assert b[0] == 'e' - assert b[:] == 'ello wor' - raises(IndexError, 'b[8]') - b[0] = 'E' - assert str(b) == 'Ello wor' - assert a.tostring() == 'hEllo world' - b[:] = '12345678' - assert a.tostring() == 'h12345678ld' - raises(IndexError, 'b[8] = "."') - - b = buffer(b, 2, 3) - assert len(b) == 3 - assert b[2] == '5' - assert b[:] == '345' - raises(IndexError, 'b[3]') - b[1] = 'X' - assert a.tostring() == 'h123X5678ld' - raises(IndexError, 'b[3] = "."') - - b = buffer(a, 55) - assert len(b) == 0 - assert b[:] == '' - b = buffer(a, 6, 999) - assert len(b) == 5 - assert b[:] == '678ld' - - raises(ValueError, buffer, a, -1) - raises(ValueError, buffer, a, 0, -2) - - def test_slice(self): - # Test extended slicing by comparing with list slicing. 
- s = bytes(c for c in list(range(255, -1, -1))) - b = buffer(s) - indices = (0, None, 1, 3, 19, 300, -1, -2, -31, -300) - for start in indices: - for stop in indices: - # Skip step 0 (invalid) - for step in indices[1:]: - assert b[start:stop:step] == s[start:stop:step] - -class AppTestMemoryView: def test_basic(self): v = memoryview(b"abc") assert v.tobytes() == b"abc" assert len(v) == 3 - assert list(v) == ['a', 'b', 'c'] + assert list(v) == [b'a', b'b', b'c'] assert v.tolist() == [97, 98, 99] - assert v[1] == "b" - assert v[-1] == "c" + assert v[1] == b"b" + assert v[-1] == b"c" raises(TypeError, "v[1] = 'x'") assert v.readonly is True w = v[1:234] assert isinstance(w, memoryview) assert len(w) == 2 + def test_array_buffer(self): + import array + b = memoryview(array.array("B", [1, 2, 3])) + assert len(b) == 3 + assert b[0:3] == b"\x01\x02\x03" + + def test_nonzero(self): + assert memoryview(b'\x00') + assert not memoryview(b'') + import array + assert memoryview(array.array("B", [0])) + assert not memoryview(array.array("B", [])) + + def test_bytes(self): + assert bytes(memoryview(b'hello')) == b'hello' + + def test_repr(self): + assert repr(memoryview(b'hello')).startswith(' memoryview(b'ab')") + raises(TypeError, "memoryview(b'ab') >= memoryview(b'ab')") + raises(TypeError, "memoryview(b'ab') < memoryview(b'abc')") + raises(TypeError, "memoryview(b'ab') <= memoryview(b'ab')") + raises(TypeError, "memoryview(b'ab') > memoryview(b'aa')") + raises(TypeError, "memoryview(b'ab') >= memoryview(b'ab')") + + def test_hash(self): + raises(TypeError, "hash(memoryview(b'hello'))") + def test_rw(self): data = bytearray(b'abcefg') v = memoryview(data) @@ -206,11 +82,3 @@ def test_suboffsets(self): v = memoryview(b"a"*100) assert v.suboffsets == None - v = memoryview(buffer(b"a"*100, 2)) - assert v.shape == (98,) - assert v.suboffsets == None - - def test_compare(self): - assert memoryview(b"abc") == b"abc" - assert memoryview(b"abc") == bytearray(b"abc") - assert 
memoryview(b"abc") != 3 From noreply at buildbot.pypy.org Wed Mar 21 11:24:29 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 21 Mar 2012 11:24:29 +0100 (CET) Subject: [pypy-commit] pypy py3k: memoryview.__getitem__ must return bytes, not str Message-ID: <20120321102429.219BD82112@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53852:af2c40cbec35 Date: 2012-03-21 11:11 +0100 http://bitbucket.org/pypy/pypy/changeset/af2c40cbec35/ Log: memoryview.__getitem__ must return bytes, not str diff --git a/pypy/module/__builtin__/interp_memoryview.py b/pypy/module/__builtin__/interp_memoryview.py --- a/pypy/module/__builtin__/interp_memoryview.py +++ b/pypy/module/__builtin__/interp_memoryview.py @@ -81,10 +81,10 @@ def descr_getitem(self, space, w_index): start, stop, step = space.decode_index(w_index, self.getlength()) if step == 0: # index only - return space.wrap(self.buf.getitem(start)) + return space.wrapbytes(self.buf.getitem(start)) elif step == 1: res = self.getslice(start, stop) - return space.wrap(res) + return space.wrapbytes(res) else: raise OperationError(space.w_ValueError, space.wrap("memoryview object does not support" diff --git a/pypy/module/__builtin__/test/test_buffer.py b/pypy/module/__builtin__/test/test_buffer.py --- a/pypy/module/__builtin__/test/test_buffer.py +++ b/pypy/module/__builtin__/test/test_buffer.py @@ -11,6 +11,7 @@ v = memoryview(b"abc") assert v.tobytes() == b"abc" assert len(v) == 3 + assert v[0] == b'a' assert list(v) == [b'a', b'b', b'c'] assert v.tolist() == [97, 98, 99] assert v[1] == b"b" From noreply at buildbot.pypy.org Wed Mar 21 11:24:30 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 21 Mar 2012 11:24:30 +0100 (CET) Subject: [pypy-commit] pypy py3k: add a repr similar to cpython Message-ID: <20120321102430.58D1982112@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53853:1064b91af6cd Date: 2012-03-21 11:18 +0100 
http://bitbucket.org/pypy/pypy/changeset/1064b91af6cd/ Log: add a repr similar to cpython diff --git a/pypy/module/__builtin__/interp_memoryview.py b/pypy/module/__builtin__/interp_memoryview.py --- a/pypy/module/__builtin__/interp_memoryview.py +++ b/pypy/module/__builtin__/interp_memoryview.py @@ -118,6 +118,9 @@ # I've never seen anyone filling this field return space.w_None + def descr_repr(self, space): + return self.getrepr(space, 'memory') + def descr_new(space, w_subtype, w_object): memoryview = W_MemoryView(space.buffer(w_object)) @@ -139,6 +142,7 @@ __lt__ = interp2app(W_MemoryView.descr_lt), __ne__ = interp2app(W_MemoryView.descr_ne), __setitem__ = interp2app(W_MemoryView.descr_setitem), + __repr__ = interp2app(W_MemoryView.descr_repr), tobytes = interp2app(W_MemoryView.descr_tobytes), tolist = interp2app(W_MemoryView.descr_tolist), format = GetSetProperty(W_MemoryView.w_get_format), From noreply at buildbot.pypy.org Wed Mar 21 11:24:31 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 21 Mar 2012 11:24:31 +0100 (CET) Subject: [pypy-commit] pypy py3k: bah, of course when we do a slice we need to wrap() the resulting memoryview, not wrapbytes() it. This makes test_basic passing Message-ID: <20120321102431.B185782112@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53854:904bd04ac5b7 Date: 2012-03-21 11:22 +0100 http://bitbucket.org/pypy/pypy/changeset/904bd04ac5b7/ Log: bah, of course when we do a slice we need to wrap() the resulting memoryview, not wrapbytes() it. 
This makes test_basic passing diff --git a/pypy/module/__builtin__/interp_memoryview.py b/pypy/module/__builtin__/interp_memoryview.py --- a/pypy/module/__builtin__/interp_memoryview.py +++ b/pypy/module/__builtin__/interp_memoryview.py @@ -84,7 +84,7 @@ return space.wrapbytes(self.buf.getitem(start)) elif step == 1: res = self.getslice(start, stop) - return space.wrapbytes(res) + return space.wrap(res) else: raise OperationError(space.w_ValueError, space.wrap("memoryview object does not support" From noreply at buildbot.pypy.org Wed Mar 21 11:24:32 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 21 Mar 2012 11:24:32 +0100 (CET) Subject: [pypy-commit] pypy py3k: forbid comparisons between memoryviews. test_buffer now passes Message-ID: <20120321102432.EA0E582112@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53855:2aa0f995071b Date: 2012-03-21 11:24 +0100 http://bitbucket.org/pypy/pypy/changeset/2aa0f995071b/ Log: forbid comparisons between memoryviews. 
test_buffer now passes diff --git a/pypy/module/__builtin__/interp_memoryview.py b/pypy/module/__builtin__/interp_memoryview.py --- a/pypy/module/__builtin__/interp_memoryview.py +++ b/pypy/module/__builtin__/interp_memoryview.py @@ -41,10 +41,6 @@ descr_eq = _make_descr__cmp('eq') descr_ne = _make_descr__cmp('ne') - descr_lt = _make_descr__cmp('lt') - descr_le = _make_descr__cmp('le') - descr_gt = _make_descr__cmp('gt') - descr_ge = _make_descr__cmp('ge') def as_str(self): return self.buf.as_str() @@ -134,12 +130,8 @@ __new__ = interp2app(descr_new), __buffer__ = interp2app(W_MemoryView.descr_buffer), __eq__ = interp2app(W_MemoryView.descr_eq), - __ge__ = interp2app(W_MemoryView.descr_ge), __getitem__ = interp2app(W_MemoryView.descr_getitem), - __gt__ = interp2app(W_MemoryView.descr_gt), - __le__ = interp2app(W_MemoryView.descr_le), __len__ = interp2app(W_MemoryView.descr_len), - __lt__ = interp2app(W_MemoryView.descr_lt), __ne__ = interp2app(W_MemoryView.descr_ne), __setitem__ = interp2app(W_MemoryView.descr_setitem), __repr__ = interp2app(W_MemoryView.descr_repr), From noreply at buildbot.pypy.org Wed Mar 21 12:20:36 2012 From: noreply at buildbot.pypy.org (Aaron Iles) Date: Wed, 21 Mar 2012 12:20:36 +0100 (CET) Subject: [pypy-commit] pypy bytearray-refactor: Refactor capitalize() method into base class. Message-ID: <20120321112036.C69DC82112@wyvern.cs.uni-duesseldorf.de> Author: Aaron Iles Branch: bytearray-refactor Changeset: r53856:111c05553722 Date: 2012-03-21 21:32 +1100 http://bitbucket.org/pypy/pypy/changeset/111c05553722/ Log: Refactor capitalize() method into base class. 
diff --git a/pypy/objspace/std/abstractstring.py b/pypy/objspace/std/abstractstring.py --- a/pypy/objspace/std/abstractstring.py +++ b/pypy/objspace/std/abstractstring.py @@ -28,6 +28,9 @@ def istitle(w_self, space): return w_self._title(space) + def capitalize(w_self, space): + return w_self._capitalize(space) + def lower(w_self, space): return w_self._transform(space, w_self._lower) @@ -165,6 +168,20 @@ return space.newbool(cased) + def _capitalize(w_self, space): + sz = w_self.length(space) + it = w_self.iterator(space) + bd = w_self.builder(space, sz) + if sz > 0: + ch = it.nextchar() + ch = w_self._upper(ch) + bd.append(ch) + for i in range(1, sz): + ch = it.nextchar() + ch = w_self._lower(ch) + bd.append(ch) + return w_self.construct(space, bd.build()) + @specialize.arg(2) def _transform(w_self, space, func): sz = w_self.length(space) diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -332,6 +332,15 @@ def str_swapcase__Bytearray(space, w_self): return w_self.swapcase(space) +def str_capitalize__Bytearray(space, w_bytearray): + return w_self.capitalize(space) + +def str_capitalize__Bytearray(space, w_bytearray): + w_str = str__Bytearray(space, w_bytearray) + w_res = stringobject.str_capitalize__String(space, w_str) + return String2Bytearray(space, w_res) + + def str_count__Bytearray_Int_ANY_ANY(space, w_bytearray, w_char, w_start, w_stop): char = w_char.intval bytearray = w_bytearray.data @@ -490,11 +499,6 @@ w_res = stringobject.str_title__String(space, w_str) return String2Bytearray(space, w_res) -def str_capitalize__Bytearray(space, w_bytearray): - w_str = str__Bytearray(space, w_bytearray) - w_res = stringobject.str_capitalize__String(space, w_str) - return String2Bytearray(space, w_res) - def str_ljust__Bytearray_ANY_ANY(space, w_bytearray, w_width, w_fillchar): w_str = str__Bytearray(space, w_bytearray) w_res = 
stringobject.str_ljust__String_ANY_ANY(space, w_str, w_width, diff --git a/pypy/objspace/std/ropeobject.py b/pypy/objspace/std/ropeobject.py --- a/pypy/objspace/std/ropeobject.py +++ b/pypy/objspace/std/ropeobject.py @@ -135,30 +135,7 @@ return w_self.upper(space) def str_capitalize__Rope(space, w_self): - node = w_self._node - length = node.length() - buffer = [' '] * length - if length > 0: - iter = rope.ItemIterator(node) - ch = iter.nextchar() - if ch.islower(): - o = ord(ch) - 32 - buffer[0] = chr(o) - else: - buffer[0] = ch - - for i in range(1, length): - ch = iter.nextchar() - if ch.isupper(): - o = ord(ch) + 32 - buffer[i] = chr(o) - else: - buffer[i] = ch - else: - return W_RopeObject.EMPTY - - return W_RopeObject(rope.rope_from_charlist(buffer)) - + return w_self.capitalize(space) def str_title__Rope(space, w_self): node = w_self._node diff --git a/pypy/objspace/std/ropeunicodeobject.py b/pypy/objspace/std/ropeunicodeobject.py --- a/pypy/objspace/std/ropeunicodeobject.py +++ b/pypy/objspace/std/ropeunicodeobject.py @@ -91,6 +91,13 @@ return rope.rope_from_unicharlist(self.data) +class UnicodeIterator(rope.ItemIterator): + """"Iterate over unicode characters from rope iterator""" + + def nextchar(self): + return self.nextunichar() + + class W_RopeUnicodeObject(unicodeobject.W_AbstractUnicodeObject): from pypy.objspace.std.unicodetype import unicode_typedef as typedef _immutable_fields_ = ['_node'] @@ -105,7 +112,7 @@ return W_RopeUnicodeObject(data) def iterator(w_self, space): - return rope.ItemIterator(w_self._node) + return UnicodeIterator(w_self._node) def length(w_self, space): return w_self._node.length() @@ -395,18 +402,6 @@ return space.call_method(w_self, 'rstrip', unicode_from_string(space, w_chars)) -def unicode_capitalize__RopeUnicode(space, w_self): - input = w_self._node - length = input.length() - if length == 0: - return w_self - result = [u'\0'] * length - iter = rope.ItemIterator(input) - result[0] = 
unichr(unicodedb.toupper(iter.nextint())) - for i in range(1, length): - result[i] = unichr(unicodedb.tolower(iter.nextint())) - return W_RopeUnicodeObject(rope.rope_from_unicharlist(result)) - def unicode_title__RopeUnicode(space, w_self): input = w_self._node length = input.length() @@ -434,6 +429,9 @@ def unicode_swapcase__RopeUnicode(space, w_self): return w_self.swapcase(space) +def unicode_capitalize__RopeUnicode(space, w_self): + return w_self.capitalize(space) + def _convert_idx_params(space, w_self, w_start, w_end): self = w_self._node length = w_self._node.length() diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -154,25 +154,7 @@ return w_self.upper(space) def str_capitalize__String(space, w_self): - input = w_self._value - builder = StringBuilder(len(input)) - if len(input) > 0: - ch = input[0] - if ch.islower(): - o = ord(ch) - 32 - builder.append(chr(o)) - else: - builder.append(ch) - - for i in range(1, len(input)): - ch = input[i] - if ch.isupper(): - o = ord(ch) + 32 - builder.append(chr(o)) - else: - builder.append(ch) - - return space.wrap(builder.build()) + return w_self.capitalize(space) def str_title__String(space, w_self): input = w_self._value diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -437,16 +437,6 @@ unicode_rstrip__Unicode_Rope = unicode_rstrip__Unicode_String -def unicode_capitalize__Unicode(space, w_self): - input = w_self._value - if len(input) == 0: - return W_UnicodeObject.EMPTY - builder = UnicodeBuilder(len(input)) - builder.append(unichr(unicodedb.toupper(ord(input[0])))) - for i in range(1, len(input)): - builder.append(unichr(unicodedb.tolower(ord(input[i])))) - return W_UnicodeObject(builder.build()) - def unicode_title__Unicode(space, w_self): input = w_self._value if len(input) == 0: 
@@ -463,6 +453,9 @@ previous_is_cased = unicodedb.iscased(unichar) return W_UnicodeObject(builder.build()) +def unicode_capitalize__Unicode(space, w_self): + return w_self.capitalize(space) + def unicode_lower__Unicode(space, w_self): return w_self.lower(space) From noreply at buildbot.pypy.org Wed Mar 21 12:27:33 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 21 Mar 2012 12:27:33 +0100 (CET) Subject: [pypy-commit] pypy py3k: we no longer have longs Message-ID: <20120321112733.1079382112@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53857:5564b4aebac9 Date: 2012-03-21 12:03 +0100 http://bitbucket.org/pypy/pypy/changeset/5564b4aebac9/ Log: we no longer have longs diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py --- a/pypy/objspace/std/test/test_obj.py +++ b/pypy/objspace/std/test/test_obj.py @@ -173,14 +173,13 @@ if self.cpython_apptest: skip("cpython behaves differently") assert id(1) == (1 << 3) + 1 - assert id(1l) == (1 << 3) + 3 class myint(int): pass assert id(myint(1)) != id(1) assert id(1.0) & 7 == 5 assert id(-0.0) != id(0.0) - assert hex(id(2.0)) == '0x20000000000000005L' + assert hex(id(2.0)) == '0x20000000000000005' assert id(0.0) == 5 def test_id_on_strs(self): From noreply at buildbot.pypy.org Wed Mar 21 12:27:34 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 21 Mar 2012 12:27:34 +0100 (CET) Subject: [pypy-commit] pypy py3k: adapt unicode-->str and str-->bytes Message-ID: <20120321112734.A90C882112@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53858:5cb184143a13 Date: 2012-03-21 12:09 +0100 http://bitbucket.org/pypy/pypy/changeset/5cb184143a13/ Log: adapt unicode-->str and str-->bytes diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py --- a/pypy/objspace/std/test/test_obj.py +++ b/pypy/objspace/std/test/test_obj.py @@ -19,9 +19,9 @@ def w_unwrap_wrap_unicode(space, w_obj): return 
space.wrap(space.unicode_w(w_obj)) cls.w_unwrap_wrap_unicode = space.wrap(gateway.interp2app(w_unwrap_wrap_unicode)) - def w_unwrap_wrap_str(space, w_obj): - return space.wrap(space.str_w(w_obj)) - cls.w_unwrap_wrap_str = space.wrap(gateway.interp2app(w_unwrap_wrap_str)) + def w_unwrap_wrap_bytes(space, w_obj): + return space.wrapbytes(space.bytes_w(w_obj)) + cls.w_unwrap_wrap_bytes = space.wrap(gateway.interp2app(w_unwrap_wrap_bytes)) def test_hash_builtin(self): if not self.cpython_behavior: @@ -142,10 +142,10 @@ skip("cannot run this test as apptest") l = ["a"] assert l[0] is l[0] - u = u"a" + u = "a" assert self.unwrap_wrap_unicode(u) is u - s = "a" - assert self.unwrap_wrap_str(s) is s + s = b"a" + assert self.unwrap_wrap_bytes(s) is s def test_is_on_subclasses(self): for typ in [int, long, float, complex, str, unicode]: @@ -185,10 +185,10 @@ def test_id_on_strs(self): if self.appdirect: skip("cannot run this test as apptest") - u = u"a" + u = "a" assert id(self.unwrap_wrap_unicode(u)) == id(u) - s = "a" - assert id(self.unwrap_wrap_str(s)) == id(s) + s = b"a" + assert id(self.unwrap_wrap_bytes(s)) == id(s) def test_identity_vs_id_primitives(self): if self.cpython_apptest: @@ -226,19 +226,19 @@ if self.appdirect: skip("cannot run this test as apptest") import sys - l = range(-10, 10) + l = list(range(-10, 10)) for i in range(10): - s = str(i) + s = bytes(i) l.append(s) - l.append(self.unwrap_wrap_str(s)) - u = unicode(s) + l.append(self.unwrap_wrap_bytes(s)) + u = str(s) l.append(u) l.append(self.unwrap_wrap_unicode(u)) + s = b"s" + l.append(s) + l.append(self.unwrap_wrap_bytes(s)) s = "s" l.append(s) - l.append(self.unwrap_wrap_str(s)) - s = u"s" - l.append(s) l.append(self.unwrap_wrap_unicode(s)) for i, a in enumerate(l): From noreply at buildbot.pypy.org Wed Mar 21 12:27:35 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 21 Mar 2012 12:27:35 +0100 (CET) Subject: [pypy-commit] pypy py3k: explicitly make a list Message-ID: 
<20120321112735.EAB3582112@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53859:92a08fea88b8 Date: 2012-03-21 12:10 +0100 http://bitbucket.org/pypy/pypy/changeset/92a08fea88b8/ Log: explicitly make a list diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py --- a/pypy/objspace/std/test/test_obj.py +++ b/pypy/objspace/std/test/test_obj.py @@ -37,7 +37,7 @@ assert hash(o) == o.__hash__() def test_hash_list(self): - l = range(5) + l = list(range(5)) raises(TypeError, hash, l) def test_no_getnewargs(self): From noreply at buildbot.pypy.org Wed Mar 21 12:27:37 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 21 Mar 2012 12:27:37 +0100 (CET) Subject: [pypy-commit] pypy py3k: format() now works only on str, kill the old __unicode__ stuff Message-ID: <20120321112737.3D1C182112@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53860:297ffe6976a1 Date: 2012-03-21 12:12 +0100 http://bitbucket.org/pypy/pypy/changeset/297ffe6976a1/ Log: format() now works only on str, kill the old __unicode__ stuff diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py --- a/pypy/objspace/std/test/test_obj.py +++ b/pypy/objspace/std/test/test_obj.py @@ -64,18 +64,9 @@ class x(object): def __str__(self): return "Pickle" - def __unicode__(self): - return u"Cheese" res = format(x()) assert res == "Pickle" assert isinstance(res, str) - res = format(x(), u"") - assert res == u"Cheese" - assert isinstance(res, unicode) - del x.__unicode__ - res = format(x(), u"") - assert res == u"Pickle" - assert isinstance(res, unicode) def test_subclasshook(self): class x(object): From noreply at buildbot.pypy.org Wed Mar 21 12:27:38 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 21 Mar 2012 12:27:38 +0100 (CET) Subject: [pypy-commit] pypy py3k: kill longs Message-ID: <20120321112738.7DBF982112@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: 
r53861:fd7de37d755c Date: 2012-03-21 12:14 +0100 http://bitbucket.org/pypy/pypy/changeset/fd7de37d755c/ Log: kill longs diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py --- a/pypy/objspace/std/test/test_obj.py +++ b/pypy/objspace/std/test/test_obj.py @@ -114,14 +114,12 @@ x = 1000000 assert x + 1 is int(str(x + 1)) assert 1 is not 1.0 - assert 1 is not 1l - assert 1l is not 1.0 assert 1.1 is 1.1 assert 0.0 is not -0.0 for x in range(10): assert x + 0.1 is x + 0.1 for x in range(10): - assert x + 1L is x + 1L + assert x + 1 is x + 1 for x in range(10): assert x+1j is x+1j assert 1+x*1j is 1+x*1j From noreply at buildbot.pypy.org Wed Mar 21 12:27:39 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 21 Mar 2012 12:27:39 +0100 (CET) Subject: [pypy-commit] pypy py3k: kill long and unicode Message-ID: <20120321112739.C10DB82112@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53862:6ba6cbf1a740 Date: 2012-03-21 12:16 +0100 http://bitbucket.org/pypy/pypy/changeset/6ba6cbf1a740/ Log: kill long and unicode diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py --- a/pypy/objspace/std/test/test_obj.py +++ b/pypy/objspace/std/test/test_obj.py @@ -137,10 +137,10 @@ assert self.unwrap_wrap_bytes(s) is s def test_is_on_subclasses(self): - for typ in [int, long, float, complex, str, unicode]: + for typ in [int, float, complex, str]: class mytyp(typ): pass - if not self.cpython_apptest and typ not in (str, unicode): + if not self.cpython_apptest and typ is not str: assert typ(42) is typ(42) assert mytyp(42) is not mytyp(42) assert mytyp(42) is not typ(42) From noreply at buildbot.pypy.org Wed Mar 21 12:27:41 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 21 Mar 2012 12:27:41 +0100 (CET) Subject: [pypy-commit] pypy py3k: kill longs, unicode, range() --> list(range()) Message-ID: <20120321112741.0B62B82112@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: 
py3k Changeset: r53863:03caac737b17 Date: 2012-03-21 12:19 +0100 http://bitbucket.org/pypy/pypy/changeset/03caac737b17/ Log: kill longs, unicode, range() --> list(range()) diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py --- a/pypy/objspace/std/test/test_obj.py +++ b/pypy/objspace/std/test/test_obj.py @@ -183,18 +183,17 @@ if self.cpython_apptest: skip("cpython behaves differently") import sys - l = range(-10, 10) + l = list(range(-10, 10)) for i in range(10): l.append(float(i)) l.append(i + 0.1) - l.append(long(i)) l.append(i + sys.maxint) l.append(i - sys.maxint) l.append(i + 1j) l.append(1 + i * 1j) s = str(i) l.append(s) - u = unicode(s) + u = bytes(s, 'ascii') l.append(u) l.append(-0.0) l.append(None) @@ -202,7 +201,7 @@ l.append(False) s = "s" l.append(s) - s = u"s" + s = b"s" l.append(s) for i, a in enumerate(l): @@ -237,7 +236,7 @@ assert a == b def test_identity_bug(self): - x = 0x4000000000000000L + x = 0x4000000000000000 y = 2j assert id(x) != id(y) From noreply at buildbot.pypy.org Wed Mar 21 12:27:42 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 21 Mar 2012 12:27:42 +0100 (CET) Subject: [pypy-commit] pypy py3k: fix this test and add a comment Message-ID: <20120321112742.4990082112@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53864:2186005942d0 Date: 2012-03-21 12:26 +0100 http://bitbucket.org/pypy/pypy/changeset/2186005942d0/ Log: fix this test and add a comment diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py --- a/pypy/objspace/std/test/test_obj.py +++ b/pypy/objspace/std/test/test_obj.py @@ -252,4 +252,8 @@ space = objspace.StdObjSpace() w_a = space.wrap("a") space.type = None - space.isinstance_w(w_a, space.w_str) # does not crash + # if it crashes, it means that space._type_isinstance didn't go through + # the fast path, and tries to call type() (which is set to None just + # above) + space.isinstance_w(w_a, space.w_unicode) # 
does not crash + From noreply at buildbot.pypy.org Wed Mar 21 12:27:43 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 21 Mar 2012 12:27:43 +0100 (CET) Subject: [pypy-commit] pypy default: add a comment to explain this test Message-ID: <20120321112743.82E1D82112@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r53865:b991bfef2fcd Date: 2012-03-21 12:26 +0100 http://bitbucket.org/pypy/pypy/changeset/b991bfef2fcd/ Log: add a comment to explain this test diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py --- a/pypy/objspace/std/test/test_obj.py +++ b/pypy/objspace/std/test/test_obj.py @@ -265,4 +265,7 @@ space = objspace.StdObjSpace() w_a = space.wrap("a") space.type = None + # if it crashes, it means that space._type_isinstance didn't go through + # the fast path, and tries to call type() (which is set to None just + # above) space.isinstance_w(w_a, space.w_str) # does not crash From noreply at buildbot.pypy.org Wed Mar 21 12:48:55 2012 From: noreply at buildbot.pypy.org (aliles) Date: Wed, 21 Mar 2012 12:48:55 +0100 (CET) Subject: [pypy-commit] pypy bytearray-refactor: Refactor out title() method into base class. Message-ID: <20120321114855.9BCC182112@wyvern.cs.uni-duesseldorf.de> Author: Aaron Iles Branch: bytearray-refactor Changeset: r53866:da00b35c920c Date: 2012-03-21 22:46 +1100 http://bitbucket.org/pypy/pypy/changeset/da00b35c920c/ Log: Refactor out title() method into base class. 
diff --git a/pypy/objspace/std/abstractstring.py b/pypy/objspace/std/abstractstring.py --- a/pypy/objspace/std/abstractstring.py +++ b/pypy/objspace/std/abstractstring.py @@ -26,7 +26,7 @@ w_self._isupper, w_self._islower) def istitle(w_self, space): - return w_self._title(space) + return w_self._istitle(space) def capitalize(w_self, space): return w_self._capitalize(space) @@ -37,6 +37,9 @@ def swapcase(w_self, space): return w_self._transform(space, w_self._swapcase) + def title(w_self, space): + return w_self._title(space) + def upper(w_self, space): return w_self._transform(space, w_self._upper) @@ -147,7 +150,7 @@ status = True return space.newbool(status) - def _title(w_self, space): + def _istitle(w_self, space): input = w_self.unwrap(space) cased = False previous_is_cased = False @@ -182,6 +185,24 @@ bd.append(ch) return w_self.construct(space, bd.build()) + def _title(w_self, space): + sz = w_self.length(space) + if sz == 0: + return w_self + it = w_self.iterator(space) + bd = w_self.builder(space, sz) + pv = ' ' + for i in range(sz): + ch = it.nextchar() + if not w_self._isalpha(pv): + ch = w_self._upper(ch) + bd.append(ch) + else: + ch = w_self._lower(ch) + bd.append(ch) + pv = ch + return w_self.construct(space, bd.build()) + @specialize.arg(2) def _transform(w_self, space, func): sz = w_self.length(space) diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -332,14 +332,11 @@ def str_swapcase__Bytearray(space, w_self): return w_self.swapcase(space) -def str_capitalize__Bytearray(space, w_bytearray): +def str_capitalize__Bytearray(space, w_self): return w_self.capitalize(space) -def str_capitalize__Bytearray(space, w_bytearray): - w_str = str__Bytearray(space, w_bytearray) - w_res = stringobject.str_capitalize__String(space, w_str) - return String2Bytearray(space, w_res) - +def str_title__Bytearray(space, w_self): + return 
w_self.title(space) def str_count__Bytearray_Int_ANY_ANY(space, w_bytearray, w_char, w_start, w_stop): char = w_char.intval @@ -494,11 +491,6 @@ w_str2, w_max) return String2Bytearray(space, w_res) -def str_title__Bytearray(space, w_bytearray): - w_str = str__Bytearray(space, w_bytearray) - w_res = stringobject.str_title__String(space, w_str) - return String2Bytearray(space, w_res) - def str_ljust__Bytearray_ANY_ANY(space, w_bytearray, w_width, w_fillchar): w_str = str__Bytearray(space, w_bytearray) w_res = stringobject.str_ljust__String_ANY_ANY(space, w_str, w_width, diff --git a/pypy/objspace/std/ropeobject.py b/pypy/objspace/std/ropeobject.py --- a/pypy/objspace/std/ropeobject.py +++ b/pypy/objspace/std/ropeobject.py @@ -138,22 +138,7 @@ return w_self.capitalize(space) def str_title__Rope(space, w_self): - node = w_self._node - length = node.length() - buffer = [' '] * length - prev_letter = ' ' - - iter = rope.ItemIterator(node) - for pos in range(0, length): - ch = iter.nextchar() - if not prev_letter.isalpha(): - buffer[pos] = w_self._upper(ch) - else: - buffer[pos] = w_self._lower(ch) - - prev_letter = buffer[pos] - - return W_RopeObject(rope.rope_from_charlist(buffer)) + return w_self.title(space) def str_split__Rope_None_ANY(space, w_self, w_none, w_maxsplit=-1): selfnode = w_self._node diff --git a/pypy/objspace/std/ropeunicodeobject.py b/pypy/objspace/std/ropeunicodeobject.py --- a/pypy/objspace/std/ropeunicodeobject.py +++ b/pypy/objspace/std/ropeunicodeobject.py @@ -403,22 +403,7 @@ unicode_from_string(space, w_chars)) def unicode_title__RopeUnicode(space, w_self): - input = w_self._node - length = input.length() - if length == 0: - return w_self - result = [u'\0'] * length - iter = rope.ItemIterator(input) - - previous_is_cased = False - for i in range(input.length()): - unichar = iter.nextint() - if previous_is_cased: - result[i] = unichr(unicodedb.tolower(unichar)) - else: - result[i] = unichr(unicodedb.totitle(unichar)) - previous_is_cased = 
unicodedb.iscased(unichar) - return W_RopeUnicodeObject(rope.rope_from_unicharlist(result)) + return w_self.title(space) def unicode_lower__RopeUnicode(space, w_self): return w_self.lower(space) diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -128,20 +128,12 @@ return w_self.isspace(space) def str_islower__String(space, w_self): - """Return True if all cased characters in S are lowercase and there is -at least one cased character in S, False otherwise.""" return w_self.islower(space) def str_isupper__String(space, w_self): - """Return True if all cased characters in S are uppercase and there is -at least one cased character in S, False otherwise.""" return w_self.isupper(space) def str_istitle__String(space, w_self): - """Return True if S is a titlecased string and there is at least one -character in S, i.e. uppercase characters may only follow uncased -characters and lowercase characters only cased ones. 
Return False -otherwise.""" return w_self.istitle(space) def str_lower__String(space, w_self): @@ -157,22 +149,7 @@ return w_self.capitalize(space) def str_title__String(space, w_self): - input = w_self._value - builder = StringBuilder(len(input)) - prev_letter=' ' - - for pos in range(len(input)): - ch = input[pos] - if not prev_letter.isalpha(): - ch = w_self._upper(ch) - builder.append(ch) - else: - ch = w_self._lower(ch) - builder.append(ch) - - prev_letter = ch - - return space.wrap(builder.build()) + return w_self.title(space) def str_split__String_None_ANY(space, w_self, w_none, w_maxsplit=-1): maxsplit = space.int_w(w_maxsplit) diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -438,20 +438,7 @@ unicode_rstrip__Unicode_Rope = unicode_rstrip__Unicode_String def unicode_title__Unicode(space, w_self): - input = w_self._value - if len(input) == 0: - return w_self - builder = UnicodeBuilder(len(input)) - - previous_is_cased = False - for i in range(len(input)): - unichar = ord(input[i]) - if previous_is_cased: - builder.append(unichr(unicodedb.tolower(unichar))) - else: - builder.append(unichr(unicodedb.totitle(unichar))) - previous_is_cased = unicodedb.iscased(unichar) - return W_UnicodeObject(builder.build()) + return w_self.title(space) def unicode_capitalize__Unicode(space, w_self): return w_self.capitalize(space) From noreply at buildbot.pypy.org Wed Mar 21 12:48:56 2012 From: noreply at buildbot.pypy.org (aliles) Date: Wed, 21 Mar 2012 12:48:56 +0100 (CET) Subject: [pypy-commit] pypy bytearray-refactor: Add missing _mixin_ attribute to mixin classes. Message-ID: <20120321114856.DE11182112@wyvern.cs.uni-duesseldorf.de> Author: Aaron Iles Branch: bytearray-refactor Changeset: r53867:a3cfcb5962d7 Date: 2012-03-21 22:47 +1100 http://bitbucket.org/pypy/pypy/changeset/a3cfcb5962d7/ Log: Add missing _mixin_ attribute to mixin classes. 
diff --git a/pypy/objspace/std/abstractstring.py b/pypy/objspace/std/abstractstring.py --- a/pypy/objspace/std/abstractstring.py +++ b/pypy/objspace/std/abstractstring.py @@ -5,6 +5,8 @@ class Mixin_BaseStringMethods(object): __slots__ = () + _mixin_ = True + def isalnum(w_self, space): return w_self._all_true(space, w_self._isalnum) From noreply at buildbot.pypy.org Wed Mar 21 14:31:10 2012 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 21 Mar 2012 14:31:10 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20120321133110.893758236A@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r53869:65f628f558ca Date: 2012-03-21 14:30 +0100 http://bitbucket.org/pypy/pypy/changeset/65f628f558ca/ Log: merge heads diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py --- a/pypy/objspace/std/test/test_obj.py +++ b/pypy/objspace/std/test/test_obj.py @@ -265,4 +265,7 @@ space = objspace.StdObjSpace() w_a = space.wrap("a") space.type = None + # if it crashes, it means that space._type_isinstance didn't go through + # the fast path, and tries to call type() (which is set to None just + # above) space.isinstance_w(w_a, space.w_str) # does not crash diff --git a/pypy/translator/jvm/opcodes.py b/pypy/translator/jvm/opcodes.py --- a/pypy/translator/jvm/opcodes.py +++ b/pypy/translator/jvm/opcodes.py @@ -241,4 +241,6 @@ 'cast_ulonglong_to_float': jvm.PYPYULONGTODOUBLE, 'cast_primitive': [PushAllArgs, CastPrimitive, StoreResult], 'force_cast': [PushAllArgs, CastPrimitive, StoreResult], + + 'convert_float_bytes_to_longlong': jvm.PYPYDOUBLEBYTESTOLONG, }) diff --git a/pypy/translator/jvm/typesystem.py b/pypy/translator/jvm/typesystem.py --- a/pypy/translator/jvm/typesystem.py +++ b/pypy/translator/jvm/typesystem.py @@ -941,6 +941,7 @@ PYPYDOUBLETOULONG = Method.s(jPyPy, 'double_to_ulong', (jDouble,), jLong) PYPYULONGTODOUBLE = Method.s(jPyPy, 'ulong_to_double', (jLong,), jDouble) PYPYLONGBITWISENEGATE = 
Method.v(jPyPy, 'long_bitwise_negate', (jLong,), jLong) +PYPYDOUBLEBYTESTOLONG = Method.v(jPyPy, 'pypy__float2longlong', (jDouble,), jLong) PYPYSTRTOINT = Method.v(jPyPy, 'str_to_int', (jString,), jInt) PYPYSTRTOUINT = Method.v(jPyPy, 'str_to_uint', (jString,), jInt) PYPYSTRTOLONG = Method.v(jPyPy, 'str_to_long', (jString,), jLong) From noreply at buildbot.pypy.org Wed Mar 21 14:31:09 2012 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 21 Mar 2012 14:31:09 +0100 (CET) Subject: [pypy-commit] pypy default: Test for 1c6dc3e6e70c. Message-ID: <20120321133109.3E0EC82112@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r53868:da21b53693bd Date: 2012-03-20 21:24 +0100 http://bitbucket.org/pypy/pypy/changeset/da21b53693bd/ Log: Test for 1c6dc3e6e70c. diff --git a/lib_pypy/pypy_test/test_site_extra.py b/lib_pypy/pypy_test/test_site_extra.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pypy_test/test_site_extra.py @@ -0,0 +1,13 @@ +import sys, os + + +def test_preimported_modules(): + lst = ['__builtin__', '_codecs', '_warnings', 'codecs', 'encodings', + 'exceptions', 'signal', 'sys', 'zipimport'] + g = os.popen("'%s' -c 'import sys; print sorted(sys.modules)'" % + (sys.executable,)) + real_data = g.read() + g.close() + for name in lst: + quoted_name = repr(name) + assert quoted_name in real_data From noreply at buildbot.pypy.org Wed Mar 21 16:09:32 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 21 Mar 2012 16:09:32 +0100 (CET) Subject: [pypy-commit] pypy py3k: kill these two tests about longs: they are no longer there Message-ID: <20120321150932.A369D82112@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53870:2482ed39dc1d Date: 2012-03-21 12:28 +0100 http://bitbucket.org/pypy/pypy/changeset/2482ed39dc1d/ Log: kill these two tests about longs: they are no longer there diff --git a/pypy/objspace/std/test/test_operation.py b/pypy/objspace/std/test/test_operation.py --- 
a/pypy/objspace/std/test/test_operation.py +++ b/pypy/objspace/std/test/test_operation.py @@ -1,61 +1,3 @@ - - -def app_test_int_vs_long(): - def teq(a, b): - assert a == b - assert type(a) is type(b) - - # binary operators - teq( 5 - 2 , 3 ) - teq( 5 - 2L , 3L ) - teq( 5L - 2 , 3L ) - teq( 5L - 2L , 3L ) - - teq( 5 .__sub__(2 ), 3 ) - teq( 5 .__sub__(2L), NotImplemented ) - teq( 5L .__sub__(2 ), 3L ) - teq( 5L .__sub__(2L), 3L ) - - teq( 5 .__rsub__(2 ), -3 ) - teq( 5 .__rsub__(2L), NotImplemented ) - teq( 5L .__rsub__(2 ), -3L ) - teq( 5L .__rsub__(2L), -3L ) - - teq( 5 ** 2 , 25 ) - teq( 5 ** 2L , 25L ) - teq( 5L ** 2 , 25L ) - teq( 5L ** 2L , 25L ) - - # ternary operator - teq( pow( 5 , 3 , 100 ), 25 ) - teq( pow( 5 , 3 , 100L), 25L) - teq( pow( 5 , 3L, 100 ), 25L) - teq( pow( 5 , 3L, 100L), 25L) - teq( pow( 5L, 3 , 100 ), 25L) - teq( pow( 5L, 3 , 100L), 25L) - teq( pow( 5L, 3L, 100 ), 25L) - teq( pow( 5L, 3L, 100L), 25L) - - # two tests give a different result on PyPy and CPython. - # however, there is no sane way that PyPy can match CPython here, - # short of reintroducing three-way coercion... - teq( 5 .__pow__(3 , 100 ), 25 ) - #teq( 5 .__pow__(3 , 100L), 25L or NotImplemented? ) - teq( 5 .__pow__(3L, 100 ), NotImplemented ) - teq( 5 .__pow__(3L, 100L), NotImplemented ) - teq( 5L .__pow__(3 , 100 ), 25L) - teq( 5L .__pow__(3 , 100L), 25L) - teq( 5L .__pow__(3L, 100 ), 25L) - teq( 5L .__pow__(3L, 100L), 25L) - - teq( 5 .__rpow__(3 , 100 ), 43 ) - #teq( 5 .__rpow__(3 , 100L), 43L or NotImplemented? 
) - teq( 5 .__rpow__(3L, 100 ), NotImplemented ) - teq( 5 .__rpow__(3L, 100L), NotImplemented ) - teq( 5L .__rpow__(3 , 100 ), 43L) - teq( 5L .__rpow__(3 , 100L), 43L) - teq( 5L .__rpow__(3L, 100 ), 43L) - teq( 5L .__rpow__(3L, 100L), 43L) def app_test_int_vs_float(): @@ -98,37 +40,3 @@ teq( 5 .__rpow__(3.0, 100 ), NotImplemented ) teq( 5 .__rpow__(3.0, 100.0), NotImplemented ) - - -def app_test_long_vs_float(): - def teq(a, b): - assert a == b - assert type(a) is type(b) - - # binary operators - teq( 5L - 2.0 , 3.0 ) - teq( 5.0 - 2L , 3.0 ) - - teq( 5L .__sub__(2.0), NotImplemented ) - teq( 5.0 .__sub__(2L ), 3.0 ) - - teq( 5L .__rsub__(2.0), NotImplemented ) - teq( 5.0 .__rsub__(2L ), -3.0 ) - - teq( 5L ** 2.0 , 25.0 ) - teq( 5.0 ** 2L , 25.0 ) - - # pow() fails with a float argument anywhere - raises(TypeError, pow, 5L , 3L , 100.0) - raises(TypeError, pow, 5L , 3.0, 100 ) - raises(TypeError, pow, 5L , 3.0, 100.0) - raises(TypeError, pow, 5.0, 3L , 100 ) - raises(TypeError, pow, 5.0, 3L , 100.0) - raises(TypeError, pow, 5.0, 3.0, 100 ) - raises(TypeError, pow, 5.0, 3.0, 100.0) - - teq( 5L .__pow__(3.0, 100L ), NotImplemented ) - teq( 5L .__pow__(3.0, 100.0), NotImplemented ) - - teq( 5L .__rpow__(3.0, 100L ), NotImplemented ) - teq( 5L .__rpow__(3.0, 100.0), NotImplemented ) From noreply at buildbot.pypy.org Wed Mar 21 16:09:33 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 21 Mar 2012 16:09:33 +0100 (CET) Subject: [pypy-commit] pypy py3k: bah, ints are now W_LongObject, so this multimethod was never called. Adapt it to deal with longs Message-ID: <20120321150933.E90C18236A@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53871:b113dbf5ff2e Date: 2012-03-21 16:09 +0100 http://bitbucket.org/pypy/pypy/changeset/b113dbf5ff2e/ Log: bah, ints are now W_LongObject, so this multimethod was never called. 
Adapt it to deal with longs diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -9,7 +9,7 @@ from pypy.objspace.std.inttype import wrapint from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice from pypy.objspace.std import slicetype, newformat -from pypy.objspace.std.intobject import W_IntObject +from pypy.objspace.std.longobject import W_LongObject from pypy.objspace.std.listobject import W_ListObject from pypy.objspace.std.noneobject import W_NoneObject from pypy.objspace.std.tupleobject import W_TupleObject @@ -457,9 +457,16 @@ sub = w_sub._value return space.newbool(self.find(sub) >= 0) -def contains__String_Int(space, w_self, w_char): +def contains__String_Long(space, w_self, w_char): self = w_self._value - char = w_char.intval + try: + char = space.int_w(w_char) + except OperationError, e: + if e.match(space, space.w_OverflowError): + char = 256 # arbitrary value which will trigger the ValueError + # condition below + else: + raise if 0 <= char < 256: return space.newbool(self.find(chr(char)) >= 0) else: From noreply at buildbot.pypy.org Wed Mar 21 20:40:20 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 21 Mar 2012 20:40:20 +0100 (CET) Subject: [pypy-commit] pypy py3k: 2to3 Message-ID: <20120321194020.C59CD82112@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53872:4e43431597f2 Date: 2012-03-21 20:10 +0100 http://bitbucket.org/pypy/pypy/changeset/4e43431597f2/ Log: 2to3 diff --git a/pypy/objspace/std/test/test_stringobject.py b/pypy/objspace/std/test/test_stringobject.py --- a/pypy/objspace/std/test/test_stringobject.py +++ b/pypy/objspace/std/test/test_stringobject.py @@ -752,10 +752,10 @@ def test_replace_overflow(self): import sys - if sys.maxint > 2**31-1: + if sys.maxsize > 2**31-1: skip("Wrong platform") s = b"a" * (2**16) - raises(OverflowError, s.replace, "", s) + 
raises(OverflowError, s.replace, b"", s) def test_getslice(self): s = b"abc" From noreply at buildbot.pypy.org Wed Mar 21 20:40:22 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 21 Mar 2012 20:40:22 +0100 (CET) Subject: [pypy-commit] pypy py3k: we cannot mix bytes and unicode when .join(); adapt one test and kill the other Message-ID: <20120321194022.0E75282112@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53873:2bbc42be3c96 Date: 2012-03-21 20:16 +0100 http://bitbucket.org/pypy/pypy/changeset/2bbc42be3c96/ Log: we cannot mix bytes and unicode when .join(); adapt one test and kill the other diff --git a/pypy/objspace/std/test/test_stringobject.py b/pypy/objspace/std/test/test_stringobject.py --- a/pypy/objspace/std/test/test_stringobject.py +++ b/pypy/objspace/std/test/test_stringobject.py @@ -525,35 +525,7 @@ raises(TypeError, b''.join, [[1]]) def test_unicode_join_str_arg_ascii(self): - raises(UnicodeDecodeError, u''.join, ['\xc3\xa1']) - - def test_unicode_join_str_arg_utf8(self): - # Need default encoding utf-8, but sys.setdefaultencoding - # is removed after startup. - import sys - if not hasattr(sys, 'setdefaultencoding'): - skip("sys.setdefaultencoding() not available") - old_encoding = sys.getdefaultencoding() - # Duplicate unittest.test_support.CleanImport logic because it won't - # import. - self.original_modules = sys.modules.copy() - try: - import sys as temp_sys - module_name = 'sys' - if module_name in sys.modules: - module = sys.modules[module_name] - # It is possible that module_name is just an alias for - # another module (e.g. stub for modules renamed in 3.x). - # In that case, we also need delete the real module to - # clear the import cache. 
- if module.__name__ != module_name: - del sys.modules[module.__name__] - del sys.modules[module_name] - temp_sys.setdefaultencoding('utf-8') - assert u''.join(['\xc3\xa1']) == u'\xe1' - finally: - temp_sys.setdefaultencoding(old_encoding) - sys.modules.update(self.original_modules) + raises(TypeError, ''.join, [b'\xc3\xa1']) def test_unicode_join_endcase(self): # This class inserts a Unicode object into its argument's natural From noreply at buildbot.pypy.org Wed Mar 21 20:40:23 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 21 Mar 2012 20:40:23 +0100 (CET) Subject: [pypy-commit] pypy py3k: we no longer have oldstyle classes Message-ID: <20120321194023.4DD8D82112@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53874:2d9fd333072e Date: 2012-03-21 20:35 +0100 http://bitbucket.org/pypy/pypy/changeset/2d9fd333072e/ Log: we no longer have oldstyle classes diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py --- a/pypy/objspace/std/test/test_typeobject.py +++ b/pypy/objspace/std/test/test_typeobject.py @@ -499,7 +499,7 @@ def test_abstract_mro(self): """ - class A1: # old-style class + class A1: # in py3k is a new-style class pass class B1(A1): pass @@ -509,8 +509,8 @@ pass class E1(D1, object, metaclass=type): pass - # old-style MRO in the classical part of the parent hierarchy - assert E1.__mro__ == (E1, D1, B1, A1, C1, object) + # new-style MRO, contrarily to python2 + assert E1.__mro__ == (E1, D1, B1, C1, A1, object) """ def test_nodoc(self): From noreply at buildbot.pypy.org Wed Mar 21 20:40:24 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 21 Mar 2012 20:40:24 +0100 (CET) Subject: [pypy-commit] pypy py3k: our ints all inherits from W_AbstractIntObject now Message-ID: <20120321194024.8717B82112@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53875:fc6428f099b6 Date: 2012-03-21 20:38 +0100 
http://bitbucket.org/pypy/pypy/changeset/fc6428f099b6/ Log: our ints all inherits from W_AbstractIntObject now diff --git a/pypy/objspace/std/test/test_stdobjspace.py b/pypy/objspace/std/test/test_stdobjspace.py --- a/pypy/objspace/std/test/test_stdobjspace.py +++ b/pypy/objspace/std/test/test_stdobjspace.py @@ -49,13 +49,13 @@ def test_fastpath_isinstance(self): from pypy.objspace.std.stringobject import W_StringObject - from pypy.objspace.std.intobject import W_IntObject + from pypy.objspace.std.intobject import W_AbstractIntObject from pypy.objspace.std.iterobject import W_AbstractSeqIterObject from pypy.objspace.std.iterobject import W_SeqIterObject space = self.space assert space._get_interplevel_cls(space.w_str) is W_StringObject - assert space._get_interplevel_cls(space.w_int) is W_IntObject + assert space._get_interplevel_cls(space.w_int) is W_AbstractIntObject class X(W_StringObject): def __init__(self): pass From pullrequests-noreply at bitbucket.org Wed Mar 21 22:00:11 2012 From: pullrequests-noreply at bitbucket.org (Michael Blume) Date: Wed, 21 Mar 2012 21:00:11 -0000 Subject: [pypy-commit] [pypy/pypy] add ndmin param to numpypy.array (pull request #64) In-Reply-To: <9d47b22e90a590324f6a58b11a33a0e7@bitbucket.org> References: <9d47b22e90a590324f6a58b11a33a0e7@bitbucket.org> Message-ID: <20120321210011.27341.94564@bitbucket13.managed.contegix.com> New comment on pull request: https://bitbucket.org/pypy/pypy/pull-request/64/add-ndmin-param-to-numpypyarray#comment-4214 Michael Blume (MichaelBlume) said: ...Did I just break BitBucket? -- This is a pull request comment notification from bitbucket.org. You are receiving this either because you are participating in a pull request, or you are following it. 
From noreply at buildbot.pypy.org Wed Mar 21 22:12:43 2012 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 21 Mar 2012 22:12:43 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: document and fix for libffi on mingw Message-ID: <20120321211243.1FA3682112@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: win64-stage1 Changeset: r53876:861ddd5f6fdd Date: 2012-03-21 23:11 +0200 http://bitbucket.org/pypy/pypy/changeset/861ddd5f6fdd/ Log: document and fix for libffi on mingw diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -18,7 +18,8 @@ Edition. Other configurations may work as well. The translation scripts will set up the appropriate environment variables -for the compiler. They will attempt to locate the same compiler version that +for the compiler, so you do not need to run vcvars before translation. +They will attempt to locate the same compiler version that was used to build the Python interpreter doing the translation. Failing that, they will pick the most recent Visual Studio compiler they can find. In addition, the target architecture @@ -26,7 +27,7 @@ using a 32 bit Python and vice versa. **Note:** PyPy is currently not supported for 64 bit Windows, and translation -will be aborted in this case. +will fail in this case. The compiler is all you need to build pypy-c, but it will miss some modules that relies on third-party libraries. See below how to get @@ -57,7 +58,8 @@ install third-party libraries. We chose to install them in the parent directory of the pypy checkout. For example, if you installed pypy in ``d:\pypy\trunk\`` (This directory contains a README file), the base -directory is ``d:\pypy``. +directory is ``d:\pypy``. 
You may choose different values by setting the +INCLUDE, LIB and PATH (for DLLs) The Boehm garbage collector ~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -126,18 +128,46 @@ ------------------------ You can compile pypy with the mingw compiler, using the --cc=mingw32 option; -mingw.exe must be on the PATH. +gcc.exe must be on the PATH. If the -cc flag does not begin with "ming", it should be +the name of a valid gcc-derivative compiler, i.e. x86_64-w64-mingw32-gcc for the 64 bit +compiler creating a 64 bit target. libffi for the mingw32 compiler ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To enable the _rawffi (and ctypes) module, you need to compile a mingw32 -version of libffi. I downloaded the `libffi source files`_, and extracted -them in the base directory. Then run:: +version of libffi. Here is one way to do this, wich should allow you to try +to build for win64 or win32: +- Download and unzip a `mingw32 build`_ or `mingw64 build`_, say into c:\mingw +- If you do not use cygwin, you will need msys to provide make, +autoconf tools and other goodies. + - Download and unzip a `msys for mingw`_, say into c:\msys + - Edit the c:\msys\etc\fstab file to mount c:\mingw +- Download and unzip the `libffi source files`_, and extract +them in the base directory. +- Run c:\msys\msys.bat or a cygwin shell which should make you +feel better since it is a shell prompt with shell tools. +- cd to the libffi directory and do:: sh ./configure make cp .libs/libffi-5.dll +If you can't find the dll, and the libtool issued a warning about +"undefined symbols not allowed", you will need to edit the libffi +Makefile in the toplevel directory. Add the flag -no-undefined to +the definition of libffi_la_LDFLAGS +If you wish to experiment with win64, you must run configure with flags:: + sh ./configure --build=x86_64-w64-mingw32 --host=x86_64-w64-mingw32 +or such, depending on your mingw64 download. 
+ +Since hacking on Pypy means running tests, you will need a way to specify +the mingw compiler when hacking (as opposed to translating). As of +March 2012, --cc is not a valid option for pytest.py. However if you set an +environment variable CC it will allow you to choose a compiler. + +.. _'mingw32 build': http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win32/Automated%20Builds +.. _`mingw64 build`: http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win64/Automated%20Builds +.. _`msys for mingw`: http://sourceforge.net/projects/mingw-w64/files/External%20binary%20packages%20%28Win64%20hosted%29/MSYS%20%2832-bit%29 .. _`libffi source files`: http://sourceware.org/libffi/ .. _`RPython translation toolchain`: translation.html diff --git a/pypy/rlib/clibffi.py b/pypy/rlib/clibffi.py --- a/pypy/rlib/clibffi.py +++ b/pypy/rlib/clibffi.py @@ -114,9 +114,10 @@ ) eci = rffi_platform.configure_external_library( - 'libffi', eci, + 'libffi-5', eci, [dict(prefix='libffi-', include_dir='include', library_dir='.libs'), + dict(prefix=r'c:\mingw64', include_dir='include', library_dir='lib'), ]) else: libffidir = py.path.local(pypydir).join('translator', 'c', 'src', 'libffi_msvc') From noreply at buildbot.pypy.org Wed Mar 21 22:51:56 2012 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 21 Mar 2012 22:51:56 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: make rst actually compile Message-ID: <20120321215156.0804682112@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: win64-stage1 Changeset: r53877:57206e985432 Date: 2012-03-21 23:51 +0200 http://bitbucket.org/pypy/pypy/changeset/57206e985432/ Log: make rst actually compile diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -138,27 +138,33 @@ To enable the _rawffi (and ctypes) module, you need to compile a mingw32 version of libffi. 
Here is one way to do this, wich should allow you to try to build for win64 or win32: -- Download and unzip a `mingw32 build`_ or `mingw64 build`_, say into c:\mingw -- If you do not use cygwin, you will need msys to provide make, -autoconf tools and other goodies. - - Download and unzip a `msys for mingw`_, say into c:\msys - - Edit the c:\msys\etc\fstab file to mount c:\mingw -- Download and unzip the `libffi source files`_, and extract -them in the base directory. -- Run c:\msys\msys.bat or a cygwin shell which should make you -feel better since it is a shell prompt with shell tools. -- cd to the libffi directory and do:: + +#. Download and unzip a `mingw32 build`_ or `mingw64 build`_, say into c:\mingw +#. If you do not use cygwin, you will need msys to provide make, + autoconf tools and other goodies. + + #. Download and unzip a `msys for mingw`_, say into c:\msys + #. Edit the c:\msys\etc\fstab file to mount c:\mingw + +#. Download and unzip the `libffi source files`_, and extract + them in the base directory. +#. Run c:\msys\msys.bat or a cygwin shell which should make you + feel better since it is a shell prompt with shell tools. +#. cd to the libffi directory and do:: sh ./configure make cp .libs/libffi-5.dll + If you can't find the dll, and the libtool issued a warning about "undefined symbols not allowed", you will need to edit the libffi Makefile in the toplevel directory. Add the flag -no-undefined to the definition of libffi_la_LDFLAGS If you wish to experiment with win64, you must run configure with flags:: + sh ./configure --build=x86_64-w64-mingw32 --host=x86_64-w64-mingw32 + or such, depending on your mingw64 download. 
Since hacking on Pypy means running tests, you will need a way to specify From noreply at buildbot.pypy.org Wed Mar 21 22:59:45 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 21 Mar 2012 22:59:45 +0100 (CET) Subject: [pypy-commit] pypy py3k: kill longs and u'' string Message-ID: <20120321215945.D878282112@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53878:c81d98daa906 Date: 2012-03-21 22:59 +0100 http://bitbucket.org/pypy/pypy/changeset/c81d98daa906/ Log: kill longs and u'' string diff --git a/pypy/objspace/std/test/test_complexobject.py b/pypy/objspace/std/test/test_complexobject.py --- a/pypy/objspace/std/test/test_complexobject.py +++ b/pypy/objspace/std/test/test_complexobject.py @@ -264,25 +264,17 @@ assert self.almost_equal(complex("1+10j"), 1+10j) assert self.almost_equal(complex(10), 10+0j) assert self.almost_equal(complex(10.0), 10+0j) - assert self.almost_equal(complex(10L), 10+0j) assert self.almost_equal(complex(10+0j), 10+0j) assert self.almost_equal(complex(1,10), 1+10j) - assert self.almost_equal(complex(1,10L), 1+10j) assert self.almost_equal(complex(1,10.0), 1+10j) - assert self.almost_equal(complex(1L,10), 1+10j) - assert self.almost_equal(complex(1L,10L), 1+10j) - assert self.almost_equal(complex(1L,10.0), 1+10j) assert self.almost_equal(complex(1.0,10), 1+10j) - assert self.almost_equal(complex(1.0,10L), 1+10j) assert self.almost_equal(complex(1.0,10.0), 1+10j) assert self.almost_equal(complex(3.14+0j), 3.14+0j) assert self.almost_equal(complex(3.14), 3.14+0j) assert self.almost_equal(complex(314), 314.0+0j) - assert self.almost_equal(complex(314L), 314.0+0j) assert self.almost_equal(complex(3.14+0j, 0j), 3.14+0j) assert self.almost_equal(complex(3.14, 0.0), 3.14+0j) assert self.almost_equal(complex(314, 0), 314.0+0j) - assert self.almost_equal(complex(314L, 0L), 314.0+0j) assert self.almost_equal(complex(0j, 3.14j), -3.14+0j) assert self.almost_equal(complex(0.0, 3.14j), -3.14+0j) assert 
self.almost_equal(complex(0j, 3.14), 3.14j) @@ -317,7 +309,6 @@ raises(ValueError, complex, '1+1j\0j') raises(TypeError, int, 5+3j) - raises(TypeError, long, 5+3j) raises(TypeError, float, 5+3j) raises(ValueError, complex, "") raises(TypeError, complex, None) @@ -368,7 +359,6 @@ pass assert j(100 + 0j) == 100 + 0j assert isinstance(j(100), j) - assert j(100L + 0j) == 100 + 0j assert j("100 + 0j") == 100 + 0j x = j(1+0j) x.foo = 42 @@ -541,8 +531,7 @@ # make sure everything works in ''.format() assert '*{0:.3f}*'.format(3.14159+2.71828j) == '*3.142+2.718j*' - assert u'*{0:.3f}*'.format(3.14159+2.71828j) == u'*3.142+2.718j*' - assert u'{:-}'.format(1.5+3.5j) == u'(1.5+3.5j)' + assert '{:-}'.format(1.5+3.5j) == '(1.5+3.5j)' INF = float("inf") NAN = float("nan") From noreply at buildbot.pypy.org Wed Mar 21 23:25:40 2012 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 21 Mar 2012 23:25:40 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: clarify Message-ID: <20120321222540.EA01882112@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: win64-stage1 Changeset: r53879:9d969f5edbc3 Date: 2012-03-22 00:25 +0200 http://bitbucket.org/pypy/pypy/changeset/9d969f5edbc3/ Log: clarify diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -132,10 +132,10 @@ the name of a valid gcc-derivative compiler, i.e. x86_64-w64-mingw32-gcc for the 64 bit compiler creating a 64 bit target. -libffi for the mingw32 compiler +libffi for the mingw compiler ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -To enable the _rawffi (and ctypes) module, you need to compile a mingw32 +To enable the _rawffi (and ctypes) module, you need to compile a mingw version of libffi. Here is one way to do this, wich should allow you to try to build for win64 or win32: @@ -150,7 +150,7 @@ them in the base directory. #. Run c:\msys\msys.bat or a cygwin shell which should make you feel better since it is a shell prompt with shell tools. -#. 
cd to the libffi directory and do:: +#. From inside the shell, cd to the libffi directory and do:: sh ./configure make @@ -167,6 +167,8 @@ or such, depending on your mingw64 download. +hacking on Pypy with the mingw compiler +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Since hacking on Pypy means running tests, you will need a way to specify the mingw compiler when hacking (as opposed to translating). As of March 2012, --cc is not a valid option for pytest.py. However if you set an From noreply at buildbot.pypy.org Thu Mar 22 00:17:11 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 22 Mar 2012 00:17:11 +0100 (CET) Subject: [pypy-commit] pypy py3k: on 32bits, the accum variable might overflow to a long; make sure to cast back so a signed int before passing it to _store_digits Message-ID: <20120321231711.44F4482112@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53880:01ce09ec95e6 Date: 2012-03-21 23:15 +0000 http://bitbucket.org/pypy/pypy/changeset/01ce09ec95e6/ Log: on 32bits, the accum variable might overflow to a long; make sure to cast back so a signed int before passing it to _store_digits diff --git a/pypy/rlib/rbigint.py b/pypy/rlib/rbigint.py --- a/pypy/rlib/rbigint.py +++ b/pypy/rlib/rbigint.py @@ -224,11 +224,11 @@ accum |= c accumbits += 8 if accumbits >= SHIFT: - digits.append(_store_digit(accum & MASK)) + digits.append(_store_digit(intmask(accum & MASK))) accum >>= SHIFT accumbits -= SHIFT if accumbits: - digits.append(_store_digit(accum)) + digits.append(_store_digit(intmask(accum))) return rbigint(digits[:], 1) @jit.elidable diff --git a/pypy/rlib/test/test_rbigint.py b/pypy/rlib/test/test_rbigint.py --- a/pypy/rlib/test/test_rbigint.py +++ b/pypy/rlib/test/test_rbigint.py @@ -692,3 +692,8 @@ res = interpret(fn, []) assert res == -42.0 + + def test_frombytes(self): + s = "\xFF\x12\x34\x56" + bigint = rbigint.frombytes(s) + assert bigint.tolong() == 0xFF123456 From pullrequests-noreply at bitbucket.org Thu Mar 22 
05:01:11 2012 From: pullrequests-noreply at bitbucket.org (Michael Blume) Date: Thu, 22 Mar 2012 04:01:11 -0000 Subject: [pypy-commit] [pypy/pypy] add param ndmin to numpypy.array (pull request #65) Message-ID: <8550cc4c3d7c8255184469f6db4be5e8@bitbucket.org> A new pull request has been opened by Michael Blume. MichaelBlume/pypy/ndmin has changes to be pulled into pypy/pypy/default. https://bitbucket.org/pypy/pypy/pull-request/65/add-param-ndmin-to-numpypyarray Title: add param ndmin to numpypy.array Changes to be pulled: c22d05cd841d by Michael Blume: "make test pass -- add ndmin param to numpy.array" 67cb9cfe42fe by Michael Blume: "add failing test of ndmin parameter of numpy.array" -- This is an issue notification from bitbucket.org. You are receiving this either because you are the participating in a pull request, or you are following it. From pullrequests-noreply at bitbucket.org Thu Mar 22 05:11:23 2012 From: pullrequests-noreply at bitbucket.org (Alex Gaynor) Date: Thu, 22 Mar 2012 04:11:23 -0000 Subject: [pypy-commit] [pypy/pypy] add param ndmin to numpypy.array (pull request #65) In-Reply-To: <8550cc4c3d7c8255184469f6db4be5e8@bitbucket.org> References: <8550cc4c3d7c8255184469f6db4be5e8@bitbucket.org> Message-ID: <20120322041123.12999.34985@bitbucket01.managed.contegix.com> New comment on pull request: https://bitbucket.org/pypy/pypy/pull-request/65/add-param-ndmin-to-numpypyarray#comment-4240 Alex Gaynor (alex_gaynor) said: What version of numpy was this added in, 2.0? -- This is a pull request comment notification from bitbucket.org. You are receiving this either because you are participating in a pull request, or you are following it. 
From pullrequests-noreply at bitbucket.org Thu Mar 22 05:27:00 2012 From: pullrequests-noreply at bitbucket.org (Michael Blume) Date: Thu, 22 Mar 2012 04:27:00 -0000 Subject: [pypy-commit] [pypy/pypy] add param ndmin to numpypy.array (pull request #65) In-Reply-To: <8550cc4c3d7c8255184469f6db4be5e8@bitbucket.org> References: <8550cc4c3d7c8255184469f6db4be5e8@bitbucket.org> Message-ID: <20120322042700.27873.78615@bitbucket03.managed.contegix.com> New comment on pull request: https://bitbucket.org/pypy/pypy/pull-request/65/add-param-ndmin-to-numpypyarray#comment-4241 Michael Blume (MichaelBlume) said: Looks like it was present in v0.9.5 -- This is a pull request comment notification from bitbucket.org. You are receiving this either because you are participating in a pull request, or you are following it. From pullrequests-noreply at bitbucket.org Thu Mar 22 05:28:27 2012 From: pullrequests-noreply at bitbucket.org (Michael Blume) Date: Thu, 22 Mar 2012 04:28:27 -0000 Subject: [pypy-commit] [pypy/pypy] add param ndmin to numpypy.array (pull request #65) In-Reply-To: <8550cc4c3d7c8255184469f6db4be5e8@bitbucket.org> References: <8550cc4c3d7c8255184469f6db4be5e8@bitbucket.org> Message-ID: <20120322042827.22858.53682@bitbucket05.managed.contegix.com> New comment on pull request: https://bitbucket.org/pypy/pypy/pull-request/65/add-param-ndmin-to-numpypyarray#comment-4242 Michael Blume (MichaelBlume) said: https://github.com/numpy/numpy/commit/5beecfe9 -- This is a pull request comment notification from bitbucket.org. You are receiving this either because you are participating in a pull request, or you are following it. 
From pullrequests-noreply at bitbucket.org Thu Mar 22 05:31:39 2012 From: pullrequests-noreply at bitbucket.org (Alex Gaynor) Date: Thu, 22 Mar 2012 04:31:39 -0000 Subject: [pypy-commit] [pypy/pypy] add param ndmin to numpypy.array (pull request #65) In-Reply-To: <8550cc4c3d7c8255184469f6db4be5e8@bitbucket.org> References: <8550cc4c3d7c8255184469f6db4be5e8@bitbucket.org> Message-ID: <20120322043139.26590.94528@bitbucket05.managed.contegix.com> New comment on pull request: https://bitbucket.org/pypy/pypy/pull-request/65/add-param-ndmin-to-numpypyarray#comment-4244 Alex Gaynor (alex_gaynor) said: Ah, I'm a moron I've been trying to do "ndim" instead of "ndmin". You should probably use NoneNotWrapped, instead of None here, actually explicitly passing None isn't valid, only not passing anything (it's obscure, I know). -- This is a pull request comment notification from bitbucket.org. You are receiving this either because you are participating in a pull request, or you are following it. From notifications-noreply at bitbucket.org Thu Mar 22 06:10:51 2012 From: notifications-noreply at bitbucket.org (Bitbucket) Date: Thu, 22 Mar 2012 05:10:51 -0000 Subject: [pypy-commit] Notification: pypy Message-ID: <20120322051051.5034.86370@bitbucket01.managed.contegix.com> You have received a notification from evelyn559. Hi, I forked pypy. My fork is at https://bitbucket.org/evelyn559/pypy. -- Disable notifications at https://bitbucket.org/account/notifications/ From noreply at buildbot.pypy.org Thu Mar 22 11:03:19 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 22 Mar 2012 11:03:19 +0100 (CET) Subject: [pypy-commit] pypy py3k: we no longer have the applevel 'buffer', so no way the get the raw bytes of the unicode string. 
Hardcode the bytes because we know it's UCS-4, at least on linux Message-ID: <20120322100319.E79F282438@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53881:8d078dd5f91a Date: 2012-03-22 09:50 +0100 http://bitbucket.org/pypy/pypy/changeset/8d078dd5f91a/ Log: we no longer have the applevel 'buffer', so no way the get the raw bytes of the unicode string. Hardcode the bytes because we know it's UCS-4, at least on linux diff --git a/pypy/module/struct/test/test_struct.py b/pypy/module/struct/test/test_struct.py --- a/pypy/module/struct/test/test_struct.py +++ b/pypy/module/struct/test/test_struct.py @@ -372,9 +372,11 @@ import sys if '__pypy__' not in sys.builtin_module_names: skip("PyPy extension") - data = self.struct.pack("uuu", u'X', u'Y', u'Z') - assert data == bytes(buffer(u'XYZ')) - assert self.struct.unpack("uuu", data) == (u'X', u'Y', u'Z') + data = self.struct.pack("uuu", 'X', 'Y', 'Z') + # this assumes UCS4; adapt/extend the test on platforms where we use + # another format + assert data == b'X\x00\x00\x00Y\x00\x00\x00Z\x00\x00\x00' + assert self.struct.unpack("uuu", data) == ('X', 'Y', 'Z') def test_unpack_buffer(self): From noreply at buildbot.pypy.org Thu Mar 22 11:03:21 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 22 Mar 2012 11:03:21 +0100 (CET) Subject: [pypy-commit] pypy py3k: we no longer have buffers, but we have memoryview instead Message-ID: <20120322100321.46DE982438@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53882:ed773b252600 Date: 2012-03-22 09:54 +0100 http://bitbucket.org/pypy/pypy/changeset/ed773b252600/ Log: we no longer have buffers, but we have memoryview instead diff --git a/pypy/module/struct/test/test_struct.py b/pypy/module/struct/test/test_struct.py --- a/pypy/module/struct/test/test_struct.py +++ b/pypy/module/struct/test/test_struct.py @@ -379,11 +379,11 @@ assert self.struct.unpack("uuu", data) == ('X', 'Y', 'Z') - def test_unpack_buffer(self): + def 
test_unpack_memoryview(self): """ - Buffer objects can be passed to struct.unpack(). + memoryview objects can be passed to struct.unpack(). """ - b = buffer(self.struct.pack("ii", 62, 12)) + b = memoryview(self.struct.pack("ii", 62, 12)) assert self.struct.unpack("ii", b) == (62, 12) raises(self.struct.error, self.struct.unpack, "i", b) From noreply at buildbot.pypy.org Thu Mar 22 11:03:22 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 22 Mar 2012 11:03:22 +0100 (CET) Subject: [pypy-commit] pypy py3k: s/buffer/memoryview Message-ID: <20120322100322.8DD2F82438@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53883:ba50bd9d0176 Date: 2012-03-22 10:31 +0100 http://bitbucket.org/pypy/pypy/changeset/ba50bd9d0176/ Log: s/buffer/memoryview diff --git a/pypy/module/struct/app_struct.py b/pypy/module/struct/app_struct.py --- a/pypy/module/struct/app_struct.py +++ b/pypy/module/struct/app_struct.py @@ -11,12 +11,12 @@ # XXX inefficient def pack_into(fmt, buf, offset, *args): data = struct.pack(fmt, *args) - buffer(buf)[offset:offset+len(data)] = data + memoryview(buf)[offset:offset+len(data)] = data # XXX inefficient def unpack_from(fmt, buf, offset=0): size = _struct.calcsize(fmt) - data = buffer(buf)[offset:offset+size] + data = memoryview(buf)[offset:offset+size] if len(data) != size: raise error("unpack_from requires a buffer of at least %d bytes" % (size,)) From noreply at buildbot.pypy.org Thu Mar 22 11:03:23 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 22 Mar 2012 11:03:23 +0100 (CET) Subject: [pypy-commit] pypy py3k: typo Message-ID: <20120322100323.DEAD282438@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53884:e711ed82327a Date: 2012-03-22 10:31 +0100 http://bitbucket.org/pypy/pypy/changeset/e711ed82327a/ Log: typo diff --git a/pypy/module/struct/app_struct.py b/pypy/module/struct/app_struct.py --- a/pypy/module/struct/app_struct.py +++ b/pypy/module/struct/app_struct.py @@ 
-15,7 +15,7 @@ # XXX inefficient def unpack_from(fmt, buf, offset=0): - size = _struct.calcsize(fmt) + size = struct.calcsize(fmt) data = memoryview(buf)[offset:offset+size] if len(data) != size: raise error("unpack_from requires a buffer of at least %d bytes" From noreply at buildbot.pypy.org Thu Mar 22 11:03:25 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 22 Mar 2012 11:03:25 +0100 (CET) Subject: [pypy-commit] pypy py3k: s/buffer/memoryview Message-ID: <20120322100325.32D3582438@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53885:d783f4891b3f Date: 2012-03-22 10:43 +0100 http://bitbucket.org/pypy/pypy/changeset/d783f4891b3f/ Log: s/buffer/memoryview diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -992,7 +992,7 @@ import _rawffi S = _rawffi.Structure((40, 1)) s = S(autofree=True) - b = buffer(s) + b = memoryview(s) assert len(b) == 40 b[4] = b'X' b[:3] = b'ABC' @@ -1001,7 +1001,7 @@ A = _rawffi.Array('c') a = A(10, autofree=True) a[3] = b'x' - b = buffer(a) + b = memoryview(a) assert len(b) == 10 assert b[3] == b'x' b[6] = b'y' diff --git a/pypy/module/mmap/test/test_mmap.py b/pypy/module/mmap/test/test_mmap.py --- a/pypy/module/mmap/test/test_mmap.py +++ b/pypy/module/mmap/test/test_mmap.py @@ -534,7 +534,7 @@ f.write(b"foobar") f.flush() m = mmap(f.fileno(), 6) - b = buffer(m) + b = memoryview(m) assert len(b) == 6 assert b[3] == b"b" assert b[:] == b"foobar" From noreply at buildbot.pypy.org Thu Mar 22 11:03:26 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 22 Mar 2012 11:03:26 +0100 (CET) Subject: [pypy-commit] pypy py3k: we no longer have buffer at applevel, kill this check Message-ID: <20120322100326.7404E82438@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53886:5a99728a7e69 Date: 2012-03-22 10:51 +0100 
http://bitbucket.org/pypy/pypy/changeset/5a99728a7e69/ Log: we no longer have buffer at applevel, kill this check diff --git a/pypy/module/__pypy__/test/test_bytebuffer.py b/pypy/module/__pypy__/test/test_bytebuffer.py --- a/pypy/module/__pypy__/test/test_bytebuffer.py +++ b/pypy/module/__pypy__/test/test_bytebuffer.py @@ -8,7 +8,6 @@ def test_bytebuffer(self): from __pypy__ import bytebuffer b = bytebuffer(12) - assert isinstance(b, buffer) assert len(b) == 12 b[3] = b'!' b[5] = b'?' From noreply at buildbot.pypy.org Thu Mar 22 11:03:27 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 22 Mar 2012 11:03:27 +0100 (CET) Subject: [pypy-commit] pypy py3k: s/buffer/memoryview Message-ID: <20120322100327.B44CF82438@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53887:963d9f685fb9 Date: 2012-03-22 10:57 +0100 http://bitbucket.org/pypy/pypy/changeset/963d9f685fb9/ Log: s/buffer/memoryview diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -517,13 +517,13 @@ s.connect(("www.python.org", 80)) except _socket.gaierror as ex: skip("GAIError - probably no connection: %s" % str(ex.args)) - s.send(buffer(b'')) - s.sendall(buffer(b'')) + s.send(memoryview(b'')) + s.sendall(memoryview(b'')) raises(TypeError, s.send, '') raises(TypeError, s.sendall, '') s.close() s = _socket.socket(_socket.AF_INET, _socket.SOCK_DGRAM, 0) - s.sendto(buffer(b''), ('localhost', 9)) # Send to discard port. + s.sendto(memoryview(b''), ('localhost', 9)) # Send to discard port. 
s.close() def test_unix_socket_connect(self): @@ -642,7 +642,7 @@ cli.connect(self.serv.getsockname()) fileno, addr = self.serv._accept() conn = socket.socket(fileno=fileno) - buf = buffer(MSG) + buf = memoryview(MSG) conn.send(buf) buf = array.array('b', b' '*1024) nbytes = cli.recv_into(buf) @@ -658,7 +658,7 @@ cli.connect(self.serv.getsockname()) fileno, addr = self.serv._accept() conn = socket.socket(fileno=fileno) - buf = buffer(MSG) + buf = memoryview(MSG) conn.send(buf) buf = array.array('b', b' '*1024) nbytes, addr = cli.recvfrom_into(buf) From noreply at buildbot.pypy.org Thu Mar 22 11:03:28 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 22 Mar 2012 11:03:28 +0100 (CET) Subject: [pypy-commit] pypy py3k: s/buffer/memoryview Message-ID: <20120322100328.F3A0A82438@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53888:6948ab3687f7 Date: 2012-03-22 11:02 +0100 http://bitbucket.org/pypy/pypy/changeset/6948ab3687f7/ Log: s/buffer/memoryview diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -400,7 +400,7 @@ def test_buffer(self): a = self.array('h', b'Hi') - buf = buffer(a) + buf = memoryview(a) assert buf[1] == b'i' #raises(TypeError, buf.__setitem__, 1, 'o') diff --git a/pypy/module/bz2/test/test_bz2_compdecomp.py b/pypy/module/bz2/test/test_bz2_compdecomp.py --- a/pypy/module/bz2/test/test_bz2_compdecomp.py +++ b/pypy/module/bz2/test/test_bz2_compdecomp.py @@ -95,7 +95,7 @@ def test_buffer(self): from bz2 import BZ2Compressor bz2c = BZ2Compressor() - data = bz2c.compress(buffer(self.TEXT)) + data = bz2c.compress(memoryview(self.TEXT)) data += bz2c.flush() assert self.decompress(data) == self.TEXT @@ -163,7 +163,7 @@ def test_buffer(self): from bz2 import BZ2Decompressor bz2d = BZ2Decompressor() - decompressed_data = bz2d.decompress(buffer(self.DATA)) + decompressed_data = 
bz2d.decompress(memoryview(self.DATA)) assert decompressed_data == self.TEXT def test_subsequent_read(self): @@ -219,6 +219,6 @@ def test_buffer(self): import bz2 - data = bz2.compress(buffer(self.TEXT)) - result = bz2.decompress(buffer(data)) + data = bz2.compress(memoryview(self.TEXT)) + result = bz2.decompress(memoryview(data)) assert result == self.TEXT From noreply at buildbot.pypy.org Thu Mar 22 11:03:30 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 22 Mar 2012 11:03:30 +0100 (CET) Subject: [pypy-commit] pypy default: update the comment and kill the XXX, this is not really 'temporary' at all Message-ID: <20120322100330.3A3C382438@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r53889:842c2e057680 Date: 2012-03-22 11:02 +0100 http://bitbucket.org/pypy/pypy/changeset/842c2e057680/ Log: update the comment and kill the XXX, this is not really 'temporary' at all diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -372,10 +372,9 @@ def test_socket_connect(self): import _socket, os s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM, 0) - # XXX temporarily we use python.org to test, will have more robust tests - # in the absence of a network connection later when more parts of the - # socket API are implemented. Currently skip the test if there is no - # connection. + # it would be nice to have a test which works even if there is no + # network connection. However, this one is "good enough" for now. Skip + # it if there is no connection. 
try: s.connect(("www.python.org", 80)) except _socket.gaierror, ex: From noreply at buildbot.pypy.org Thu Mar 22 11:26:05 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 22 Mar 2012 11:26:05 +0100 (CET) Subject: [pypy-commit] pypy py3k: s/buffer/memoryview Message-ID: <20120322102605.C89F082438@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53890:3d2e3a832dfc Date: 2012-03-22 11:12 +0100 http://bitbucket.org/pypy/pypy/changeset/3d2e3a832dfc/ Log: s/buffer/memoryview diff --git a/pypy/module/fcntl/test/test_fcntl.py b/pypy/module/fcntl/test/test_fcntl.py --- a/pypy/module/fcntl/test/test_fcntl.py +++ b/pypy/module/fcntl/test/test_fcntl.py @@ -33,7 +33,7 @@ raises((IOError, ValueError), fcntl.fcntl, -1, 1, 0) assert fcntl.fcntl(f, 1, 0) == 0 assert fcntl.fcntl(f, 2, "foo") == b"foo" - assert fcntl.fcntl(f, 2, buffer(b"foo")) == b"foo" + assert fcntl.fcntl(f, 2, memoryview(b"foo")) == b"foo" try: os.O_LARGEFILE diff --git a/pypy/module/zlib/test/test_zlib.py b/pypy/module/zlib/test/test_zlib.py --- a/pypy/module/zlib/test/test_zlib.py +++ b/pypy/module/zlib/test/test_zlib.py @@ -215,21 +215,21 @@ """ We should be able to pass buffer objects instead of strings. 
""" - assert self.zlib.crc32(buffer(b'hello, world.')) == -936931198 - assert self.zlib.adler32(buffer(b'hello, world.')) == 571147447 + assert self.zlib.crc32(memoryview(b'hello, world.')) == -936931198 + assert self.zlib.adler32(memoryview(b'hello, world.')) == 571147447 compressor = self.zlib.compressobj() - bytes = compressor.compress(buffer(self.expanded)) + bytes = compressor.compress(memoryview(self.expanded)) bytes += compressor.flush() assert bytes == self.compressed decompressor = self.zlib.decompressobj() - bytes = decompressor.decompress(buffer(self.compressed)) + bytes = decompressor.decompress(memoryview(self.compressed)) bytes += decompressor.flush() assert bytes == self.expanded - bytes = self.zlib.compress(buffer(self.expanded)) + bytes = self.zlib.compress(memoryview(self.expanded)) assert bytes == self.compressed - bytes = self.zlib.decompress(buffer(self.compressed)) + bytes = self.zlib.decompress(memoryview(self.compressed)) assert bytes == self.expanded From noreply at buildbot.pypy.org Thu Mar 22 11:26:07 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 22 Mar 2012 11:26:07 +0100 (CET) Subject: [pypy-commit] pypy py3k: s/buffer/memoryview Message-ID: <20120322102607.EC0F182438@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53891:b93fdee2d213 Date: 2012-03-22 11:16 +0100 http://bitbucket.org/pypy/pypy/changeset/b93fdee2d213/ Log: s/buffer/memoryview diff --git a/pypy/objspace/std/test/test_bytearrayobject.py b/pypy/objspace/std/test/test_bytearrayobject.py --- a/pypy/objspace/std/test/test_bytearrayobject.py +++ b/pypy/objspace/std/test/test_bytearrayobject.py @@ -356,7 +356,7 @@ b.extend(bytearray(b'def')) b.extend(b'ghi') assert b == b'abcdefghi' - b.extend(buffer(b'jkl')) + b.extend(memoryview(b'jkl')) assert b == b'abcdefghijkl' b = bytearray(b'world') @@ -419,7 +419,7 @@ def test_buffer(self): b = bytearray(b'abcdefghi') - buf = buffer(b) + buf = memoryview(b) assert buf[2] == b'c' buf[3] = b'D' 
assert b == b'abcDefghi' diff --git a/pypy/objspace/std/test/test_stringobject.py b/pypy/objspace/std/test/test_stringobject.py --- a/pypy/objspace/std/test/test_stringobject.py +++ b/pypy/objspace/std/test/test_stringobject.py @@ -244,8 +244,8 @@ assert b'123x123'.replace(b'123', b'') == b'x' def test_replace_buffer(self): - assert b'one'.replace(buffer(b'o'), buffer(b'n'), 1) == b'nne' - assert b'one'.replace(buffer(b'o'), buffer(b'n')) == b'nne' + assert b'one'.replace(memoryview(b'o'), memoryview(b'n'), 1) == b'nne' + assert b'one'.replace(memoryview(b'o'), memoryview(b'n')) == b'nne' def test_strip(self): s = " a b " @@ -689,7 +689,7 @@ def test_buffer(self): x = b"he" x += b"llo" - b = buffer(x) + b = memoryview(x) assert len(b) == 5 assert b[-1] == b"o" assert b[:] == b"hello" From noreply at buildbot.pypy.org Thu Mar 22 11:26:10 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 22 Mar 2012 11:26:10 +0100 (CET) Subject: [pypy-commit] pypy py3k: this is not supposed to raise, but to work Message-ID: <20120322102610.779AC82438@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53892:563355a096a1 Date: 2012-03-22 11:25 +0100 http://bitbucket.org/pypy/pypy/changeset/563355a096a1/ Log: this is not supposed to raise, but to work diff --git a/pypy/objspace/std/test/test_strsliceobject.py b/pypy/objspace/std/test/test_strsliceobject.py --- a/pypy/objspace/std/test/test_strsliceobject.py +++ b/pypy/objspace/std/test/test_strsliceobject.py @@ -98,7 +98,7 @@ assert b'a' in s assert b'ab' in s assert not b'd' in s - raises(TypeError, slice(b'a' * 100).__contains__, 1) + assert ord(b'a') in slice(b'a' * 100) def test_hash(self): import __pypy__ From noreply at buildbot.pypy.org Thu Mar 22 14:36:59 2012 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 22 Mar 2012 14:36:59 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: remove debugger call Message-ID: <20120322133659.7C32182438@wyvern.cs.uni-duesseldorf.de> Author: David 
Schneider Branch: arm-backend-2 Changeset: r53893:43c78272fba8 Date: 2012-03-22 13:34 +0000 http://bitbucket.org/pypy/pypy/changeset/43c78272fba8/ Log: remove debugger call diff --git a/pypy/jit/backend/arm/test/test_ztranslation.py b/pypy/jit/backend/arm/test/test_ztranslation.py --- a/pypy/jit/backend/arm/test/test_ztranslation.py +++ b/pypy/jit/backend/arm/test/test_ztranslation.py @@ -24,7 +24,7 @@ return t def _check_cbuilder(self, cbuilder): - import pdb; pdb.set_trace() + pass def test_stuff_translates(self): # this is a basic test that tries to hit a number of features and their From noreply at buildbot.pypy.org Thu Mar 22 14:37:01 2012 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 22 Mar 2012 14:37:01 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: implement stack overflow checks in the backend Message-ID: <20120322133701.196B582439@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: arm-backend-2 Changeset: r53894:e04c88a968bb Date: 2012-03-22 13:35 +0000 http://bitbucket.org/pypy/pypy/changeset/e04c88a968bb/ Log: implement stack overflow checks in the backend diff --git a/pypy/jit/backend/arm/assembler.py b/pypy/jit/backend/arm/assembler.py --- a/pypy/jit/backend/arm/assembler.py +++ b/pypy/jit/backend/arm/assembler.py @@ -61,6 +61,7 @@ self._regalloc = None self.datablockwrapper = None self.propagate_exception_path = 0 + self.stack_check_slowpath = 0 self._compute_stack_size() self._debug = False self.loop_run_counters = [] @@ -108,6 +109,7 @@ self._build_propagate_exception_path() if gc_ll_descr.get_malloc_slowpath_addr is not None: self._build_malloc_slowpath() + self._build_stack_check_slowpath() if gc_ll_descr.gcrootmap and gc_ll_descr.gcrootmap.is_shadow_stack: self._build_release_gil(gc_ll_descr.gcrootmap) self.memcpy_addr = self.cpu.cast_ptr_to_int(memcpy_fn) @@ -238,6 +240,51 @@ self.gen_func_epilog(mc=mc) self.propagate_exception_path = mc.materialize(self.cpu.asmmemmgr, []) + def _build_stack_check_slowpath(self): + _, 
_, slowpathaddr = self.cpu.insert_stack_check() + if slowpathaddr == 0 or self.cpu.propagate_exception_v < 0: + return # no stack check (for tests, or non-translated) + # + # make a "function" that is called immediately at the start of + # an assembler function. In particular, the stack looks like: + # + # | retaddr of caller | <-- aligned to a multiple of 16 + # | saved argument regs | + # | my own retaddr | <-- sp + # +-----------------------+ + # + mc = ARMv7Builder() + # save argument registers and return address + mc.PUSH([reg.value for reg in r.argument_regs] + [r.lr.value]) + # stack is aligned here + # Pass current stack pointer as argument to the call + mc.MOV_rr(r.r0.value, r.sp.value) + # + mc.BL(slowpathaddr) + + # check for an exception + mc.gen_load_int(r.r0.value, self.cpu.pos_exception()) + mc.LDR_ri(r.r0.value, r.r0.value) + mc.TST_rr(r.r0.value, r.r0.value) + # restore registers and return + # We check for c.EQ here, meaning all bits zero in this case + mc.POP([reg.value for reg in r.argument_regs] + [r.pc.value], cond=c.EQ) + # call on_leave_jitted_save_exc() + addr = self.cpu.get_on_leave_jitted_int(save_exception=True) + mc.BL(addr) + # + mc.gen_load_int(r.r0.value, self.cpu.propagate_exception_v) + # + # footer -- note the ADD, which skips the return address of this + # function, and will instead return to the caller's caller. Note + # also that we completely ignore the saved arguments, because we + # are interrupting the function. + mc.ADD_ri(r.sp.value, r.sp.value, (len(r.argument_regs) + 1) * WORD) + mc.POP([r.pc.value]) + # + rawstart = mc.materialize(self.cpu.asmmemmgr, []) + self.stack_check_slowpath = rawstart + def setup_failure_recovery(self): @rgc.no_collect @@ -568,6 +615,27 @@ self.align() self.gen_func_prolog() + def _call_header_with_stack_check(self): + if self.stack_check_slowpath == 0: + pass # no stack check (e.g. 
not translated) + else: + endaddr, lengthaddr, _ = self.cpu.insert_stack_check() + self.mc.PUSH([r.lr.value]) + # load stack end + self.mc.gen_load_int(r.ip.value, endaddr) # load ip, [end] + self.mc.LDR_ri(r.ip.value, r.ip.value) # LDR ip, ip + # load stack length + self.mc.gen_load_int(r.lr.value, lengthaddr) # load lr, lengh + self.mc.LDR_ri(r.lr.value, r.lr.value) # ldr lr, *lengh + # calculate ofs + self.mc.SUB_rr(r.ip.value, r.ip.value, r.sp.value) # SUB ip, current + # if ofs + self.mc.CMP_rr(r.ip.value, r.lr.value) # CMP ip, lr + self.mc.BL(self.stack_check_slowpath, c=c.HI) # call if ip > lr + # + self.mc.POP([r.lr.value]) + self._call_header() + # cpu interface def assemble_loop(self, loopname, inputargs, operations, looptoken, log): clt = CompiledLoopToken(self.cpu, looptoken.number) @@ -584,7 +652,7 @@ operations = self._inject_debugging_code(looptoken, operations, 'e', looptoken.number) - self._call_header() + self._call_header_with_stack_check() sp_patch_location = self._prepare_sp_patch_position() regalloc = Regalloc(assembler=self, frame_manager=ARMFrameManager()) From noreply at buildbot.pypy.org Thu Mar 22 14:46:45 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 22 Mar 2012 14:46:45 +0100 (CET) Subject: [pypy-commit] pypy py3k: kill this line, which was introduced by 794e8ba95264: we never want pathname Message-ID: <20120322134645.F1BE582438@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53895:dbe87802495f Date: 2012-03-22 14:05 +0100 http://bitbucket.org/pypy/pypy/changeset/dbe87802495f/ Log: kill this line, which was introduced by 794e8ba95264: we never want pathname to point to a pyc file, else update_code_filenames puts *.pyc inside co_filename. 
All the tests introduced by 794e8ba95264 seems to pass, so I don't know what was its original purpose diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -968,7 +968,6 @@ code_w = read_compiled_module(space, cpathname, stream.readall()) finally: stream.close() - pathname = cpathname else: code_w = parse_source_module(space, pathname, source) From noreply at buildbot.pypy.org Thu Mar 22 14:46:47 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 22 Mar 2012 14:46:47 +0100 (CET) Subject: [pypy-commit] pypy py3k: enable the array and struct modules, for test_array_write. Else the test imports the corresponding modules in lib_pypy, which are now broken because we no longer have buffer (and I am not sure that fixing them is a good idea) Message-ID: <20120322134647.3C64F82438@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53896:6e58d4a066e8 Date: 2012-03-22 14:10 +0100 http://bitbucket.org/pypy/pypy/changeset/6e58d4a066e8/ Log: enable the array and struct modules, for test_array_write. 
Else the test imports the corresponding modules in lib_pypy, which are now broken because we no longer have buffer (and I am not sure that fixing them is a good idea) diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py --- a/pypy/module/_io/test/test_io.py +++ b/pypy/module/_io/test/test_io.py @@ -160,7 +160,7 @@ class AppTestOpen: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['_io', '_locale']) + cls.space = gettestobjspace(usemodules=['_io', '_locale', 'array', 'struct']) tmpfile = udir.join('tmpfile').ensure() cls.w_tmpfile = cls.space.wrap(str(tmpfile)) From noreply at buildbot.pypy.org Thu Mar 22 14:46:48 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 22 Mar 2012 14:46:48 +0100 (CET) Subject: [pypy-commit] pypy default: kill struct, array, binascii and _locale from lib_pypy: all these modules are Message-ID: <20120322134648.ABA0682438@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r53897:a0a90bad00dd Date: 2012-03-22 14:46 +0100 http://bitbucket.org/pypy/pypy/changeset/a0a90bad00dd/ Log: kill struct, array, binascii and _locale from lib_pypy: all these modules are implemented also at interp-level and so this code is very rarely used (only if you explicitly disable those modules to be compiled in). Fix some tests by adding array and struct to usemodules=[...]. So far they worked "by chance" because the picked the version in lib_pypy. diff --git a/lib_pypy/_locale.py b/lib_pypy/_locale.py deleted file mode 100644 --- a/lib_pypy/_locale.py +++ /dev/null @@ -1,337 +0,0 @@ -# ctypes implementation of _locale module by Victor Stinner, 2008-03-27 - -# ------------------------------------------------------------ -# Note that we also have our own interp-level implementation -# ------------------------------------------------------------ - -""" -Support for POSIX locales. 
-""" - -from ctypes import (Structure, POINTER, create_string_buffer, - c_ubyte, c_int, c_char_p, c_wchar_p, c_size_t) -from ctypes_support import standard_c_lib as libc -from ctypes_support import get_errno - -# load the platform-specific cache made by running locale.ctc.py -from ctypes_config_cache._locale_cache import * - -try: from __pypy__ import builtinify -except ImportError: builtinify = lambda f: f - - -# Ubuntu Gusty i386 structure -class lconv(Structure): - _fields_ = ( - # Numeric (non-monetary) information. - ("decimal_point", c_char_p), # Decimal point character. - ("thousands_sep", c_char_p), # Thousands separator. - - # Each element is the number of digits in each group; - # elements with higher indices are farther left. - # An element with value CHAR_MAX means that no further grouping is done. - # An element with value 0 means that the previous element is used - # for all groups farther left. */ - ("grouping", c_char_p), - - # Monetary information. - - # First three chars are a currency symbol from ISO 4217. - # Fourth char is the separator. Fifth char is '\0'. - ("int_curr_symbol", c_char_p), - ("currency_symbol", c_char_p), # Local currency symbol. - ("mon_decimal_point", c_char_p), # Decimal point character. - ("mon_thousands_sep", c_char_p), # Thousands separator. - ("mon_grouping", c_char_p), # Like `grouping' element (above). - ("positive_sign", c_char_p), # Sign for positive values. - ("negative_sign", c_char_p), # Sign for negative values. - ("int_frac_digits", c_ubyte), # Int'l fractional digits. - ("frac_digits", c_ubyte), # Local fractional digits. - # 1 if currency_symbol precedes a positive value, 0 if succeeds. - ("p_cs_precedes", c_ubyte), - # 1 iff a space separates currency_symbol from a positive value. - ("p_sep_by_space", c_ubyte), - # 1 if currency_symbol precedes a negative value, 0 if succeeds. - ("n_cs_precedes", c_ubyte), - # 1 iff a space separates currency_symbol from a negative value. 
- ("n_sep_by_space", c_ubyte), - - # Positive and negative sign positions: - # 0 Parentheses surround the quantity and currency_symbol. - # 1 The sign string precedes the quantity and currency_symbol. - # 2 The sign string follows the quantity and currency_symbol. - # 3 The sign string immediately precedes the currency_symbol. - # 4 The sign string immediately follows the currency_symbol. - ("p_sign_posn", c_ubyte), - ("n_sign_posn", c_ubyte), - # 1 if int_curr_symbol precedes a positive value, 0 if succeeds. - ("int_p_cs_precedes", c_ubyte), - # 1 iff a space separates int_curr_symbol from a positive value. - ("int_p_sep_by_space", c_ubyte), - # 1 if int_curr_symbol precedes a negative value, 0 if succeeds. - ("int_n_cs_precedes", c_ubyte), - # 1 iff a space separates int_curr_symbol from a negative value. - ("int_n_sep_by_space", c_ubyte), - # Positive and negative sign positions: - # 0 Parentheses surround the quantity and int_curr_symbol. - # 1 The sign string precedes the quantity and int_curr_symbol. - # 2 The sign string follows the quantity and int_curr_symbol. - # 3 The sign string immediately precedes the int_curr_symbol. - # 4 The sign string immediately follows the int_curr_symbol. 
- ("int_p_sign_posn", c_ubyte), - ("int_n_sign_posn", c_ubyte), - ) - -_setlocale = libc.setlocale -_setlocale.argtypes = (c_int, c_char_p) -_setlocale.restype = c_char_p - -_localeconv = libc.localeconv -_localeconv.argtypes = None -_localeconv.restype = POINTER(lconv) - -_strcoll = libc.strcoll -_strcoll.argtypes = (c_char_p, c_char_p) -_strcoll.restype = c_int - -_wcscoll = libc.wcscoll -_wcscoll.argtypes = (c_wchar_p, c_wchar_p) -_wcscoll.restype = c_int - -_strxfrm = libc.strxfrm -_strxfrm.argtypes = (c_char_p, c_char_p, c_size_t) -_strxfrm.restype = c_size_t - -HAS_LIBINTL = hasattr(libc, 'gettext') -if HAS_LIBINTL: - _gettext = libc.gettext - _gettext.argtypes = (c_char_p,) - _gettext.restype = c_char_p - - _dgettext = libc.dgettext - _dgettext.argtypes = (c_char_p, c_char_p) - _dgettext.restype = c_char_p - - _dcgettext = libc.dcgettext - _dcgettext.argtypes = (c_char_p, c_char_p, c_int) - _dcgettext.restype = c_char_p - - _textdomain = libc.textdomain - _textdomain.argtypes = (c_char_p,) - _textdomain.restype = c_char_p - - _bindtextdomain = libc.bindtextdomain - _bindtextdomain.argtypes = (c_char_p, c_char_p) - _bindtextdomain.restype = c_char_p - - HAS_BIND_TEXTDOMAIN_CODESET = hasattr(libc, 'bindtextdomain_codeset') - if HAS_BIND_TEXTDOMAIN_CODESET: - _bind_textdomain_codeset = libc.bindtextdomain_codeset - _bind_textdomain_codeset.argtypes = (c_char_p, c_char_p) - _bind_textdomain_codeset.restype = c_char_p - -class Error(Exception): - pass - -def fixup_ulcase(): - import string - #import strop - - # create uppercase map string - ul = [] - for c in xrange(256): - c = chr(c) - if c.isupper(): - ul.append(c) - ul = ''.join(ul) - string.uppercase = ul - #strop.uppercase = ul - - # create lowercase string - ul = [] - for c in xrange(256): - c = chr(c) - if c.islower(): - ul.append(c) - ul = ''.join(ul) - string.lowercase = ul - #strop.lowercase = ul - - # create letters string - ul = [] - for c in xrange(256): - c = chr(c) - if c.isalpha(): - ul.append(c) 
- ul = ''.join(ul) - string.letters = ul - - at builtinify -def setlocale(category, locale=None): - "(integer,string=None) -> string. Activates/queries locale processing." - if locale: - # set locale - result = _setlocale(category, locale) - if not result: - raise Error("unsupported locale setting") - - # record changes to LC_CTYPE - if category in (LC_CTYPE, LC_ALL): - fixup_ulcase() - else: - # get locale - result = _setlocale(category, None) - if not result: - raise Error("locale query failed") - return result - -def _copy_grouping(text): - groups = [ ord(group) for group in text ] - if groups: - groups.append(0) - return groups - - at builtinify -def localeconv(): - "() -> dict. Returns numeric and monetary locale-specific parameters." - - # if LC_NUMERIC is different in the C library, use saved value - lp = _localeconv() - l = lp.contents - - # hopefully, the localeconv result survives the C library calls - # involved herein - - # Numeric information - result = { - "decimal_point": l.decimal_point, - "thousands_sep": l.thousands_sep, - "grouping": _copy_grouping(l.grouping), - "int_curr_symbol": l.int_curr_symbol, - "currency_symbol": l.currency_symbol, - "mon_decimal_point": l.mon_decimal_point, - "mon_thousands_sep": l.mon_thousands_sep, - "mon_grouping": _copy_grouping(l.mon_grouping), - "positive_sign": l.positive_sign, - "negative_sign": l.negative_sign, - "int_frac_digits": l.int_frac_digits, - "frac_digits": l.frac_digits, - "p_cs_precedes": l.p_cs_precedes, - "p_sep_by_space": l.p_sep_by_space, - "n_cs_precedes": l.n_cs_precedes, - "n_sep_by_space": l.n_sep_by_space, - "p_sign_posn": l.p_sign_posn, - "n_sign_posn": l.n_sign_posn, - } - return result - - at builtinify -def strcoll(s1, s2): - "string,string -> int. Compares two strings according to the locale." - - # If both arguments are byte strings, use strcoll. - if isinstance(s1, str) and isinstance(s2, str): - return _strcoll(s1, s2) - - # If neither argument is unicode, it's an error. 
- if not isinstance(s1, unicode) and not isinstance(s2, unicode): - raise ValueError("strcoll arguments must be strings") - - # Convert the non-unicode argument to unicode. - s1 = unicode(s1) - s2 = unicode(s2) - - # Collate the strings. - return _wcscoll(s1, s2) - - at builtinify -def strxfrm(s): - "string -> string. Returns a string that behaves for cmp locale-aware." - - # assume no change in size, first - n1 = len(s) + 1 - buf = create_string_buffer(n1) - n2 = _strxfrm(buf, s, n1) + 1 - if n2 > n1: - # more space needed - buf = create_string_buffer(n2) - _strxfrm(buf, s, n2) - return buf.value - - at builtinify -def getdefaultlocale(): - # TODO: Port code from CPython for Windows and Mac OS - raise NotImplementedError() - -if HAS_LANGINFO: - _nl_langinfo = libc.nl_langinfo - _nl_langinfo.argtypes = (nl_item,) - _nl_langinfo.restype = c_char_p - - def nl_langinfo(key): - """nl_langinfo(key) -> string - Return the value for the locale information associated with key.""" - # Check whether this is a supported constant. GNU libc sometimes - # returns numeric values in the char* return value, which would - # crash PyString_FromString. 
- result = _nl_langinfo(key) - if result is not None: - return result - raise ValueError("unsupported langinfo constant") - -if HAS_LIBINTL: - @builtinify - def gettext(msg): - """gettext(msg) -> string - Return translation of msg.""" - return _gettext(msg) - - @builtinify - def dgettext(domain, msg): - """dgettext(domain, msg) -> string - Return translation of msg in domain.""" - return _dgettext(domain, msg) - - @builtinify - def dcgettext(domain, msg, category): - """dcgettext(domain, msg, category) -> string - Return translation of msg in domain and category.""" - return _dcgettext(domain, msg, category) - - @builtinify - def textdomain(domain): - """textdomain(domain) -> string - Set the C library's textdomain to domain, returning the new domain.""" - return _textdomain(domain) - - @builtinify - def bindtextdomain(domain, dir): - """bindtextdomain(domain, dir) -> string - Bind the C library's domain to dir.""" - dirname = _bindtextdomain(domain, dir) - if not dirname: - errno = get_errno() - raise OSError(errno) - return dirname - - if HAS_BIND_TEXTDOMAIN_CODESET: - @builtinify - def bind_textdomain_codeset(domain, codeset): - """bind_textdomain_codeset(domain, codeset) -> string - Bind the C library's domain to codeset.""" - codeset = _bind_textdomain_codeset(domain, codeset) - if codeset: - return codeset - return None - -__all__ = ( - 'Error', - 'setlocale', 'localeconv', 'strxfrm', 'strcoll', -) + ALL_CONSTANTS -if HAS_LIBINTL: - __all__ += ('gettext', 'dgettext', 'dcgettext', 'textdomain', - 'bindtextdomain') - if HAS_BIND_TEXTDOMAIN_CODESET: - __all__ += ('bind_textdomain_codeset',) -if HAS_LANGINFO: - __all__ += ('nl_langinfo',) diff --git a/lib_pypy/array.py b/lib_pypy/array.py deleted file mode 100644 --- a/lib_pypy/array.py +++ /dev/null @@ -1,531 +0,0 @@ -"""This module defines an object type which can efficiently represent -an array of basic values: characters, integers, floating point -numbers. 
Arrays are sequence types and behave very much like lists, -except that the type of objects stored in them is constrained. The -type is specified at object creation time by using a type code, which -is a single character. The following type codes are defined: - - Type code C Type Minimum size in bytes - 'c' character 1 - 'b' signed integer 1 - 'B' unsigned integer 1 - 'u' Unicode character 2 - 'h' signed integer 2 - 'H' unsigned integer 2 - 'i' signed integer 2 - 'I' unsigned integer 2 - 'l' signed integer 4 - 'L' unsigned integer 4 - 'f' floating point 4 - 'd' floating point 8 - -The constructor is: - -array(typecode [, initializer]) -- create a new array -""" - -from struct import calcsize, pack, pack_into, unpack_from -import operator - -# the buffer-like object to use internally: trying from -# various places in order... -try: - import _rawffi # a reasonable implementation based - _RAWARRAY = _rawffi.Array('c') # on raw_malloc, and providing a - def bytebuffer(size): # real address - return _RAWARRAY(size, autofree=True) - def getbufaddress(buf): - return buf.buffer -except ImportError: - try: - from __pypy__ import bytebuffer # a reasonable implementation - def getbufaddress(buf): # compatible with oo backends, - return 0 # but no address - except ImportError: - # not running on PyPy. Fall back to ctypes... - import ctypes - bytebuffer = ctypes.create_string_buffer - def getbufaddress(buf): - voidp = ctypes.cast(ctypes.pointer(buf), ctypes.c_void_p) - return voidp.value - -# ____________________________________________________________ - -TYPECODES = "cbBuhHiIlLfd" - -class array(object): - """array(typecode [, initializer]) -> array - - Return a new array whose items are restricted by typecode, and - initialized from the optional initializer value, which must be a list, - string. or iterable over elements of the appropriate type. - - Arrays represent basic values and behave very much like lists, except - the type of objects stored in them is constrained. 
- - Methods: - - append() -- append a new item to the end of the array - buffer_info() -- return information giving the current memory info - byteswap() -- byteswap all the items of the array - count() -- return number of occurences of an object - extend() -- extend array by appending multiple elements from an iterable - fromfile() -- read items from a file object - fromlist() -- append items from the list - fromstring() -- append items from the string - index() -- return index of first occurence of an object - insert() -- insert a new item into the array at a provided position - pop() -- remove and return item (default last) - read() -- DEPRECATED, use fromfile() - remove() -- remove first occurence of an object - reverse() -- reverse the order of the items in the array - tofile() -- write all items to a file object - tolist() -- return the array converted to an ordinary list - tostring() -- return the array converted to a string - write() -- DEPRECATED, use tofile() - - Attributes: - - typecode -- the typecode character used to create the array - itemsize -- the length in bytes of one array item - """ - __slots__ = ["typecode", "itemsize", "_data", "_descriptor", "__weakref__"] - - def __new__(cls, typecode, initializer=[], **extrakwds): - self = object.__new__(cls) - if cls is array and extrakwds: - raise TypeError("array() does not take keyword arguments") - if not isinstance(typecode, str) or len(typecode) != 1: - raise TypeError( - "array() argument 1 must be char, not %s" % type(typecode)) - if typecode not in TYPECODES: - raise ValueError( - "bad typecode (must be one of %s)" % ', '.join(TYPECODES)) - self._data = bytebuffer(0) - self.typecode = typecode - self.itemsize = calcsize(typecode) - if isinstance(initializer, list): - self.fromlist(initializer) - elif isinstance(initializer, str): - self.fromstring(initializer) - elif isinstance(initializer, unicode) and self.typecode == "u": - self.fromunicode(initializer) - else: - self.extend(initializer) - 
return self - - def _clear(self): - self._data = bytebuffer(0) - - ##### array-specific operations - - def fromfile(self, f, n): - """Read n objects from the file object f and append them to the end of - the array. Also called as read.""" - if not isinstance(f, file): - raise TypeError("arg1 must be open file") - size = self.itemsize * n - item = f.read(size) - if len(item) < size: - raise EOFError("not enough items in file") - self.fromstring(item) - - def fromlist(self, l): - """Append items to array from list.""" - if not isinstance(l, list): - raise TypeError("arg must be list") - self._fromiterable(l) - - def fromstring(self, s): - """Appends items from the string, interpreting it as an array of machine - values, as if it had been read from a file using the fromfile() - method.""" - if isinstance(s, unicode): - s = str(s) - self._frombuffer(s) - - def _frombuffer(self, s): - length = len(s) - if length % self.itemsize != 0: - raise ValueError("string length not a multiple of item size") - boundary = len(self._data) - newdata = bytebuffer(boundary + length) - newdata[:boundary] = self._data - newdata[boundary:] = s - self._data = newdata - - def fromunicode(self, ustr): - """Extends this array with data from the unicode string ustr. The array - must be a type 'u' array; otherwise a ValueError is raised. Use - array.fromstring(ustr.encode(...)) to append Unicode data to an array of - some other type.""" - if not self.typecode == "u": - raise ValueError( - "fromunicode() may only be called on type 'u' arrays") - # XXX the following probable bug is not emulated: - # CPython accepts a non-unicode string or a buffer, and then - # behaves just like fromstring(), except that it strangely truncates - # string arguments at multiples of the unicode byte size. - # Let's only accept unicode arguments for now. 
- if not isinstance(ustr, unicode): - raise TypeError("fromunicode() argument should probably be " - "a unicode string") - # _frombuffer() does the currect thing using - # the buffer behavior of unicode objects - self._frombuffer(buffer(ustr)) - - def tofile(self, f): - """Write all items (as machine values) to the file object f. Also - called as write.""" - if not isinstance(f, file): - raise TypeError("arg must be open file") - f.write(self.tostring()) - - def tolist(self): - """Convert array to an ordinary list with the same items.""" - count = len(self._data) // self.itemsize - return list(unpack_from('%d%s' % (count, self.typecode), self._data)) - - def tostring(self): - return self._data[:] - - def __buffer__(self): - return buffer(self._data) - - def tounicode(self): - """Convert the array to a unicode string. The array must be a type 'u' - array; otherwise a ValueError is raised. Use array.tostring().decode() - to obtain a unicode string from an array of some other type.""" - if self.typecode != "u": - raise ValueError("tounicode() may only be called on type 'u' arrays") - # XXX performance is not too good - return u"".join(self.tolist()) - - def byteswap(self): - """Byteswap all items of the array. If the items in the array are not - 1, 2, 4, or 8 bytes in size, RuntimeError is raised.""" - if self.itemsize not in [1, 2, 4, 8]: - raise RuntimeError("byteswap not supported for this array") - # XXX slowish - itemsize = self.itemsize - bytes = self._data - for start in range(0, len(bytes), itemsize): - stop = start + itemsize - bytes[start:stop] = bytes[start:stop][::-1] - - def buffer_info(self): - """Return a tuple (address, length) giving the current memory address - and the length in items of the buffer used to hold array's contents. The - length should be multiplied by the itemsize attribute to calculate the - buffer length in bytes. 
On PyPy the address might be meaningless - (returned as 0), depending on the available modules.""" - return (getbufaddress(self._data), len(self)) - - read = fromfile - - write = tofile - - ##### general object protocol - - def __repr__(self): - if len(self._data) == 0: - return "array('%s')" % self.typecode - elif self.typecode == "c": - return "array('%s', %s)" % (self.typecode, repr(self.tostring())) - elif self.typecode == "u": - return "array('%s', %s)" % (self.typecode, repr(self.tounicode())) - else: - return "array('%s', %s)" % (self.typecode, repr(self.tolist())) - - def __copy__(self): - a = array(self.typecode) - a._data = bytebuffer(len(self._data)) - a._data[:] = self._data - return a - - def __eq__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) == buffer(other._data) - else: - return self.tolist() == other.tolist() - - def __ne__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) != buffer(other._data) - else: - return self.tolist() != other.tolist() - - def __lt__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) < buffer(other._data) - else: - return self.tolist() < other.tolist() - - def __gt__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) > buffer(other._data) - else: - return self.tolist() > other.tolist() - - def __le__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) <= buffer(other._data) - else: - return self.tolist() <= other.tolist() - - def __ge__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) >= buffer(other._data) - else: - return self.tolist() >= other.tolist() - - 
def __reduce__(self): - dict = getattr(self, '__dict__', None) - data = self.tostring() - if data: - initargs = (self.typecode, data) - else: - initargs = (self.typecode,) - return (type(self), initargs, dict) - - ##### list methods - - def append(self, x): - """Append new value x to the end of the array.""" - self._frombuffer(pack(self.typecode, x)) - - def count(self, x): - """Return number of occurences of x in the array.""" - return operator.countOf(self, x) - - def extend(self, iterable): - """Append items to the end of the array.""" - if isinstance(iterable, array) \ - and not self.typecode == iterable.typecode: - raise TypeError("can only extend with array of same kind") - self._fromiterable(iterable) - - def index(self, x): - """Return index of first occurence of x in the array.""" - return operator.indexOf(self, x) - - def insert(self, i, x): - """Insert a new item x into the array before position i.""" - seqlength = len(self) - if i < 0: - i += seqlength - if i < 0: - i = 0 - elif i > seqlength: - i = seqlength - boundary = i * self.itemsize - data = pack(self.typecode, x) - newdata = bytebuffer(len(self._data) + len(data)) - newdata[:boundary] = self._data[:boundary] - newdata[boundary:boundary+self.itemsize] = data - newdata[boundary+self.itemsize:] = self._data[boundary:] - self._data = newdata - - def pop(self, i=-1): - """Return the i-th element and delete it from the array. 
i defaults to - -1.""" - seqlength = len(self) - if i < 0: - i += seqlength - if not (0 <= i < seqlength): - raise IndexError(i) - boundary = i * self.itemsize - result = unpack_from(self.typecode, self._data, boundary)[0] - newdata = bytebuffer(len(self._data) - self.itemsize) - newdata[:boundary] = self._data[:boundary] - newdata[boundary:] = self._data[boundary+self.itemsize:] - self._data = newdata - return result - - def remove(self, x): - """Remove the first occurence of x in the array.""" - self.pop(self.index(x)) - - def reverse(self): - """Reverse the order of the items in the array.""" - lst = self.tolist() - lst.reverse() - self._clear() - self.fromlist(lst) - - ##### list protocol - - def __len__(self): - return len(self._data) // self.itemsize - - def __add__(self, other): - if not isinstance(other, array): - raise TypeError("can only append array to array") - if self.typecode != other.typecode: - raise TypeError("bad argument type for built-in operation") - return array(self.typecode, buffer(self._data) + buffer(other._data)) - - def __mul__(self, repeat): - return array(self.typecode, buffer(self._data) * repeat) - - __rmul__ = __mul__ - - def __getitem__(self, i): - seqlength = len(self) - if isinstance(i, slice): - start, stop, step = i.indices(seqlength) - if step != 1: - sublist = self.tolist()[i] # fall-back - return array(self.typecode, sublist) - if start < 0: - start = 0 - if stop < start: - stop = start - assert stop <= seqlength - return array(self.typecode, self._data[start * self.itemsize : - stop * self.itemsize]) - else: - if i < 0: - i += seqlength - if self.typecode == 'c': # speed trick - return self._data[i] - if not (0 <= i < seqlength): - raise IndexError(i) - boundary = i * self.itemsize - return unpack_from(self.typecode, self._data, boundary)[0] - - def __getslice__(self, i, j): - return self.__getitem__(slice(i, j)) - - def __setitem__(self, i, x): - if isinstance(i, slice): - if (not isinstance(x, array) - or self.typecode != 
x.typecode): - raise TypeError("can only assign array of same kind" - " to array slice") - seqlength = len(self) - start, stop, step = i.indices(seqlength) - if step != 1: - sublist = self.tolist() # fall-back - sublist[i] = x.tolist() - self._clear() - self.fromlist(sublist) - return - if start < 0: - start = 0 - if stop < start: - stop = start - assert stop <= seqlength - boundary1 = start * self.itemsize - boundary2 = stop * self.itemsize - boundary2new = boundary1 + len(x._data) - if boundary2 == boundary2new: - self._data[boundary1:boundary2] = x._data - else: - newdata = bytebuffer(len(self._data) + boundary2new-boundary2) - newdata[:boundary1] = self._data[:boundary1] - newdata[boundary1:boundary2new] = x._data - newdata[boundary2new:] = self._data[boundary2:] - self._data = newdata - else: - seqlength = len(self) - if i < 0: - i += seqlength - if self.typecode == 'c': # speed trick - self._data[i] = x - return - if not (0 <= i < seqlength): - raise IndexError(i) - boundary = i * self.itemsize - pack_into(self.typecode, self._data, boundary, x) - - def __setslice__(self, i, j, x): - self.__setitem__(slice(i, j), x) - - def __delitem__(self, i): - if isinstance(i, slice): - seqlength = len(self) - start, stop, step = i.indices(seqlength) - if start < 0: - start = 0 - if stop < start: - stop = start - assert stop <= seqlength - if step != 1: - sublist = self.tolist() # fall-back - del sublist[i] - self._clear() - self.fromlist(sublist) - return - dellength = stop - start - boundary1 = start * self.itemsize - boundary2 = stop * self.itemsize - newdata = bytebuffer(len(self._data) - (boundary2-boundary1)) - newdata[:boundary1] = self._data[:boundary1] - newdata[boundary1:] = self._data[boundary2:] - self._data = newdata - else: - seqlength = len(self) - if i < 0: - i += seqlength - if not (0 <= i < seqlength): - raise IndexError(i) - boundary = i * self.itemsize - newdata = bytebuffer(len(self._data) - self.itemsize) - newdata[:boundary] = self._data[:boundary] 
- newdata[boundary:] = self._data[boundary+self.itemsize:] - self._data = newdata - - def __delslice__(self, i, j): - self.__delitem__(slice(i, j)) - - def __contains__(self, item): - for x in self: - if x == item: - return True - return False - - def __iadd__(self, other): - if not isinstance(other, array): - raise TypeError("can only extend array with array") - self.extend(other) - return self - - def __imul__(self, repeat): - newdata = buffer(self._data) * repeat - self._data = bytebuffer(len(newdata)) - self._data[:] = newdata - return self - - def __iter__(self): - p = 0 - typecode = self.typecode - itemsize = self.itemsize - while p < len(self._data): - yield unpack_from(typecode, self._data, p)[0] - p += itemsize - - ##### internal methods - - def _fromiterable(self, iterable): - iterable = tuple(iterable) - n = len(iterable) - boundary = len(self._data) - newdata = bytebuffer(boundary + n * self.itemsize) - newdata[:boundary] = self._data - pack_into('%d%s' % (n, self.typecode), newdata, boundary, *iterable) - self._data = newdata - -ArrayType = array diff --git a/lib_pypy/binascii.py b/lib_pypy/binascii.py deleted file mode 100644 --- a/lib_pypy/binascii.py +++ /dev/null @@ -1,720 +0,0 @@ -"""A pure Python implementation of binascii. - -Rather slow and buggy in corner cases. -PyPy provides an RPython version too. 
-""" - -class Error(Exception): - pass - -class Done(Exception): - pass - -class Incomplete(Exception): - pass - -def a2b_uu(s): - if not s: - return '' - - length = (ord(s[0]) - 0x20) % 64 - - def quadruplets_gen(s): - while s: - try: - yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3]) - except IndexError: - s += ' ' - yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3]) - return - s = s[4:] - - try: - result = [''.join( - [chr((A - 0x20) << 2 | (((B - 0x20) >> 4) & 0x3)), - chr(((B - 0x20) & 0xf) << 4 | (((C - 0x20) >> 2) & 0xf)), - chr(((C - 0x20) & 0x3) << 6 | ((D - 0x20) & 0x3f)) - ]) for A, B, C, D in quadruplets_gen(s[1:].rstrip())] - except ValueError: - raise Error('Illegal char') - result = ''.join(result) - trailingdata = result[length:] - if trailingdata.strip('\x00'): - raise Error('Trailing garbage') - result = result[:length] - if len(result) < length: - result += ((length - len(result)) * '\x00') - return result - - -def b2a_uu(s): - length = len(s) - if length > 45: - raise Error('At most 45 bytes at once') - - def triples_gen(s): - while s: - try: - yield ord(s[0]), ord(s[1]), ord(s[2]) - except IndexError: - s += '\0\0' - yield ord(s[0]), ord(s[1]), ord(s[2]) - return - s = s[3:] - - result = [''.join( - [chr(0x20 + (( A >> 2 ) & 0x3F)), - chr(0x20 + (((A << 4) | ((B >> 4) & 0xF)) & 0x3F)), - chr(0x20 + (((B << 2) | ((C >> 6) & 0x3)) & 0x3F)), - chr(0x20 + (( C ) & 0x3F))]) - for A, B, C in triples_gen(s)] - return chr(ord(' ') + (length & 077)) + ''.join(result) + '\n' - - -table_a2b_base64 = { - 'A': 0, - 'B': 1, - 'C': 2, - 'D': 3, - 'E': 4, - 'F': 5, - 'G': 6, - 'H': 7, - 'I': 8, - 'J': 9, - 'K': 10, - 'L': 11, - 'M': 12, - 'N': 13, - 'O': 14, - 'P': 15, - 'Q': 16, - 'R': 17, - 'S': 18, - 'T': 19, - 'U': 20, - 'V': 21, - 'W': 22, - 'X': 23, - 'Y': 24, - 'Z': 25, - 'a': 26, - 'b': 27, - 'c': 28, - 'd': 29, - 'e': 30, - 'f': 31, - 'g': 32, - 'h': 33, - 'i': 34, - 'j': 35, - 'k': 36, - 'l': 37, - 'm': 38, - 'n': 39, - 'o': 40, - 'p': 41, - 'q': 42, - 
'r': 43, - 's': 44, - 't': 45, - 'u': 46, - 'v': 47, - 'w': 48, - 'x': 49, - 'y': 50, - 'z': 51, - '0': 52, - '1': 53, - '2': 54, - '3': 55, - '4': 56, - '5': 57, - '6': 58, - '7': 59, - '8': 60, - '9': 61, - '+': 62, - '/': 63, - '=': 0, -} - - -def a2b_base64(s): - if not isinstance(s, (str, unicode)): - raise TypeError("expected string or unicode, got %r" % (s,)) - s = s.rstrip() - # clean out all invalid characters, this also strips the final '=' padding - # check for correct padding - - def next_valid_char(s, pos): - for i in range(pos + 1, len(s)): - c = s[i] - if c < '\x7f': - try: - table_a2b_base64[c] - return c - except KeyError: - pass - return None - - quad_pos = 0 - leftbits = 0 - leftchar = 0 - res = [] - for i, c in enumerate(s): - if c > '\x7f' or c == '\n' or c == '\r' or c == ' ': - continue - if c == '=': - if quad_pos < 2 or (quad_pos == 2 and next_valid_char(s, i) != '='): - continue - else: - leftbits = 0 - break - try: - next_c = table_a2b_base64[c] - except KeyError: - continue - quad_pos = (quad_pos + 1) & 0x03 - leftchar = (leftchar << 6) | next_c - leftbits += 6 - if leftbits >= 8: - leftbits -= 8 - res.append((leftchar >> leftbits & 0xff)) - leftchar &= ((1 << leftbits) - 1) - if leftbits != 0: - raise Error('Incorrect padding') - - return ''.join([chr(i) for i in res]) - -table_b2a_base64 = \ -"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" - -def b2a_base64(s): - length = len(s) - final_length = length % 3 - - def triples_gen(s): - while s: - try: - yield ord(s[0]), ord(s[1]), ord(s[2]) - except IndexError: - s += '\0\0' - yield ord(s[0]), ord(s[1]), ord(s[2]) - return - s = s[3:] - - - a = triples_gen(s[ :length - final_length]) - - result = [''.join( - [table_b2a_base64[( A >> 2 ) & 0x3F], - table_b2a_base64[((A << 4) | ((B >> 4) & 0xF)) & 0x3F], - table_b2a_base64[((B << 2) | ((C >> 6) & 0x3)) & 0x3F], - table_b2a_base64[( C ) & 0x3F]]) - for A, B, C in a] - - final = s[length - final_length:] - if final_length == 
0: - snippet = '' - elif final_length == 1: - a = ord(final[0]) - snippet = table_b2a_base64[(a >> 2 ) & 0x3F] + \ - table_b2a_base64[(a << 4 ) & 0x3F] + '==' - else: - a = ord(final[0]) - b = ord(final[1]) - snippet = table_b2a_base64[(a >> 2) & 0x3F] + \ - table_b2a_base64[((a << 4) | (b >> 4) & 0xF) & 0x3F] + \ - table_b2a_base64[(b << 2) & 0x3F] + '=' - return ''.join(result) + snippet + '\n' - -def a2b_qp(s, header=False): - inp = 0 - odata = [] - while inp < len(s): - if s[inp] == '=': - inp += 1 - if inp >= len(s): - break - # Soft line breaks - if (s[inp] == '\n') or (s[inp] == '\r'): - if s[inp] != '\n': - while inp < len(s) and s[inp] != '\n': - inp += 1 - if inp < len(s): - inp += 1 - elif s[inp] == '=': - # broken case from broken python qp - odata.append('=') - inp += 1 - elif s[inp] in hex_numbers and s[inp + 1] in hex_numbers: - ch = chr(int(s[inp:inp+2], 16)) - inp += 2 - odata.append(ch) - else: - odata.append('=') - elif header and s[inp] == '_': - odata.append(' ') - inp += 1 - else: - odata.append(s[inp]) - inp += 1 - return ''.join(odata) - -def b2a_qp(data, quotetabs=False, istext=True, header=False): - """quotetabs=True means that tab and space characters are always - quoted. - istext=False means that \r and \n are treated as regular characters - header=True encodes space characters with '_' and requires - real '_' characters to be quoted. - """ - MAXLINESIZE = 76 - - # See if this string is using CRLF line ends - lf = data.find('\n') - crlf = lf > 0 and data[lf-1] == '\r' - - inp = 0 - linelen = 0 - odata = [] - while inp < len(data): - c = data[inp] - if (c > '~' or - c == '=' or - (header and c == '_') or - (c == '.' 
and linelen == 0 and (inp+1 == len(data) or - data[inp+1] == '\n' or - data[inp+1] == '\r')) or - (not istext and (c == '\r' or c == '\n')) or - ((c == '\t' or c == ' ') and (inp + 1 == len(data))) or - (c <= ' ' and c != '\r' and c != '\n' and - (quotetabs or (not quotetabs and (c != '\t' and c != ' '))))): - linelen += 3 - if linelen >= MAXLINESIZE: - odata.append('=') - if crlf: odata.append('\r') - odata.append('\n') - linelen = 3 - odata.append('=' + two_hex_digits(ord(c))) - inp += 1 - else: - if (istext and - (c == '\n' or (inp+1 < len(data) and c == '\r' and - data[inp+1] == '\n'))): - linelen = 0 - # Protect against whitespace on end of line - if (len(odata) > 0 and - (odata[-1] == ' ' or odata[-1] == '\t')): - ch = ord(odata[-1]) - odata[-1] = '=' - odata.append(two_hex_digits(ch)) - - if crlf: odata.append('\r') - odata.append('\n') - if c == '\r': - inp += 2 - else: - inp += 1 - else: - if (inp + 1 < len(data) and - data[inp+1] != '\n' and - (linelen + 1) >= MAXLINESIZE): - odata.append('=') - if crlf: odata.append('\r') - odata.append('\n') - linelen = 0 - - linelen += 1 - if header and c == ' ': - c = '_' - odata.append(c) - inp += 1 - return ''.join(odata) - -hex_numbers = '0123456789ABCDEF' -def hex(n): - if n == 0: - return '0' - - if n < 0: - n = -n - sign = '-' - else: - sign = '' - arr = [] - - def hex_gen(n): - """ Yield a nibble at a time. 
""" - while n: - yield n % 0x10 - n = n / 0x10 - - for nibble in hex_gen(n): - arr = [hex_numbers[nibble]] + arr - return sign + ''.join(arr) - -def two_hex_digits(n): - return hex_numbers[n / 0x10] + hex_numbers[n % 0x10] - - -def strhex_to_int(s): - i = 0 - for c in s: - i = i * 0x10 + hex_numbers.index(c) - return i - -hqx_encoding = '!"#$%&\'()*+,-012345689 at ABCDEFGHIJKLMNPQRSTUVXYZ[`abcdefhijklmpqr' - -DONE = 0x7f -SKIP = 0x7e -FAIL = 0x7d - -table_a2b_hqx = [ - #^@ ^A ^B ^C ^D ^E ^F ^G - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - #\b \t \n ^K ^L \r ^N ^O - FAIL, FAIL, SKIP, FAIL, FAIL, SKIP, FAIL, FAIL, - #^P ^Q ^R ^S ^T ^U ^V ^W - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - #^X ^Y ^Z ^[ ^\ ^] ^^ ^_ - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - # ! " # $ % & ' - FAIL, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, - #( ) * + , - . / - 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, FAIL, FAIL, - #0 1 2 3 4 5 6 7 - 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, FAIL, - #8 9 : ; < = > ? - 0x14, 0x15, DONE, FAIL, FAIL, FAIL, FAIL, FAIL, - #@ A B C D E F G - 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, - #H I J K L M N O - 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, FAIL, - #P Q R S T U V W - 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, FAIL, - #X Y Z [ \ ] ^ _ - 0x2C, 0x2D, 0x2E, 0x2F, FAIL, FAIL, FAIL, FAIL, - #` a b c d e f g - 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, FAIL, - #h i j k l m n o - 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, FAIL, FAIL, - #p q r s t u v w - 0x3D, 0x3E, 0x3F, FAIL, FAIL, FAIL, FAIL, FAIL, - #x y z { | } ~ ^? 
- FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, -] - -def a2b_hqx(s): - result = [] - - def quadruples_gen(s): - t = [] - for c in s: - res = table_a2b_hqx[ord(c)] - if res == SKIP: - continue - elif res == FAIL: - raise Error('Illegal character') - elif res == DONE: - yield t - raise Done - else: - t.append(res) - if len(t) == 4: - yield t - t = [] - yield t - - done = 0 - try: - for snippet in quadruples_gen(s): - length = len(snippet) - if length == 4: - result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4))) - result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2))) - result.append(chr(((snippet[2] & 0x03) << 6) | (snippet[3]))) - elif length == 3: - result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4))) - result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2))) - elif length == 2: - result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4))) - except Done: - done = 1 - except Error: - raise - return (''.join(result), done) - -def b2a_hqx(s): - result =[] - - def triples_gen(s): - while s: - try: - yield ord(s[0]), ord(s[1]), ord(s[2]) - except IndexError: - yield tuple([ord(c) for c in s]) - s = s[3:] - - for snippet in 
triples_gen(s): - length = len(snippet) - if length == 3: - result.append( - hqx_encoding[(snippet[0] & 0xfc) >> 2]) - result.append(hqx_encoding[ - ((snippet[0] & 0x03) << 4) | ((snippet[1] & 0xf0) >> 4)]) - result.append(hqx_encoding[ - (snippet[1] & 0x0f) << 2 | ((snippet[2] & 0xc0) >> 6)]) - result.append(hqx_encoding[snippet[2] & 0x3f]) - elif length == 2: - result.append( - hqx_encoding[(snippet[0] & 0xfc) >> 2]) - result.append(hqx_encoding[ - ((snippet[0] & 0x03) << 4) | ((snippet[1] & 0xf0) >> 4)]) - result.append(hqx_encoding[ - (snippet[1] & 0x0f) << 2]) - elif length == 1: - result.append( - hqx_encoding[(snippet[0] & 0xfc) >> 2]) - result.append(hqx_encoding[ - ((snippet[0] & 0x03) << 4)]) - return ''.join(result) - -crctab_hqx = [ - 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, - 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, - 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, - 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, - 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, - 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, - 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, - 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, - 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, - 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, - 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12, - 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, - 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, - 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, - 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, - 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, - 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, - 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, - 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 
0xe37f, 0xf35e, - 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, - 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, - 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, - 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, - 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, - 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, - 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, - 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, - 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, - 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, - 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, - 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, - 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0, -] - -def crc_hqx(s, crc): - for c in s: - crc = ((crc << 8) & 0xff00) ^ crctab_hqx[((crc >> 8) & 0xff) ^ ord(c)] - - return crc - -def rlecode_hqx(s): - """ - Run length encoding for binhex4. - The CPython implementation does not do run length encoding - of \x90 characters. This implementation does. - """ - if not s: - return '' - result = [] - prev = s[0] - count = 1 - # Add a dummy character to get the loop to go one extra round. - # The dummy must be different from the last character of s. - # In the same step we remove the first character, which has - # already been stored in prev. - if s[-1] == '!': - s = s[1:] + '?' - else: - s = s[1:] + '!' 
- - for c in s: - if c == prev and count < 255: - count += 1 - else: - if count == 1: - if prev != '\x90': - result.append(prev) - else: - result.extend(['\x90', '\x00']) - elif count < 4: - if prev != '\x90': - result.extend([prev] * count) - else: - result.extend(['\x90', '\x00'] * count) - else: - if prev != '\x90': - result.extend([prev, '\x90', chr(count)]) - else: - result.extend(['\x90', '\x00', '\x90', chr(count)]) - count = 1 - prev = c - - return ''.join(result) - -def rledecode_hqx(s): - s = s.split('\x90') - result = [s[0]] - prev = s[0] - for snippet in s[1:]: - count = ord(snippet[0]) - if count > 0: - result.append(prev[-1] * (count-1)) - prev = snippet - else: - result. append('\x90') - prev = '\x90' - result.append(snippet[1:]) - - return ''.join(result) - -crc_32_tab = [ - 0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L, - 0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L, - 0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L, - 0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL, - 0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L, - 0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L, - 0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L, - 0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL, - 0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L, - 0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL, - 0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L, - 0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L, - 0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L, - 0x01db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x06b6b51fL, - 0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0x0f00f934L, 0x9609a88eL, - 0xe10e9818L, 0x7f6a0dbbL, 0x086d3d2dL, 0x91646c97L, 0xe6635c01L, - 0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL, - 0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 
0x12b7e950L, - 0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L, - 0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L, - 0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL, - 0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L, - 0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L, - 0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL, - 0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L, - 0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L, - 0x03b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x04db2615L, - 0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0x0d6d6a3eL, 0x7a6a5aa8L, - 0xe40ecf0bL, 0x9309ff9dL, 0x0a00ae27L, 0x7d079eb1L, 0xf00f9344L, - 0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL, - 0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL, - 0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L, - 0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L, - 0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL, - 0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL, - 0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L, - 0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL, - 0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L, - 0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL, - 0x026d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x05005713L, - 0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0x0cb61b38L, 0x92d28e9bL, - 0xe5d5be0dL, 0x7cdcefb7L, 0x0bdbdf21L, 0x86d3d2d4L, 0xf1d4e242L, - 0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L, - 0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL, - 0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L, - 0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L, - 0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L, - 0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 
0x47b2cf7fL, 0x30b5ffe9L, - 0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L, - 0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L, - 0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL, - 0x2d02ef8dL -] - -def crc32(s, crc=0): - result = 0 - crc = ~long(crc) & 0xffffffffL - for c in s: - crc = crc_32_tab[(crc ^ long(ord(c))) & 0xffL] ^ (crc >> 8) - #/* Note: (crc >> 8) MUST zero fill on left - - result = crc ^ 0xffffffffL - - if result > 2**31: - result = ((result + 2**31) % 2**32) - 2**31 - - return result - -def b2a_hex(s): - result = [] - for char in s: - c = (ord(char) >> 4) & 0xf - if c > 9: - c = c + ord('a') - 10 - else: - c = c + ord('0') - result.append(chr(c)) - c = ord(char) & 0xf - if c > 9: - c = c + ord('a') - 10 - else: - c = c + ord('0') - result.append(chr(c)) - return ''.join(result) - -hexlify = b2a_hex - -table_hex = [ - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,-1,-1, -1,-1,-1,-1, - -1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1 -] - - -def a2b_hex(t): - result = [] - - def pairs_gen(s): - while s: - try: - yield table_hex[ord(s[0])], table_hex[ord(s[1])] - except IndexError: - if len(s): - raise TypeError('Odd-length string') - return - s = s[2:] - - for a, b in pairs_gen(t): - if a < 0 or b < 0: - raise TypeError('Non-hexadecimal digit found') - result.append(chr((a << 4) + b)) - return ''.join(result) - - -unhexlify = a2b_hex diff --git a/lib_pypy/pypy_test/test_binascii.py b/lib_pypy/pypy_test/test_binascii.py deleted file mode 100644 --- a/lib_pypy/pypy_test/test_binascii.py +++ /dev/null @@ -1,168 +0,0 @@ -from __future__ import absolute_import -import py -from lib_pypy import binascii - 
-# Create binary test data -data = "The quick brown fox jumps over the lazy dog.\r\n" -# Be slow so we don't depend on other modules -data += "".join(map(chr, xrange(256))) -data += "\r\nHello world.\n" - -def test_exceptions(): - # Check module exceptions - assert issubclass(binascii.Error, Exception) - assert issubclass(binascii.Incomplete, Exception) - -def test_functions(): - # Check presence of all functions - funcs = [] - for suffix in "base64", "hqx", "uu", "hex": - prefixes = ["a2b_", "b2a_"] - if suffix == "hqx": - prefixes.extend(["crc_", "rlecode_", "rledecode_"]) - for prefix in prefixes: - name = prefix + suffix - assert callable(getattr(binascii, name)) - py.test.raises(TypeError, getattr(binascii, name)) - for name in ("hexlify", "unhexlify"): - assert callable(getattr(binascii, name)) - py.test.raises(TypeError, getattr(binascii, name)) - -def test_base64valid(): - # Test base64 with valid data - MAX_BASE64 = 57 - lines = [] - for i in range(0, len(data), MAX_BASE64): - b = data[i:i+MAX_BASE64] - a = binascii.b2a_base64(b) - lines.append(a) - res = "" - for line in lines: - b = binascii.a2b_base64(line) - res = res + b - assert res == data - -def test_base64invalid(): - # Test base64 with random invalid characters sprinkled throughout - # (This requires a new version of binascii.) 
- MAX_BASE64 = 57 - lines = [] - for i in range(0, len(data), MAX_BASE64): - b = data[i:i+MAX_BASE64] - a = binascii.b2a_base64(b) - lines.append(a) - - fillers = "" - valid = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+/" - for i in xrange(256): - c = chr(i) - if c not in valid: - fillers += c - def addnoise(line): - noise = fillers - ratio = len(line) // len(noise) - res = "" - while line and noise: - if len(line) // len(noise) > ratio: - c, line = line[0], line[1:] - else: - c, noise = noise[0], noise[1:] - res += c - return res + noise + line - res = "" - for line in map(addnoise, lines): - b = binascii.a2b_base64(line) - res += b - assert res == data - - # Test base64 with just invalid characters, which should return - # empty strings. TBD: shouldn't it raise an exception instead ? - assert binascii.a2b_base64(fillers) == '' - -def test_uu(): - MAX_UU = 45 - lines = [] - for i in range(0, len(data), MAX_UU): - b = data[i:i+MAX_UU] - a = binascii.b2a_uu(b) - lines.append(a) - res = "" - for line in lines: - b = binascii.a2b_uu(line) - res += b - assert res == data - - assert binascii.a2b_uu("\x7f") == "\x00"*31 - assert binascii.a2b_uu("\x80") == "\x00"*32 - assert binascii.a2b_uu("\xff") == "\x00"*31 - py.test.raises(binascii.Error, binascii.a2b_uu, "\xff\x00") - py.test.raises(binascii.Error, binascii.a2b_uu, "!!!!") - - py.test.raises(binascii.Error, binascii.b2a_uu, 46*"!") - -def test_crc32(): - crc = binascii.crc32("Test the CRC-32 of") - crc = binascii.crc32(" this string.", crc) - assert crc == 1571220330 - - crc = binascii.crc32('frotz\n', 0) - assert crc == -372923920 - - py.test.raises(TypeError, binascii.crc32) - -def test_hex(): - # test hexlification - s = '{s\005\000\000\000worldi\002\000\000\000s\005\000\000\000helloi\001\000\000\0000' - t = binascii.b2a_hex(s) - u = binascii.a2b_hex(t) - assert s == u - py.test.raises(TypeError, binascii.a2b_hex, t[:-1]) - py.test.raises(TypeError, binascii.a2b_hex, t[:-1] + 'q') - - # 
Verify the treatment of Unicode strings - assert binascii.hexlify(unicode('a', 'ascii')) == '61' - -def test_qp(): - # A test for SF bug 534347 (segfaults without the proper fix) - try: - binascii.a2b_qp("", **{1:1}) - except TypeError: - pass - else: - fail("binascii.a2b_qp(**{1:1}) didn't raise TypeError") - assert binascii.a2b_qp("= ") == "= " - assert binascii.a2b_qp("==") == "=" - assert binascii.a2b_qp("=AX") == "=AX" - py.test.raises(TypeError, binascii.b2a_qp, foo="bar") - assert binascii.a2b_qp("=00\r\n=00") == "\x00\r\n\x00" - assert binascii.b2a_qp("\xff\r\n\xff\n\xff") == "=FF\r\n=FF\r\n=FF" - target = "0"*75+"=\r\n=FF\r\n=FF\r\n=FF" - assert binascii.b2a_qp("0"*75+"\xff\r\n\xff\r\n\xff") == target - -def test_empty_string(): - # A test for SF bug #1022953. Make sure SystemError is not raised. - for n in ['b2a_qp', 'a2b_hex', 'b2a_base64', 'a2b_uu', 'a2b_qp', - 'b2a_hex', 'unhexlify', 'hexlify', 'crc32', 'b2a_hqx', - 'a2b_hqx', 'a2b_base64', 'rlecode_hqx', 'b2a_uu', - 'rledecode_hqx']: - f = getattr(binascii, n) - f('') - binascii.crc_hqx('', 0) - -def test_qp_bug_case(): - assert binascii.b2a_qp('y'*77, False, False) == 'y'*75 + '=\nyy' - assert binascii.b2a_qp(' '*77, False, False) == ' '*75 + '=\n =20' - assert binascii.b2a_qp('y'*76, False, False) == 'y'*76 - assert binascii.b2a_qp(' '*76, False, False) == ' '*75 + '=\n=20' - -def test_wrong_padding(): - s = 'CSixpLDtKSC/7Liuvsax4iC6uLmwMcijIKHaILzSwd/H0SC8+LCjwLsgv7W/+Mj3IQ' - py.test.raises(binascii.Error, binascii.a2b_base64, s) - -def test_crap_after_padding(): - s = 'xxx=axxxx' - assert binascii.a2b_base64(s) == '\xc7\x1c' - -def test_wrong_args(): - # this should grow as a way longer list - py.test.raises(TypeError, binascii.a2b_base64, 42) diff --git a/lib_pypy/pypy_test/test_locale.py b/lib_pypy/pypy_test/test_locale.py deleted file mode 100644 --- a/lib_pypy/pypy_test/test_locale.py +++ /dev/null @@ -1,79 +0,0 @@ -from __future__ import absolute_import -import py -import sys - -from 
lib_pypy.ctypes_config_cache import rebuild -rebuild.rebuild_one('locale.ctc.py') - -from lib_pypy import _locale - - -def setup_module(mod): - if sys.platform == 'darwin': - py.test.skip("Locale support on MacOSX is minimal and cannot be tested") - -class TestLocale: - def setup_class(cls): - cls.oldlocale = _locale.setlocale(_locale.LC_NUMERIC) - if sys.platform.startswith("win"): - cls.tloc = "en" - elif sys.platform.startswith("freebsd"): - cls.tloc = "en_US.US-ASCII" - else: - cls.tloc = "en_US.UTF8" - try: - _locale.setlocale(_locale.LC_NUMERIC, cls.tloc) - except _locale.Error: - py.test.skip("test locale %s not supported" % cls.tloc) - - def teardown_class(cls): - _locale.setlocale(_locale.LC_NUMERIC, cls.oldlocale) - - def test_format(self): - py.test.skip("XXX fix or kill me") - - def testformat(formatstr, value, grouping = 0, output=None): - if output: - print "%s %% %s =? %s ..." %\ - (repr(formatstr), repr(value), repr(output)), - else: - print "%s %% %s works? ..." % (repr(formatstr), repr(value)), - result = locale.format(formatstr, value, grouping = grouping) - assert result == output - - testformat("%f", 1024, grouping=1, output='1,024.000000') - testformat("%f", 102, grouping=1, output='102.000000') - testformat("%f", -42, grouping=1, output='-42.000000') - testformat("%+f", -42, grouping=1, output='-42.000000') - testformat("%20.f", -42, grouping=1, output=' -42') - testformat("%+10.f", -4200, grouping=1, output=' -4,200') - testformat("%-10.f", 4200, grouping=1, output='4,200 ') - - def test_getpreferredencoding(self): - py.test.skip("XXX fix or kill me") - # Invoke getpreferredencoding to make sure it does not cause exceptions - _locale.getpreferredencoding() - - # Test BSD Rune locale's bug for isctype functions. - def test_bsd_bug(self): - def teststrop(s, method, output): - print "%s.%s() =? %s ..." 
% (repr(s), method, repr(output)), - result = getattr(s, method)() - assert result == output - - oldlocale = _locale.setlocale(_locale.LC_CTYPE) - _locale.setlocale(_locale.LC_CTYPE, self.tloc) - try: - teststrop('\x20', 'isspace', True) - teststrop('\xa0', 'isspace', False) - teststrop('\xa1', 'isspace', False) - teststrop('\xc0', 'isalpha', False) - teststrop('\xc0', 'isalnum', False) - teststrop('\xc0', 'isupper', False) - teststrop('\xc0', 'islower', False) - teststrop('\xec\xa0\xbc', 'split', ['\xec\xa0\xbc']) - teststrop('\xed\x95\xa0', 'strip', '\xed\x95\xa0') - teststrop('\xcc\x85', 'lower', '\xcc\x85') - teststrop('\xed\x95\xa0', 'upper', '\xed\x95\xa0') - finally: - _locale.setlocale(_locale.LC_CTYPE, oldlocale) diff --git a/lib_pypy/pypy_test/test_struct_extra.py b/lib_pypy/pypy_test/test_struct_extra.py deleted file mode 100644 --- a/lib_pypy/pypy_test/test_struct_extra.py +++ /dev/null @@ -1,25 +0,0 @@ -from __future__ import absolute_import -from lib_pypy import struct - -def test_simple(): - morezeros = '\x00' * (struct.calcsize('l')-4) - assert struct.pack(': big-endian, std. size & alignment - !: same as > - -The remaining chars indicate types of args and must match exactly; -these can be preceded by a decimal repeat count: - x: pad byte (no data); - c:char; - b:signed byte; - B:unsigned byte; - h:short; - H:unsigned short; - i:int; - I:unsigned int; - l:long; - L:unsigned long; - f:float; - d:double. -Special cases (preceding decimal count indicates length): - s:string (array of char); p: pascal string (with count byte). -Special case (only available in native format): - P:an integer type that is wide enough to hold a pointer. -Special case (not in native mode unless 'long long' in platform C): - q:long long; - Q:unsigned long long -Whitespace between formats is ignored. 
- -The variable struct.error is an exception raised on errors.""" - -import math, sys - -# TODO: XXX Find a way to get information on native sizes and alignments -class StructError(Exception): - pass -error = StructError -def unpack_int(data,index,size,le): - bytes = [ord(b) for b in data[index:index+size]] - if le == 'little': - bytes.reverse() - number = 0L - for b in bytes: - number = number << 8 | b - return int(number) - -def unpack_signed_int(data,index,size,le): - number = unpack_int(data,index,size,le) - max = 2**(size*8) - if number > 2**(size*8 - 1) - 1: - number = int(-1*(max - number)) - return number - -INFINITY = 1e200 * 1e200 -NAN = INFINITY / INFINITY - -def unpack_char(data,index,size,le): - return data[index:index+size] - -def pack_int(number,size,le): - x=number - res=[] - for i in range(size): - res.append(chr(x&0xff)) - x >>= 8 - if le == 'big': - res.reverse() - return ''.join(res) - -def pack_signed_int(number,size,le): - if not isinstance(number, (int,long)): - raise StructError,"argument for i,I,l,L,q,Q,h,H must be integer" - if number > 2**(8*size-1)-1 or number < -1*2**(8*size-1): - raise OverflowError,"Number:%i too large to convert" % number - return pack_int(number,size,le) - -def pack_unsigned_int(number,size,le): - if not isinstance(number, (int,long)): - raise StructError,"argument for i,I,l,L,q,Q,h,H must be integer" - if number < 0: - raise TypeError,"can't convert negative long to unsigned" - if number > 2**(8*size)-1: - raise OverflowError,"Number:%i too large to convert" % number - return pack_int(number,size,le) - -def pack_char(char,size,le): - return str(char) - -def isinf(x): - return x != 0.0 and x / 2 == x -def isnan(v): - return v != v*1.0 or (v == 1.0 and v == 2.0) - -def pack_float(x, size, le): - unsigned = float_pack(x, size) - result = [] - for i in range(8): - result.append(chr((unsigned >> (i * 8)) & 0xFF)) - if le == "big": - result.reverse() - return ''.join(result) - -def unpack_float(data, index, size, le): - 
binary = [data[i] for i in range(index, index + 8)] - if le == "big": - binary.reverse() - unsigned = 0 - for i in range(8): - unsigned |= ord(binary[i]) << (i * 8) - return float_unpack(unsigned, size, le) - -def round_to_nearest(x): - """Python 3 style round: round a float x to the nearest int, but - unlike the builtin Python 2.x round function: - - - return an int, not a float - - do round-half-to-even, not round-half-away-from-zero. - - We assume that x is finite and nonnegative; except wrong results - if you use this for negative x. - - """ - int_part = int(x) - frac_part = x - int_part - if frac_part > 0.5 or frac_part == 0.5 and int_part & 1 == 1: - int_part += 1 - return int_part - -def float_unpack(Q, size, le): - """Convert a 32-bit or 64-bit integer created - by float_pack into a Python float.""" - - if size == 8: - MIN_EXP = -1021 # = sys.float_info.min_exp - MAX_EXP = 1024 # = sys.float_info.max_exp - MANT_DIG = 53 # = sys.float_info.mant_dig - BITS = 64 - elif size == 4: - MIN_EXP = -125 # C's FLT_MIN_EXP - MAX_EXP = 128 # FLT_MAX_EXP - MANT_DIG = 24 # FLT_MANT_DIG - BITS = 32 - else: - raise ValueError("invalid size value") - - if Q >> BITS: - raise ValueError("input out of range") - - # extract pieces - sign = Q >> BITS - 1 - exp = (Q & ((1 << BITS - 1) - (1 << MANT_DIG - 1))) >> MANT_DIG - 1 - mant = Q & ((1 << MANT_DIG - 1) - 1) - - if exp == MAX_EXP - MIN_EXP + 2: - # nan or infinity - result = float('nan') if mant else float('inf') - elif exp == 0: - # subnormal or zero - result = math.ldexp(float(mant), MIN_EXP - MANT_DIG) - else: - # normal - mant += 1 << MANT_DIG - 1 - result = math.ldexp(float(mant), exp + MIN_EXP - MANT_DIG - 1) - return -result if sign else result - - -def float_pack(x, size): - """Convert a Python float x into a 64-bit unsigned integer - with the same byte representation.""" - - if size == 8: - MIN_EXP = -1021 # = sys.float_info.min_exp - MAX_EXP = 1024 # = sys.float_info.max_exp - MANT_DIG = 53 # = 
sys.float_info.mant_dig - BITS = 64 - elif size == 4: - MIN_EXP = -125 # C's FLT_MIN_EXP - MAX_EXP = 128 # FLT_MAX_EXP - MANT_DIG = 24 # FLT_MANT_DIG - BITS = 32 - else: - raise ValueError("invalid size value") - - sign = math.copysign(1.0, x) < 0.0 - if math.isinf(x): - mant = 0 - exp = MAX_EXP - MIN_EXP + 2 - elif math.isnan(x): - mant = 1 << (MANT_DIG-2) # other values possible - exp = MAX_EXP - MIN_EXP + 2 - elif x == 0.0: - mant = 0 - exp = 0 - else: - m, e = math.frexp(abs(x)) # abs(x) == m * 2**e - exp = e - (MIN_EXP - 1) - if exp > 0: - # Normal case. - mant = round_to_nearest(m * (1 << MANT_DIG)) - mant -= 1 << MANT_DIG - 1 - else: - # Subnormal case. - if exp + MANT_DIG - 1 >= 0: - mant = round_to_nearest(m * (1 << exp + MANT_DIG - 1)) - else: - mant = 0 - exp = 0 - - # Special case: rounding produced a MANT_DIG-bit mantissa. - assert 0 <= mant <= 1 << MANT_DIG - 1 - if mant == 1 << MANT_DIG - 1: - mant = 0 - exp += 1 - - # Raise on overflow (in some circumstances, may want to return - # infinity instead). 
- if exp >= MAX_EXP - MIN_EXP + 2: - raise OverflowError("float too large to pack in this format") - - # check constraints - assert 0 <= mant < 1 << MANT_DIG - 1 - assert 0 <= exp <= MAX_EXP - MIN_EXP + 2 - assert 0 <= sign <= 1 - return ((sign << BITS - 1) | (exp << MANT_DIG - 1)) | mant - - -big_endian_format = { - 'x':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None}, - 'b':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'B':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'c':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_char, 'unpack' : unpack_char}, - 's':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None}, - 'p':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None}, - 'h':{ 'size' : 2, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'H':{ 'size' : 2, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'i':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'I':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'l':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'L':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'q':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'Q':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'f':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_float, 'unpack' : unpack_float}, - 'd':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_float, 'unpack' : unpack_float}, - } -default = big_endian_format -formatmode={ '<' : (default, 'little'), - '>' : (default, 'big'), - '!' 
: (default, 'big'), - '=' : (default, sys.byteorder), - '@' : (default, sys.byteorder) - } - -def getmode(fmt): - try: - formatdef,endianness = formatmode[fmt[0]] - index = 1 - except KeyError: - formatdef,endianness = formatmode['@'] - index = 0 - return formatdef,endianness,index -def getNum(fmt,i): - num=None - cur = fmt[i] - while ('0'<= cur ) and ( cur <= '9'): - if num == None: - num = int(cur) - else: - num = 10*num + int(cur) - i += 1 - cur = fmt[i] - return num,i - -def calcsize(fmt): - """calcsize(fmt) -> int - Return size of C struct described by format string fmt. - See struct.__doc__ for more on format strings.""" - - formatdef,endianness,i = getmode(fmt) - num = 0 - result = 0 - while i string - Return string containing values v1, v2, ... packed according to fmt. - See struct.__doc__ for more on format strings.""" - formatdef,endianness,i = getmode(fmt) - args = list(args) - n_args = len(args) - result = [] - while i 0: - result += [chr(len(args[0])) + args[0][:num-1] + '\0'*padding] - else: - if num<255: - result += [chr(num-1) + args[0][:num-1]] - else: - result += [chr(255) + args[0][:num-1]] - args.pop(0) - else: - raise StructError,"arg for string format not a string" - - else: - if len(args) < num: - raise StructError,"insufficient arguments to pack" - for var in args[:num]: - result += [format['pack'](var,format['size'],endianness)] - args=args[num:] - num = None - i += 1 - if len(args) != 0: - raise StructError,"too many arguments for pack format" - return ''.join(result) - -def unpack(fmt,data): - """unpack(fmt, string) -> (v1, v2, ...) - Unpack the string, containing packed C structure data, according - to fmt. Requires len(string)==calcsize(fmt). 
- See struct.__doc__ for more on format strings.""" - formatdef,endianness,i = getmode(fmt) - j = 0 - num = 0 - result = [] - length= calcsize(fmt) - if length != len (data): - raise StructError,"unpack str size does not match format" - while i= num: - n = num-1 - result.append(data[j+1:j+n+1]) - j += num - else: - for n in range(num): - result += [format['unpack'](data,j,format['size'],endianness)] - j += format['size'] - - return tuple(result) - -def pack_into(fmt, buf, offset, *args): - data = pack(fmt, *args) - buffer(buf)[offset:offset+len(data)] = data - -def unpack_from(fmt, buf, offset=0): - size = calcsize(fmt) - data = buffer(buf)[offset:offset+size] - if len(data) != size: - raise error("unpack_from requires a buffer of at least %d bytes" - % (size,)) - return unpack(fmt, data) diff --git a/pypy/module/_hashlib/test/test_hashlib.py b/pypy/module/_hashlib/test/test_hashlib.py --- a/pypy/module/_hashlib/test/test_hashlib.py +++ b/pypy/module/_hashlib/test/test_hashlib.py @@ -3,7 +3,7 @@ class AppTestHashlib: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['_hashlib']) + cls.space = gettestobjspace(usemodules=['_hashlib', 'array', 'struct']) def test_simple(self): import _hashlib diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py --- a/pypy/module/_io/test/test_io.py +++ b/pypy/module/_io/test/test_io.py @@ -158,7 +158,7 @@ class AppTestOpen: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['_io', '_locale']) + cls.space = gettestobjspace(usemodules=['_io', '_locale', 'array', 'struct']) tmpfile = udir.join('tmpfile').ensure() cls.w_tmpfile = cls.space.wrap(str(tmpfile)) From noreply at buildbot.pypy.org Thu Mar 22 15:42:32 2012 From: noreply at buildbot.pypy.org (hager) Date: Thu, 22 Mar 2012 15:42:32 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: (bivab, hager): insert stack check before building the PyPy frame Message-ID: <20120322144232.8262582438@wyvern.cs.uni-duesseldorf.de> Author: 
hager Branch: ppc-jit-backend Changeset: r53898:398b83a81dbf Date: 2012-03-22 07:41 -0700 http://bitbucket.org/pypy/pypy/changeset/398b83a81dbf/ Log: (bivab, hager): insert stack check before building the PyPy frame diff --git a/pypy/jit/backend/ppc/ppc_assembler.py b/pypy/jit/backend/ppc/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppc_assembler.py @@ -92,6 +92,7 @@ self._regalloc = None self.max_stack_params = 0 self.propagate_exception_path = 0 + self.stack_check_slowpath = 0 self.setup_failure_recovery() self._debug = False self.loop_run_counters = [] @@ -377,6 +378,133 @@ self.write_64_bit_func_descr(rawstart, rawstart+3*WORD) self.malloc_slowpath = rawstart + def _build_stack_check_slowpath(self): + _, _, slowpathaddr = self.cpu.insert_stack_check() + if slowpathaddr == 0 or self.cpu.propagate_exception_v < 0: + return # no stack check (for tests, or non-translated) + # + # make a "function" that is called immediately at the start of + # an assembler function. 
In particular, the stack looks like: + # + # | | + # | OLD BACKCHAIN | + # | | + # =============================== - + # | | | > MINI FRAME (BACHCHAIN SIZE * WORD) + # | BACKCHAIN | | + # | | | + # =============================== - + # | | + # | SAVED PARAM REGS | + # | | + # ------------------------------- + # | | + # | BACKCHAIN | + # | | + # =============================== <- SP + # + mc = PPCBuilder() + # save argument registers and return address + + # make small frame to store data (parameter regs + LR + SCRATCH) in + # there + SAVE_AREA = len(r.PARAM_REGS) + frame_size = (BACKCHAIN_SIZE + SAVE_AREA) * WORD + + # align the SP + MINIFRAME_SIZE = BACKCHAIN_SIZE * WORD + while (frame_size + MINIFRAME_SIZE) % (4 * WORD) != 0: + frame_size += WORD + + # write function descriptor + if IS_PPC_64: + for _ in range(6): + mc.write32(0) + + # build frame + with scratch_reg(mc): + if IS_PPC_32: + mc.stwu(r.SP.value, r.SP.value, -frame_size) + mc.mflr(r.SCRATCH.value) + mc.stw(r.SCRATCH.value, r.SP.value, frame_size + WORD) + else: + mc.stdu(r.SP.value, r.SP.value, -frame_size) + mc.mflr(r.SCRATCH.value) + mc.std(r.SCRATCH.value, r.SP.value, frame_size + 2 * WORD) + + # save parameter registers + for i, reg in enumerate(r.PARAM_REGS): + mc.store(reg.value, r.SP.value, (i + BACKCHAIN_SIZE) * WORD) + + # use SP as single parameter for the call + mc.mr(r.r3.value, r.SP.value) + + # stack still aligned + mc.call(slowpathaddr) + + with scratch_reg(mc): + mc.load_imm(r.SCRATCH, self.cpu.pos_exception()) + mc.loadx(r.SCRATCH.value, 0, r.SCRATCH.value) + # if this comparison is true, then everything is ok, + # else we have an exception + mc.cmp_op(0, r.SCRATCH.value, 0, imm=True) + + jnz_location = mc.currpos() + mc.nop() + + # restore parameter registers + for i, reg in enumerate(r.PARAM_REGS): + mc.load(reg.value, r.SP.value, (i + BACKCHAIN_SIZE) * WORD) + + # restore LR + with scratch_reg(mc): + lr_offset = frame_size + WORD + if IS_PPC_64: + lr_offset += WORD + + 
mc.load(r.SCRATCH.value, r.SP.value, + lr_offset) + mc.mtlr(r.SCRATCH.value) + + # reset SP + mc.addi(r.SP.value, r.SP.value, frame_size) + mc.blr() + + pmc = OverwritingBuilder(mc, jnz_location, 1) + pmc.bc(4, 2, mc.currpos() - jnz_location) + pmc.overwrite() + + # call on_leave_jitted_save_exc() + addr = self.cpu.get_on_leave_jitted_int(save_exception=True) + mc.call(addr) + # + mc.load_imm(r.RES, self.cpu.propagate_exception_v) + # + # footer -- note the addi, which skips the return address of this + # function, and will instead return to the caller's caller. Note + # also that we completely ignore the saved arguments, because we + # are interrupting the function. + + # restore link register out of preprevious frame + offset_LR = frame_size + BACKCHAIN_SIZE * WORD + WORD + if IS_PPC_64: + offset_LR += WORD + + with scratch_reg(mc): + mc.load(r.SCRATCH.value, r.SP.value, offset_LR) + mc.mtlr(r.SCRATCH.value) + + # remove this frame and the miniframe + both_framesizes = frame_size + BACKCHAIN_SIZE * WORD + mc.addi(r.SP.value, r.SP.value, both_framesizes) + mc.blr() + + mc.prepare_insts_blocks() + rawstart = mc.materialize(self.cpu.asmmemmgr, []) + if IS_PPC_64: + self.write_64_bit_func_descr(rawstart, rawstart+3*WORD) + self.stack_check_slowpath = rawstart + def _build_propagate_exception_path(self): if self.cpu.propagate_exception_v < 0: return @@ -463,9 +591,76 @@ mc.store(reg.value, r.SPP.value, i * WORD) def gen_bootstrap_code(self, loophead, spilling_area): + self._insert_stack_check() self._make_frame(spilling_area) self.mc.b_offset(loophead) + def _insert_stack_check(self): + if self.stack_check_slowpath == 0: + pass # not translated + else: + # this is the size for the miniframe + frame_size = BACKCHAIN_SIZE * WORD + + endaddr, lengthaddr, _ = self.cpu.insert_stack_check() + + # save r16 + self.mc.mtctr(r.r16.value) + + with scratch_reg(self.mc): + self.mc.load_imm(r.SCRATCH, endaddr) # load SCRATCH, [start] + self.mc.loadx(r.SCRATCH.value, 0, 
r.SCRATCH.value) + self.mc.subf(r.SCRATCH.value, r.SP.value, r.SCRATCH.value) + self.mc.load_imm(r.r16, lengthaddr) + self.mc.load(r.r16.value, r.r16.value, 0) + self.mc.cmp_op(0, r.SCRATCH.value, r.r16.value, signed=False) + + # restore r16 + self.mc.mfctr(r.r16.value) + + patch_loc = self.mc.currpos() + self.mc.nop() + + # make minimal frame which contains the LR + # + # | OLD FRAME | + # ============================== + # | | + # | BACKCHAIN | > BACKCHAIN_SIZE * WORD + # | | + # ============================== <- SP + + if IS_PPC_32: + self.mc.stwu(r.SP.value, r.SP.value, -frame_size) + self.mc.mflr(r.SCRATCH.value) + self.mc.stw(r.SCRATCH.value, r.SP.value, frame_size + WORD) + else: + self.mc.stdu(r.SP.value, r.SP.value, -frame_size) + self.mc.mflr(r.SCRATCH.value) + self.mc.std(r.SCRATCH.value, r.SP.value, frame_size + 2 * WORD) + + # make check + self.mc.call(self.stack_check_slowpath) + + # restore LR + with scratch_reg(self.mc): + lr_offset = frame_size + WORD + if IS_PPC_64: + lr_offset += WORD + + self.mc.load(r.SCRATCH.value, r.SP.value, + lr_offset) + self.mc.mtlr(r.SCRATCH.value) + + # remove minimal frame + self.mc.addi(r.SP.value, r.SP.value, frame_size) + + offset = self.mc.currpos() - patch_loc + # + pmc = OverwritingBuilder(self.mc, patch_loc, 1) + pmc.bc(4, 1, offset) # jump if SCRATCH <= r16, i. e. 
not(SCRATCH > r16) + pmc.overwrite() + def setup(self, looptoken, operations): self.current_clt = looptoken.compiled_loop_token operations = self.cpu.gc_ll_descr.rewrite_assembler(self.cpu, @@ -486,6 +681,7 @@ self._build_propagate_exception_path() if gc_ll_descr.get_malloc_slowpath_addr is not None: self._build_malloc_slowpath() + self._build_stack_check_slowpath() if gc_ll_descr.gcrootmap and gc_ll_descr.gcrootmap.is_shadow_stack: self._build_release_gil(gc_ll_descr.gcrootmap) self.memcpy_addr = self.cpu.cast_ptr_to_int(memcpy_fn) @@ -596,6 +792,7 @@ if log: operations = self._inject_debugging_code(looptoken, operations, 'e', looptoken.number) + self.startpos = self.mc.currpos() regalloc = Regalloc(assembler=self, frame_manager=PPCFrameManager()) From noreply at buildbot.pypy.org Thu Mar 22 15:48:14 2012 From: noreply at buildbot.pypy.org (hager) Date: Thu, 22 Mar 2012 15:48:14 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: adjust comments Message-ID: <20120322144814.9D6D17107FD@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r53899:0046d0e081f7 Date: 2012-03-22 07:46 -0700 http://bitbucket.org/pypy/pypy/changeset/0046d0e081f7/ Log: adjust comments diff --git a/pypy/jit/backend/ppc/ppc_assembler.py b/pypy/jit/backend/ppc/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppc_assembler.py @@ -390,8 +390,8 @@ # | OLD BACKCHAIN | # | | # =============================== - - # | | | > MINI FRAME (BACHCHAIN SIZE * WORD) - # | BACKCHAIN | | + # | | | + # | BACKCHAIN | | > MINI FRAME (BACHCHAIN SIZE * WORD) # | | | # =============================== - # | | @@ -404,7 +404,6 @@ # =============================== <- SP # mc = PPCBuilder() - # save argument registers and return address # make small frame to store data (parameter regs + LR + SCRATCH) in # there From noreply at buildbot.pypy.org Thu Mar 22 15:48:15 2012 From: noreply at buildbot.pypy.org (hager) Date: Thu, 22 Mar 2012 15:48:15 +0100 
(CET) Subject: [pypy-commit] pypy ppc-jit-backend: beautify code Message-ID: <20120322144815.D5FBA7107FD@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r53900:4cb3a7f01438 Date: 2012-03-22 07:47 -0700 http://bitbucket.org/pypy/pypy/changeset/4cb3a7f01438/ Log: beautify code diff --git a/pypy/jit/backend/ppc/ppc_assembler.py b/pypy/jit/backend/ppc/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppc_assembler.py @@ -485,7 +485,7 @@ # are interrupting the function. # restore link register out of preprevious frame - offset_LR = frame_size + BACKCHAIN_SIZE * WORD + WORD + offset_LR = frame_size + MINIFRAME_SIZE + WORD if IS_PPC_64: offset_LR += WORD @@ -494,7 +494,7 @@ mc.mtlr(r.SCRATCH.value) # remove this frame and the miniframe - both_framesizes = frame_size + BACKCHAIN_SIZE * WORD + both_framesizes = frame_size + MINIFRAME_SIZE mc.addi(r.SP.value, r.SP.value, both_framesizes) mc.blr() From noreply at buildbot.pypy.org Thu Mar 22 16:32:49 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Mar 2012 16:32:49 +0100 (CET) Subject: [pypy-commit] pypy stm-gc: Writing code before tests: trying to write something in order to have Message-ID: <20120322153249.D9FEB82438@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stm-gc Changeset: r53901:909e01366467 Date: 2012-03-22 15:54 +0100 http://bitbucket.org/pypy/pypy/changeset/909e01366467/ Log: Writing code before tests: trying to write something in order to have a clue about where to go from here. Seems that the idea can work. diff --git a/pypy/rpython/memory/gc/stmgc.py b/pypy/rpython/memory/gc/stmgc.py --- a/pypy/rpython/memory/gc/stmgc.py +++ b/pypy/rpython/memory/gc/stmgc.py @@ -102,7 +102,7 @@ def _free_nursery(self, nursery): llarena.arena_free(nursery) - def setup_thread(self, in_main_thread): + def setup_thread(self, in_main_thread=False): """Setup a thread. Allocates the thread-local data structures. 
Must be called only once per OS-level thread.""" tls = lltype.malloc(self.GCTLS, zero=True, flavor='raw') @@ -123,9 +123,6 @@ tls.malloc_flags = -1 # don't malloc outside a transaction! return tls - def _setup_secondary_thread(self): - self.setup_thread(False) - @staticmethod def reset_nursery(tls): """Clear and forget all locally allocated objects.""" diff --git a/pypy/rpython/memory/gctransform/stmframework.py b/pypy/rpython/memory/gctransform/stmframework.py --- a/pypy/rpython/memory/gctransform/stmframework.py +++ b/pypy/rpython/memory/gctransform/stmframework.py @@ -1,19 +1,30 @@ from pypy.rpython.memory.gctransform.framework import FrameworkGCTransformer from pypy.rpython.memory.gctransform.framework import BaseRootWalker +from pypy.rpython.memory.gctransform.framework import sizeofaddr from pypy.rpython.lltypesystem import llmemory from pypy.annotation import model as annmodel +from pypy.rlib.debug import fatalerror_notb class StmFrameworkGCTransformer(FrameworkGCTransformer): def _declare_functions(self, GCClass, getfn, s_gc, *args): + # + def setup_thread(gc): + self.root_walker.allocate_shadow_stack() + gc.setup_thread() + # + def teardown_thread(gc): + gc.teardown_thread() + self.root_walker.free_shadow_stack() + # super(StmFrameworkGCTransformer, self)._declare_functions( GCClass, getfn, s_gc, *args) self.setup_secondary_thread_ptr = getfn( - GCClass._setup_secondary_thread.im_func, + setup_thread, [s_gc], annmodel.s_None) self.teardown_thread_ptr = getfn( - GCClass.teardown_thread.im_func, + teardown_thread, [s_gc], annmodel.s_None) self.stm_writebarrier_ptr = getfn( self.gcdata.gc.stm_writebarrier, @@ -28,14 +39,8 @@ self.gcdata.gc.commit_transaction.im_func, [s_gc], annmodel.s_None) - def push_roots(self, hop, keep_current_args=False): - pass - - def pop_roots(self, hop, livevars): - pass - def build_root_walker(self): - return StmStackRootWalker(self) + return StmShadowStackRootWalker(self) def gct_stm_descriptor_init(self, hop): 
hop.genop("direct_call", [self.setup_secondary_thread_ptr, @@ -69,7 +74,73 @@ hop.genop("direct_call", [self.stm_commit_ptr, self.c_const_gc]) -class StmStackRootWalker(BaseRootWalker): +class StmShadowStackRootWalker(BaseRootWalker): + need_root_stack = True + root_stack_depth = 163840 + + def __init__(self, gctransformer): + from pypy.rpython.memory.gctransform import shadowstack + # + BaseRootWalker.__init__(self, gctransformer) + # we use the thread-local self.stackgcdata to store state; + # 'self' is frozen. + STACKGCDATA = lltype.Struct('STACKGCDATA', + ('root_stack_top', llmemory.Address), + ('root_stack_base', llmemory.Address), + hints={'stm_thread_local': True}) + stackgcdata = lltype.malloc(STACKGCDATA, immortal=True) + self.stackgcdata = stackgcdata + + def incr_stack(n): + top = stackgcdata.root_stack_top + stackgcdata.root_stack_top = top + n*sizeofaddr + return top + self.incr_stack = incr_stack + + def decr_stack(n): + top = stackgcdata.root_stack_top - n*sizeofaddr + stackgcdata.root_stack_top = top + return top + self.decr_stack = decr_stack + + root_iterator = shadowstack.get_root_iterator(gctransformer) + def walk_stack_root(callback, start, end): + root_iterator.setcontext(NonConstant(llmemory.NULL)) + gc = self.gc + addr = end + while True: + addr = root_iterator.nextleft(gc, start, addr) + if addr == llmemory.NULL: + return + callback(gc, addr) + self.rootstackhook = walk_stack_root + + rsd = gctransformer.root_stack_depth + if rsd is not None: + self.root_stack_depth = rsd + + def setup_root_walker(self): + self.allocate_shadow_stack() + self.gcdata.main_thread_stack_base = self.stackgcdata.root_stack_base + BaseRootWalker.setup_root_walker(self) + + def allocate_shadow_stack(self): + root_stack_size = sizeofaddr * self.root_stack_depth + base = llmemory.raw_malloc(root_stack_size) + if base == llmemory.NULL: + raise MemoryError + self.stackgcdata.root_stack_base = base + self.stackgcdata.root_stack_top = base + + def 
free_shadow_stack(self): + base = self.stackgcdata.root_stack_base + llmemory.raw_free(base) def walk_stack_roots(self, collect_stack_root): - raise NotImplementedError + # XXX only to walk the main thread's shadow stack, so far + stackgcdata = self.stackgcdata + if self.gcdata.main_thread_stack_base != stackgcdata.root_stack_base: + fatalerror_notb("XXX not implemented: walk_stack_roots in thread") + self.rootstackhook(collect_stack_root, + stackgcdata.root_stack_base, + stackgcdata.root_stack_top) From noreply at buildbot.pypy.org Thu Mar 22 16:33:02 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Mar 2012 16:33:02 +0100 (CET) Subject: [pypy-commit] pypy stm-gc: hg merge default Message-ID: <20120322153302.91C0D82438@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stm-gc Changeset: r53902:cc22c467e315 Date: 2012-03-22 15:57 +0100 http://bitbucket.org/pypy/pypy/changeset/cc22c467e315/ Log: hg merge default diff too long, truncating to 10000 out of 21783 lines diff --git a/lib-python/2.7/SimpleXMLRPCServer.py b/lib-python/2.7/SimpleXMLRPCServer.py --- a/lib-python/2.7/SimpleXMLRPCServer.py +++ b/lib-python/2.7/SimpleXMLRPCServer.py @@ -486,7 +486,10 @@ L = [] while size_remaining: chunk_size = min(size_remaining, max_chunk_size) - L.append(self.rfile.read(chunk_size)) + chunk = self.rfile.read(chunk_size) + if not chunk: + break + L.append(chunk) size_remaining -= len(L[-1]) data = ''.join(L) diff --git a/lib-python/2.7/test/test_xmlrpc.py b/lib-python/2.7/test/test_xmlrpc.py --- a/lib-python/2.7/test/test_xmlrpc.py +++ b/lib-python/2.7/test/test_xmlrpc.py @@ -308,7 +308,7 @@ global ADDR, PORT, URL ADDR, PORT = serv.socket.getsockname() #connect to IP address directly. This avoids socket.create_connection() - #trying to connect to to "localhost" using all address families, which + #trying to connect to "localhost" using all address families, which #causes slowdown e.g. on vista which supports AF_INET6. The server listens #on AF_INET only. 
URL = "http://%s:%d"%(ADDR, PORT) @@ -367,7 +367,7 @@ global ADDR, PORT, URL ADDR, PORT = serv.socket.getsockname() #connect to IP address directly. This avoids socket.create_connection() - #trying to connect to to "localhost" using all address families, which + #trying to connect to "localhost" using all address families, which #causes slowdown e.g. on vista which supports AF_INET6. The server listens #on AF_INET only. URL = "http://%s:%d"%(ADDR, PORT) @@ -472,6 +472,9 @@ # protocol error; provide additional information in test output self.fail("%s\n%s" % (e, getattr(e, "headers", ""))) + def test_unicode_host(self): + server = xmlrpclib.ServerProxy(u"http://%s:%d/RPC2"%(ADDR, PORT)) + self.assertEqual(server.add("a", u"\xe9"), u"a\xe9") # [ch] The test 404 is causing lots of false alarms. def XXXtest_404(self): @@ -586,6 +589,12 @@ # This avoids waiting for the socket timeout. self.test_simple1() + def test_partial_post(self): + # Check that a partial POST doesn't make the server loop: issue #14001. 
+ conn = httplib.HTTPConnection(ADDR, PORT) + conn.request('POST', '/RPC2 HTTP/1.0\r\nContent-Length: 100\r\n\r\nbye') + conn.close() + class MultiPathServerTestCase(BaseServerTestCase): threadFunc = staticmethod(http_multi_server) request_count = 2 diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -311,7 +311,7 @@ RegrTest('test_mimetypes.py'), RegrTest('test_MimeWriter.py', core=False), RegrTest('test_minidom.py'), - RegrTest('test_mmap.py'), + RegrTest('test_mmap.py', usemodules="mmap"), RegrTest('test_module.py', core=True), RegrTest('test_modulefinder.py'), RegrTest('test_msilib.py', skip=only_win32), diff --git a/lib-python/modified-2.7/ctypes/test/test_arrays.py b/lib-python/modified-2.7/ctypes/test/test_arrays.py --- a/lib-python/modified-2.7/ctypes/test/test_arrays.py +++ b/lib-python/modified-2.7/ctypes/test/test_arrays.py @@ -1,12 +1,23 @@ import unittest from ctypes import * +from test.test_support import impl_detail formats = "bBhHiIlLqQfd" +# c_longdouble commented out for PyPy, look at the commend in test_longdouble formats = c_byte, c_ubyte, c_short, c_ushort, c_int, c_uint, \ - c_long, c_ulonglong, c_float, c_double, c_longdouble + c_long, c_ulonglong, c_float, c_double #, c_longdouble class ArrayTestCase(unittest.TestCase): + + @impl_detail('long double not supported by PyPy', pypy=False) + def test_longdouble(self): + """ + This test is empty. It's just here to remind that we commented out + c_longdouble in "formats". If pypy will ever supports c_longdouble, we + should kill this test and uncomment c_longdouble inside formats. + """ + def test_simple(self): # create classes holding simple numeric types, and check # various properties. 
diff --git a/lib-python/modified-2.7/distutils/command/bdist_wininst.py b/lib-python/modified-2.7/distutils/command/bdist_wininst.py --- a/lib-python/modified-2.7/distutils/command/bdist_wininst.py +++ b/lib-python/modified-2.7/distutils/command/bdist_wininst.py @@ -298,7 +298,8 @@ bitmaplen, # number of bytes in bitmap ) file.write(header) - file.write(open(arcname, "rb").read()) + with open(arcname, "rb") as arcfile: + file.write(arcfile.read()) # create_exe() diff --git a/lib-python/modified-2.7/distutils/sysconfig_pypy.py b/lib-python/modified-2.7/distutils/sysconfig_pypy.py --- a/lib-python/modified-2.7/distutils/sysconfig_pypy.py +++ b/lib-python/modified-2.7/distutils/sysconfig_pypy.py @@ -60,6 +60,7 @@ g['EXE'] = "" g['SO'] = _get_so_extension() or ".so" g['SOABI'] = g['SO'].rsplit('.')[0] + g['LIBDIR'] = os.path.join(sys.prefix, 'lib') global _config_vars _config_vars = g diff --git a/lib-python/modified-2.7/opcode.py b/lib-python/modified-2.7/opcode.py --- a/lib-python/modified-2.7/opcode.py +++ b/lib-python/modified-2.7/opcode.py @@ -192,5 +192,6 @@ def_op('LOOKUP_METHOD', 201) # Index in name list hasname.append(201) def_op('CALL_METHOD', 202) # #args not including 'self' +def_op('BUILD_LIST_FROM_ARG', 203) del def_op, name_op, jrel_op, jabs_op diff --git a/lib-python/modified-2.7/site.py b/lib-python/modified-2.7/site.py --- a/lib-python/modified-2.7/site.py +++ b/lib-python/modified-2.7/site.py @@ -550,9 +550,18 @@ "'import usercustomize' failed; use -v for traceback" +def import_builtin_stuff(): + """PyPy specific: pre-import a few built-in modules, because + some programs actually rely on them to be in sys.modules :-(""" + import exceptions + if 'zipimport' in sys.builtin_module_names: + import zipimport + + def main(): global ENABLE_USER_SITE + import_builtin_stuff() abs__file__() known_paths = removeduppaths() if (os.name == "posix" and sys.path and diff --git a/lib-python/modified-2.7/test/test_dis.py b/lib-python/modified-2.7/test/test_dis.py 
new file mode 100644 --- /dev/null +++ b/lib-python/modified-2.7/test/test_dis.py @@ -0,0 +1,150 @@ +# Minimal tests for dis module + +from test.test_support import run_unittest +import unittest +import sys +import dis +import StringIO + + +def _f(a): + print a + return 1 + +dis_f = """\ + %-4d 0 LOAD_FAST 0 (a) + 3 PRINT_ITEM + 4 PRINT_NEWLINE + + %-4d 5 LOAD_CONST 1 (1) + 8 RETURN_VALUE +"""%(_f.func_code.co_firstlineno + 1, + _f.func_code.co_firstlineno + 2) + + +def bug708901(): + for res in range(1, + 10): + pass + +dis_bug708901 = """\ + %-4d 0 SETUP_LOOP 23 (to 26) + 3 LOAD_GLOBAL 0 (range) + 6 LOAD_CONST 1 (1) + + %-4d 9 LOAD_CONST 2 (10) + 12 CALL_FUNCTION 2 + 15 GET_ITER + >> 16 FOR_ITER 6 (to 25) + 19 STORE_FAST 0 (res) + + %-4d 22 JUMP_ABSOLUTE 16 + >> 25 POP_BLOCK + >> 26 LOAD_CONST 0 (None) + 29 RETURN_VALUE +"""%(bug708901.func_code.co_firstlineno + 1, + bug708901.func_code.co_firstlineno + 2, + bug708901.func_code.co_firstlineno + 3) + + +def bug1333982(x=[]): + assert 0, ([s for s in x] + + 1) + pass + +dis_bug1333982 = """\ + %-4d 0 LOAD_CONST 1 (0) + 3 POP_JUMP_IF_TRUE 38 + 6 LOAD_GLOBAL 0 (AssertionError) + 9 LOAD_FAST 0 (x) + 12 BUILD_LIST_FROM_ARG 0 + 15 GET_ITER + >> 16 FOR_ITER 12 (to 31) + 19 STORE_FAST 1 (s) + 22 LOAD_FAST 1 (s) + 25 LIST_APPEND 2 + 28 JUMP_ABSOLUTE 16 + + %-4d >> 31 LOAD_CONST 2 (1) + 34 BINARY_ADD + 35 RAISE_VARARGS 2 + + %-4d >> 38 LOAD_CONST 0 (None) + 41 RETURN_VALUE +"""%(bug1333982.func_code.co_firstlineno + 1, + bug1333982.func_code.co_firstlineno + 2, + bug1333982.func_code.co_firstlineno + 3) + +_BIG_LINENO_FORMAT = """\ +%3d 0 LOAD_GLOBAL 0 (spam) + 3 POP_TOP + 4 LOAD_CONST 0 (None) + 7 RETURN_VALUE +""" + +class DisTests(unittest.TestCase): + def do_disassembly_test(self, func, expected): + s = StringIO.StringIO() + save_stdout = sys.stdout + sys.stdout = s + dis.dis(func) + sys.stdout = save_stdout + got = s.getvalue() + # Trim trailing blanks (if any). 
+ lines = got.split('\n') + lines = [line.rstrip() for line in lines] + expected = expected.split("\n") + import difflib + if expected != lines: + self.fail( + "events did not match expectation:\n" + + "\n".join(difflib.ndiff(expected, + lines))) + + def test_opmap(self): + self.assertEqual(dis.opmap["STOP_CODE"], 0) + self.assertIn(dis.opmap["LOAD_CONST"], dis.hasconst) + self.assertIn(dis.opmap["STORE_NAME"], dis.hasname) + + def test_opname(self): + self.assertEqual(dis.opname[dis.opmap["LOAD_FAST"]], "LOAD_FAST") + + def test_boundaries(self): + self.assertEqual(dis.opmap["EXTENDED_ARG"], dis.EXTENDED_ARG) + self.assertEqual(dis.opmap["STORE_NAME"], dis.HAVE_ARGUMENT) + + def test_dis(self): + self.do_disassembly_test(_f, dis_f) + + def test_bug_708901(self): + self.do_disassembly_test(bug708901, dis_bug708901) + + def test_bug_1333982(self): + # This one is checking bytecodes generated for an `assert` statement, + # so fails if the tests are run with -O. Skip this test then. + if __debug__: + self.do_disassembly_test(bug1333982, dis_bug1333982) + + def test_big_linenos(self): + def func(count): + namespace = {} + func = "def foo():\n " + "".join(["\n "] * count + ["spam\n"]) + exec func in namespace + return namespace['foo'] + + # Test all small ranges + for i in xrange(1, 300): + expected = _BIG_LINENO_FORMAT % (i + 2) + self.do_disassembly_test(func(i), expected) + + # Test some larger ranges too + for i in xrange(300, 5000, 10): + expected = _BIG_LINENO_FORMAT % (i + 2) + self.do_disassembly_test(func(i), expected) + +def test_main(): + run_unittest(DisTests) + + +if __name__ == "__main__": + test_main() diff --git a/lib_pypy/_csv.py b/lib_pypy/_csv.py --- a/lib_pypy/_csv.py +++ b/lib_pypy/_csv.py @@ -414,7 +414,7 @@ def _parse_add_char(self, c): if len(self.field) + len(c) > _field_limit: - raise Error("field larget than field limit (%d)" % (_field_limit)) + raise Error("field larger than field limit (%d)" % (_field_limit)) self.field += c diff --git 
a/lib_pypy/_ctypes/array.py b/lib_pypy/_ctypes/array.py --- a/lib_pypy/_ctypes/array.py +++ b/lib_pypy/_ctypes/array.py @@ -1,9 +1,9 @@ - +import _ffi import _rawffi from _ctypes.basics import _CData, cdata_from_address, _CDataMeta, sizeof from _ctypes.basics import keepalive_key, store_reference, ensure_objects -from _ctypes.basics import CArgObject +from _ctypes.basics import CArgObject, as_ffi_pointer class ArrayMeta(_CDataMeta): def __new__(self, name, cls, typedict): @@ -211,6 +211,9 @@ def _to_ffi_param(self): return self._get_buffer_value() + def _as_ffi_pointer_(self, ffitype): + return as_ffi_pointer(self, ffitype) + ARRAY_CACHE = {} def create_array_type(base, length): @@ -228,5 +231,6 @@ _type_ = base ) cls = ArrayMeta(name, (Array,), tpdict) + cls._ffiargtype = _ffi.types.Pointer(base.get_ffi_argtype()) ARRAY_CACHE[key] = cls return cls diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -230,5 +230,16 @@ } +# called from primitive.py, pointer.py, array.py +def as_ffi_pointer(value, ffitype): + my_ffitype = type(value).get_ffi_argtype() + # for now, we always allow types.pointer, else a lot of tests + # break. 
We need to rethink how pointers are represented, though + if my_ffitype is not ffitype and ffitype is not _ffi.types.void_p: + raise ArgumentError("expected %s instance, got %s" % (type(value), + ffitype)) + return value._get_buffer_value() + + # used by "byref" from _ctypes.pointer import pointer diff --git a/lib_pypy/_ctypes/builtin.py b/lib_pypy/_ctypes/builtin.py --- a/lib_pypy/_ctypes/builtin.py +++ b/lib_pypy/_ctypes/builtin.py @@ -31,24 +31,20 @@ arg = cobj._get_buffer_value() return _rawffi.wcharp2rawunicode(arg, lgt) -class ErrorObject(local): - def __init__(self): - self.errno = 0 - self.winerror = 0 -_error_object = ErrorObject() +_err = local() def get_errno(): - return _error_object.errno + return getattr(_err, "errno", 0) def set_errno(errno): - old_errno = _error_object.errno - _error_object.errno = errno + old_errno = get_errno() + _err.errno = errno return old_errno def get_last_error(): - return _error_object.winerror + return getattr(_err, "winerror", 0) def set_last_error(winerror): - old_winerror = _error_object.winerror - _error_object.winerror = winerror + old_winerror = get_last_error() + _err.winerror = winerror return old_winerror diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -3,7 +3,7 @@ from _ctypes.primitive import SimpleType, _SimpleCData from _ctypes.basics import ArgumentError, keepalive_key from _ctypes.basics import is_struct_shape -from _ctypes.builtin import set_errno, set_last_error +from _ctypes.builtin import get_errno, set_errno, get_last_error, set_last_error import _rawffi import _ffi import sys @@ -350,16 +350,24 @@ def _call_funcptr(self, funcptr, *newargs): if self._flags_ & _rawffi.FUNCFLAG_USE_ERRNO: - set_errno(_rawffi.get_errno()) + tmp = _rawffi.get_errno() + _rawffi.set_errno(get_errno()) + set_errno(tmp) if self._flags_ & _rawffi.FUNCFLAG_USE_LASTERROR: - set_last_error(_rawffi.get_last_error()) + tmp = 
_rawffi.get_last_error() + _rawffi.set_last_error(get_last_error()) + set_last_error(tmp) try: result = funcptr(*newargs) finally: if self._flags_ & _rawffi.FUNCFLAG_USE_ERRNO: - set_errno(_rawffi.get_errno()) + tmp = _rawffi.get_errno() + _rawffi.set_errno(get_errno()) + set_errno(tmp) if self._flags_ & _rawffi.FUNCFLAG_USE_LASTERROR: - set_last_error(_rawffi.get_last_error()) + tmp = _rawffi.get_last_error() + _rawffi.set_last_error(get_last_error()) + set_last_error(tmp) # try: return self._build_result(self._restype_, result, newargs) diff --git a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py --- a/lib_pypy/_ctypes/pointer.py +++ b/lib_pypy/_ctypes/pointer.py @@ -3,7 +3,7 @@ import _ffi from _ctypes.basics import _CData, _CDataMeta, cdata_from_address, ArgumentError from _ctypes.basics import keepalive_key, store_reference, ensure_objects -from _ctypes.basics import sizeof, byref +from _ctypes.basics import sizeof, byref, as_ffi_pointer from _ctypes.array import Array, array_get_slice_params, array_slice_getitem,\ array_slice_setitem @@ -119,14 +119,6 @@ def _as_ffi_pointer_(self, ffitype): return as_ffi_pointer(self, ffitype) -def as_ffi_pointer(value, ffitype): - my_ffitype = type(value).get_ffi_argtype() - # for now, we always allow types.pointer, else a lot of tests - # break. 
We need to rethink how pointers are represented, though - if my_ffitype is not ffitype and ffitype is not _ffi.types.void_p: - raise ArgumentError("expected %s instance, got %s" % (type(value), - ffitype)) - return value._get_buffer_value() def _cast_addr(obj, _, tp): if not (isinstance(tp, _CDataMeta) and tp._is_pointer_like()): diff --git a/lib_pypy/cPickle.py b/lib_pypy/cPickle.py --- a/lib_pypy/cPickle.py +++ b/lib_pypy/cPickle.py @@ -2,16 +2,95 @@ # One-liner implementation of cPickle # -from pickle import * +from pickle import Pickler, dump, dumps, PickleError, PicklingError, UnpicklingError, _EmptyClass from pickle import __doc__, __version__, format_version, compatible_formats +from types import * +from copy_reg import dispatch_table +from copy_reg import _extension_registry, _inverted_registry, _extension_cache +import marshal, struct, sys try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f +# These are purely informational; no code uses these. +format_version = "2.0" # File format version we write +compatible_formats = ["1.0", # Original protocol 0 + "1.1", # Protocol 0 with INST added + "1.2", # Original protocol 1 + "1.3", # Protocol 1 with BINFLOAT added + "2.0", # Protocol 2 + ] # Old format versions we can read + +# Keep in synch with cPickle. This is the highest protocol number we +# know how to read. 
+HIGHEST_PROTOCOL = 2 BadPickleGet = KeyError UnpickleableError = PicklingError +MARK = ord('(') # push special markobject on stack +STOP = ord('.') # every pickle ends with STOP +POP = ord('0') # discard topmost stack item +POP_MARK = ord('1') # discard stack top through topmost markobject +DUP = ord('2') # duplicate top stack item +FLOAT = ord('F') # push float object; decimal string argument +INT = ord('I') # push integer or bool; decimal string argument +BININT = ord('J') # push four-byte signed int +BININT1 = ord('K') # push 1-byte unsigned int +LONG = ord('L') # push long; decimal string argument +BININT2 = ord('M') # push 2-byte unsigned int +NONE = ord('N') # push None +PERSID = ord('P') # push persistent object; id is taken from string arg +BINPERSID = ord('Q') # " " " ; " " " " stack +REDUCE = ord('R') # apply callable to argtuple, both on stack +STRING = ord('S') # push string; NL-terminated string argument +BINSTRING = ord('T') # push string; counted binary string argument +SHORT_BINSTRING = ord('U') # " " ; " " " " < 256 bytes +UNICODE = ord('V') # push Unicode string; raw-unicode-escaped'd argument +BINUNICODE = ord('X') # " " " ; counted UTF-8 string argument +APPEND = ord('a') # append stack top to list below it +BUILD = ord('b') # call __setstate__ or __dict__.update() +GLOBAL = ord('c') # push self.find_class(modname, name); 2 string args +DICT = ord('d') # build a dict from stack items +EMPTY_DICT = ord('}') # push empty dict +APPENDS = ord('e') # extend list on stack by topmost stack slice +GET = ord('g') # push item from memo on stack; index is string arg +BINGET = ord('h') # " " " " " " ; " " 1-byte arg +INST = ord('i') # build & push class instance +LONG_BINGET = ord('j') # push item from memo on stack; index is 4-byte arg +LIST = ord('l') # build list from topmost stack items +EMPTY_LIST = ord(']') # push empty list +OBJ = ord('o') # build & push class instance +PUT = ord('p') # store stack top in memo; index is string arg +BINPUT = ord('q') 
# " " " " " ; " " 1-byte arg +LONG_BINPUT = ord('r') # " " " " " ; " " 4-byte arg +SETITEM = ord('s') # add key+value pair to dict +TUPLE = ord('t') # build tuple from topmost stack items +EMPTY_TUPLE = ord(')') # push empty tuple +SETITEMS = ord('u') # modify dict by adding topmost key+value pairs +BINFLOAT = ord('G') # push float; arg is 8-byte float encoding + +TRUE = 'I01\n' # not an opcode; see INT docs in pickletools.py +FALSE = 'I00\n' # not an opcode; see INT docs in pickletools.py + +# Protocol 2 + +PROTO = ord('\x80') # identify pickle protocol +NEWOBJ = ord('\x81') # build object by applying cls.__new__ to argtuple +EXT1 = ord('\x82') # push object from extension registry; 1-byte index +EXT2 = ord('\x83') # ditto, but 2-byte index +EXT4 = ord('\x84') # ditto, but 4-byte index +TUPLE1 = ord('\x85') # build 1-tuple from stack top +TUPLE2 = ord('\x86') # build 2-tuple from two topmost stack items +TUPLE3 = ord('\x87') # build 3-tuple from three topmost stack items +NEWTRUE = ord('\x88') # push True +NEWFALSE = ord('\x89') # push False +LONG1 = ord('\x8a') # push long from < 256 bytes +LONG4 = ord('\x8b') # push really big long + +_tuplesize2code = [EMPTY_TUPLE, TUPLE1, TUPLE2, TUPLE3] + + # ____________________________________________________________ # XXX some temporary dark magic to produce pickled dumps that are # closer to the ones produced by cPickle in CPython @@ -44,3 +123,474 @@ file = StringIO() Pickler(file, protocol).dump(obj) return file.getvalue() + +# Why use struct.pack() for pickling but marshal.loads() for +# unpickling? struct.pack() is 40% faster than marshal.dumps(), but +# marshal.loads() is twice as fast as struct.unpack()! +mloads = marshal.loads + +# Unpickling machinery + +class Unpickler(object): + + def __init__(self, file): + """This takes a file-like object for reading a pickle data stream. + + The protocol version of the pickle is detected automatically, so no + proto argument is needed. 
+ + The file-like object must have two methods, a read() method that + takes an integer argument, and a readline() method that requires no + arguments. Both methods should return a string. Thus file-like + object can be a file object opened for reading, a StringIO object, + or any other custom object that meets this interface. + """ + self.readline = file.readline + self.read = file.read + self.memo = {} + + def load(self): + """Read a pickled object representation from the open file. + + Return the reconstituted object hierarchy specified in the file. + """ + self.mark = object() # any new unique object + self.stack = [] + self.append = self.stack.append + try: + key = ord(self.read(1)) + while key != STOP: + self.dispatch[key](self) + key = ord(self.read(1)) + except TypeError: + if self.read(1) == '': + raise EOFError + raise + return self.stack.pop() + + # Return largest index k such that self.stack[k] is self.mark. + # If the stack doesn't contain a mark, eventually raises IndexError. + # This could be sped by maintaining another stack, of indices at which + # the mark appears. For that matter, the latter stack would suffice, + # and we wouldn't need to push mark objects on self.stack at all. + # Doing so is probably a good thing, though, since if the pickle is + # corrupt (or hostile) we may get a clue from finding self.mark embedded + # in unpickled objects. 
+ def marker(self): + k = len(self.stack)-1 + while self.stack[k] is not self.mark: k -= 1 + return k + + dispatch = {} + + def load_proto(self): + proto = ord(self.read(1)) + if not 0 <= proto <= 2: + raise ValueError, "unsupported pickle protocol: %d" % proto + dispatch[PROTO] = load_proto + + def load_persid(self): + pid = self.readline()[:-1] + self.append(self.persistent_load(pid)) + dispatch[PERSID] = load_persid + + def load_binpersid(self): + pid = self.stack.pop() + self.append(self.persistent_load(pid)) + dispatch[BINPERSID] = load_binpersid + + def load_none(self): + self.append(None) + dispatch[NONE] = load_none + + def load_false(self): + self.append(False) + dispatch[NEWFALSE] = load_false + + def load_true(self): + self.append(True) + dispatch[NEWTRUE] = load_true + + def load_int(self): + data = self.readline() + if data == FALSE[1:]: + val = False + elif data == TRUE[1:]: + val = True + else: + try: + val = int(data) + except ValueError: + val = long(data) + self.append(val) + dispatch[INT] = load_int + + def load_binint(self): + self.append(mloads('i' + self.read(4))) + dispatch[BININT] = load_binint + + def load_binint1(self): + self.append(ord(self.read(1))) + dispatch[BININT1] = load_binint1 + + def load_binint2(self): + self.append(mloads('i' + self.read(2) + '\000\000')) + dispatch[BININT2] = load_binint2 + + def load_long(self): + self.append(long(self.readline()[:-1], 0)) + dispatch[LONG] = load_long + + def load_long1(self): + n = ord(self.read(1)) + bytes = self.read(n) + self.append(decode_long(bytes)) + dispatch[LONG1] = load_long1 + + def load_long4(self): + n = mloads('i' + self.read(4)) + bytes = self.read(n) + self.append(decode_long(bytes)) + dispatch[LONG4] = load_long4 + + def load_float(self): + self.append(float(self.readline()[:-1])) + dispatch[FLOAT] = load_float + + def load_binfloat(self, unpack=struct.unpack): + self.append(unpack('>d', self.read(8))[0]) + dispatch[BINFLOAT] = load_binfloat + + def load_string(self): + rep 
= self.readline() + if len(rep) < 3: + raise ValueError, "insecure string pickle" + if rep[0] == "'" == rep[-2]: + rep = rep[1:-2] + elif rep[0] == '"' == rep[-2]: + rep = rep[1:-2] + else: + raise ValueError, "insecure string pickle" + self.append(rep.decode("string-escape")) + dispatch[STRING] = load_string + + def load_binstring(self): + L = mloads('i' + self.read(4)) + self.append(self.read(L)) + dispatch[BINSTRING] = load_binstring + + def load_unicode(self): + self.append(unicode(self.readline()[:-1],'raw-unicode-escape')) + dispatch[UNICODE] = load_unicode + + def load_binunicode(self): + L = mloads('i' + self.read(4)) + self.append(unicode(self.read(L),'utf-8')) + dispatch[BINUNICODE] = load_binunicode + + def load_short_binstring(self): + L = ord(self.read(1)) + self.append(self.read(L)) + dispatch[SHORT_BINSTRING] = load_short_binstring + + def load_tuple(self): + k = self.marker() + self.stack[k:] = [tuple(self.stack[k+1:])] + dispatch[TUPLE] = load_tuple + + def load_empty_tuple(self): + self.stack.append(()) + dispatch[EMPTY_TUPLE] = load_empty_tuple + + def load_tuple1(self): + self.stack[-1] = (self.stack[-1],) + dispatch[TUPLE1] = load_tuple1 + + def load_tuple2(self): + self.stack[-2:] = [(self.stack[-2], self.stack[-1])] + dispatch[TUPLE2] = load_tuple2 + + def load_tuple3(self): + self.stack[-3:] = [(self.stack[-3], self.stack[-2], self.stack[-1])] + dispatch[TUPLE3] = load_tuple3 + + def load_empty_list(self): + self.stack.append([]) + dispatch[EMPTY_LIST] = load_empty_list + + def load_empty_dictionary(self): + self.stack.append({}) + dispatch[EMPTY_DICT] = load_empty_dictionary + + def load_list(self): + k = self.marker() + self.stack[k:] = [self.stack[k+1:]] + dispatch[LIST] = load_list + + def load_dict(self): + k = self.marker() + d = {} + items = self.stack[k+1:] + for i in range(0, len(items), 2): + key = items[i] + value = items[i+1] + d[key] = value + self.stack[k:] = [d] + dispatch[DICT] = load_dict + + # INST and OBJ differ only in 
how they get a class object. It's not + # only sensible to do the rest in a common routine, the two routines + # previously diverged and grew different bugs. + # klass is the class to instantiate, and k points to the topmost mark + # object, following which are the arguments for klass.__init__. + def _instantiate(self, klass, k): + args = tuple(self.stack[k+1:]) + del self.stack[k:] + instantiated = 0 + if (not args and + type(klass) is ClassType and + not hasattr(klass, "__getinitargs__")): + try: + value = _EmptyClass() + value.__class__ = klass + instantiated = 1 + except RuntimeError: + # In restricted execution, assignment to inst.__class__ is + # prohibited + pass + if not instantiated: + try: + value = klass(*args) + except TypeError, err: + raise TypeError, "in constructor for %s: %s" % ( + klass.__name__, str(err)), sys.exc_info()[2] + self.append(value) + + def load_inst(self): + module = self.readline()[:-1] + name = self.readline()[:-1] + klass = self.find_class(module, name) + self._instantiate(klass, self.marker()) + dispatch[INST] = load_inst + + def load_obj(self): + # Stack is ... markobject classobject arg1 arg2 ... 
+ k = self.marker() + klass = self.stack.pop(k+1) + self._instantiate(klass, k) + dispatch[OBJ] = load_obj + + def load_newobj(self): + args = self.stack.pop() + cls = self.stack[-1] + obj = cls.__new__(cls, *args) + self.stack[-1] = obj + dispatch[NEWOBJ] = load_newobj + + def load_global(self): + module = self.readline()[:-1] + name = self.readline()[:-1] + klass = self.find_class(module, name) + self.append(klass) + dispatch[GLOBAL] = load_global + + def load_ext1(self): + code = ord(self.read(1)) + self.get_extension(code) + dispatch[EXT1] = load_ext1 + + def load_ext2(self): + code = mloads('i' + self.read(2) + '\000\000') + self.get_extension(code) + dispatch[EXT2] = load_ext2 + + def load_ext4(self): + code = mloads('i' + self.read(4)) + self.get_extension(code) + dispatch[EXT4] = load_ext4 + + def get_extension(self, code): + nil = [] + obj = _extension_cache.get(code, nil) + if obj is not nil: + self.append(obj) + return + key = _inverted_registry.get(code) + if not key: + raise ValueError("unregistered extension code %d" % code) + obj = self.find_class(*key) + _extension_cache[code] = obj + self.append(obj) + + def find_class(self, module, name): + # Subclasses may override this + __import__(module) + mod = sys.modules[module] + klass = getattr(mod, name) + return klass + + def load_reduce(self): + args = self.stack.pop() + func = self.stack[-1] + value = self.stack[-1](*args) + self.stack[-1] = value + dispatch[REDUCE] = load_reduce + + def load_pop(self): + del self.stack[-1] + dispatch[POP] = load_pop + + def load_pop_mark(self): + k = self.marker() + del self.stack[k:] + dispatch[POP_MARK] = load_pop_mark + + def load_dup(self): + self.append(self.stack[-1]) + dispatch[DUP] = load_dup + + def load_get(self): + self.append(self.memo[self.readline()[:-1]]) + dispatch[GET] = load_get + + def load_binget(self): + i = ord(self.read(1)) + self.append(self.memo[repr(i)]) + dispatch[BINGET] = load_binget + + def load_long_binget(self): + i = mloads('i' + 
self.read(4)) + self.append(self.memo[repr(i)]) + dispatch[LONG_BINGET] = load_long_binget + + def load_put(self): + self.memo[self.readline()[:-1]] = self.stack[-1] + dispatch[PUT] = load_put + + def load_binput(self): + i = ord(self.read(1)) + self.memo[repr(i)] = self.stack[-1] + dispatch[BINPUT] = load_binput + + def load_long_binput(self): + i = mloads('i' + self.read(4)) + self.memo[repr(i)] = self.stack[-1] + dispatch[LONG_BINPUT] = load_long_binput + + def load_append(self): + value = self.stack.pop() + self.stack[-1].append(value) + dispatch[APPEND] = load_append + + def load_appends(self): + stack = self.stack + mark = self.marker() + lst = stack[mark - 1] + lst.extend(stack[mark + 1:]) + del stack[mark:] + dispatch[APPENDS] = load_appends + + def load_setitem(self): + stack = self.stack + value = stack.pop() + key = stack.pop() + dict = stack[-1] + dict[key] = value + dispatch[SETITEM] = load_setitem + + def load_setitems(self): + stack = self.stack + mark = self.marker() + dict = stack[mark - 1] + for i in range(mark + 1, len(stack), 2): + dict[stack[i]] = stack[i + 1] + + del stack[mark:] + dispatch[SETITEMS] = load_setitems + + def load_build(self): + stack = self.stack + state = stack.pop() + inst = stack[-1] + setstate = getattr(inst, "__setstate__", None) + if setstate: + setstate(state) + return + slotstate = None + if isinstance(state, tuple) and len(state) == 2: + state, slotstate = state + if state: + try: + d = inst.__dict__ + try: + for k, v in state.iteritems(): + d[intern(k)] = v + # keys in state don't have to be strings + # don't blow up, but don't go out of our way + except TypeError: + d.update(state) + + except RuntimeError: + # XXX In restricted execution, the instance's __dict__ + # is not accessible. Use the old way of unpickling + # the instance variables. This is a semantic + # difference when unpickling in restricted + # vs. unrestricted modes. 
+ # Note, however, that cPickle has never tried to do the + # .update() business, and always uses + # PyObject_SetItem(inst.__dict__, key, value) in a + # loop over state.items(). + for k, v in state.items(): + setattr(inst, k, v) + if slotstate: + for k, v in slotstate.items(): + setattr(inst, k, v) + dispatch[BUILD] = load_build + + def load_mark(self): + self.append(self.mark) + dispatch[MARK] = load_mark + +#from pickle import decode_long + +def decode_long(data): + r"""Decode a long from a two's complement little-endian binary string. + + >>> decode_long('') + 0L + >>> decode_long("\xff\x00") + 255L + >>> decode_long("\xff\x7f") + 32767L + >>> decode_long("\x00\xff") + -256L + >>> decode_long("\x00\x80") + -32768L + >>> decode_long("\x80") + -128L + >>> decode_long("\x7f") + 127L + """ + + nbytes = len(data) + if nbytes == 0: + return 0L + ind = nbytes - 1 + while ind and ord(data[ind]) == 0: + ind -= 1 + n = ord(data[ind]) + while ind: + n <<= 8 + ind -= 1 + if ord(data[ind]): + n += ord(data[ind]) + if ord(data[nbytes - 1]) >= 128: + n -= 1L << (nbytes << 3) + return n + +def load(f): + return Unpickler(f).load() + +def loads(str): + f = StringIO(str) + return Unpickler(f).load() diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py --- a/lib_pypy/datetime.py +++ b/lib_pypy/datetime.py @@ -1032,8 +1032,8 @@ def __setstate(self, string): if len(string) != 4 or not (1 <= ord(string[2]) <= 12): raise TypeError("not enough arguments") - yhi, ylo, self._month, self._day = map(ord, string) - self._year = yhi * 256 + ylo + self._month, self._day = ord(string[2]), ord(string[3]) + self._year = ord(string[0]) * 256 + ord(string[1]) def __reduce__(self): return (self.__class__, self._getstate()) @@ -1421,9 +1421,10 @@ def __setstate(self, string, tzinfo): if len(string) != 6 or ord(string[0]) >= 24: raise TypeError("an integer is required") - self._hour, self._minute, self._second, us1, us2, us3 = \ - map(ord, string) - self._microsecond = (((us1 << 8) | us2) << 8) 
| us3 + self._hour, self._minute, self._second = ord(string[0]), \ + ord(string[1]), ord(string[2]) + self._microsecond = (((ord(string[3]) << 8) | \ + ord(string[4])) << 8) | ord(string[5]) self._tzinfo = tzinfo def __reduce__(self): @@ -1903,10 +1904,11 @@ return (basestate, self._tzinfo) def __setstate(self, string, tzinfo): - (yhi, ylo, self._month, self._day, self._hour, - self._minute, self._second, us1, us2, us3) = map(ord, string) - self._year = yhi * 256 + ylo - self._microsecond = (((us1 << 8) | us2) << 8) | us3 + (self._month, self._day, self._hour, self._minute, + self._second) = (ord(string[2]), ord(string[3]), ord(string[4]), + ord(string[5]), ord(string[6])) + self._year = ord(string[0]) * 256 + ord(string[1]) + self._microsecond = (((ord(string[7]) << 8) | ord(string[8])) << 8) | ord(string[9]) self._tzinfo = tzinfo def __reduce__(self): diff --git a/lib_pypy/numpypy/core/numeric.py b/lib_pypy/numpypy/core/numeric.py --- a/lib_pypy/numpypy/core/numeric.py +++ b/lib_pypy/numpypy/core/numeric.py @@ -6,7 +6,7 @@ import _numpypy as multiarray # ARGH from numpypy.core.arrayprint import array2string - +newaxis = None def asanyarray(a, dtype=None, order=None, maskna=None, ownmaskna=False): """ @@ -319,4 +319,4 @@ False_ = bool_(False) True_ = bool_(True) e = math.e -pi = math.pi \ No newline at end of file +pi = math.pi diff --git a/lib_pypy/pypy_test/test_site_extra.py b/lib_pypy/pypy_test/test_site_extra.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pypy_test/test_site_extra.py @@ -0,0 +1,13 @@ +import sys, os + + +def test_preimported_modules(): + lst = ['__builtin__', '_codecs', '_warnings', 'codecs', 'encodings', + 'exceptions', 'signal', 'sys', 'zipimport'] + g = os.popen("'%s' -c 'import sys; print sorted(sys.modules)'" % + (sys.executable,)) + real_data = g.read() + g.close() + for name in lst: + quoted_name = repr(name) + assert quoted_name in real_data diff --git a/pypy/__init__.py b/pypy/__init__.py --- a/pypy/__init__.py +++ 
b/pypy/__init__.py @@ -1,1 +1,16 @@ # Empty + +# XXX Should be empty again, soon. +# XXX hack for win64: +# This patch must stay here until the END OF STAGE 1 +# When all tests work, this branch will be merged +# and the branch stage 2 is started, where we remove this patch. +import sys +if hasattr(sys, "maxsize"): + if sys.maxint != sys.maxsize: + sys.maxint = sys.maxsize + import warnings + warnings.warn("""\n +---> This win64 port is now in stage 1: sys.maxint was modified. +---> When pypy/__init__.py becomes empty again, we have reached stage 2. +""") diff --git a/pypy/annotation/builtin.py b/pypy/annotation/builtin.py --- a/pypy/annotation/builtin.py +++ b/pypy/annotation/builtin.py @@ -37,7 +37,11 @@ try: realresult = func(*args) except (ValueError, OverflowError): - return s_ImpossibleValue # no possible answer for this precise input + # no possible answer for this precise input. Be conservative + # and keep the computation non-constant. Example: + # unichr(constant-that-doesn't-fit-16-bits) on platforms where + # the underlying Python has sys.maxunicode == 0xffff. 
+ return s_result s_realresult = immutablevalue(realresult) if not s_result.contains(s_realresult): raise Exception("%s%r returned %r, which is not contained in %s" % ( @@ -163,7 +167,7 @@ r.const = False return r - assert not issubclass(typ, (int,long)) or typ in (bool, int), ( + assert not issubclass(typ, (int, long)) or typ in (bool, int, long), ( "for integers only isinstance(.,int|r_uint) are supported") if s_obj.is_constant(): @@ -297,7 +301,7 @@ def robjmodel_instantiate(s_clspbc): assert isinstance(s_clspbc, SomePBC) clsdef = None - more_than_one = len(s_clspbc.descriptions) + more_than_one = len(s_clspbc.descriptions) > 1 for desc in s_clspbc.descriptions: cdef = desc.getuniqueclassdef() if more_than_one: diff --git a/pypy/annotation/classdef.py b/pypy/annotation/classdef.py --- a/pypy/annotation/classdef.py +++ b/pypy/annotation/classdef.py @@ -134,13 +134,19 @@ if self.name not in homedef.classdesc.all_enforced_attrs: self.attr_allowed = False if not self.readonly: - raise NoSuchAttrError(homedef, self.name) + raise NoSuchAttrError( + "setting forbidden attribute %r on %r" % ( + self.name, homedef)) def modified(self, classdef='?'): self.readonly = False if not self.attr_allowed: - raise NoSuchAttrError(classdef, self.name) - + raise NoSuchAttrError( + "Attribute %r on %r should be read-only.\n" % (self.name, + classdef) + + "This error can be caused by another 'getattr' that promoted\n" + "the attribute here; the list of read locations is:\n" + + '\n'.join([str(loc[0]) for loc in self.read_locations])) class ClassDef(object): "Wraps a user class." 
diff --git a/pypy/annotation/description.py b/pypy/annotation/description.py --- a/pypy/annotation/description.py +++ b/pypy/annotation/description.py @@ -398,7 +398,6 @@ cls = pyobj base = object baselist = list(cls.__bases__) - baselist.reverse() # special case: skip BaseException in Python 2.5, and pretend # that all exceptions ultimately inherit from Exception instead @@ -408,17 +407,27 @@ elif baselist == [py.builtin.BaseException]: baselist = [Exception] + mixins_before = [] + mixins_after = [] for b1 in baselist: if b1 is object: continue if b1.__dict__.get('_mixin_', False): - self.add_mixin(b1) + if base is object: + mixins_before.append(b1) + else: + mixins_after.append(b1) else: assert base is object, ("multiple inheritance only supported " "with _mixin_: %r" % (cls,)) base = b1 + if mixins_before and mixins_after: + raise Exception("unsupported: class %r has mixin bases both" + " before and after the regular base" % (self,)) + self.add_mixins(mixins_after, check_not_in=base) + self.add_mixins(mixins_before) + self.add_sources_for_class(cls) - self.add_sources_for_class(cls) if base is not object: self.basedesc = bookkeeper.getdesc(base) @@ -480,14 +489,30 @@ return self.classdict[name] = Constant(value) - def add_mixin(self, base): - for subbase in base.__bases__: - if subbase is object: - continue - assert subbase.__dict__.get("_mixin_", False), ("Mixin class %r has non" - "mixin base class %r" % (base, subbase)) - self.add_mixin(subbase) - self.add_sources_for_class(base, mixin=True) + def add_mixins(self, mixins, check_not_in=object): + if not mixins: + return + A = type('tmp', tuple(mixins) + (object,), {}) + mro = A.__mro__ + assert mro[0] is A and mro[-1] is object + mro = mro[1:-1] + # + skip = set() + def add(cls): + if cls is not object: + for base in cls.__bases__: + add(base) + for name in cls.__dict__: + skip.add(name) + add(check_not_in) + # + for base in reversed(mro): + assert base.__dict__.get("_mixin_", False), ("Mixin class %r has non" 
+ "mixin base class %r" % (mixins, base)) + for name, value in base.__dict__.items(): + if name in skip: + continue + self.add_source_attribute(name, value, mixin=True) def add_sources_for_class(self, cls, mixin=False): for name, value in cls.__dict__.items(): diff --git a/pypy/annotation/model.py b/pypy/annotation/model.py --- a/pypy/annotation/model.py +++ b/pypy/annotation/model.py @@ -786,12 +786,15 @@ # # safety check that no-one is trying to make annotation and translation # faster by providing the -O option to Python. -try: - assert False -except AssertionError: - pass # fine -else: - raise RuntimeError("The annotator relies on 'assert' statements from the\n" +import os +if "WINGDB_PYTHON" not in os.environ: + # ...but avoiding this boring check in the IDE + try: + assert False + except AssertionError: + pass # fine + else: + raise RuntimeError("The annotator relies on 'assert' statements from the\n" "\tannotated program: you cannot run it with 'python -O'.") # this has the side-effect of registering the unary and binary operations diff --git a/pypy/annotation/test/test_annrpython.py b/pypy/annotation/test/test_annrpython.py --- a/pypy/annotation/test/test_annrpython.py +++ b/pypy/annotation/test/test_annrpython.py @@ -1,15 +1,12 @@ from __future__ import with_statement -import autopath import py.test import sys from pypy import conftest -from pypy.tool.udir import udir from pypy.annotation import model as annmodel from pypy.annotation.annrpython import RPythonAnnotator as _RPythonAnnotator from pypy.translator.translator import graphof as tgraphof from pypy.annotation import policy -from pypy.annotation import specialize from pypy.annotation.listdef import ListDef, ListChangeUnallowed from pypy.annotation.dictdef import DictDef from pypy.objspace.flow.model import * @@ -2431,6 +2428,93 @@ assert isinstance(s.items[1], annmodel.SomeChar) assert isinstance(s.items[2], annmodel.SomeChar) + def test_mixin_first(self): + class Mixin(object): + _mixin_ = True + 
def foo(self): return 4 + class Base(object): + def foo(self): return 5 + class Concrete(Mixin, Base): + pass + def f(): + return Concrete().foo() + + assert f() == 4 + a = self.RPythonAnnotator() + s = a.build_types(f, []) + assert s.const == 4 + + def test_mixin_last(self): + class Mixin(object): + _mixin_ = True + def foo(self): return 4 + class Base(object): + def foo(self): return 5 + class Concrete(Base, Mixin): + pass + def f(): + return Concrete().foo() + + assert f() == 5 + a = self.RPythonAnnotator() + s = a.build_types(f, []) + assert s.const == 5 + + def test_mixin_concrete(self): + class Mixin(object): + _mixin_ = True + def foo(self): return 4 + class Concrete(Mixin): + def foo(self): return 5 + def f(): + return Concrete().foo() + + assert f() == 5 + a = self.RPythonAnnotator() + s = a.build_types(f, []) + assert s.const == 5 + + def test_multiple_mixins_mro(self): + # an obscure situation, but it occurred in module/micronumpy/types.py + class A(object): + _mixin_ = True + def foo(self): return 1 + class B(A): + _mixin_ = True + def foo(self): return 2 + class C(A): + _mixin_ = True + class D(B, C): + _mixin_ = True + class Concrete(D): + pass + def f(): + return Concrete().foo() + + assert f() == 2 + a = self.RPythonAnnotator() + s = a.build_types(f, []) + assert s.const == 2 + + def test_multiple_mixins_mro_2(self): + class A(object): + _mixin_ = True + def foo(self): return 1 + class B(A): + _mixin_ = True + def foo(self): return 2 + class C(A): + _mixin_ = True + class Concrete(C, B): + pass + def f(): + return Concrete().foo() + + assert f() == 2 + a = self.RPythonAnnotator() + s = a.build_types(f, []) + assert s.const == 2 + def test___class___attribute(self): class Base(object): pass class A(Base): pass @@ -2469,6 +2553,26 @@ s = a.build_types(f, [int]) assert s.knowntype == int + def test_slots_reads(self): + class A(object): + __slots__ = () + class B(A): + def __init__(self, x): + self.x = x + def f(x): + if x: + a = A() + else: + a = B(x) 
+ return a.x # should explode here + + a = self.RPythonAnnotator() + e = py.test.raises(Exception, a.build_types, f, [int]) + # this should explode on reading the attribute 'a.x', but it can + # sometimes explode on 'self.x = x', which does not make much sense. + # But it looks hard to fix in general: we don't know yet during 'a.x' + # if the attribute x will be read-only or read-write. + def test_unboxed_value(self): class A(object): __slots__ = () diff --git a/pypy/bin/rpython b/pypy/bin/rpython new file mode 100755 --- /dev/null +++ b/pypy/bin/rpython @@ -0,0 +1,18 @@ +#!/usr/bin/env pypy + +"""RPython translation usage: + +rpython target + +run with --help for more information +""" + +import sys +from pypy.translator.goal.translate import main + +# no implicit targets +if len(sys.argv) == 1: + print __doc__ + sys.exit(1) + +main() diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -13,7 +13,7 @@ and not p.basename.startswith('test')] essential_modules = dict.fromkeys( - ["exceptions", "_file", "sys", "__builtin__", "posix"] + ["exceptions", "_file", "sys", "__builtin__", "posix", "_warnings"] ) default_modules = essential_modules.copy() @@ -176,9 +176,6 @@ cmdline="--translationmodules", suggests=[("objspace.allworkingmodules", False)]), - BoolOption("geninterp", "specify whether geninterp should be used", - default=False), - BoolOption("logbytecodes", "keep track of bytecode usage", default=False), @@ -393,10 +390,6 @@ config.objspace.std.suggest(withsmalllong=True) # xxx other options? ropes maybe? 
- # completely disable geninterp in a level 0 translation - if level == '0': - config.objspace.suggest(geninterp=False) - # some optimizations have different effects depending on the typesystem if type_system == 'ootype': config.objspace.std.suggest(multimethods="doubledispatch") diff --git a/pypy/config/translationoption.py b/pypy/config/translationoption.py --- a/pypy/config/translationoption.py +++ b/pypy/config/translationoption.py @@ -188,11 +188,6 @@ # Flags of the TranslationContext: BoolOption("simplifying", "Simplify flow graphs", default=True), - BoolOption("builtins_can_raise_exceptions", - "When true, assume any call to a 'simple' builtin such as " - "'hex' can raise an arbitrary exception", - default=False, - cmdline=None), BoolOption("list_comprehension_operations", "When true, look for and special-case the sequence of " "operations that results from a list comprehension and " diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -313,5 +313,10 @@ implementation detail that shows up because of internal C-level slots that PyPy does not have. +* the ``__dict__`` attribute of new-style classes returns a normal dict, as + opposed to a dict proxy like in CPython. Mutating the dict will change the + type and vice versa. For builtin types, a dictionary will be returned that + cannot be changed (but still looks and behaves like a normal dictionary). + .. 
include:: _ref.txt diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py --- a/pypy/interpreter/astcompiler/assemble.py +++ b/pypy/interpreter/astcompiler/assemble.py @@ -610,6 +610,8 @@ ops.JUMP_IF_FALSE_OR_POP : 0, ops.POP_JUMP_IF_TRUE : -1, ops.POP_JUMP_IF_FALSE : -1, + + ops.BUILD_LIST_FROM_ARG: 1, } diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -965,7 +965,7 @@ self.emit_op_arg(ops.CALL_METHOD, (kwarg_count << 8) | arg_count) return True - def _listcomp_generator(self, gens, gen_index, elt): + def _listcomp_generator(self, gens, gen_index, elt, single=False): start = self.new_block() skip = self.new_block() if_cleanup = self.new_block() @@ -973,6 +973,8 @@ gen = gens[gen_index] assert isinstance(gen, ast.comprehension) gen.iter.walkabout(self) + if single: + self.emit_op_arg(ops.BUILD_LIST_FROM_ARG, 0) self.emit_op(ops.GET_ITER) self.use_next_block(start) self.emit_jump(ops.FOR_ITER, anchor) @@ -998,8 +1000,12 @@ def visit_ListComp(self, lc): self.update_position(lc.lineno) - self.emit_op_arg(ops.BUILD_LIST, 0) - self._listcomp_generator(lc.generators, 0, lc.elt) + if len(lc.generators) != 1 or lc.generators[0].ifs: + single = False + self.emit_op_arg(ops.BUILD_LIST, 0) + else: + single = True + self._listcomp_generator(lc.generators, 0, lc.elt, single=single) def _comp_generator(self, node, generators, gen_index): start = self.new_block() diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -58,7 +58,8 @@ w_res = pyco_expr.exec_host_bytecode(w_dict, w_dict) res = space.str_w(space.repr(w_res)) if not isinstance(expected, float): - assert res == repr(expected) + noL = lambda expr: expr.replace('L', 
'') + assert noL(res) == noL(repr(expected)) else: # Float representation can vary a bit between interpreter # versions, compare the numbers instead. @@ -908,3 +909,17 @@ return d['f'](5) """) assert 'generator' in space.str_w(space.repr(w_generator)) + + def test_list_comprehension(self): + source = "def f(): [i for i in l]" + source2 = "def f(): [i for i in l for j in l]" + source3 = "def f(): [i for i in l if i]" + counts = self.count_instructions(source) + assert ops.BUILD_LIST not in counts + assert counts[ops.BUILD_LIST_FROM_ARG] == 1 + counts = self.count_instructions(source2) + assert counts[ops.BUILD_LIST] == 1 + assert ops.BUILD_LIST_FROM_ARG not in counts + counts = self.count_instructions(source3) + assert counts[ops.BUILD_LIST] == 1 + assert ops.BUILD_LIST_FROM_ARG not in counts diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -7,7 +7,8 @@ from pypy.interpreter.miscutils import ThreadLocals from pypy.tool.cache import Cache from pypy.tool.uid import HUGEVAL_BYTES -from pypy.rlib.objectmodel import we_are_translated, newlist, compute_unique_id +from pypy.rlib.objectmodel import we_are_translated, newlist_hint,\ + compute_unique_id from pypy.rlib.debug import make_sure_not_resized from pypy.rlib.timer import DummyTimer, Timer from pypy.rlib.rarithmetic import r_uint @@ -328,7 +329,7 @@ raise modname = self.str_w(w_modname) mod = self.interpclass_w(w_mod) - if isinstance(mod, Module): + if isinstance(mod, Module) and not mod.startup_called: self.timer.start("startup " + modname) mod.init(self) self.timer.stop("startup " + modname) @@ -833,7 +834,7 @@ items = [] else: try: - items = newlist(lgt_estimate) + items = newlist_hint(lgt_estimate) except MemoryError: items = [] # it might have lied # @@ -1335,7 +1336,7 @@ if not self.is_true(self.isinstance(w_obj, self.w_str)): raise OperationError(self.w_TypeError, self.wrap('argument must be a 
string')) - return self.str_w(w_obj) + return self.str_w(w_obj) def unicode_w(self, w_obj): return w_obj.unicode_w(self) @@ -1471,8 +1472,8 @@ def warn(self, msg, w_warningcls): self.appexec([self.wrap(msg), w_warningcls], """(msg, warningcls): - import warnings - warnings.warn(msg, warningcls, stacklevel=2) + import _warnings + _warnings.warn(msg, warningcls, stacklevel=2) """) def resolve_target(self, w_obj): diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py --- a/pypy/interpreter/buffer.py +++ b/pypy/interpreter/buffer.py @@ -20,6 +20,7 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.error import OperationError from pypy.rlib.objectmodel import compute_hash +from pypy.rlib.rstring import StringBuilder class Buffer(Wrappable): @@ -152,12 +153,13 @@ if space.isinstance_w(w_object, space.w_unicode): # unicode objects support the old buffer interface # but not the new buffer interface (change in python 2.7) - from pypy.rlib.rstruct.unichar import pack_unichar - charlist = [] - for unich in space.unicode_w(w_object): - pack_unichar(unich, charlist) + from pypy.rlib.rstruct.unichar import pack_unichar, UNICODE_SIZE + unistr = space.unicode_w(w_object) + builder = StringBuilder(len(unistr) * UNICODE_SIZE) + for unich in unistr: + pack_unichar(unich, builder) from pypy.interpreter.buffer import StringBuffer - w_buffer = space.wrap(StringBuffer(''.join(charlist))) + w_buffer = space.wrap(StringBuffer(builder.build())) else: w_buffer = space.buffer(w_object) diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -47,6 +47,11 @@ def async(self, space): "Check if this is an exception that should better not be caught." 
+ if not space.full_exceptions: + # flow objspace does not support such exceptions and more + # importantly, raises KeyboardInterrupt if you try to access + # space.w_KeyboardInterrupt + return False return (self.match(space, space.w_SystemExit) or self.match(space, space.w_KeyboardInterrupt)) diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -901,24 +901,20 @@ def __init__(self, source, filename=None, modname='__builtin__'): # HAAACK (but a good one) + self.filename = filename + self.source = str(py.code.Source(source).deindent()) + self.modname = modname if filename is None: f = sys._getframe(1) filename = '<%s:%d>' % (f.f_code.co_filename, f.f_lineno) + if not os.path.exists(filename): + # make source code available for tracebacks + lines = [x + "\n" for x in source.split("\n")] + py.std.linecache.cache[filename] = (1, None, lines, filename) self.filename = filename - self.source = str(py.code.Source(source).deindent()) - self.modname = modname - # look at the first three lines for a NOT_RPYTHON tag - first = "\n".join(source.split("\n", 3)[:3]) - if "NOT_RPYTHON" in first: - self.can_use_geninterp = False - else: - self.can_use_geninterp = True - # make source code available for tracebacks - lines = [x + "\n" for x in source.split("\n")] - py.std.linecache.cache[filename] = (1, None, lines, filename) def __repr__(self): - return "" % (self.filename, self.can_use_geninterp) + return "" % (self.filename,) def getwdict(self, space): return space.fromcache(ApplevelCache).getorbuild(self) @@ -979,10 +975,7 @@ def build(self, app): "NOT_RPYTHON. Called indirectly by Applevel.getwdict()." 
- if self.space.config.objspace.geninterp and app.can_use_geninterp: - return PyPyCacheDir.build_applevelinterp_dict(app, self.space) - else: - return build_applevel_dict(app, self.space) + return build_applevel_dict(app, self.space) # __________ pure applevel version __________ @@ -996,157 +989,6 @@ filename=self.filename) return w_glob -# __________ geninterplevel version __________ - -class PyPyCacheDir: - "NOT_RPYTHON" - # similar to applevel, but using translation to interp-level. - # This version maintains a cache folder with single files. - - def build_applevelinterp_dict(cls, self, space): - "NOT_RPYTHON" - # N.B. 'self' is the ApplevelInterp; this is a class method, - # just so that we have a convenient place to store the global state. - if not cls._setup_done: - cls._setup() - - from pypy.translator.geninterplevel import translate_as_module - import marshal - scramble = md5(cls.seed) - scramble.update(marshal.dumps(self.source)) - key = scramble.hexdigest() - initfunc = cls.known_code.get(key) - if not initfunc: - # try to get it from file - name = key - if self.filename: - prename = os.path.splitext(os.path.basename(self.filename))[0] - else: - prename = 'zznoname' - name = "%s_%s" % (prename, name) - try: - __import__("pypy._cache."+name) - except ImportError, x: - # print x - pass - else: - initfunc = cls.known_code[key] - if not initfunc: - # build it and put it into a file - initfunc, newsrc = translate_as_module( - self.source, self.filename, self.modname) - fname = cls.cache_path.join(name+".py").strpath - f = file(get_tmp_file_name(fname), "w") - print >> f, """\ -# self-destruct on double-click: -if __name__ == "__main__": - from pypy import _cache - import os - namestart = os.path.join(os.path.split(_cache.__file__)[0], '%s') - for ending in ('.py', '.pyc', '.pyo'): - try: - os.unlink(namestart+ending) - except os.error: - pass""" % name - print >> f - print >> f, newsrc - print >> f, "from pypy._cache import known_code" - print >> f, 
"known_code[%r] = %s" % (key, initfunc.__name__) - f.close() - rename_tmp_to_eventual_file_name(fname) - w_glob = initfunc(space) - return w_glob - build_applevelinterp_dict = classmethod(build_applevelinterp_dict) - - _setup_done = False - - def _setup(cls): - """NOT_RPYTHON""" - lp = py.path.local - import pypy, os - p = lp(pypy.__file__).new(basename='_cache').ensure(dir=1) - cls.cache_path = p - ini = p.join('__init__.py') - try: - if not ini.check(): - raise ImportError # don't import if only a .pyc file left!!! - from pypy._cache import known_code, \ - GI_VERSION_RENDERED - except ImportError: - GI_VERSION_RENDERED = 0 - from pypy.translator.geninterplevel import GI_VERSION - cls.seed = md5(str(GI_VERSION)).digest() - if GI_VERSION != GI_VERSION_RENDERED or GI_VERSION is None: - for pth in p.listdir(): - if pth.check(file=1): - try: - pth.remove() - except: pass - f = file(get_tmp_file_name(str(ini)), "w") - f.write("""\ -# This folder acts as a cache for code snippets which have been -# compiled by compile_as_module(). -# It will get a new entry for every piece of code that has -# not been seen, yet. -# -# Caution! Only the code snippet is checked. If something -# is imported, changes are not detected. Also, changes -# to geninterplevel or gateway are also not checked. -# Exception: There is a checked version number in geninterplevel.py -# -# If in doubt, remove this file from time to time. 
- -GI_VERSION_RENDERED = %r - -known_code = {} - -# self-destruct on double-click: -def harakiri(): - import pypy._cache as _c - import py - lp = py.path.local - for pth in lp(_c.__file__).dirpath().listdir(): - try: - pth.remove() - except: pass - -if __name__ == "__main__": - harakiri() - -del harakiri -""" % GI_VERSION) - f.close() - rename_tmp_to_eventual_file_name(str(ini)) - import pypy._cache - cls.known_code = pypy._cache.known_code - cls._setup_done = True - _setup = classmethod(_setup) - - -def gethostname(_cache=[]): - if not _cache: - try: - import socket - hostname = socket.gethostname() - except: - hostname = '' - _cache.append(hostname) - return _cache[0] - -def get_tmp_file_name(fname): - return '%s~%s~%d' % (fname, gethostname(), os.getpid()) - -def rename_tmp_to_eventual_file_name(fname): - # generated files are first written to the host- and process-specific - # file 'tmpname', and then atomically moved to their final 'fname' - # to avoid problems if py.py is started several times in parallel - tmpname = get_tmp_file_name(fname) - try: - os.rename(tmpname, fname) - except (OSError, IOError): - os.unlink(fname) # necessary on Windows - os.rename(tmpname, fname) - # ____________________________________________________________ def appdef(source, applevel=ApplevelClass, filename=None): @@ -1184,11 +1026,6 @@ return build_applevel_dict(self, space) -class applevelinterp_temp(ApplevelClass): - hidden_applevel = False - def getwdict(self, space): # no cache - return PyPyCacheDir.build_applevelinterp_dict(self, space) - # app2interp_temp is used for testing mainly def app2interp_temp(func, applevel_temp=applevel_temp, filename=None): """ NOT_RPYTHON """ diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -15,9 +15,8 @@ from pypy.rlib.rarithmetic import r_uint, intmask from pypy.rlib.unroll import unrolling_iterable from pypy.rlib.debug import check_nonneg -from 
pypy.tool.stdlib_opcode import (bytecode_spec, host_bytecode_spec, - unrolling_all_opcode_descs, opmap, - host_opmap) +from pypy.tool.stdlib_opcode import (bytecode_spec, + unrolling_all_opcode_descs) def unaryoperation(operationname): """NOT_RPYTHON""" @@ -713,6 +712,19 @@ w_list = self.space.newlist(items) self.pushvalue(w_list) + def BUILD_LIST_FROM_ARG(self, _, next_instr): + # this is a little dance, because list has to be before the + # value + last_val = self.popvalue() + try: + lgt = self.space.len_w(last_val) + except OperationError, e: + if e.async(self.space): + raise + lgt = 0 # oh well + self.pushvalue(self.space.newlist([], sizehint=lgt)) + self.pushvalue(last_val) + def LOAD_ATTR(self, nameindex, next_instr): "obj.attributename" w_obj = self.popvalue() @@ -1419,11 +1431,9 @@ if lastchar.isspace() and lastchar != ' ': return file_softspace(stream, True) - print_item_to._annspecialcase_ = "specialize:argtype(0)" def print_item(x): print_item_to(x, sys_stdout()) - print_item._annspecialcase_ = "flowspace:print_item" def print_newline_to(stream): stream.write("\n") @@ -1431,7 +1441,6 @@ def print_newline(): print_newline_to(sys_stdout()) - print_newline._annspecialcase_ = "flowspace:print_newline" def file_softspace(file, newflag): try: diff --git a/pypy/interpreter/pyparser/parsestring.py b/pypy/interpreter/pyparser/parsestring.py --- a/pypy/interpreter/pyparser/parsestring.py +++ b/pypy/interpreter/pyparser/parsestring.py @@ -1,5 +1,6 @@ from pypy.interpreter.error import OperationError from pypy.interpreter import unicodehelper +from pypy.rlib.rstring import StringBuilder def parsestr(space, encoding, s, unicode_literals=False): # compiler.transformer.Transformer.decode_literal depends on what @@ -115,21 +116,23 @@ the string is UTF-8 encoded and should be re-encoded in the specified encoding. """ - lis = [] + builder = StringBuilder(len(s)) ps = 0 end = len(s) - while ps < end: - if s[ps] != '\\': - # note that the C code has a label here. 
- # the logic is the same. + while 1: + ps2 = ps + while ps < end and s[ps] != '\\': if recode_encoding and ord(s[ps]) & 0x80: w, ps = decode_utf8(space, s, ps, end, recode_encoding) - # Append bytes to output buffer. - lis.append(w) + builder.append(w) + ps2 = ps else: - lis.append(s[ps]) ps += 1 - continue + if ps > ps2: + builder.append_slice(s, ps2, ps) + if ps == end: + break + ps += 1 if ps == end: raise_app_valueerror(space, 'Trailing \\ in string') @@ -140,25 +143,25 @@ if ch == '\n': pass elif ch == '\\': - lis.append('\\') + builder.append('\\') elif ch == "'": - lis.append("'") + builder.append("'") elif ch == '"': - lis.append('"') + builder.append('"') elif ch == 'b': - lis.append("\010") + builder.append("\010") elif ch == 'f': - lis.append('\014') # FF + builder.append('\014') # FF elif ch == 't': - lis.append('\t') + builder.append('\t') elif ch == 'n': - lis.append('\n') + builder.append('\n') elif ch == 'r': - lis.append('\r') + builder.append('\r') elif ch == 'v': - lis.append('\013') # VT + builder.append('\013') # VT elif ch == 'a': - lis.append('\007') # BEL, not classic C + builder.append('\007') # BEL, not classic C elif ch in '01234567': # Look for up to two more octal digits span = ps @@ -168,13 +171,13 @@ # emulate a strange wrap-around behavior of CPython: # \400 is the same as \000 because 0400 == 256 num = int(octal, 8) & 0xFF - lis.append(chr(num)) + builder.append(chr(num)) ps = span elif ch == 'x': if ps+2 <= end and isxdigit(s[ps]) and isxdigit(s[ps + 1]): hexa = s[ps : ps + 2] num = int(hexa, 16) - lis.append(chr(num)) + builder.append(chr(num)) ps += 2 else: raise_app_valueerror(space, 'invalid \\x escape') @@ -184,13 +187,13 @@ # this was not an escape, so the backslash # has to be added, and we start over in # non-escape mode. - lis.append('\\') + builder.append('\\') ps -= 1 assert ps >= 0 continue # an arbitry number of unescaped UTF-8 bytes may follow. 
- buf = ''.join(lis) + buf = builder.build() return buf diff --git a/pypy/interpreter/streamutil.py b/pypy/interpreter/streamutil.py new file mode 100644 --- /dev/null +++ b/pypy/interpreter/streamutil.py @@ -0,0 +1,17 @@ +from pypy.rlib.streamio import StreamError +from pypy.interpreter.error import OperationError, wrap_oserror2 + +def wrap_streamerror(space, e, w_filename=None): + if isinstance(e, StreamError): + return OperationError(space.w_ValueError, + space.wrap(e.message)) + elif isinstance(e, OSError): + return wrap_oserror_as_ioerror(space, e, w_filename) + else: + # should not happen: wrap_streamerror() is only called when + # StreamErrors = (OSError, StreamError) are raised + return OperationError(space.w_IOError, space.w_None) + +def wrap_oserror_as_ioerror(space, e, w_filename=None): + return wrap_oserror2(space, e, w_filename, + w_exception_class=space.w_IOError) diff --git a/pypy/interpreter/test/test_appinterp.py b/pypy/interpreter/test/test_appinterp.py --- a/pypy/interpreter/test/test_appinterp.py +++ b/pypy/interpreter/test/test_appinterp.py @@ -1,6 +1,6 @@ import py -from pypy.interpreter.gateway import appdef, ApplevelClass, applevel_temp, applevelinterp_temp +from pypy.interpreter.gateway import appdef, ApplevelClass, applevel_temp from pypy.interpreter.error import OperationError def test_execwith_novars(space): @@ -82,9 +82,6 @@ w_res = g(space, space.wrap(10), space.wrap(1)) assert space.eq_w(w_res, space.wrap(-9)) -def test_applevelinterp_functions(space): - test_applevel_functions(space, applevel_temp = applevelinterp_temp) - def test_applevel_class(space, applevel_temp = applevel_temp): app = applevel_temp(''' class C(object): @@ -99,9 +96,6 @@ w_clsattr = space.getattr(c, space.wrap('attr')) assert space.eq_w(w_clsattr, space.wrap(17)) -def test_applevelinterp_class(space): - test_applevel_class(space, applevel_temp = applevelinterp_temp) - def app_test_something_at_app_level(): x = 2 assert x/2 == 1 @@ -161,7 +155,7 @@ w_str = 
space1.getattr(w_mymod1, space1.wrap("hi")) assert space1.str_w(w_str) == "hello" - def test_geninterp_can_unfreeze(self): + def test_random_stuff_can_unfreeze(self): # When a module contains an "import" statement in applevel code, the # imported module is initialized, possibly after it has been already # frozen. diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -101,14 +101,6 @@ g3 = gateway.app2interp_temp(noapp_g3, gateway.applevel_temp) assert self.space.eq_w(g3(self.space, w('foo'), w('bar')), w('foobar')) - def test_app2interp2(self): - """same but using transformed code""" - w = self.space.wrap - def noapp_g3(a, b): - return a+b - g3 = gateway.app2interp_temp(noapp_g3, gateway.applevelinterp_temp) - assert self.space.eq_w(g3(self.space, w('foo'), w('bar')), w('foobar')) - def test_app2interp_general_args(self): w = self.space.wrap def app_general(x, *args, **kwds): diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py --- a/pypy/interpreter/test/test_objspace.py +++ b/pypy/interpreter/test/test_objspace.py @@ -322,3 +322,14 @@ space.ALL_BUILTIN_MODULES.pop() del space._builtinmodule_list mods = space.get_builtinmodule_to_install() + + def test_dont_reload_builtin_mods_on_startup(self): + from pypy.tool.option import make_config, make_objspace + config = make_config(None) + space = make_objspace(config) + w_executable = space.wrap('executable') + assert space.str_w(space.getattr(space.sys, w_executable)) == 'py.py' + space.setattr(space.sys, w_executable, space.wrap('foobar')) + assert space.str_w(space.getattr(space.sys, w_executable)) == 'foobar' + space.startup() + assert space.str_w(space.getattr(space.sys, w_executable)) == 'foobar' diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py --- a/pypy/interpreter/test/test_typedef.py +++ 
b/pypy/interpreter/test/test_typedef.py @@ -304,6 +304,42 @@ assert_method(w_o1, "c", True) assert_method(w_o2, "c", False) + def test_total_ordering(self): + class W_SomeType(Wrappable): + def __init__(self, space, x): + self.space = space + self.x = x + + def descr__lt(self, w_other): + assert isinstance(w_other, W_SomeType) + return self.space.wrap(self.x < w_other.x) + + def descr__eq(self, w_other): + assert isinstance(w_other, W_SomeType) + return self.space.wrap(self.x == w_other.x) + + W_SomeType.typedef = typedef.TypeDef( + 'some_type', + __total_ordering__ = 'auto', + __lt__ = interp2app(W_SomeType.descr__lt), + __eq__ = interp2app(W_SomeType.descr__eq), + ) + space = self.space + w_b = space.wrap(W_SomeType(space, 2)) + w_c = space.wrap(W_SomeType(space, 2)) + w_a = space.wrap(W_SomeType(space, 1)) + # explicitly defined + assert space.is_true(space.lt(w_a, w_b)) + assert not space.is_true(space.eq(w_a, w_b)) + assert space.is_true(space.eq(w_b, w_c)) + # automatically defined + assert space.is_true(space.le(w_a, w_b)) + assert space.is_true(space.le(w_b, w_c)) + assert space.is_true(space.gt(w_b, w_a)) + assert space.is_true(space.ge(w_b, w_a)) + assert space.is_true(space.ge(w_b, w_c)) + assert space.is_true(space.ne(w_a, w_b)) + assert not space.is_true(space.ne(w_b, w_c)) class AppTestTypeDef: diff --git a/pypy/interpreter/test/test_zpy.py b/pypy/interpreter/test/test_zpy.py --- a/pypy/interpreter/test/test_zpy.py +++ b/pypy/interpreter/test/test_zpy.py @@ -17,14 +17,14 @@ def test_executable(): """Ensures sys.executable points to the py.py script""" # TODO : watch out for spaces/special chars in pypypath - output = run(sys.executable, pypypath, + output = run(sys.executable, pypypath, '-S', "-c", "import sys;print sys.executable") assert output.splitlines()[-1] == pypypath def test_special_names(): """Test the __name__ and __file__ special global names""" cmd = "print __name__; print '__file__' in globals()" - output = run(sys.executable, pypypath, 
'-c', cmd) + output = run(sys.executable, pypypath, '-S', '-c', cmd) assert output.splitlines()[-2] == '__main__' assert output.splitlines()[-1] == 'False' @@ -33,24 +33,24 @@ tmpfile.write("print __name__; print __file__\n") tmpfile.close() - output = run(sys.executable, pypypath, tmpfilepath) + output = run(sys.executable, pypypath, '-S', tmpfilepath) assert output.splitlines()[-2] == '__main__' assert output.splitlines()[-1] == str(tmpfilepath) def test_argv_command(): """Some tests on argv""" # test 1 : no arguments - output = run(sys.executable, pypypath, + output = run(sys.executable, pypypath, '-S', "-c", "import sys;print sys.argv") assert output.splitlines()[-1] == str(['-c']) # test 2 : some arguments after - output = run(sys.executable, pypypath, + output = run(sys.executable, pypypath, '-S', "-c", "import sys;print sys.argv", "hello") assert output.splitlines()[-1] == str(['-c','hello']) # test 3 : additionnal pypy parameters - output = run(sys.executable, pypypath, + output = run(sys.executable, pypypath, '-S', "-O", "-c", "import sys;print sys.argv", "hello") assert output.splitlines()[-1] == str(['-c','hello']) @@ -65,15 +65,15 @@ tmpfile.close() # test 1 : no arguments - output = run(sys.executable, pypypath, tmpfilepath) + output = run(sys.executable, pypypath, '-S', tmpfilepath) assert output.splitlines()[-1] == str([tmpfilepath]) # test 2 : some arguments after - output = run(sys.executable, pypypath, tmpfilepath, "hello") + output = run(sys.executable, pypypath, '-S', tmpfilepath, "hello") assert output.splitlines()[-1] == str([tmpfilepath,'hello']) # test 3 : additionnal pypy parameters - output = run(sys.executable, pypypath, "-O", tmpfilepath, "hello") + output = run(sys.executable, pypypath, '-S', "-O", tmpfilepath, "hello") assert output.splitlines()[-1] == str([tmpfilepath,'hello']) @@ -95,7 +95,7 @@ tmpfile.write(TB_NORMALIZATION_CHK) tmpfile.close() - popen = subprocess.Popen([sys.executable, str(pypypath), tmpfilepath], + popen = 
subprocess.Popen([sys.executable, str(pypypath), '-S', tmpfilepath], stderr=subprocess.PIPE) _, stderr = popen.communicate() assert stderr.endswith('KeyError: \n') diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -12,7 +12,7 @@ from pypy.rlib.jit import promote class TypeDef: - def __init__(self, __name, __base=None, **rawdict): + def __init__(self, __name, __base=None, __total_ordering__=None, **rawdict): "NOT_RPYTHON: initialization-time only" self.name = __name if __base is None: @@ -34,6 +34,9 @@ # xxx used by faking self.fakedcpytype = None self.add_entries(**rawdict) + assert __total_ordering__ in (None, 'auto'), "Unknown value for __total_ordering" + if __total_ordering__ == 'auto': + self.auto_total_ordering() def add_entries(self, **rawdict): # xxx fix the names of the methods to match what app-level expects @@ -41,7 +44,15 @@ if isinstance(value, (interp2app, GetSetProperty)): value.name = key self.rawdict.update(rawdict) - + + def auto_total_ordering(self): + assert '__lt__' in self.rawdict, "__total_ordering='auto' requires __lt__" + assert '__eq__' in self.rawdict, "__total_ordering='auto' requires __eq__" + self.add_entries(__le__ = auto__le__, + __gt__ = auto__gt__, + __ge__ = auto__ge__, + __ne__ = auto__ne__) + def _freeze_(self): # hint for the annotator: track individual constant instances of TypeDef return True @@ -50,6 +61,26 @@ return "<%s name=%r>" % (self.__class__.__name__, self.name) +# generic special cmp methods defined on top of __lt__ and __eq__, used by +# automatic total ordering + + at interp2app +def auto__le__(space, w_self, w_other): + return space.not_(space.lt(w_other, w_self)) + + at interp2app +def auto__gt__(space, w_self, w_other): + return space.lt(w_other, w_self) + + at interp2app +def auto__ge__(space, w_self, w_other): + return space.not_(space.lt(w_self, w_other)) + + at interp2app +def auto__ne__(space, w_self, w_other): + 
return space.not_(space.eq(w_self, w_other)) + + # ____________________________________________________________ # Hash support diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -171,7 +171,7 @@ 'unicodesetitem' : (('ref', 'int', 'int'), 'int'), 'cast_ptr_to_int' : (('ref',), 'int'), 'cast_int_to_ptr' : (('int',), 'ref'), - 'debug_merge_point': (('ref', 'int'), None), + 'debug_merge_point': (('ref', 'int', 'int'), None), 'force_token' : ((), 'int'), 'call_may_force' : (('int', 'varargs'), 'intorptr'), 'guard_not_forced': ((), None), diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -208,6 +208,7 @@ This is the class supporting --gcrootfinder=asmgcc. """ is_shadow_stack = False + is_64_bit = (WORD == 8) LOC_REG = 0 LOC_ESP_PLUS = 1 @@ -336,17 +337,17 @@ self._gcmap_deadentries += 1 item += asmgcroot.arrayitemsize - def get_basic_shape(self, is_64_bit=False): + def get_basic_shape(self): # XXX: Should this code even really know about stack frame layout of # the JIT? 
- if is_64_bit: - return [chr(self.LOC_EBP_PLUS | 8), - chr(self.LOC_EBP_MINUS | 8), - chr(self.LOC_EBP_MINUS | 16), - chr(self.LOC_EBP_MINUS | 24), - chr(self.LOC_EBP_MINUS | 32), - chr(self.LOC_EBP_MINUS | 40), - chr(self.LOC_EBP_PLUS | 0), + if self.is_64_bit: + return [chr(self.LOC_EBP_PLUS | 4), # return addr: at 8(%rbp) + chr(self.LOC_EBP_MINUS | 4), # saved %rbx: at -8(%rbp) + chr(self.LOC_EBP_MINUS | 8), # saved %r12: at -16(%rbp) + chr(self.LOC_EBP_MINUS | 12), # saved %r13: at -24(%rbp) + chr(self.LOC_EBP_MINUS | 16), # saved %r14: at -32(%rbp) + chr(self.LOC_EBP_MINUS | 20), # saved %r15: at -40(%rbp) + chr(self.LOC_EBP_PLUS | 0), # saved %rbp: at (%rbp) chr(0)] else: return [chr(self.LOC_EBP_PLUS | 4), # return addr: at 4(%ebp) @@ -366,7 +367,11 @@ shape.append(chr(number | flag)) def add_frame_offset(self, shape, offset): - assert (offset & 3) == 0 + if self.is_64_bit: + assert (offset & 7) == 0 + offset >>= 1 + else: + assert (offset & 3) == 0 if offset >= 0: num = self.LOC_EBP_PLUS | offset else: @@ -518,7 +523,7 @@ def initialize(self): pass - def get_basic_shape(self, is_64_bit=False): + def get_basic_shape(self): return [] def add_frame_offset(self, shape, offset): @@ -594,7 +599,7 @@ # if convenient for the backend, we compute the info about # the flag as (byte-offset, single-byte-flag). import struct - value = struct.pack("l", flag_word) + value = struct.pack(lltype.SignedFmt, flag_word) assert value.count('\x00') == len(value) - 1 # only one byte is != 0 i = 0 while value[i] == '\x00': i += 1 @@ -769,11 +774,19 @@ self.generate_function('malloc_unicode', malloc_unicode, [lltype.Signed]) - # Rarely called: allocate a fixed-size amount of bytes, but - # not in the nursery, because it is too big. Implemented like - # malloc_nursery_slowpath() above. 
- self.generate_function('malloc_fixedsize', malloc_nursery_slowpath, - [lltype.Signed]) + # Never called as far as I can tell, but there for completeness: + # allocate a fixed-size object, but not in the nursery, because + # it is too big. + def malloc_big_fixedsize(size, tid): + if self.DEBUG: + self._random_usage_of_xmm_registers() + type_id = llop.extract_ushort(llgroup.HALFWORD, tid) + check_typeid(type_id) + return llop1.do_malloc_fixedsize_clear(llmemory.GCREF, + type_id, size, + False, False, False) + self.generate_function('malloc_big_fixedsize', malloc_big_fixedsize, + [lltype.Signed] * 2) def _bh_malloc(self, sizedescr): from pypy.rpython.memory.gctypelayout import check_typeid diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -321,7 +321,7 @@ except KeyError: pass # 'var' is already not in a register - def loc(self, box): + def loc(self, box, must_exist=False): """ Return the location of 'box'. 
""" self._check_type(box) @@ -332,6 +332,8 @@ except KeyError: if box in self.bindings_to_frame_reg: return self.frame_reg + if must_exist: + return self.frame_manager.bindings[box] return self.frame_manager.loc(box) def return_constant(self, v, forbidden_vars=[], selected_reg=None): @@ -360,7 +362,7 @@ self._check_type(v) if isinstance(v, Const): return self.return_constant(v, forbidden_vars, selected_reg) - prev_loc = self.loc(v) + prev_loc = self.loc(v, must_exist=True) if prev_loc is self.frame_reg and selected_reg is None: return prev_loc loc = self.force_allocate_reg(v, forbidden_vars, selected_reg, diff --git a/pypy/jit/backend/llsupport/rewrite.py b/pypy/jit/backend/llsupport/rewrite.py --- a/pypy/jit/backend/llsupport/rewrite.py +++ b/pypy/jit/backend/llsupport/rewrite.py @@ -96,8 +96,10 @@ def handle_new_fixedsize(self, descr, op): assert isinstance(descr, SizeDescr) size = descr.size - self.gen_malloc_nursery(size, op.result) - self.gen_initialize_tid(op.result, descr.tid) + if self.gen_malloc_nursery(size, op.result): + self.gen_initialize_tid(op.result, descr.tid) + else: + self.gen_malloc_fixedsize(size, descr.tid, op.result) def handle_new_array(self, arraydescr, op): v_length = op.getarg(0) @@ -112,8 +114,8 @@ pass # total_size is still -1 elif arraydescr.itemsize == 0: total_size = arraydescr.basesize - if 0 <= total_size <= 0xffffff: # up to 16MB, arbitrarily - self.gen_malloc_nursery(total_size, op.result) + if (total_size >= 0 and + self.gen_malloc_nursery(total_size, op.result)): self.gen_initialize_tid(op.result, arraydescr.tid) self.gen_initialize_len(op.result, v_length, arraydescr.lendescr) elif self.gc_ll_descr.kind == 'boehm': @@ -147,13 +149,22 @@ # mark 'v_result' as freshly malloced self.recent_mallocs[v_result] = None - def gen_malloc_fixedsize(self, size, v_result): - """Generate a CALL_MALLOC_GC(malloc_fixedsize_fn, Const(size)). - Note that with the framework GC, this should be called very rarely. 
+ def gen_malloc_fixedsize(self, size, typeid, v_result): + """Generate a CALL_MALLOC_GC(malloc_fixedsize_fn, ...). + Used on Boehm, and on the framework GC for large fixed-size + mallocs. (For all I know this latter case never occurs in + practice, but better safe than sorry.) """ - addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_fixedsize') - self._gen_call_malloc_gc([ConstInt(addr), ConstInt(size)], v_result, - self.gc_ll_descr.malloc_fixedsize_descr) + if self.gc_ll_descr.fielddescr_tid is not None: # framework GC + assert (size & (WORD-1)) == 0, "size not aligned?" + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_big_fixedsize') + args = [ConstInt(addr), ConstInt(size), ConstInt(typeid)] + descr = self.gc_ll_descr.malloc_big_fixedsize_descr + else: # Boehm + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_fixedsize') + args = [ConstInt(addr), ConstInt(size)] + descr = self.gc_ll_descr.malloc_fixedsize_descr + self._gen_call_malloc_gc(args, v_result, descr) def gen_boehm_malloc_array(self, arraydescr, v_num_elem, v_result): """Generate a CALL_MALLOC_GC(malloc_array_fn, ...) 
for Boehm.""" @@ -211,8 +222,7 @@ """ size = self.round_up_for_allocation(size) if not self.gc_ll_descr.can_use_nursery_malloc(size): - self.gen_malloc_fixedsize(size, v_result) - return + return False # op = None if self._op_malloc_nursery is not None: @@ -238,6 +248,7 @@ self._previous_size = size self._v_last_malloced_nursery = v_result self.recent_mallocs[v_result] = None + return True def gen_initialize_tid(self, v_newgcobj, tid): if self.gc_ll_descr.fielddescr_tid is not None: diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py --- a/pypy/jit/backend/llsupport/test/test_descr.py +++ b/pypy/jit/backend/llsupport/test/test_descr.py @@ -148,7 +148,7 @@ # def get_alignment(code): # Retrieve default alignment for the compiler/platform - return struct.calcsize('l' + code) - struct.calcsize(code) + return struct.calcsize(lltype.SignedFmt + code) - struct.calcsize(code) assert descr1.basesize == get_alignment('c') assert descr2.basesize == get_alignment('p') assert descr3.basesize == get_alignment('p') diff --git a/pypy/jit/backend/llsupport/test/test_ffisupport.py b/pypy/jit/backend/llsupport/test/test_ffisupport.py --- a/pypy/jit/backend/llsupport/test/test_ffisupport.py +++ b/pypy/jit/backend/llsupport/test/test_ffisupport.py @@ -2,6 +2,7 @@ from pypy.jit.codewriter.longlong import is_64_bit from pypy.jit.backend.llsupport.descr import * from pypy.jit.backend.llsupport.ffisupport import * +from pypy.rlib.rarithmetic import is_emulated_long class FakeCPU: @@ -43,7 +44,7 @@ assert descr.result_flag == FLAG_UNSIGNED assert descr.is_result_signed() == False - if not is_64_bit: + if not is_64_bit or is_emulated_long: descr = get_call_descr_dynamic(FakeCPU(), [], types.slonglong, None, 42) assert descr is None # missing longlongs diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ 
b/pypy/jit/backend/llsupport/test/test_gc.py @@ -11,6 +11,7 @@ from pypy.jit.tool.oparser import parse from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE from pypy.jit.metainterp.optimizeopt.util import equaloplists +from pypy.rlib.rarithmetic import is_valid_int def test_boehm(): gc_ll_descr = GcLLDescr_boehm(None, None, None) @@ -57,6 +58,7 @@ def frame_pos(n): return -4*(4+n) gcrootmap = GcRootMap_asmgcc() + gcrootmap.is_64_bit = False num1 = frame_pos(-5) num1a = num1|2 num2 = frame_pos(55) @@ -102,7 +104,7 @@ gcrootmap.put(retaddr, shapeaddr) assert gcrootmap._gcmap[0] == retaddr assert gcrootmap._gcmap[1] == shapeaddr - p = rffi.cast(rffi.LONGP, gcrootmap.gcmapstart()) + p = rffi.cast(rffi.SIGNEDP, gcrootmap.gcmapstart()) assert p[0] == retaddr assert (gcrootmap.gcmapend() == gcrootmap.gcmapstart() + rffi.sizeof(lltype.Signed) * 2) @@ -418,9 +420,9 @@ assert newops[0].getarg(1) == v_value assert newops[0].result is None wbdescr = newops[0].getdescr() - assert isinstance(wbdescr.jit_wb_if_flag, int) - assert isinstance(wbdescr.jit_wb_if_flag_byteofs, int) - assert isinstance(wbdescr.jit_wb_if_flag_singlebyte, int) + assert is_valid_int(wbdescr.jit_wb_if_flag) + assert is_valid_int(wbdescr.jit_wb_if_flag_byteofs) + assert is_valid_int(wbdescr.jit_wb_if_flag_singlebyte) def test_get_rid_of_debug_merge_point(self): operations = [ diff --git a/pypy/jit/backend/llsupport/test/test_regalloc.py b/pypy/jit/backend/llsupport/test/test_regalloc.py --- a/pypy/jit/backend/llsupport/test/test_regalloc.py +++ b/pypy/jit/backend/llsupport/test/test_regalloc.py @@ -1,4 +1,4 @@ - +import py from pypy.jit.metainterp.history import BoxInt, ConstInt, BoxFloat, INT, FLOAT from pypy.jit.backend.llsupport.regalloc import FrameManager from pypy.jit.backend.llsupport.regalloc import RegisterManager as BaseRegMan @@ -236,6 +236,16 @@ assert isinstance(loc, FakeFramePos) assert len(asm.moves) == 1 + def test_bogus_make_sure_var_in_reg(self): + b0, = newboxes(0) + 
longevity = {b0: (0, 1)} + fm = TFrameManager() + asm = MockAsm() + rm = RegisterManager(longevity, frame_manager=fm, assembler=asm) + rm.next_instruction() + # invalid call to make_sure_var_in_reg(): box unknown so far + py.test.raises(KeyError, rm.make_sure_var_in_reg, b0) + def test_return_constant(self): asm = MockAsm() boxes, longevity = boxes_and_longevity(5) diff --git a/pypy/jit/backend/llsupport/test/test_rewrite.py b/pypy/jit/backend/llsupport/test/test_rewrite.py --- a/pypy/jit/backend/llsupport/test/test_rewrite.py +++ b/pypy/jit/backend/llsupport/test/test_rewrite.py @@ -119,12 +119,19 @@ jump() """, """ [] - p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ - %(adescr.basesize + 10 * adescr.itemsize)d, \ - descr=malloc_fixedsize_descr) - setfield_gc(p0, 10, descr=alendescr) + p0 = call_malloc_gc(ConstClass(malloc_array), \ + %(adescr.basesize)d, \ + 10, \ + %(adescr.itemsize)d, \ + %(adescr.lendescr.offset)d, \ + descr=malloc_array_descr) jump() """) +## should ideally be: +## p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ +## %(adescr.basesize + 10 * adescr.itemsize)d, \ +## descr=malloc_fixedsize_descr) +## setfield_gc(p0, 10, descr=alendescr) def test_new_array_variable(self): self.check_rewrite(""" @@ -178,13 +185,20 @@ jump() """, """ [i1] - p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ - %(unicodedescr.basesize + \ - 10 * unicodedescr.itemsize)d, \ - descr=malloc_fixedsize_descr) - setfield_gc(p0, 10, descr=unicodelendescr) + p0 = call_malloc_gc(ConstClass(malloc_array), \ + %(unicodedescr.basesize)d, \ + 10, \ + %(unicodedescr.itemsize)d, \ + %(unicodelendescr.offset)d, \ + descr=malloc_array_descr) jump() """) +## should ideally be: +## p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ +## %(unicodedescr.basesize + \ +## 10 * unicodedescr.itemsize)d, \ +## descr=malloc_fixedsize_descr) +## setfield_gc(p0, 10, descr=unicodelendescr) class TestFramework(RewriteTests): @@ -203,7 +217,7 @@ # class FakeCPU(object): def sizeof(self, 
STRUCT): - descr = SizeDescrWithVTable(102) + descr = SizeDescrWithVTable(104) descr.tid = 9315 return descr self.cpu = FakeCPU() @@ -368,11 +382,9 @@ jump() """, """ [] - p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ - %(bdescr.basesize + 104)d, \ - descr=malloc_fixedsize_descr) - setfield_gc(p0, 8765, descr=tiddescr) - setfield_gc(p0, 103, descr=blendescr) + p0 = call_malloc_gc(ConstClass(malloc_array), 1, \ + %(bdescr.tid)d, 103, \ + descr=malloc_array_descr) jump() """) @@ -435,9 +447,8 @@ jump() """, """ [p1] - p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 104, \ - descr=malloc_fixedsize_descr) - setfield_gc(p0, 9315, descr=tiddescr) + p0 = call_malloc_gc(ConstClass(malloc_big_fixedsize), 104, 9315, \ + descr=malloc_big_fixedsize_descr) setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) jump() """) diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -16,9 +16,11 @@ from pypy.rpython.annlowlevel import llhelper from pypy.rpython.llinterp import LLException from pypy.jit.codewriter import heaptracker, longlong -from pypy.rlib.rarithmetic import intmask +from pypy.rlib import longlong2float +from pypy.rlib.rarithmetic import intmask, is_valid_int from pypy.jit.backend.detect_cpu import autodetect_main_model_and_size + def boxfloat(x): return BoxFloat(longlong.getfloatstorage(x)) @@ -266,6 +268,38 @@ res = self.cpu.get_latest_value_int(0) assert res == 20 + def test_compile_big_bridge_out_of_small_loop(self): + i0 = BoxInt() + faildescr1 = BasicFailDescr(1) + looptoken = JitCellToken() + operations = [ + ResOperation(rop.GUARD_FALSE, [i0], None, descr=faildescr1), + ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(2)), + ] + inputargs = [i0] + operations[0].setfailargs([i0]) + self.cpu.compile_loop(inputargs, operations, looptoken) + + i1list = [BoxInt() for i in range(1000)] + bridge = [] + iprev = i0 + for i1 in 
i1list: + bridge.append(ResOperation(rop.INT_ADD, [iprev, ConstInt(1)], i1)) + iprev = i1 + bridge.append(ResOperation(rop.GUARD_FALSE, [i0], None, + descr=BasicFailDescr(3))) + bridge.append(ResOperation(rop.FINISH, [], None, + descr=BasicFailDescr(4))) + bridge[-2].setfailargs(i1list) + + self.cpu.compile_bridge(faildescr1, [i0], bridge, looptoken) + + fail = self.cpu.execute_token(looptoken, 1) + assert fail.identifier == 3 + for i in range(1000): + res = self.cpu.get_latest_value_int(i) + assert res == 2 + i + def test_get_latest_value_count(self): i0 = BoxInt() i1 = BoxInt() @@ -461,7 +495,7 @@ if cpu.supports_floats: def func(f, i): assert isinstance(f, float) - assert isinstance(i, int) + assert is_valid_int(i) return f - float(i) FPTR = self.Ptr(self.FuncType([lltype.Float, lltype.Signed], lltype.Float)) @@ -572,7 +606,7 @@ [funcbox, BoxInt(arg1), BoxInt(arg2)], 'int', descr=calldescr) assert res.getint() == f(arg1, arg2) - + def test_call_stack_alignment(self): # test stack alignment issues, notably for Mac OS/X. # also test the ordering of the arguments. @@ -1458,18 +1492,36 @@ def test_noops(self): c_box = self.alloc_string("hi there").constbox() c_nest = ConstInt(0) - self.execute_operation(rop.DEBUG_MERGE_POINT, [c_box, c_nest], 'void') + c_id = ConstInt(0) + self.execute_operation(rop.DEBUG_MERGE_POINT, [c_box, c_nest, c_id], 'void') self.execute_operation(rop.JIT_DEBUG, [c_box, c_nest, c_nest, c_nest, c_nest], 'void') def test_read_timestamp(self): + if sys.platform == 'win32': + # windows quite often is very inexact (like the old Intel 8259 PIC), + # so we stretch the time a little bit. + # On my virtual Parallels machine in a 2GHz Core i7 Mac Mini, + # the test starts working at delay == 21670 and stops at 20600000. + # We take the geometric mean value. 
+ from math import log, exp + delay_min = 21670 + delay_max = 20600000 + delay = int(exp((log(delay_min)+log(delay_max))/2)) + def wait_a_bit(): + for i in xrange(delay): pass + else: + def wait_a_bit(): + pass if longlong.is_64_bit: got1 = self.execute_operation(rop.READ_TIMESTAMP, [], 'int') + wait_a_bit() got2 = self.execute_operation(rop.READ_TIMESTAMP, [], 'int') res1 = got1.getint() res2 = got2.getint() else: got1 = self.execute_operation(rop.READ_TIMESTAMP, [], 'float') + wait_a_bit() got2 = self.execute_operation(rop.READ_TIMESTAMP, [], 'float') res1 = got1.getlonglong() res2 = got2.getlonglong() @@ -1565,6 +1617,12 @@ [BoxPtr(x)], 'int').value assert res == -19 + def test_convert_float_bytes(self): + t = 'int' if longlong.is_64_bit else 'float' + res = self.execute_operation(rop.CONVERT_FLOAT_BYTES_TO_LONGLONG, + [boxfloat(2.5)], t).value + assert res == longlong2float.float2longlong(2.5) + def test_ooops_non_gc(self): x = lltype.malloc(lltype.Struct('x'), flavor='raw') v = heaptracker.adr2int(llmemory.cast_ptr_to_adr(x)) @@ -3029,7 +3087,7 @@ ResOperation(rop.JUMP, [i2], None, descr=targettoken2), ] self.cpu.compile_bridge(faildescr, inputargs, operations, looptoken) - + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 3 res = self.cpu.get_latest_value_int(0) @@ -3074,7 +3132,7 @@ assert len(mc) == len(ops) for i in range(len(mc)): assert mc[i].split("\t")[-1].startswith(ops[i]) - + data = ctypes.string_at(info.asmaddr, info.asmlen) mc = list(machine_code_dump(data, info.asmaddr, cpuname)) lines = [line for line in mc if line.count('\t') == 2] diff --git a/pypy/jit/backend/test/support.py b/pypy/jit/backend/test/support.py --- a/pypy/jit/backend/test/support.py +++ b/pypy/jit/backend/test/support.py @@ -3,6 +3,7 @@ from pypy.rlib.debug import debug_print from pypy.translator.translator import TranslationContext, graphof from pypy.jit.metainterp.optimizeopt import ALL_OPTS_NAMES +from pypy.rlib.rarithmetic import is_valid_int class 
BaseCompiledMixin(object): @@ -24,7 +25,7 @@ from pypy.annotation import model as annmodel for arg in args: - assert isinstance(arg, int) + assert is_valid_int(arg) self.pre_translation_hook() t = self._get_TranslationContext() diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py --- a/pypy/jit/backend/test/test_random.py +++ b/pypy/jit/backend/test/test_random.py @@ -449,6 +449,7 @@ OPERATIONS.append(CastFloatToIntOperation(rop.CAST_FLOAT_TO_INT)) OPERATIONS.append(CastIntToFloatOperation(rop.CAST_INT_TO_FLOAT)) +OPERATIONS.append(CastFloatToIntOperation(rop.CONVERT_FLOAT_BYTES_TO_LONGLONG)) OperationBuilder.OPERATIONS = OPERATIONS @@ -502,11 +503,11 @@ else: assert 0, "unknown backend %r" % pytest.config.option.backend -# ____________________________________________________________ +# ____________________________________________________________ class RandomLoop(object): dont_generate_more = False - + def __init__(self, cpu, builder_factory, r, startvars=None): self.cpu = cpu if startvars is None: diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -88,7 +88,6 @@ self._debug = False self.debug_counter_descr = cpu.fielddescrof(DEBUG_COUNTER, 'i') self.fail_boxes_count = 0 - self._current_depths_cache = (0, 0) self.datablockwrapper = None self.stack_check_slowpath = 0 self.propagate_exception_path = 0 @@ -442,10 +441,8 @@ looppos = self.mc.get_relative_pos() looptoken._x86_loop_code = looppos clt.frame_depth = -1 # temporarily - clt.param_depth = -1 # temporarily - frame_depth, param_depth = self._assemble(regalloc, operations) + frame_depth = self._assemble(regalloc, operations) clt.frame_depth = frame_depth - clt.param_depth = param_depth # size_excluding_failure_stuff = self.mc.get_relative_pos() self.write_pending_failure_recoveries() @@ -459,8 +456,7 @@ rawstart + size_excluding_failure_stuff, rawstart)) 
debug_stop("jit-backend-addr") - self._patch_stackadjust(rawstart + stackadjustpos, - frame_depth + param_depth) + self._patch_stackadjust(rawstart + stackadjustpos, frame_depth) self.patch_pending_failure_recoveries(rawstart) # ops_offset = self.mc.ops_offset @@ -500,14 +496,13 @@ assert ([loc.assembler() for loc in arglocs] == [loc.assembler() for loc in faildescr._x86_debug_faillocs]) regalloc = RegAlloc(self, self.cpu.translate_support_code) - fail_depths = faildescr._x86_current_depths startpos = self.mc.get_relative_pos() - operations = regalloc.prepare_bridge(fail_depths, inputargs, arglocs, + operations = regalloc.prepare_bridge(inputargs, arglocs, operations, self.current_clt.allgcrefs) stackadjustpos = self._patchable_stackadjust() - frame_depth, param_depth = self._assemble(regalloc, operations) + frame_depth = self._assemble(regalloc, operations) codeendpos = self.mc.get_relative_pos() self.write_pending_failure_recoveries() fullsize = self.mc.get_relative_pos() @@ -517,19 +512,16 @@ debug_print("bridge out of Guard %d has address %x to %x" % (descr_number, rawstart, rawstart + codeendpos)) debug_stop("jit-backend-addr") - self._patch_stackadjust(rawstart + stackadjustpos, - frame_depth + param_depth) + self._patch_stackadjust(rawstart + stackadjustpos, frame_depth) self.patch_pending_failure_recoveries(rawstart) if not we_are_translated(): # for the benefit of tests faildescr._x86_bridge_frame_depth = frame_depth - faildescr._x86_bridge_param_depth = param_depth # patch the jump from original guard self.patch_jump_for_descr(faildescr, rawstart) ops_offset = self.mc.ops_offset self.fixup_target_tokens(rawstart) self.current_clt.frame_depth = max(self.current_clt.frame_depth, frame_depth) - self.current_clt.param_depth = max(self.current_clt.param_depth, param_depth) self.teardown() # oprofile support if self.cpu.profile_agent is not None: @@ -614,7 +606,7 @@ else: assert token struct.number = compute_unique_id(token) - 
self.loop_run_counters.append(struct) + self.loop_run_counters.append(struct) return struct def _find_failure_recovery_bytecode(self, faildescr): @@ -673,7 +665,7 @@ ResOperation(rop.SETFIELD_RAW, [c_adr, box2], None, descr=self.debug_counter_descr)] operations.extend(ops) - + @specialize.argtype(1) def _inject_debugging_code(self, looptoken, operations, tp, number): if self._debug: @@ -700,15 +692,12 @@ regalloc.walk_operations(operations) if we_are_translated() or self.cpu.dont_keepalive_stuff: self._regalloc = None # else keep it around for debugging - frame_depth = regalloc.fm.get_frame_depth() - param_depth = regalloc.param_depth + frame_depth = regalloc.get_final_frame_depth() jump_target_descr = regalloc.jump_target_descr if jump_target_descr is not None: target_frame_depth = jump_target_descr._x86_clt.frame_depth - target_param_depth = jump_target_descr._x86_clt.param_depth frame_depth = max(frame_depth, target_frame_depth) - param_depth = max(param_depth, target_param_depth) - return frame_depth, param_depth + return frame_depth def _patchable_stackadjust(self): # stack adjustment LEA @@ -847,8 +836,8 @@ self.mc.MOVSD_sx(0, loc.value) elif WORD == 4 and isinstance(loc, StackLoc) and loc.get_width() == 8: # XXX evil trick - self.mc.PUSH_b(get_ebp_ofs(loc.position)) - self.mc.PUSH_b(get_ebp_ofs(loc.position + 1)) + self.mc.PUSH_b(loc.value + 4) + self.mc.PUSH_b(loc.value) else: self.mc.PUSH(loc) @@ -858,8 +847,8 @@ self.mc.ADD_ri(esp.value, 8) # = size of doubles elif WORD == 4 and isinstance(loc, StackLoc) and loc.get_width() == 8: # XXX evil trick - self.mc.POP_b(get_ebp_ofs(loc.position + 1)) - self.mc.POP_b(get_ebp_ofs(loc.position)) + self.mc.POP_b(loc.value) + self.mc.POP_b(loc.value + 4) else: self.mc.POP(loc) @@ -892,10 +881,9 @@ genop_math_list[oopspecindex](self, op, arglocs, resloc) def regalloc_perform_with_guard(self, op, guard_op, faillocs, - arglocs, resloc, current_depths): + arglocs, resloc): faildescr = guard_op.getdescr() assert 
isinstance(faildescr, AbstractFailDescr) - faildescr._x86_current_depths = current_depths failargs = guard_op.getfailargs() guard_opnum = guard_op.getopnum() guard_token = self.implement_guard_recovery(guard_opnum, @@ -911,10 +899,9 @@ # must be added by the genop_guard_list[]() assert guard_token is self.pending_guard_tokens[-1] - def regalloc_perform_guard(self, guard_op, faillocs, arglocs, resloc, - current_depths): + def regalloc_perform_guard(self, guard_op, faillocs, arglocs, resloc): self.regalloc_perform_with_guard(None, guard_op, faillocs, arglocs, - resloc, current_depths) + resloc) def load_effective_addr(self, sizereg, baseofs, scale, result, frm=imm0): self.mc.LEA(result, addr_add(frm, sizereg, baseofs, scale)) @@ -1038,13 +1025,14 @@ self.mc.MOV(tmp, loc) self.mc.MOV_sr(p, tmp.value) p += loc.get_width() - self._regalloc.reserve_param(p//WORD) # x is a location self.mc.CALL(x) self.mark_gc_roots(force_index) # if callconv != FFI_DEFAULT_ABI: self._fix_stdcall(callconv, p) + # + self._regalloc.needed_extra_stack_locations(p//WORD) def _fix_stdcall(self, callconv, p): from pypy.rlib.clibffi import FFI_STDCALL @@ -1127,9 +1115,9 @@ x = r10 remap_frame_layout(self, src_locs, dst_locs, X86_64_SCRATCH_REG) - self._regalloc.reserve_param(len(pass_on_stack)) self.mc.CALL(x) self.mark_gc_roots(force_index) + self._regalloc.needed_extra_stack_locations(len(pass_on_stack)) def call(self, addr, args, res): force_index = self.write_new_force_index() @@ -1254,6 +1242,15 @@ self.mc.MOVD_xr(resloc.value, loc0.value) self.mc.CVTSS2SD_xx(resloc.value, resloc.value) + def genop_convert_float_bytes_to_longlong(self, op, arglocs, resloc): + loc0, = arglocs + if longlong.is_64_bit: + assert isinstance(resloc, RegLoc) + assert isinstance(loc0, RegLoc) + self.mc.MOVD(resloc, loc0) + else: + self.mov(loc0, resloc) + def genop_guard_int_is_true(self, op, guard_op, guard_token, arglocs, resloc): guard_opnum = guard_op.getopnum() self.mc.CMP(arglocs[0], imm0) @@ -1966,8 +1963,6 
@@ mc.PUSH_r(ebx.value) elif IS_X86_64: mc.MOV_rr(edi.value, ebx.value) - # XXX: Correct to only align the stack on 64-bit? - mc.AND_ri(esp.value, -16) else: raise AssertionError("Shouldn't happen") @@ -2129,14 +2124,16 @@ # First, we need to save away the registers listed in # 'save_registers' that are not callee-save. XXX We assume that # the XMM registers won't be modified. We store them in - # [ESP+4], [ESP+8], etc., leaving enough room in [ESP] for the - # single argument to closestack_addr below. - p = WORD + # [ESP+4], [ESP+8], etc.; on x86-32 we leave enough room in [ESP] + # for the single argument to closestack_addr below. + if IS_X86_32: + p = WORD + elif IS_X86_64: + p = 0 for reg in self._regalloc.rm.save_around_call_regs: if reg in save_registers: self.mc.MOV_sr(p, reg.value) p += WORD - self._regalloc.reserve_param(p//WORD) # if gcrootmap.is_shadow_stack: args = [] @@ -2187,11 +2184,15 @@ # self._emit_call(-1, imm(self.releasegil_addr), args) # Finally, restore the registers saved above. - p = WORD + if IS_X86_32: + p = WORD + elif IS_X86_64: + p = 0 for reg in self._regalloc.rm.save_around_call_regs: if reg in save_registers: self.mc.MOV_rs(reg.value, p) p += WORD + self._regalloc.needed_extra_stack_locations(p//WORD) def call_reacquire_gil(self, gcrootmap, save_loc): # save the previous result (eax/xmm0) into the stack temporarily. @@ -2199,7 +2200,6 @@ # to save xmm0 in this case. 
if isinstance(save_loc, RegLoc) and not save_loc.is_xmm: self.mc.MOV_sr(WORD, save_loc.value) - self._regalloc.reserve_param(2) # call the reopenstack() function (also reacquiring the GIL) if gcrootmap.is_shadow_stack: args = [] @@ -2219,6 +2219,7 @@ # restore the result from the stack if isinstance(save_loc, RegLoc) and not save_loc.is_xmm: self.mc.MOV_rs(save_loc.value, WORD) + self._regalloc.needed_extra_stack_locations(2) def genop_guard_call_assembler(self, op, guard_op, guard_token, arglocs, result_loc): @@ -2495,11 +2496,6 @@ # copy of heap(nursery_free_adr), so that the final MOV below is # a no-op. - # reserve room for the argument to the real malloc and the - # saved XMM regs (on 32 bit: 8 * 2 words; on 64 bit: 16 * 1 - # word) - self._regalloc.reserve_param(1+16) - gcrootmap = self.cpu.gc_ll_descr.gcrootmap shadow_stack = (gcrootmap is not None and gcrootmap.is_shadow_stack) if not shadow_stack: @@ -2510,6 +2506,11 @@ slowpath_addr2 = self.malloc_slowpath2 self.mc.CALL(imm(slowpath_addr2)) + # reserve room for the argument to the real malloc and the + # saved XMM regs (on 32 bit: 8 * 2 words; on 64 bit: 16 * 1 + # word) + self._regalloc.needed_extra_stack_locations(1+16) + offset = self.mc.get_relative_pos() - jmp_adr assert 0 < offset <= 127 self.mc.overwrite(jmp_adr-1, chr(offset)) diff --git a/pypy/jit/backend/x86/codebuf.py b/pypy/jit/backend/x86/codebuf.py --- a/pypy/jit/backend/x86/codebuf.py +++ b/pypy/jit/backend/x86/codebuf.py @@ -19,8 +19,8 @@ class MachineCodeBlockWrapper(BlockBuilderMixin, - codebuilder_cls, - LocationCodeBuilder): + LocationCodeBuilder, + codebuilder_cls): def __init__(self): self.init_block_builder() # a list of relative positions; for each position p, the bytes diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -168,7 +168,7 @@ def _prepare(self, inputargs, operations, allgcrefs): self.fm = X86FrameManager() - 
self.param_depth = 0 + self.min_frame_depth = 0 cpu = self.assembler.cpu operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations, allgcrefs) @@ -193,11 +193,9 @@ self.min_bytes_before_label = 13 return operations - def prepare_bridge(self, prev_depths, inputargs, arglocs, operations, - allgcrefs): + def prepare_bridge(self, inputargs, arglocs, operations, allgcrefs): operations = self._prepare(inputargs, operations, allgcrefs) self._update_bindings(arglocs, inputargs) - self.param_depth = prev_depths[1] self.min_bytes_before_label = 0 return operations @@ -205,8 +203,15 @@ self.min_bytes_before_label = max(self.min_bytes_before_label, at_least_position) - def reserve_param(self, n): - self.param_depth = max(self.param_depth, n) + def needed_extra_stack_locations(self, n): + # call *after* you needed extra stack locations: (%esp), (%esp+4)... + min_frame_depth = self.fm.get_frame_depth() + n + if min_frame_depth > self.min_frame_depth: + self.min_frame_depth = min_frame_depth + + def get_final_frame_depth(self): + self.needed_extra_stack_locations(0) # update min_frame_depth + return self.min_frame_depth def _set_initial_bindings(self, inputargs): if IS_X86_64: @@ -376,25 +381,12 @@ def locs_for_fail(self, guard_op): return [self.loc(v) for v in guard_op.getfailargs()] - def get_current_depth(self): - # return (self.fm.frame_depth, self.param_depth), but trying to share - # the resulting tuple among several calls - arg0 = self.fm.get_frame_depth() - arg1 = self.param_depth - result = self.assembler._current_depths_cache - if result[0] != arg0 or result[1] != arg1: - result = (arg0, arg1) - self.assembler._current_depths_cache = result - return result - def perform_with_guard(self, op, guard_op, arglocs, result_loc): faillocs = self.locs_for_fail(guard_op) self.rm.position += 1 self.xrm.position += 1 - current_depths = self.get_current_depth() self.assembler.regalloc_perform_with_guard(op, guard_op, faillocs, - arglocs, result_loc, - current_depths) + arglocs, 
result_loc) if op.result is not None: self.possibly_free_var(op.result) self.possibly_free_vars(guard_op.getfailargs()) @@ -407,10 +399,8 @@ arglocs)) else: self.assembler.dump('%s(%s)' % (guard_op, arglocs)) - current_depths = self.get_current_depth() self.assembler.regalloc_perform_guard(guard_op, faillocs, arglocs, - result_loc, - current_depths) + result_loc) self.possibly_free_vars(guard_op.getfailargs()) def PerformDiscard(self, op, arglocs): @@ -776,6 +766,18 @@ consider_cast_singlefloat_to_float = consider_cast_int_to_float + def consider_convert_float_bytes_to_longlong(self, op): + if longlong.is_64_bit: + loc0 = self.xrm.make_sure_var_in_reg(op.getarg(0)) + loc1 = self.rm.force_allocate_reg(op.result) + self.Perform(op, [loc0], loc1) + self.xrm.possibly_free_var(op.getarg(0)) + else: + loc0 = self.xrm.loc(op.getarg(0)) + loc1 = self.xrm.force_allocate_reg(op.result) + self.Perform(op, [loc0], loc1) + self.xrm.possibly_free_var(op.getarg(0)) + def _consider_llong_binop_xx(self, op): # must force both arguments into xmm registers, because we don't # know if they will be suitably aligned. 
Exception: if the second @@ -1393,7 +1395,7 @@ self.force_spill_var(op.getarg(0)) def get_mark_gc_roots(self, gcrootmap, use_copy_area=False): - shape = gcrootmap.get_basic_shape(IS_X86_64) + shape = gcrootmap.get_basic_shape() for v, val in self.fm.bindings.items(): if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): assert isinstance(val, StackLoc) diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py --- a/pypy/jit/backend/x86/rx86.py +++ b/pypy/jit/backend/x86/rx86.py @@ -601,9 +601,10 @@ CVTSS2SD_xb = xmminsn('\xF3', rex_nw, '\x0F\x5A', register(1, 8), stack_bp(2)) - MOVD_rx = xmminsn('\x66', rex_nw, '\x0F\x7E', register(2, 8), register(1), '\xC0') - MOVD_xr = xmminsn('\x66', rex_nw, '\x0F\x6E', register(1, 8), register(2), '\xC0') - MOVD_xb = xmminsn('\x66', rex_nw, '\x0F\x6E', register(1, 8), stack_bp(2)) + # These work on machine sized registers. + MOVD_rx = xmminsn('\x66', rex_w, '\x0F\x7E', register(2, 8), register(1), '\xC0') + MOVD_xr = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), register(2), '\xC0') + MOVD_xb = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), stack_bp(2)) PSRAD_xi = xmminsn('\x66', rex_nw, '\x0F\x72', register(1), '\xE0', immediate(2, 'b')) diff --git a/pypy/jit/backend/x86/support.py b/pypy/jit/backend/x86/support.py --- a/pypy/jit/backend/x86/support.py +++ b/pypy/jit/backend/x86/support.py @@ -1,6 +1,7 @@ import sys from pypy.rpython.lltypesystem import lltype, rffi, llmemory from pypy.translator.tool.cbuild import ExternalCompilationInfo +from pypy.jit.backend.x86.arch import WORD def values_array(TP, size): @@ -35,10 +36,15 @@ # ____________________________________________________________ -if sys.platform == 'win32': - ensure_sse2_floats = lambda : None +if WORD == 4: + extra = ['-DPYPY_X86_CHECK_SSE2'] else: - ensure_sse2_floats = rffi.llexternal_use_eci(ExternalCompilationInfo( - compile_extra = ['-msse2', '-mfpmath=sse', - '-DPYPY_CPU_HAS_STANDARD_PRECISION'], - )) + extra = [] + +if sys.platform 
!= 'win32': + extra = ['-msse2', '-mfpmath=sse', + '-DPYPY_CPU_HAS_STANDARD_PRECISION'] + extra + +ensure_sse2_floats = rffi.llexternal_use_eci(ExternalCompilationInfo( + compile_extra = extra, +)) diff --git a/pypy/jit/backend/x86/test/conftest.py b/pypy/jit/backend/x86/test/conftest.py --- a/pypy/jit/backend/x86/test/conftest.py +++ b/pypy/jit/backend/x86/test/conftest.py @@ -1,4 +1,4 @@ -import py +import py, os from pypy.jit.backend import detect_cpu cpu = detect_cpu.autodetect() @@ -6,5 +6,7 @@ if cpu not in ('x86', 'x86_64'): py.test.skip("x86/x86_64 tests skipped: cpu is %r" % (cpu,)) if cpu == 'x86_64': + if os.name == "nt": + py.test.skip("Windows cannot allocate non-reserved memory") from pypy.rpython.lltypesystem import ll2ctypes ll2ctypes.do_allocation_in_far_regions() diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -28,7 +28,7 @@ class MockGcRootMap(object): is_shadow_stack = False - def get_basic_shape(self, is_64_bit): + def get_basic_shape(self): return ['shape'] def add_frame_offset(self, shape, offset): shape.append(offset) @@ -184,6 +184,8 @@ self.addrs[1] = self.addrs[0] + 64 self.calls = [] def malloc_slowpath(size): + if self.gcrootmap is not None: # hook + self.gcrootmap.hook_malloc_slowpath() self.calls.append(size) # reset the nursery nadr = rffi.cast(lltype.Signed, self.nursery) @@ -257,3 +259,218 @@ assert gc_ll_descr.addrs[0] == nurs_adr + 24 # this should call slow path once assert gc_ll_descr.calls == [24] + + def test_save_regs_around_malloc(self): + S1 = lltype.GcStruct('S1') + S2 = lltype.GcStruct('S2', ('s0', lltype.Ptr(S1)), + ('s1', lltype.Ptr(S1)), + ('s2', lltype.Ptr(S1)), + ('s3', lltype.Ptr(S1)), + ('s4', lltype.Ptr(S1)), + ('s5', lltype.Ptr(S1)), + ('s6', lltype.Ptr(S1)), + ('s7', lltype.Ptr(S1)), + ('s8', lltype.Ptr(S1)), + ('s9', lltype.Ptr(S1)), + 
('s10', lltype.Ptr(S1)), + ('s11', lltype.Ptr(S1)), + ('s12', lltype.Ptr(S1)), + ('s13', lltype.Ptr(S1)), + ('s14', lltype.Ptr(S1)), + ('s15', lltype.Ptr(S1))) + cpu = self.cpu + self.namespace = self.namespace.copy() + for i in range(16): + self.namespace['ds%i' % i] = cpu.fielddescrof(S2, 's%d' % i) + ops = ''' + [p0] + p1 = getfield_gc(p0, descr=ds0) + p2 = getfield_gc(p0, descr=ds1) + p3 = getfield_gc(p0, descr=ds2) + p4 = getfield_gc(p0, descr=ds3) + p5 = getfield_gc(p0, descr=ds4) + p6 = getfield_gc(p0, descr=ds5) + p7 = getfield_gc(p0, descr=ds6) + p8 = getfield_gc(p0, descr=ds7) + p9 = getfield_gc(p0, descr=ds8) + p10 = getfield_gc(p0, descr=ds9) + p11 = getfield_gc(p0, descr=ds10) + p12 = getfield_gc(p0, descr=ds11) + p13 = getfield_gc(p0, descr=ds12) + p14 = getfield_gc(p0, descr=ds13) + p15 = getfield_gc(p0, descr=ds14) + p16 = getfield_gc(p0, descr=ds15) + # + # now all registers are in use + p17 = call_malloc_nursery(40) + p18 = call_malloc_nursery(40) # overflow + # + finish(p1, p2, p3, p4, p5, p6, p7, p8, \ + p9, p10, p11, p12, p13, p14, p15, p16) + ''' + s2 = lltype.malloc(S2) + for i in range(16): + setattr(s2, 's%d' % i, lltype.malloc(S1)) + s2ref = lltype.cast_opaque_ptr(llmemory.GCREF, s2) + # + self.interpret(ops, [s2ref]) + gc_ll_descr = cpu.gc_ll_descr + gc_ll_descr.check_nothing_in_nursery() + assert gc_ll_descr.calls == [40] + # check the returned pointers + for i in range(16): + s1ref = self.cpu.get_latest_value_ref(i) + s1 = lltype.cast_opaque_ptr(lltype.Ptr(S1), s1ref) + assert s1 == getattr(s2, 's%d' % i) + + +class MockShadowStackRootMap(MockGcRootMap): + is_shadow_stack = True + MARKER_FRAME = 88 # this marker follows the frame addr + S1 = lltype.GcStruct('S1') + + def __init__(self): + self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 20, + flavor='raw') + # root_stack_top + self.addrs[0] = rffi.cast(lltype.Signed, self.addrs) + 3*WORD + # random stuff + self.addrs[1] = 123456 + self.addrs[2] = 654321 + 
self.check_initial_and_final_state() + self.callshapes = {} + self.should_see = [] + + def check_initial_and_final_state(self): + assert self.addrs[0] == rffi.cast(lltype.Signed, self.addrs) + 3*WORD + assert self.addrs[1] == 123456 + assert self.addrs[2] == 654321 + + def get_root_stack_top_addr(self): + return rffi.cast(lltype.Signed, self.addrs) + + def compress_callshape(self, shape, datablockwrapper): + assert shape[0] == 'shape' + return ['compressed'] + shape[1:] + + def write_callshape(self, mark, force_index): + assert mark[0] == 'compressed' + assert force_index not in self.callshapes + assert force_index == 42 + len(self.callshapes) + self.callshapes[force_index] = mark + + def hook_malloc_slowpath(self): + num_entries = self.addrs[0] - rffi.cast(lltype.Signed, self.addrs) + assert num_entries == 5*WORD # 3 initially, plus 2 by the asm frame + assert self.addrs[1] == 123456 # unchanged + assert self.addrs[2] == 654321 # unchanged + frame_addr = self.addrs[3] # pushed by the asm frame + assert self.addrs[4] == self.MARKER_FRAME # pushed by the asm frame + # + from pypy.jit.backend.x86.arch import FORCE_INDEX_OFS + addr = rffi.cast(rffi.CArrayPtr(lltype.Signed), + frame_addr + FORCE_INDEX_OFS) + force_index = addr[0] + assert force_index == 43 # in this test: the 2nd call_malloc_nursery + # + # The callshapes[43] saved above should list addresses both in the + # COPY_AREA and in the "normal" stack, where all the 16 values p1-p16 + # of test_save_regs_at_correct_place should have been stored. Here + # we replace them with new addresses, to emulate a moving GC. 
+ shape = self.callshapes[force_index] + assert len(shape[1:]) == len(self.should_see) + new_objects = [None] * len(self.should_see) + for ofs in shape[1:]: + assert isinstance(ofs, int) # not a register at all here + addr = rffi.cast(rffi.CArrayPtr(lltype.Signed), frame_addr + ofs) + contains = addr[0] + for j in range(len(self.should_see)): + obj = self.should_see[j] + if contains == rffi.cast(lltype.Signed, obj): + assert new_objects[j] is None # duplicate? + break + else: + assert 0 # the value read from the stack looks random? + new_objects[j] = lltype.malloc(self.S1) + addr[0] = rffi.cast(lltype.Signed, new_objects[j]) + self.should_see[:] = new_objects + + +class TestMallocShadowStack(BaseTestRegalloc): + + def setup_method(self, method): + cpu = CPU(None, None) + cpu.gc_ll_descr = GCDescrFastpathMalloc() + cpu.gc_ll_descr.gcrootmap = MockShadowStackRootMap() + cpu.setup_once() + for i in range(42): + cpu.reserve_some_free_fail_descr_number() + self.cpu = cpu + + def test_save_regs_at_correct_place(self): + cpu = self.cpu + gc_ll_descr = cpu.gc_ll_descr + S1 = gc_ll_descr.gcrootmap.S1 + S2 = lltype.GcStruct('S2', ('s0', lltype.Ptr(S1)), + ('s1', lltype.Ptr(S1)), + ('s2', lltype.Ptr(S1)), + ('s3', lltype.Ptr(S1)), + ('s4', lltype.Ptr(S1)), + ('s5', lltype.Ptr(S1)), + ('s6', lltype.Ptr(S1)), + ('s7', lltype.Ptr(S1)), + ('s8', lltype.Ptr(S1)), + ('s9', lltype.Ptr(S1)), + ('s10', lltype.Ptr(S1)), + ('s11', lltype.Ptr(S1)), + ('s12', lltype.Ptr(S1)), + ('s13', lltype.Ptr(S1)), + ('s14', lltype.Ptr(S1)), + ('s15', lltype.Ptr(S1))) + self.namespace = self.namespace.copy() + for i in range(16): + self.namespace['ds%i' % i] = cpu.fielddescrof(S2, 's%d' % i) + ops = ''' + [p0] + p1 = getfield_gc(p0, descr=ds0) + p2 = getfield_gc(p0, descr=ds1) + p3 = getfield_gc(p0, descr=ds2) + p4 = getfield_gc(p0, descr=ds3) + p5 = getfield_gc(p0, descr=ds4) + p6 = getfield_gc(p0, descr=ds5) + p7 = getfield_gc(p0, descr=ds6) + p8 = getfield_gc(p0, descr=ds7) + p9 = getfield_gc(p0, 
descr=ds8) + p10 = getfield_gc(p0, descr=ds9) + p11 = getfield_gc(p0, descr=ds10) + p12 = getfield_gc(p0, descr=ds11) + p13 = getfield_gc(p0, descr=ds12) + p14 = getfield_gc(p0, descr=ds13) + p15 = getfield_gc(p0, descr=ds14) + p16 = getfield_gc(p0, descr=ds15) + # + # now all registers are in use + p17 = call_malloc_nursery(40) + p18 = call_malloc_nursery(40) # overflow + # + finish(p1, p2, p3, p4, p5, p6, p7, p8, \ + p9, p10, p11, p12, p13, p14, p15, p16) + ''' + s2 = lltype.malloc(S2) + for i in range(16): + s1 = lltype.malloc(S1) + setattr(s2, 's%d' % i, s1) + gc_ll_descr.gcrootmap.should_see.append(s1) + s2ref = lltype.cast_opaque_ptr(llmemory.GCREF, s2) + # + self.interpret(ops, [s2ref]) + gc_ll_descr.check_nothing_in_nursery() + assert gc_ll_descr.calls == [40] + gc_ll_descr.gcrootmap.check_initial_and_final_state() + # check the returned pointers + for i in range(16): + s1ref = self.cpu.get_latest_value_ref(i) + s1 = lltype.cast_opaque_ptr(lltype.Ptr(S1), s1ref) + for j in range(16): + assert s1 != getattr(s2, 's%d' % j) + assert s1 == gc_ll_descr.gcrootmap.should_see[i] diff --git a/pypy/jit/backend/x86/test/test_recompilation.py b/pypy/jit/backend/x86/test/test_recompilation.py --- a/pypy/jit/backend/x86/test/test_recompilation.py +++ b/pypy/jit/backend/x86/test/test_recompilation.py @@ -34,7 +34,6 @@ ''' loop = self.interpret(ops, [0]) previous = loop._jitcelltoken.compiled_loop_token.frame_depth - assert loop._jitcelltoken.compiled_loop_token.param_depth == 0 assert self.getint(0) == 20 ops = ''' [i1] @@ -51,7 +50,6 @@ bridge = self.attach_bridge(ops, loop, -2) descr = loop.operations[3].getdescr() new = descr._x86_bridge_frame_depth - assert descr._x86_bridge_param_depth == 0 # the force_spill() forces the stack to grow assert new > previous fail = self.run(loop, 0) @@ -116,10 +114,8 @@ loop_frame_depth = loop._jitcelltoken.compiled_loop_token.frame_depth bridge = self.attach_bridge(ops, loop, 6) guard_op = loop.operations[6] - assert 
loop._jitcelltoken.compiled_loop_token.param_depth == 0 # the force_spill() forces the stack to grow assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth - assert guard_op.getdescr()._x86_bridge_param_depth == 0 self.run(loop, 0, 0, 0, 0, 0, 0) assert self.getint(0) == 1 assert self.getint(1) == 20 diff --git a/pypy/jit/backend/x86/test/test_regalloc.py b/pypy/jit/backend/x86/test/test_regalloc.py --- a/pypy/jit/backend/x86/test/test_regalloc.py +++ b/pypy/jit/backend/x86/test/test_regalloc.py @@ -606,23 +606,37 @@ assert self.getints(9) == [0, 1, 1, 1, 1, 1, 1, 1, 1] class TestRegAllocCallAndStackDepth(BaseTestRegalloc): - def expected_param_depth(self, num_args): + def expected_frame_depth(self, num_call_args, num_pushed_input_args=0): # Assumes the arguments are all non-float if IS_X86_32: - return num_args + extra_esp = num_call_args + return extra_esp elif IS_X86_64: - return max(num_args - 6, 0) + # 'num_pushed_input_args' is for X86_64 only + extra_esp = max(num_call_args - 6, 0) + return num_pushed_input_args + extra_esp def test_one_call(self): ops = ''' - [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9] + [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i9b] i10 = call(ConstClass(f1ptr), i0, descr=f1_calldescr) - finish(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9) + finish(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9, i9b) ''' - loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9]) - assert self.getints(10) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9] + loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9, 8]) + assert self.getints(11) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9, 8] clt = loop._jitcelltoken.compiled_loop_token - assert clt.param_depth == self.expected_param_depth(1) + assert clt.frame_depth == self.expected_frame_depth(1, 5) + + def test_one_call_reverse(self): + ops = ''' + [i1, i2, i3, i4, i5, i6, i7, i8, i9, i9b, i0] + i10 = call(ConstClass(f1ptr), i0, descr=f1_calldescr) + finish(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9, i9b) + ''' + loop = 
self.interpret(ops, [7, 9, 9 ,9, 9, 9, 9, 9, 9, 8, 4]) + assert self.getints(11) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9, 8] + clt = loop._jitcelltoken.compiled_loop_token + assert clt.frame_depth == self.expected_frame_depth(1, 6) def test_two_calls(self): ops = ''' @@ -634,7 +648,7 @@ loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9]) assert self.getints(10) == [5*7, 7, 9, 9, 9, 9, 9, 9, 9, 9] clt = loop._jitcelltoken.compiled_loop_token - assert clt.param_depth == self.expected_param_depth(2) + assert clt.frame_depth == self.expected_frame_depth(2, 5) def test_call_many_arguments(self): # NB: The first and last arguments in the call are constants. This @@ -648,25 +662,31 @@ loop = self.interpret(ops, [2, 3, 4, 5, 6, 7, 8, 9]) assert self.getint(0) == 55 clt = loop._jitcelltoken.compiled_loop_token - assert clt.param_depth == self.expected_param_depth(10) + assert clt.frame_depth == self.expected_frame_depth(10) def test_bridge_calls_1(self): ops = ''' [i0, i1] i2 = call(ConstClass(f1ptr), i0, descr=f1_calldescr) - guard_value(i2, 0, descr=fdescr1) [i2, i1] + guard_value(i2, 0, descr=fdescr1) [i2, i0, i1] finish(i1) ''' loop = self.interpret(ops, [4, 7]) assert self.getint(0) == 5 + clt = loop._jitcelltoken.compiled_loop_token + orgdepth = clt.frame_depth + assert orgdepth == self.expected_frame_depth(1, 2) + ops = ''' - [i2, i1] + [i2, i0, i1] i3 = call(ConstClass(f2ptr), i2, i1, descr=f2_calldescr) - finish(i3, descr=fdescr2) + finish(i3, i0, descr=fdescr2) ''' bridge = self.attach_bridge(ops, loop, -2) - assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2) + assert clt.frame_depth == max(orgdepth, self.expected_frame_depth(2, 2)) + assert loop.operations[-2].getdescr()._x86_bridge_frame_depth == \ + self.expected_frame_depth(2, 2) self.run(loop, 4, 7) assert self.getint(0) == 5*7 @@ -676,10 +696,14 @@ [i0, i1] i2 = call(ConstClass(f2ptr), i0, i1, descr=f2_calldescr) guard_value(i2, 0, descr=fdescr1) [i2] - finish(i1) + 
finish(i2) ''' loop = self.interpret(ops, [4, 7]) assert self.getint(0) == 4*7 + clt = loop._jitcelltoken.compiled_loop_token + orgdepth = clt.frame_depth + assert orgdepth == self.expected_frame_depth(2) + ops = ''' [i2] i3 = call(ConstClass(f1ptr), i2, descr=f1_calldescr) @@ -687,7 +711,9 @@ ''' bridge = self.attach_bridge(ops, loop, -2) - assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2) + assert clt.frame_depth == max(orgdepth, self.expected_frame_depth(1)) + assert loop.operations[-2].getdescr()._x86_bridge_frame_depth == \ + self.expected_frame_depth(1) self.run(loop, 4, 7) assert self.getint(0) == 29 diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -371,7 +371,7 @@ operations = [ ResOperation(rop.LABEL, [i0], None, descr=targettoken), - ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("hello"), 0], None), + ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("hello"), 0, 0], None), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), @@ -390,7 +390,7 @@ bridge = [ ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), - ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("bye"), 0], None), + ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("bye"), 0, 0], None), ResOperation(rop.JUMP, [i1b], None, descr=targettoken), ] bridge[1].setfailargs([i1b]) @@ -531,12 +531,12 @@ loop = """ [i0] label(i0, descr=preambletoken) - debug_merge_point('xyz', 0) + debug_merge_point('xyz', 0, 0) i1 = int_add(i0, 1) i2 = int_ge(i1, 10) guard_false(i2) [] label(i1, descr=targettoken) - debug_merge_point('xyz', 0) + debug_merge_point('xyz', 0, 0) i11 = int_add(i1, 1) i12 = int_ge(i11, 10) guard_false(i12) [] @@ -569,7 +569,7 @@ loop = """ [i0] 
label(i0, descr=targettoken) - debug_merge_point('xyz', 0) + debug_merge_point('xyz', 0, 0) i1 = int_add(i0, 1) i2 = int_ge(i1, 10) guard_false(i2) [] diff --git a/pypy/jit/backend/x86/test/test_zmath.py b/pypy/jit/backend/x86/test/test_zmath.py --- a/pypy/jit/backend/x86/test/test_zmath.py +++ b/pypy/jit/backend/x86/test/test_zmath.py @@ -6,6 +6,8 @@ from pypy.translator.c.test.test_genc import compile from pypy.jit.backend.x86.support import ensure_sse2_floats from pypy.rlib import rfloat +from pypy.rlib.unroll import unrolling_iterable +from pypy.rlib.debug import debug_print def get_test_case((fnname, args, expected)): @@ -16,16 +18,32 @@ expect_valueerror = (expected == ValueError) expect_overflowerror = (expected == OverflowError) check = test_direct.get_tester(expected) + unroll_args = unrolling_iterable(args) # def testfn(): + debug_print('calling', fnname, 'with arguments:') + for arg in unroll_args: + debug_print('\t', arg) try: got = fn(*args) except ValueError: - return expect_valueerror + if expect_valueerror: + return True + else: + debug_print('unexpected ValueError!') + return False except OverflowError: - return expect_overflowerror + if expect_overflowerror: + return True + else: + debug_print('unexpected OverflowError!') + return False else: - return check(got) + if check(got): + return True + else: + debug_print('unexpected result:', got) + return False # testfn.func_name = 'test_' + fnname return testfn diff --git a/pypy/jit/backend/x86/test/test_ztranslation.py b/pypy/jit/backend/x86/test/test_ztranslation.py --- a/pypy/jit/backend/x86/test/test_ztranslation.py +++ b/pypy/jit/backend/x86/test/test_ztranslation.py @@ -52,6 +52,7 @@ set_param(jitdriver, "trace_eagerness", 2) total = 0 frame = Frame(i) + j = float(j) while frame.i > 3: jitdriver.can_enter_jit(frame=frame, total=total, j=j) jitdriver.jit_merge_point(frame=frame, total=total, j=j) diff --git a/pypy/jit/backend/x86/tool/viewcode.py b/pypy/jit/backend/x86/tool/viewcode.py --- 
a/pypy/jit/backend/x86/tool/viewcode.py +++ b/pypy/jit/backend/x86/tool/viewcode.py @@ -34,7 +34,7 @@ # I am porting it in a lazy fashion... See py-utils/xam.py if sys.platform == "win32": - XXX # lots more in Psyco + pass # lots more in Psyco def machine_code_dump(data, originaddr, backend_name, label_list=None): objdump_backend_option = { diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -291,6 +291,11 @@ op1 = SpaceOperation('-live-', [], None) return [op, op1] + def _noop_rewrite(self, op): + return op + + rewrite_op_convert_float_bytes_to_longlong = _noop_rewrite + # ---------- # Various kinds of calls @@ -365,7 +370,7 @@ def handle_builtin_call(self, op): oopspec_name, args = support.decode_builtin_call(op) # dispatch to various implementations depending on the oopspec_name - if oopspec_name.startswith('list.') or oopspec_name == 'newlist': + if oopspec_name.startswith('list.') or oopspec_name.startswith('newlist'): prepare = self._handle_list_call elif oopspec_name.startswith('stroruni.'): prepare = self._handle_stroruni_call @@ -1494,6 +1499,14 @@ arraydescr, v_length], op.result) + def do_resizable_newlist_hint(self, op, args, arraydescr, lengthdescr, + itemsdescr, structdescr): + v_hint = self._get_initial_newlist_length(op, args) + return SpaceOperation('newlist_hint', + [structdescr, lengthdescr, itemsdescr, + arraydescr, v_hint], + op.result) + def do_resizable_list_getitem(self, op, args, arraydescr, lengthdescr, itemsdescr, structdescr): v_index, extraop = self._prepare_list_getset(op, lengthdescr, args, diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -144,6 +144,10 @@ _ll_1_newlist.need_result_type = True _ll_2_newlist.need_result_type = True +def _ll_1_newlist_hint(LIST, hint): + return LIST.ll_newlist_hint(hint) 
+_ll_1_newlist_hint.need_result_type = True + def _ll_1_list_len(l): return l.ll_length() def _ll_2_list_getitem(l, index): diff --git a/pypy/jit/codewriter/test/test_flatten.py b/pypy/jit/codewriter/test/test_flatten.py --- a/pypy/jit/codewriter/test/test_flatten.py +++ b/pypy/jit/codewriter/test/test_flatten.py @@ -968,6 +968,21 @@ int_return %i2 """, transform=True) + def test_convert_float_bytes_to_int(self): + from pypy.rlib.longlong2float import float2longlong + def f(x): + return float2longlong(x) + if longlong.is_64_bit: + result_var = "%i0" + return_op = "int_return" + else: + result_var = "%f1" + return_op = "float_return" + self.encoding_test(f, [25.0], """ + convert_float_bytes_to_longlong %%f0 -> %(result_var)s + %(return_op)s %(result_var)s + """ % {"result_var": result_var, "return_op": return_op}) + def check_force_cast(FROM, TO, operations, value): """Check that the test is correctly written...""" diff --git a/pypy/jit/codewriter/test/test_longlong.py b/pypy/jit/codewriter/test/test_longlong.py --- a/pypy/jit/codewriter/test/test_longlong.py +++ b/pypy/jit/codewriter/test/test_longlong.py @@ -1,6 +1,6 @@ import py, sys -from pypy.rlib.rarithmetic import r_longlong, intmask +from pypy.rlib.rarithmetic import r_longlong, intmask, is_valid_int from pypy.objspace.flow.model import SpaceOperation, Variable, Constant from pypy.objspace.flow.model import Block, Link from pypy.translator.unsimplify import varoftype @@ -32,7 +32,7 @@ def test_functions(): xll = longlong.getfloatstorage(3.5) assert longlong.getrealfloat(xll) == 3.5 - assert isinstance(longlong.gethash(xll), int) + assert is_valid_int(longlong.gethash(xll)) class TestLongLong: diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -1,15 +1,16 @@ +from pypy.jit.codewriter import heaptracker, longlong +from pypy.jit.codewriter.jitcode import JitCode, SwitchDictDescr +from 
pypy.jit.metainterp.compile import ResumeAtPositionDescr +from pypy.jit.metainterp.jitexc import JitException, get_llexception, reraise +from pypy.rlib import longlong2float +from pypy.rlib.debug import debug_start, debug_stop, ll_assert, make_sure_not_resized +from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.rarithmetic import intmask, LONG_BIT, r_uint, ovfcheck +from pypy.rlib.rtimer import read_timestamp from pypy.rlib.unroll import unrolling_iterable -from pypy.rlib.rtimer import read_timestamp -from pypy.rlib.rarithmetic import intmask, LONG_BIT, r_uint, ovfcheck -from pypy.rlib.objectmodel import we_are_translated -from pypy.rlib.debug import debug_start, debug_stop, ll_assert -from pypy.rlib.debug import make_sure_not_resized from pypy.rpython.lltypesystem import lltype, llmemory, rclass from pypy.rpython.lltypesystem.lloperation import llop -from pypy.jit.codewriter.jitcode import JitCode, SwitchDictDescr -from pypy.jit.codewriter import heaptracker, longlong -from pypy.jit.metainterp.jitexc import JitException, get_llexception, reraise -from pypy.jit.metainterp.compile import ResumeAtPositionDescr + def arguments(*argtypes, **kwds): resulttype = kwds.pop('returns', None) @@ -20,6 +21,9 @@ return function return decorate +LONGLONG_TYPECODE = 'i' if longlong.is_64_bit else 'f' + + class LeaveFrame(JitException): pass @@ -663,6 +667,11 @@ a = float(a) return longlong.getfloatstorage(a) + @arguments("f", returns=LONGLONG_TYPECODE) + def bhimpl_convert_float_bytes_to_longlong(a): + a = longlong.getrealfloat(a) + return longlong2float.float2longlong(a) + # ---------- # control flow operations @@ -982,6 +991,15 @@ cpu.bh_setfield_gc_r(result, itemsdescr, items) return result + @arguments("cpu", "d", "d", "d", "d", "i", returns="r") + def bhimpl_newlist_hint(cpu, structdescr, lengthdescr, itemsdescr, + arraydescr, lengthhint): + result = cpu.bh_new(structdescr) + cpu.bh_setfield_gc_i(result, lengthdescr, 0) + items = cpu.bh_new_array(arraydescr, 
lengthhint) + cpu.bh_setfield_gc_r(result, itemsdescr, items) + return result + @arguments("cpu", "r", "d", "d", "i", returns="i") def bhimpl_getlistitem_gc_i(cpu, lst, itemsdescr, arraydescr, index): items = cpu.bh_getfield_gc_r(lst, itemsdescr) @@ -1176,14 +1194,14 @@ def bhimpl_getinteriorfield_gc_f(cpu, array, index, descr): return cpu.bh_getinteriorfield_gc_f(array, index, descr) - @arguments("cpu", "r", "i", "d", "i") - def bhimpl_setinteriorfield_gc_i(cpu, array, index, descr, value): + @arguments("cpu", "r", "i", "i", "d") + def bhimpl_setinteriorfield_gc_i(cpu, array, index, value, descr): cpu.bh_setinteriorfield_gc_i(array, index, descr, value) - @arguments("cpu", "r", "i", "d", "r") - def bhimpl_setinteriorfield_gc_r(cpu, array, index, descr, value): + @arguments("cpu", "r", "i", "r", "d") + def bhimpl_setinteriorfield_gc_r(cpu, array, index, value, descr): cpu.bh_setinteriorfield_gc_r(array, index, descr, value) - @arguments("cpu", "r", "i", "d", "f") - def bhimpl_setinteriorfield_gc_f(cpu, array, index, descr, value): + @arguments("cpu", "r", "i", "f", "d") + def bhimpl_setinteriorfield_gc_f(cpu, array, index, value, descr): cpu.bh_setinteriorfield_gc_f(array, index, descr, value) @arguments("cpu", "r", "d", returns="i") @@ -1300,7 +1318,7 @@ def bhimpl_copyunicodecontent(cpu, src, dst, srcstart, dststart, length): cpu.bh_copyunicodecontent(src, dst, srcstart, dststart, length) - @arguments(returns=(longlong.is_64_bit and "i" or "f")) + @arguments(returns=LONGLONG_TYPECODE) def bhimpl_ll_read_timestamp(): return read_timestamp() @@ -1379,7 +1397,8 @@ elif opnum == rop.GUARD_NO_OVERFLOW: # Produced by int_xxx_ovf(). The pc is just after the opcode. # We get here because it did not used to overflow, but now it does. - return get_llexception(self.cpu, OverflowError()) + if not dont_change_position: + return get_llexception(self.cpu, OverflowError()) # elif opnum == rop.GUARD_OVERFLOW: # Produced by int_xxx_ovf(). The pc is just after the opcode. 
diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -289,8 +289,21 @@ assert isinstance(token, TargetToken) assert token.original_jitcell_token is None token.original_jitcell_token = trace.original_jitcell_token - - + + +def do_compile_loop(metainterp_sd, inputargs, operations, looptoken, + log=True, name=''): + metainterp_sd.logger_ops.log_loop(inputargs, operations, -2, + 'compiling', name=name) + return metainterp_sd.cpu.compile_loop(inputargs, operations, looptoken, + log=log, name=name) + +def do_compile_bridge(metainterp_sd, faildescr, inputargs, operations, + original_loop_token, log=True): + metainterp_sd.logger_ops.log_bridge(inputargs, operations, -2) + return metainterp_sd.cpu.compile_bridge(faildescr, inputargs, operations, + original_loop_token, log=log) + def send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, type): vinfo = jitdriver_sd.virtualizable_info if vinfo is not None: @@ -319,9 +332,9 @@ metainterp_sd.profiler.start_backend() debug_start("jit-backend") try: - asminfo = metainterp_sd.cpu.compile_loop(loop.inputargs, operations, - original_jitcell_token, - name=loopname) + asminfo = do_compile_loop(metainterp_sd, loop.inputargs, + operations, original_jitcell_token, + name=loopname) finally: debug_stop("jit-backend") metainterp_sd.profiler.end_backend() @@ -333,7 +346,6 @@ metainterp_sd.stats.compiled() metainterp_sd.log("compiled new " + type) # - loopname = jitdriver_sd.warmstate.get_location_str(greenkey) if asminfo is not None: ops_offset = asminfo.ops_offset else: @@ -365,9 +377,9 @@ metainterp_sd.profiler.start_backend() debug_start("jit-backend") try: - asminfo = metainterp_sd.cpu.compile_bridge(faildescr, inputargs, - operations, - original_loop_token) + asminfo = do_compile_bridge(metainterp_sd, faildescr, inputargs, + operations, + original_loop_token) finally: debug_stop("jit-backend") 
metainterp_sd.profiler.end_backend() diff --git a/pypy/jit/metainterp/executor.py b/pypy/jit/metainterp/executor.py --- a/pypy/jit/metainterp/executor.py +++ b/pypy/jit/metainterp/executor.py @@ -2,7 +2,7 @@ """ from pypy.rpython.lltypesystem import lltype, rstr -from pypy.rlib.rarithmetic import ovfcheck, r_longlong +from pypy.rlib.rarithmetic import ovfcheck, r_longlong, is_valid_int from pypy.rlib.rtimer import read_timestamp from pypy.rlib.unroll import unrolling_iterable from pypy.jit.metainterp.history import BoxInt, BoxPtr, BoxFloat, check_descr @@ -248,7 +248,7 @@ def do_read_timestamp(cpu, _): x = read_timestamp() if longlong.is_64_bit: - assert isinstance(x, int) # 64-bit + assert is_valid_int(x) # 64-bit return BoxInt(x) else: assert isinstance(x, r_longlong) # 32-bit diff --git a/pypy/jit/metainterp/graphpage.py b/pypy/jit/metainterp/graphpage.py --- a/pypy/jit/metainterp/graphpage.py +++ b/pypy/jit/metainterp/graphpage.py @@ -169,9 +169,9 @@ if op.getopnum() == rop.DEBUG_MERGE_POINT: jd_sd = self.metainterp_sd.jitdrivers_sd[op.getarg(0).getint()] if jd_sd._get_printable_location_ptr: - s = jd_sd.warmstate.get_location_str(op.getarglist()[2:]) + s = jd_sd.warmstate.get_location_str(op.getarglist()[3:]) s = s.replace(',', '.') # we use comma for argument splitting - op_repr = "debug_merge_point(%d, '%s')" % (op.getarg(1).getint(), s) + op_repr = "debug_merge_point(%d, %d, '%s')" % (op.getarg(1).getint(), op.getarg(2).getint(), s) lines.append(op_repr) if is_interesting_guard(op): tgt = op.getdescr()._debug_suboperations[0] diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -4,7 +4,8 @@ from pypy.rpython.ootypesystem import ootype from pypy.rlib.objectmodel import we_are_translated, Symbolic from pypy.rlib.objectmodel import compute_unique_id -from pypy.rlib.rarithmetic import r_int64 +from pypy.rlib.rarithmetic import r_int64, is_valid_int + from 
pypy.conftest import option from pypy.jit.metainterp.resoperation import ResOperation, rop @@ -213,7 +214,7 @@ def __init__(self, value): if not we_are_translated(): - if isinstance(value, int): + if is_valid_int(value): value = int(value) # bool -> int else: assert isinstance(value, Symbolic) @@ -448,7 +449,7 @@ def __init__(self, value=0): if not we_are_translated(): - if isinstance(value, int): + if is_valid_int(value): value = int(value) # bool -> int else: assert isinstance(value, Symbolic) diff --git a/pypy/jit/metainterp/logger.py b/pypy/jit/metainterp/logger.py --- a/pypy/jit/metainterp/logger.py +++ b/pypy/jit/metainterp/logger.py @@ -18,6 +18,10 @@ debug_start("jit-log-noopt-loop") logops = self._log_operations(inputargs, operations, ops_offset) debug_stop("jit-log-noopt-loop") + elif number == -2: + debug_start("jit-log-compiling-loop") + logops = self._log_operations(inputargs, operations, ops_offset) + debug_stop("jit-log-compiling-loop") else: debug_start("jit-log-opt-loop") debug_print("# Loop", number, '(%s)' % name , ":", type, @@ -31,6 +35,10 @@ debug_start("jit-log-noopt-bridge") logops = self._log_operations(inputargs, operations, ops_offset) debug_stop("jit-log-noopt-bridge") + elif number == -2: + debug_start("jit-log-compiling-bridge") + logops = self._log_operations(inputargs, operations, ops_offset) + debug_stop("jit-log-compiling-bridge") else: debug_start("jit-log-opt-bridge") debug_print("# bridge out of Guard", number, @@ -102,9 +110,9 @@ def repr_of_resop(self, op, ops_offset=None): if op.getopnum() == rop.DEBUG_MERGE_POINT: jd_sd = self.metainterp_sd.jitdrivers_sd[op.getarg(0).getint()] - s = jd_sd.warmstate.get_location_str(op.getarglist()[2:]) + s = jd_sd.warmstate.get_location_str(op.getarglist()[3:]) s = s.replace(',', '.') # we use comma for argument splitting - return "debug_merge_point(%d, '%s')" % (op.getarg(1).getint(), s) + return "debug_merge_point(%d, %d, '%s')" % (op.getarg(1).getint(), op.getarg(2).getint(), s) if 
ops_offset is None: offset = -1 else: @@ -141,7 +149,7 @@ if target_token.exported_state: for op in target_token.exported_state.inputarg_setup_ops: debug_print(' ' + self.repr_of_resop(op)) - + def _log_operations(self, inputargs, operations, ops_offset): if not have_debug_prints(): return diff --git a/pypy/jit/metainterp/optimizeopt/__init__.py b/pypy/jit/metainterp/optimizeopt/__init__.py --- a/pypy/jit/metainterp/optimizeopt/__init__.py +++ b/pypy/jit/metainterp/optimizeopt/__init__.py @@ -9,7 +9,7 @@ from pypy.jit.metainterp.optimizeopt.simplify import OptSimplify from pypy.jit.metainterp.optimizeopt.pure import OptPure from pypy.jit.metainterp.optimizeopt.earlyforce import OptEarlyForce -from pypy.rlib.jit import PARAMETERS +from pypy.rlib.jit import PARAMETERS, ENABLE_ALL_OPTS from pypy.rlib.unroll import unrolling_iterable from pypy.rlib.debug import debug_start, debug_stop, debug_print @@ -30,6 +30,9 @@ ALL_OPTS_LIST = [name for name, _ in ALL_OPTS] ALL_OPTS_NAMES = ':'.join([name for name, _ in ALL_OPTS]) +assert ENABLE_ALL_OPTS == ALL_OPTS_NAMES, ( + 'please fix rlib/jit.py to say ENABLE_ALL_OPTS = %r' % (ALL_OPTS_NAMES,)) + def build_opt_chain(metainterp_sd, enable_opts): config = metainterp_sd.config optimizations = [] diff --git a/pypy/jit/metainterp/optimizeopt/intutils.py b/pypy/jit/metainterp/optimizeopt/intutils.py --- a/pypy/jit/metainterp/optimizeopt/intutils.py +++ b/pypy/jit/metainterp/optimizeopt/intutils.py @@ -1,10 +1,9 @@ -from pypy.rlib.rarithmetic import ovfcheck, LONG_BIT +from pypy.rlib.rarithmetic import ovfcheck, LONG_BIT, maxint, is_valid_int from pypy.rlib.objectmodel import we_are_translated from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.metainterp.history import BoxInt, ConstInt -import sys -MAXINT = sys.maxint -MININT = -sys.maxint - 1 +MAXINT = maxint +MININT = -maxint - 1 class IntBound(object): _attrs_ = ('has_upper', 'has_lower', 'upper', 'lower') @@ -16,8 +15,8 @@ self.lower = lower # check for 
unexpected overflows: if not we_are_translated(): - assert type(upper) is not long - assert type(lower) is not long + assert type(upper) is not long or is_valid_int(upper) + assert type(lower) is not long or is_valid_int(lower) # Returns True if the bound was updated def make_le(self, other): diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py --- a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -398,6 +398,40 @@ with raises(InvalidLoop): self.optimize_loop(ops, ops) + def test_issue1045(self): + ops = """ + [i55] + i73 = int_mod(i55, 2) + i75 = int_rshift(i73, 63) + i76 = int_and(2, i75) + i77 = int_add(i73, i76) + i81 = int_eq(i77, 1) + i0 = int_ge(i55, 1) + guard_true(i0) [] + label(i55) + i3 = int_mod(i55, 2) + i5 = int_rshift(i3, 63) + i6 = int_and(2, i5) + i7 = int_add(i3, i6) + i8 = int_eq(i7, 1) + escape(i8) + jump(i55) + """ + expected = """ + [i55] + i73 = int_mod(i55, 2) + i75 = int_rshift(i73, 63) + i76 = int_and(2, i75) + i77 = int_add(i73, i76) + i81 = int_eq(i77, 1) + i0 = int_ge(i55, 1) + guard_true(i0) [] + label(i55, i81) + escape(i81) + jump(i55, i81) + """ + self.optimize_loop(ops, expected) + class OptRenameStrlen(Optimization): def propagate_forward(self, op): dispatch_opt(self, op) @@ -423,7 +457,7 @@ metainterp_sd = FakeMetaInterpStaticData(self.cpu) optimize_unroll(metainterp_sd, loop, [OptRenameStrlen(), OptPure()], True) - def test_optimizer_renaming_boxes(self): + def test_optimizer_renaming_boxes1(self): ops = """ [p1] i1 = strlen(p1) @@ -457,7 +491,6 @@ jump(p1, i11) """ self.optimize_loop(ops, expected) - class TestLLtype(OptimizeoptTestMultiLabel, LLtypeMixin): diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ 
b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5031,6 +5031,42 @@ """ self.optimize_loop(ops, expected) + def test_str_copy_virtual(self): + ops = """ + [i0] + p0 = newstr(8) + strsetitem(p0, 0, i0) + strsetitem(p0, 1, i0) + strsetitem(p0, 2, i0) + strsetitem(p0, 3, i0) + strsetitem(p0, 4, i0) + strsetitem(p0, 5, i0) + strsetitem(p0, 6, i0) + strsetitem(p0, 7, i0) + p1 = newstr(12) + copystrcontent(p0, p1, 0, 0, 8) + strsetitem(p1, 8, 3) + strsetitem(p1, 9, 0) + strsetitem(p1, 10, 0) + strsetitem(p1, 11, 0) + finish(p1) + """ + expected = """ + [i0] + p1 = newstr(12) + strsetitem(p1, 0, i0) + strsetitem(p1, 1, i0) + strsetitem(p1, 2, i0) + strsetitem(p1, 3, i0) + strsetitem(p1, 4, i0) + strsetitem(p1, 5, i0) + strsetitem(p1, 6, i0) + strsetitem(p1, 7, i0) + strsetitem(p1, 8, 3) + finish(p1) + """ + self.optimize_strunicode_loop(ops, expected) + class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): pass diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -9,7 +9,6 @@ from pypy.jit.metainterp.inliner import Inliner from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.metainterp.resume import Snapshot -from pypy.rlib.debug import debug_print import sys, os # FIXME: Introduce some VirtualOptimizer super class instead @@ -121,9 +120,9 @@ limit = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.retrace_limit if cell_token.retraced_count < limit: cell_token.retraced_count += 1 - debug_print('Retracing (%d/%d)' % (cell_token.retraced_count, limit)) + #debug_print('Retracing (%d/%d)' % (cell_token.retraced_count, limit)) else: - debug_print("Retrace count reached, jumping to preamble") + #debug_print("Retrace count reached, jumping to preamble") assert cell_token.target_tokens[0].virtual_state is None jumpop.setdescr(cell_token.target_tokens[0]) 
self.optimizer.send_extra_operation(jumpop) @@ -260,7 +259,7 @@ if op and op.result: preamble_value = exported_state.exported_values[op.result] value = self.optimizer.getvalue(op.result) - if not value.is_virtual(): + if not value.is_virtual() and not value.is_constant(): imp = ValueImporter(self, preamble_value, op) self.optimizer.importable_values[value] = imp newvalue = self.optimizer.getvalue(op.result) @@ -268,12 +267,14 @@ # note that emitting here SAME_AS should not happen, but # in case it does, we would prefer to be suboptimal in asm # to a fatal RPython exception. - if newresult is not op.result and not newvalue.is_constant(): + if newresult is not op.result and \ + not self.short_boxes.has_producer(newresult) and \ + not newvalue.is_constant(): op = ResOperation(rop.SAME_AS, [op.result], newresult) self.optimizer._newoperations.append(op) - if self.optimizer.loop.logops: - debug_print(' Falling back to add extra: ' + - self.optimizer.loop.logops.repr_of_resop(op)) + #if self.optimizer.loop.logops: + # debug_print(' Falling back to add extra: ' + + # self.optimizer.loop.logops.repr_of_resop(op)) self.optimizer.flush() self.optimizer.emitting_dissabled = False @@ -339,8 +340,8 @@ if i == len(newoperations): while j < len(jumpargs): a = jumpargs[j] - if self.optimizer.loop.logops: - debug_print('J: ' + self.optimizer.loop.logops.repr_of_arg(a)) + #if self.optimizer.loop.logops: + # debug_print('J: ' + self.optimizer.loop.logops.repr_of_arg(a)) self.import_box(a, inputargs, short_jumpargs, jumpargs) j += 1 else: @@ -351,11 +352,11 @@ if op.is_guard(): args = args + op.getfailargs() - if self.optimizer.loop.logops: - debug_print('OP: ' + self.optimizer.loop.logops.repr_of_resop(op)) + #if self.optimizer.loop.logops: + # debug_print('OP: ' + self.optimizer.loop.logops.repr_of_resop(op)) for a in args: - if self.optimizer.loop.logops: - debug_print('A: ' + self.optimizer.loop.logops.repr_of_arg(a)) + #if self.optimizer.loop.logops: + # debug_print('A: ' + 
self.optimizer.loop.logops.repr_of_arg(a)) self.import_box(a, inputargs, short_jumpargs, jumpargs) i += 1 newoperations = self.optimizer.get_newoperations() @@ -368,18 +369,18 @@ # that is compatible with the virtual state at the start of the loop modifier = VirtualStateAdder(self.optimizer) final_virtual_state = modifier.get_virtual_state(original_jumpargs) - debug_start('jit-log-virtualstate') - virtual_state.debug_print('Closed loop with ') + #debug_start('jit-log-virtualstate') + #virtual_state.debug_print('Closed loop with ') bad = {} if not virtual_state.generalization_of(final_virtual_state, bad): # We ended up with a virtual state that is not compatible # and we are thus unable to jump to the start of the loop - final_virtual_state.debug_print("Bad virtual state at end of loop, ", - bad) - debug_stop('jit-log-virtualstate') + #final_virtual_state.debug_print("Bad virtual state at end of loop, ", + # bad) + #debug_stop('jit-log-virtualstate') raise InvalidLoop - debug_stop('jit-log-virtualstate') + #debug_stop('jit-log-virtualstate') maxguards = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.max_retrace_guards if self.optimizer.emitted_guards > maxguards: @@ -442,9 +443,9 @@ self.ensure_short_op_emitted(self.short_boxes.producer(a), optimizer, seen) - if self.optimizer.loop.logops: - debug_print(' Emitting short op: ' + - self.optimizer.loop.logops.repr_of_resop(op)) + #if self.optimizer.loop.logops: + # debug_print(' Emitting short op: ' + + # self.optimizer.loop.logops.repr_of_resop(op)) optimizer.send_extra_operation(op) seen[op.result] = True @@ -525,8 +526,8 @@ args = jumpop.getarglist() modifier = VirtualStateAdder(self.optimizer) virtual_state = modifier.get_virtual_state(args) - debug_start('jit-log-virtualstate') - virtual_state.debug_print("Looking for ") + #debug_start('jit-log-virtualstate') + #virtual_state.debug_print("Looking for ") for target in cell_token.target_tokens: if not target.virtual_state: @@ -535,10 +536,10 @@ 
extra_guards = [] bad = {} - debugmsg = 'Did not match ' + #debugmsg = 'Did not match ' if target.virtual_state.generalization_of(virtual_state, bad): ok = True - debugmsg = 'Matched ' + #debugmsg = 'Matched ' else: try: cpu = self.optimizer.cpu @@ -547,13 +548,13 @@ extra_guards) ok = True - debugmsg = 'Guarded to match ' + #debugmsg = 'Guarded to match ' except InvalidLoop: pass - target.virtual_state.debug_print(debugmsg, bad) + #target.virtual_state.debug_print(debugmsg, bad) if ok: - debug_stop('jit-log-virtualstate') + #debug_stop('jit-log-virtualstate') values = [self.getvalue(arg) for arg in jumpop.getarglist()] @@ -574,13 +575,13 @@ newop = inliner.inline_op(shop) self.optimizer.send_extra_operation(newop) except InvalidLoop: - debug_print("Inlining failed unexpectedly", - "jumping to preamble instead") + #debug_print("Inlining failed unexpectedly", + # "jumping to preamble instead") assert cell_token.target_tokens[0].virtual_state is None jumpop.setdescr(cell_token.target_tokens[0]) self.optimizer.send_extra_operation(jumpop) return True - debug_stop('jit-log-virtualstate') + #debug_stop('jit-log-virtualstate') return False class ValueImporter(object): diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py --- a/pypy/jit/metainterp/optimizeopt/virtualstate.py +++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py @@ -681,13 +681,14 @@ self.synthetic[op] = True def debug_print(self, logops): - debug_start('jit-short-boxes') - for box, op in self.short_boxes.items(): - if op: - debug_print(logops.repr_of_arg(box) + ': ' + logops.repr_of_resop(op)) - else: - debug_print(logops.repr_of_arg(box) + ': None') - debug_stop('jit-short-boxes') + if 0: + debug_start('jit-short-boxes') + for box, op in self.short_boxes.items(): + if op: + debug_print(logops.repr_of_arg(box) + ': ' + logops.repr_of_resop(op)) + else: + debug_print(logops.repr_of_arg(box) + ': None') + debug_stop('jit-short-boxes') def 
operations(self): if not we_are_translated(): # For tests diff --git a/pypy/jit/metainterp/optimizeopt/vstring.py b/pypy/jit/metainterp/optimizeopt/vstring.py --- a/pypy/jit/metainterp/optimizeopt/vstring.py +++ b/pypy/jit/metainterp/optimizeopt/vstring.py @@ -10,6 +10,8 @@ from pypy.rlib.unroll import unrolling_iterable from pypy.rpython import annlowlevel from pypy.rpython.lltypesystem import lltype, rstr +from pypy.rlib.rarithmetic import is_valid_int + class StrOrUnicode(object): @@ -505,14 +507,23 @@ if length.is_constant() and length.box.getint() == 0: return - copy_str_content(self, - src.force_box(self), - dst.force_box(self), - srcstart.force_box(self), - dststart.force_box(self), - length.force_box(self), - mode, need_next_offset=False - ) + elif (src.is_virtual() and dst.is_virtual() and srcstart.is_constant() and + dststart.is_constant() and length.is_constant()): + + src_start = srcstart.force_box(self).getint() + dst_start = dststart.force_box(self).getint() + for index in range(length.force_box(self).getint()): + vresult = self.strgetitem(src, optimizer.ConstantValue(ConstInt(index + src_start)), mode) + dst.setitem(index + dst_start, vresult) + else: + copy_str_content(self, + src.force_box(self), + dst.force_box(self), + srcstart.force_box(self), + dststart.force_box(self), + length.force_box(self), + mode, need_next_offset=False + ) def optimize_CALL(self, op): # dispatch based on 'oopspecindex' to a method that handles @@ -721,7 +732,7 @@ for name in dir(OptString): if name.startswith(prefix): value = getattr(EffectInfo, 'OS_' + name[len(prefix):]) - assert isinstance(value, int) and value != 0 + assert is_valid_int(value) and value != 0 result.append((value, getattr(OptString, name))) return unrolling_iterable(result) opt_call_oopspec_ops = _findall_call_oopspec() diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -223,6 +223,7 @@ 
'cast_float_to_singlefloat', 'cast_singlefloat_to_float', 'float_neg', 'float_abs', 'cast_ptr_to_int', 'cast_int_to_ptr', + 'convert_float_bytes_to_longlong', ]: exec py.code.Source(''' @arguments("box") @@ -509,6 +510,15 @@ self._opimpl_setfield_gc_any(sbox, itemsdescr, abox) return sbox + @arguments("descr", "descr", "descr", "descr", "box") + def opimpl_newlist_hint(self, structdescr, lengthdescr, itemsdescr, + arraydescr, sizehintbox): + sbox = self.opimpl_new(structdescr) + self._opimpl_setfield_gc_any(sbox, lengthdescr, history.CONST_FALSE) + abox = self.opimpl_new_array(arraydescr, sizehintbox) + self._opimpl_setfield_gc_any(sbox, itemsdescr, abox) + return sbox + @arguments("box", "descr", "descr", "box") def _opimpl_getlistitem_gc_any(self, listbox, itemsdescr, arraydescr, indexbox): @@ -974,9 +984,11 @@ any_operation = len(self.metainterp.history.operations) > 0 jitdriver_sd = self.metainterp.staticdata.jitdrivers_sd[jdindex] self.verify_green_args(jitdriver_sd, greenboxes) - self.debug_merge_point(jitdriver_sd, jdindex, self.metainterp.portal_call_depth, + self.debug_merge_point(jitdriver_sd, jdindex, + self.metainterp.portal_call_depth, + self.metainterp.call_ids[-1], greenboxes) - + if self.metainterp.seen_loop_header_for_jdindex < 0: if not any_operation: return @@ -1028,11 +1040,11 @@ assembler_call=True) raise ChangeFrame - def debug_merge_point(self, jitdriver_sd, jd_index, portal_call_depth, greenkey): + def debug_merge_point(self, jitdriver_sd, jd_index, portal_call_depth, current_call_id, greenkey): # debugging: produce a DEBUG_MERGE_POINT operation loc = jitdriver_sd.warmstate.get_location_str(greenkey) debug_print(loc) - args = [ConstInt(jd_index), ConstInt(portal_call_depth)] + greenkey + args = [ConstInt(jd_index), ConstInt(portal_call_depth), ConstInt(current_call_id)] + greenkey self.metainterp.history.record(rop.DEBUG_MERGE_POINT, args, None) @arguments("box", "label") @@ -1574,11 +1586,14 @@ self.call_pure_results = args_dict_box() 
self.heapcache = HeapCache() + self.call_ids = [] + self.current_call_id = 0 + def retrace_needed(self, trace): self.partial_trace = trace self.retracing_from = len(self.history.operations) - 1 self.heapcache.reset() - + def perform_call(self, jitcode, boxes, greenkey=None): # causes the metainterp to enter the given subfunction @@ -1592,6 +1607,8 @@ def newframe(self, jitcode, greenkey=None): if jitcode.is_portal: self.portal_call_depth += 1 + self.call_ids.append(self.current_call_id) + self.current_call_id += 1 if greenkey is not None and self.is_main_jitcode(jitcode): self.portal_trace_positions.append( (greenkey, len(self.history.operations))) @@ -1608,6 +1625,7 @@ jitcode = frame.jitcode if jitcode.is_portal: self.portal_call_depth -= 1 + self.call_ids.pop() if frame.greenkey is not None and self.is_main_jitcode(jitcode): self.portal_trace_positions.append( (None, len(self.history.operations))) @@ -1976,7 +1994,7 @@ # Found! Compile it as a loop. # raises in case it works -- which is the common case if self.partial_trace: - if start != self.retracing_from: + if start != self.retracing_from: raise SwitchToBlackhole(ABORT_BAD_LOOP) # For now self.compile_loop(original_boxes, live_arg_boxes, start, resumedescr) # creation of the loop was cancelled! 
@@ -2064,11 +2082,12 @@ pass # XXX we want to do something special in resume descr, # but not now elif opnum == rop.GUARD_NO_OVERFLOW: # an overflow now detected - self.execute_raised(OverflowError(), constant=True) - try: - self.finishframe_exception() - except ChangeFrame: - pass + if not dont_change_position: + self.execute_raised(OverflowError(), constant=True) + try: + self.finishframe_exception() + except ChangeFrame: + pass elif opnum == rop.GUARD_OVERFLOW: # no longer overflowing self.clear_exception() else: @@ -2084,7 +2103,7 @@ if not token.target_tokens: return None return token - + def compile_loop(self, original_boxes, live_arg_boxes, start, resume_at_jump_descr): num_green_args = self.jitdriver_sd.num_green_args greenkey = original_boxes[:num_green_args] @@ -2349,7 +2368,7 @@ # warmstate.py. virtualizable_box = self.virtualizable_boxes[-1] virtualizable = vinfo.unwrap_virtualizable_box(virtualizable_box) - assert not vinfo.gettoken(virtualizable) + assert not vinfo.is_token_nonnull_gcref(virtualizable) # fill the virtualizable with the local boxes self.synchronize_virtualizable() # diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -419,6 +419,7 @@ 'CAST_INT_TO_FLOAT/1', # need some messy code in the backend 'CAST_FLOAT_TO_SINGLEFLOAT/1', 'CAST_SINGLEFLOAT_TO_FLOAT/1', + 'CONVERT_FLOAT_BYTES_TO_LONGLONG/1', # 'INT_LT/2b', 'INT_LE/2b', diff --git a/pypy/jit/metainterp/resume.py b/pypy/jit/metainterp/resume.py --- a/pypy/jit/metainterp/resume.py +++ b/pypy/jit/metainterp/resume.py @@ -1101,14 +1101,14 @@ virtualizable = self.decode_ref(numb.nums[index]) if self.resume_after_guard_not_forced == 1: # in the middle of handle_async_forcing() - assert vinfo.gettoken(virtualizable) - vinfo.settoken(virtualizable, vinfo.TOKEN_NONE) + assert vinfo.is_token_nonnull_gcref(virtualizable) + vinfo.reset_token_gcref(virtualizable) else: # just 
jumped away from assembler (case 4 in the comment in # virtualizable.py) into tracing (case 2); check that vable_token # is and stays 0. Note the call to reset_vable_token() in # warmstate.py. - assert not vinfo.gettoken(virtualizable) + assert not vinfo.is_token_nonnull_gcref(virtualizable) return vinfo.write_from_resume_data_partial(virtualizable, self, numb) def load_value_of_type(self, TYPE, tagged): diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -3,6 +3,7 @@ import py from pypy import conftest +from pypy.jit.codewriter import longlong from pypy.jit.codewriter.policy import JitPolicy, StopAtXPolicy from pypy.jit.metainterp import pyjitpl, history from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT @@ -14,7 +15,8 @@ loop_invariant, elidable, promote, jit_debug, assert_green, AssertGreenFailed, unroll_safe, current_trace_length, look_inside_iff, isconstant, isvirtual, promote_string, set_param, record_known_class) -from pypy.rlib.rarithmetic import ovfcheck +from pypy.rlib.longlong2float import float2longlong +from pypy.rlib.rarithmetic import ovfcheck, is_valid_int from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rpython.ootypesystem import ootype @@ -144,7 +146,7 @@ 'int_mul': 1, 'guard_true': 2, 'int_sub': 2}) - def test_loop_invariant_mul_ovf(self): + def test_loop_invariant_mul_ovf1(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) def f(x, y): res = 0 @@ -235,6 +237,65 @@ 'guard_true': 4, 'int_sub': 4, 'jump': 3, 'int_mul': 3, 'int_add': 4}) + def test_loop_invariant_mul_ovf2(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) + def f(x, y): + res = 0 + while y > 0: + myjitdriver.can_enter_jit(x=x, y=y, res=res) + myjitdriver.jit_merge_point(x=x, y=y, res=res) + b = y * 2 + try: + res += ovfcheck(x * x) + b + except OverflowError: + res += 1 + y -= 1 + 
return res + res = self.meta_interp(f, [sys.maxint, 7]) + assert res == f(sys.maxint, 7) + self.check_trace_count(1) + res = self.meta_interp(f, [6, 7]) + assert res == 308 + + def test_loop_invariant_mul_bridge_ovf1(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x1', 'x2']) + def f(x1, x2, y): + res = 0 + while y > 0: + myjitdriver.can_enter_jit(x1=x1, x2=x2, y=y, res=res) + myjitdriver.jit_merge_point(x1=x1, x2=x2, y=y, res=res) + try: + res += ovfcheck(x1 * x1) + except OverflowError: + res += 1 + if y<32 and (y>>2)&1==0: + x1, x2 = x2, x1 + y -= 1 + return res + res = self.meta_interp(f, [6, sys.maxint, 48]) + assert res == f(6, sys.maxint, 48) + + def test_loop_invariant_mul_bridge_ovf2(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x1', 'x2', 'n']) + def f(x1, x2, n, y): + res = 0 + while y > 0: + myjitdriver.can_enter_jit(x1=x1, x2=x2, y=y, res=res, n=n) + myjitdriver.jit_merge_point(x1=x1, x2=x2, y=y, res=res, n=n) + try: + res += ovfcheck(x1 * x1) + except OverflowError: + res += 1 + y -= 1 + if y&4 == 0: + x1, x2 = x2, x1 + return res + res = self.meta_interp(f, [6, sys.maxint, 32, 48]) + assert res == f(6, sys.maxint, 32, 48) + res = self.meta_interp(f, [sys.maxint, 6, 32, 48]) + assert res == f(sys.maxint, 6, 32, 48) + + def test_loop_invariant_intbox(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) class I: @@ -894,7 +955,7 @@ self.meta_interp(f, [20], repeat=7) # the loop and the entry path as a single trace self.check_jitcell_token_count(1) - + # we get: # ENTER - compile the new loop and the entry bridge # ENTER - compile the leaving path @@ -1411,7 +1472,7 @@ assert res == f(299) self.check_resops(guard_class=0, guard_nonnull=4, guard_nonnull_class=4, guard_isnull=2) - + def test_merge_guardnonnull_guardvalue(self): from pypy.rlib.objectmodel import instantiate @@ -1440,7 +1501,7 @@ assert res == f(299) self.check_resops(guard_value=4, guard_class=0, guard_nonnull=4, 
guard_nonnull_class=0, guard_isnull=2) - + def test_merge_guardnonnull_guardvalue_2(self): from pypy.rlib.objectmodel import instantiate @@ -1469,7 +1530,7 @@ assert res == f(299) self.check_resops(guard_value=4, guard_class=0, guard_nonnull=4, guard_nonnull_class=0, guard_isnull=2) - + def test_merge_guardnonnull_guardclass_guardvalue(self): from pypy.rlib.objectmodel import instantiate @@ -2237,7 +2298,7 @@ self.check_resops(int_rshift=3) bigval = 1 - while (bigval << 3).__class__ is int: + while is_valid_int(bigval << 3): bigval = bigval << 1 assert self.meta_interp(f, [bigval, 5]) == 0 @@ -2282,7 +2343,7 @@ self.check_resops(int_rshift=3) bigval = 1 - while (bigval << 3).__class__ is int: + while is_valid_int(bigval << 3): bigval = bigval << 1 assert self.meta_interp(f, [bigval, 5]) == 0 @@ -2577,7 +2638,7 @@ return sa assert self.meta_interp(f, [20]) == f(20) self.check_resops(int_lt=6, int_le=2, int_ge=4, int_gt=3) - + def test_intbounds_not_generalized2(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'node']) @@ -2618,7 +2679,7 @@ assert self.meta_interp(f, [20, 3]) == f(20, 3) self.check_jitcell_token_count(1) self.check_target_token_count(5) - + def test_max_retrace_guards(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'a']) @@ -2756,7 +2817,7 @@ for cell in get_stats().get_all_jitcell_tokens(): # Initialal trace with two labels and 5 retraces assert len(cell.target_tokens) <= 7 - + def test_nested_retrace(self): myjitdriver = JitDriver(greens = ['pc'], reds = ['n', 'a', 'i', 'j', 'sa']) @@ -2943,11 +3004,18 @@ self.check_resops(arraylen_gc=3) def test_ulonglong_mod(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'sa', 'i']) + myjitdriver = JitDriver(greens = [], reds = ['n', 'a']) + class A: + pass def f(n): sa = i = rffi.cast(rffi.ULONGLONG, 1) + a = A() while i < rffi.cast(rffi.ULONGLONG, n): - myjitdriver.jit_merge_point(sa=sa, n=n, i=i) + a.sa = sa + a.i = i + myjitdriver.jit_merge_point(n=n, a=a) + 
sa = a.sa + i = a.i sa += sa % i i += 1 res = self.meta_interp(f, [32]) @@ -3718,6 +3786,25 @@ assert res == 11 * 12 * 13 self.check_operations_history(int_add=3, int_mul=2) + def test_setinteriorfield(self): + A = lltype.GcArray(lltype.Struct('S', ('x', lltype.Signed))) + a = lltype.malloc(A, 5, immortal=True) + def g(n): + a[n].x = n + 2 + return a[n].x + res = self.interp_operations(g, [1]) + assert res == 3 + + def test_float2longlong(self): + def f(n): + return float2longlong(n) + + for x in [2.5, float("nan"), -2.5, float("inf")]: + # There are tests elsewhere to verify the correctness of this. + expected = float2longlong(x) + res = self.interp_operations(f, [x]) + assert longlong.getfloatstorage(res) == expected + class TestLLtype(BaseLLtypeTests, LLJitMixin): def test_tagged(self): diff --git a/pypy/jit/metainterp/test/test_compile.py b/pypy/jit/metainterp/test/test_compile.py --- a/pypy/jit/metainterp/test/test_compile.py +++ b/pypy/jit/metainterp/test/test_compile.py @@ -14,7 +14,7 @@ ts = typesystem.llhelper def __init__(self): self.seen = [] - def compile_loop(self, inputargs, operations, token, name=''): + def compile_loop(self, inputargs, operations, token, log=True, name=''): self.seen.append((inputargs, operations, token)) class FakeLogger(object): diff --git a/pypy/jit/metainterp/test/test_list.py b/pypy/jit/metainterp/test/test_list.py --- a/pypy/jit/metainterp/test/test_list.py +++ b/pypy/jit/metainterp/test/test_list.py @@ -1,4 +1,5 @@ import py +from pypy.rlib.objectmodel import newlist_hint from pypy.rlib.jit import JitDriver from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin @@ -228,6 +229,28 @@ self.check_resops({'jump': 1, 'int_gt': 2, 'int_add': 2, 'guard_true': 2, 'int_sub': 2}) + def test_newlist_hint(self): + def f(i): + l = newlist_hint(i) + l[0] = 55 + return len(l) + + r = self.interp_operations(f, [3]) + assert r == 0 + + def test_newlist_hint_optimized(self): + driver = JitDriver(greens = [], reds = ['i']) + + def 
f(i): + while i > 0: + driver.jit_merge_point(i=i) + l = newlist_hint(5) + l.append(1) + i -= l[0] + + self.meta_interp(f, [10], listops=True) + self.check_resops(new_array=0, call=0) + class TestOOtype(ListTests, OOJitMixin): pass diff --git a/pypy/jit/metainterp/test/test_logger.py b/pypy/jit/metainterp/test/test_logger.py --- a/pypy/jit/metainterp/test/test_logger.py +++ b/pypy/jit/metainterp/test/test_logger.py @@ -54,7 +54,7 @@ class FakeJitDriver(object): class warmstate(object): get_location_str = staticmethod(lambda args: "dupa") - + class FakeMetaInterpSd: cpu = AbstractCPU() cpu.ts = self.ts @@ -77,7 +77,7 @@ equaloplists(loop.operations, oloop.operations) assert oloop.inputargs == loop.inputargs return logger, loop, oloop - + def test_simple(self): inp = ''' [i0, i1, i2, p3, p4, p5] @@ -116,12 +116,13 @@ def test_debug_merge_point(self): inp = ''' [] - debug_merge_point(0, 0) + debug_merge_point(0, 0, 0) ''' _, loop, oloop = self.reparse(inp, check_equal=False) assert loop.operations[0].getarg(1).getint() == 0 - assert oloop.operations[0].getarg(1)._get_str() == "dupa" - + assert loop.operations[0].getarg(2).getint() == 0 + assert oloop.operations[0].getarg(2)._get_str() == "dupa" + def test_floats(self): inp = ''' [f0] @@ -142,7 +143,7 @@ output = logger.log_loop(loop) assert output.splitlines()[-1] == "jump(i0, descr=)" pure_parse(output) - + def test_guard_descr(self): namespace = {'fdescr': BasicFailDescr()} inp = ''' @@ -154,7 +155,7 @@ output = logger.log_loop(loop) assert output.splitlines()[-1] == "guard_true(i0, descr=) [i0]" pure_parse(output) - + logger = Logger(self.make_metainterp_sd(), guard_number=False) output = logger.log_loop(loop) lastline = output.splitlines()[-1] diff --git a/pypy/jit/metainterp/test/test_quasiimmut.py b/pypy/jit/metainterp/test/test_quasiimmut.py --- a/pypy/jit/metainterp/test/test_quasiimmut.py +++ b/pypy/jit/metainterp/test/test_quasiimmut.py @@ -8,7 +8,7 @@ from pypy.jit.metainterp.quasiimmut import 
get_current_qmut_instance from pypy.jit.metainterp.test.support import LLJitMixin from pypy.jit.codewriter.policy import StopAtXPolicy -from pypy.rlib.jit import JitDriver, dont_look_inside +from pypy.rlib.jit import JitDriver, dont_look_inside, unroll_safe def test_get_current_qmut_instance(): @@ -480,6 +480,32 @@ assert res == 1 self.check_jitcell_token_count(2) + def test_for_loop_array(self): + myjitdriver = JitDriver(greens=[], reds=["n", "i"]) + class Foo(object): + _immutable_fields_ = ["x?[*]"] + def __init__(self, x): + self.x = x + f = Foo([1, 3, 5, 6]) + @unroll_safe + def g(v): + for x in f.x: + if x & 1 == 0: + v += 1 + return v + def main(n): + i = 0 + while i < n: + myjitdriver.jit_merge_point(n=n, i=i) + i = g(i) + return i + res = self.meta_interp(main, [10]) + assert res == 10 + self.check_resops({ + "int_add": 2, "int_lt": 2, "jump": 1, "guard_true": 2, + "guard_not_invalidated": 2 + }) + class TestLLtypeGreenFieldsTests(QuasiImmutTests, LLJitMixin): pass diff --git a/pypy/jit/metainterp/test/test_warmspot.py b/pypy/jit/metainterp/test/test_warmspot.py --- a/pypy/jit/metainterp/test/test_warmspot.py +++ b/pypy/jit/metainterp/test/test_warmspot.py @@ -13,7 +13,7 @@ class WarmspotTests(object): - + def test_basic(self): mydriver = JitDriver(reds=['a'], greens=['i']) @@ -77,16 +77,16 @@ self.meta_interp(f, [123, 10]) assert len(get_stats().locations) >= 4 for loc in get_stats().locations: - assert loc == (0, 123) + assert loc == (0, 0, 123) def test_set_param_enable_opts(self): from pypy.rpython.annlowlevel import llstr, hlstr - + myjitdriver = JitDriver(greens = [], reds = ['n']) class A(object): def m(self, n): return n-1 - + def g(n): while n > 0: myjitdriver.can_enter_jit(n=n) @@ -332,7 +332,7 @@ ts = llhelper translate_support_code = False stats = "stats" - + def get_fail_descr_number(self, d): return -1 @@ -352,7 +352,7 @@ return "not callable" driver = JitDriver(reds = ['red'], greens = ['green']) - + def f(green): red = 0 while red < 10: 
diff --git a/pypy/jit/metainterp/virtualizable.py b/pypy/jit/metainterp/virtualizable.py --- a/pypy/jit/metainterp/virtualizable.py +++ b/pypy/jit/metainterp/virtualizable.py @@ -262,15 +262,15 @@ force_now._dont_inline_ = True self.force_now = force_now - def gettoken(virtualizable): + def is_token_nonnull_gcref(virtualizable): virtualizable = cast_gcref_to_vtype(virtualizable) - return virtualizable.vable_token - self.gettoken = gettoken + return bool(virtualizable.vable_token) + self.is_token_nonnull_gcref = is_token_nonnull_gcref - def settoken(virtualizable, token): + def reset_token_gcref(virtualizable): virtualizable = cast_gcref_to_vtype(virtualizable) - virtualizable.vable_token = token - self.settoken = settoken + virtualizable.vable_token = VirtualizableInfo.TOKEN_NONE + self.reset_token_gcref = reset_token_gcref def _freeze_(self): return True diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -100,7 +100,7 @@ if not kwds.get('translate_support_code', False): warmrunnerdesc.metainterp_sd.profiler.finish() warmrunnerdesc.metainterp_sd.cpu.finish_once() - print '~~~ return value:', res + print '~~~ return value:', repr(res) while repeat > 1: print '~' * 79 res1 = interp.eval_graph(graph, args) diff --git a/pypy/jit/tl/tinyframe/tinyframe.py b/pypy/jit/tl/tinyframe/tinyframe.py --- a/pypy/jit/tl/tinyframe/tinyframe.py +++ b/pypy/jit/tl/tinyframe/tinyframe.py @@ -210,7 +210,7 @@ def repr(self): return "" % (self.outer.repr(), self.inner.repr()) -driver = JitDriver(greens = ['code', 'i'], reds = ['self'], +driver = JitDriver(greens = ['i', 'code'], reds = ['self'], virtualizables = ['self']) class Frame(object): diff --git a/pypy/jit/tl/tlc.py b/pypy/jit/tl/tlc.py --- a/pypy/jit/tl/tlc.py +++ b/pypy/jit/tl/tlc.py @@ -6,6 +6,8 @@ from pypy.jit.tl.tlopcode import * from pypy.jit.tl import tlopcode from pypy.rlib.jit import JitDriver, elidable +from 
pypy.rlib.rarithmetic import is_valid_int + class Obj(object): @@ -219,7 +221,7 @@ class Frame(object): def __init__(self, args, pc): - assert isinstance(pc, int) + assert is_valid_int(pc) self.args = args self.pc = pc self.stack = [] @@ -239,7 +241,7 @@ return interp_eval(code, pc, args, pool).int_o() def interp_eval(code, pc, args, pool): - assert isinstance(pc, int) + assert is_valid_int(pc) frame = Frame(args, pc) pc = frame.pc diff --git a/pypy/jit/tool/test/test_oparser.py b/pypy/jit/tool/test/test_oparser.py --- a/pypy/jit/tool/test/test_oparser.py +++ b/pypy/jit/tool/test/test_oparser.py @@ -146,16 +146,18 @@ def test_debug_merge_point(self): x = ''' [] - debug_merge_point(0, "info") - debug_merge_point(0, 'info') - debug_merge_point(1, ' info') - debug_merge_point(0, '(stuff) #1') + debug_merge_point(0, 0, "info") + debug_merge_point(0, 0, 'info') + debug_merge_point(1, 1, ' info') + debug_merge_point(0, 0, '(stuff) #1') ''' loop = self.parse(x) - assert loop.operations[0].getarg(1)._get_str() == 'info' - assert loop.operations[1].getarg(1)._get_str() == 'info' - assert loop.operations[2].getarg(1)._get_str() == " info" - assert loop.operations[3].getarg(1)._get_str() == "(stuff) #1" + assert loop.operations[0].getarg(2)._get_str() == 'info' + assert loop.operations[0].getarg(1).value == 0 + assert loop.operations[1].getarg(2)._get_str() == 'info' + assert loop.operations[2].getarg(2)._get_str() == " info" + assert loop.operations[2].getarg(1).value == 1 + assert loop.operations[3].getarg(2)._get_str() == "(stuff) #1" def test_descr_with_obj_print(self): diff --git a/pypy/module/__builtin__/app_inspect.py b/pypy/module/__builtin__/app_inspect.py --- a/pypy/module/__builtin__/app_inspect.py +++ b/pypy/module/__builtin__/app_inspect.py @@ -8,8 +8,6 @@ from __pypy__ import lookup_special def _caller_locals(): - # note: the reason why this is working is because the functions in here are - # compiled by geninterp, so they don't have a frame return 
sys._getframe(0).f_locals def vars(*obj): @@ -26,17 +24,6 @@ except AttributeError: raise TypeError, "vars() argument must have __dict__ attribute" -# Replaced by the interp-level helper space.callable(): -##def callable(ob): -## import __builtin__ # XXX this is insane but required for now for geninterp -## for c in type(ob).__mro__: -## if '__call__' in c.__dict__: -## if isinstance(ob, __builtin__._instance): # old style instance! -## return getattr(ob, '__call__', None) is not None -## return True -## else: -## return False - def dir(*args): """dir([object]) -> list of strings diff --git a/pypy/module/__builtin__/interp_memoryview.py b/pypy/module/__builtin__/interp_memoryview.py --- a/pypy/module/__builtin__/interp_memoryview.py +++ b/pypy/module/__builtin__/interp_memoryview.py @@ -69,6 +69,10 @@ return W_MemoryView(buf) def descr_buffer(self, space): + """Note that memoryview() objects in PyPy support buffer(), whereas + not in CPython; but CPython supports passing memoryview() to most + built-in functions that accept buffers, with the notable exception + of the buffer() built-in.""" return space.wrap(self.buf) def descr_tobytes(self, space): diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -1,5 +1,5 @@ +import sys -# Package initialisation from pypy.interpreter.mixedmodule import MixedModule from pypy.module.imp.importing import get_pyc_magic @@ -12,6 +12,19 @@ "UnicodeBuilder": "interp_builders.W_UnicodeBuilder", } +class TimeModule(MixedModule): + appleveldefs = {} + interpleveldefs = {} + if sys.platform.startswith("linux"): + interpleveldefs["clock_gettime"] = "interp_time.clock_gettime" + interpleveldefs["clock_getres"] = "interp_time.clock_getres" + for name in [ + "CLOCK_REALTIME", "CLOCK_MONOTONIC", "CLOCK_MONOTONIC_RAW", + "CLOCK_PROCESS_CPUTIME_ID", "CLOCK_THREAD_CPUTIME_ID" + ]: + interpleveldefs[name] = "space.wrap(interp_time.%s)" % 
name + + class Module(MixedModule): appleveldefs = { } @@ -32,6 +45,7 @@ submodules = { "builders": BuildersModule, + "time": TimeModule, } def setup_after_space_initialization(self): diff --git a/pypy/module/__pypy__/interp_time.py b/pypy/module/__pypy__/interp_time.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/interp_time.py @@ -0,0 +1,64 @@ +import sys + +from pypy.interpreter.error import exception_from_errno +from pypy.interpreter.gateway import unwrap_spec +from pypy.rpython.lltypesystem import rffi, lltype +from pypy.rpython.tool import rffi_platform +from pypy.translator.tool.cbuild import ExternalCompilationInfo + + +class CConfig: + _compilation_info_ = ExternalCompilationInfo( + includes=["time.h"], + libraries=["rt"], + ) + + HAS_CLOCK_GETTIME = rffi_platform.Has('clock_gettime') + + CLOCK_REALTIME = rffi_platform.DefinedConstantInteger("CLOCK_REALTIME") + CLOCK_MONOTONIC = rffi_platform.DefinedConstantInteger("CLOCK_MONOTONIC") + CLOCK_MONOTONIC_RAW = rffi_platform.DefinedConstantInteger("CLOCK_MONOTONIC_RAW") + CLOCK_PROCESS_CPUTIME_ID = rffi_platform.DefinedConstantInteger("CLOCK_PROCESS_CPUTIME_ID") + CLOCK_THREAD_CPUTIME_ID = rffi_platform.DefinedConstantInteger("CLOCK_THREAD_CPUTIME_ID") + + TIMESPEC = rffi_platform.Struct("struct timespec", [ + ("tv_sec", rffi.TIME_T), + ("tv_nsec", rffi.LONG), + ]) + +cconfig = rffi_platform.configure(CConfig) + +HAS_CLOCK_GETTIME = cconfig["HAS_CLOCK_GETTIME"] + +CLOCK_REALTIME = cconfig["CLOCK_REALTIME"] +CLOCK_MONOTONIC = cconfig["CLOCK_MONOTONIC"] +CLOCK_MONOTONIC_RAW = cconfig["CLOCK_MONOTONIC_RAW"] +CLOCK_PROCESS_CPUTIME_ID = cconfig["CLOCK_PROCESS_CPUTIME_ID"] +CLOCK_THREAD_CPUTIME_ID = cconfig["CLOCK_THREAD_CPUTIME_ID"] + +TIMESPEC = cconfig["TIMESPEC"] + +c_clock_gettime = rffi.llexternal("clock_gettime", + [lltype.Signed, lltype.Ptr(TIMESPEC)], rffi.INT, + compilation_info=CConfig._compilation_info_, threadsafe=False +) +c_clock_getres = rffi.llexternal("clock_getres", + 
[lltype.Signed, lltype.Ptr(TIMESPEC)], rffi.INT, + compilation_info=CConfig._compilation_info_, threadsafe=False +) + + at unwrap_spec(clk_id="c_int") +def clock_gettime(space, clk_id): + with lltype.scoped_alloc(TIMESPEC) as tp: + ret = c_clock_gettime(clk_id, tp) + if ret != 0: + raise exception_from_errno(space, space.w_IOError) + return space.wrap(tp.c_tv_sec + tp.c_tv_nsec * 1e-9) + + at unwrap_spec(clk_id="c_int") +def clock_getres(space, clk_id): + with lltype.scoped_alloc(TIMESPEC) as tp: + ret = c_clock_getres(clk_id, tp) + if ret != 0: + raise exception_from_errno(space, space.w_IOError) + return space.wrap(tp.c_tv_sec + tp.c_tv_nsec * 1e-9) diff --git a/pypy/module/__pypy__/test/test_time.py b/pypy/module/__pypy__/test/test_time.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/test/test_time.py @@ -0,0 +1,26 @@ +import py + +from pypy.module.__pypy__.interp_time import HAS_CLOCK_GETTIME + + +class AppTestTime(object): + def setup_class(cls): + if not HAS_CLOCK_GETTIME: + py.test.skip("need time.clock_gettime") + + def test_clock_realtime(self): + from __pypy__ import time + res = time.clock_gettime(time.CLOCK_REALTIME) + assert isinstance(res, float) + + def test_clock_monotonic(self): + from __pypy__ import time + a = time.clock_gettime(time.CLOCK_MONOTONIC) + b = time.clock_gettime(time.CLOCK_MONOTONIC) + assert a <= b + + def test_clock_getres(self): + from __pypy__ import time + res = time.clock_getres(time.CLOCK_REALTIME) + assert res > 0.0 + assert res <= 1.0 diff --git a/pypy/module/_ffi/test/test__ffi.py b/pypy/module/_ffi/test/test__ffi.py --- a/pypy/module/_ffi/test/test__ffi.py +++ b/pypy/module/_ffi/test/test__ffi.py @@ -100,7 +100,10 @@ from _ffi import CDLL, types libm = CDLL(self.libm_name) pow_addr = libm.getaddressindll('pow') - assert pow_addr == self.pow_addr & (sys.maxint*2-1) + fff = sys.maxint*2-1 + if sys.platform == 'win32': + fff = sys.maxint*2+1 + assert pow_addr == self.pow_addr & fff def 
test_func_fromaddr(self): import sys diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -5,14 +5,13 @@ from pypy.rlib import streamio from pypy.rlib.rarithmetic import r_longlong from pypy.rlib.rstring import StringBuilder -from pypy.module._file.interp_stream import (W_AbstractStream, StreamErrors, - wrap_streamerror, wrap_oserror_as_ioerror) +from pypy.module._file.interp_stream import W_AbstractStream, StreamErrors from pypy.module.posix.interp_posix import dispatch_filename from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.typedef import (TypeDef, GetSetProperty, interp_attrproperty, make_weakref_descr, interp_attrproperty_w) from pypy.interpreter.gateway import interp2app, unwrap_spec - +from pypy.interpreter.streamutil import wrap_streamerror, wrap_oserror_as_ioerror class W_File(W_AbstractStream): """An interp-level file object. This implements the same interface than diff --git a/pypy/module/_file/interp_stream.py b/pypy/module/_file/interp_stream.py --- a/pypy/module/_file/interp_stream.py +++ b/pypy/module/_file/interp_stream.py @@ -2,27 +2,13 @@ from pypy.rlib import streamio from pypy.rlib.streamio import StreamErrors -from pypy.interpreter.error import OperationError, wrap_oserror2 +from pypy.interpreter.error import OperationError from pypy.interpreter.baseobjspace import ObjSpace, Wrappable from pypy.interpreter.typedef import TypeDef from pypy.interpreter.gateway import interp2app +from pypy.interpreter.streamutil import wrap_streamerror, wrap_oserror_as_ioerror -def wrap_streamerror(space, e, w_filename=None): - if isinstance(e, streamio.StreamError): - return OperationError(space.w_ValueError, - space.wrap(e.message)) - elif isinstance(e, OSError): - return wrap_oserror_as_ioerror(space, e, w_filename) - else: - # should not happen: wrap_streamerror() is only called when - # StreamErrors = (OSError, 
StreamError) are raised - return OperationError(space.w_IOError, space.w_None) - -def wrap_oserror_as_ioerror(space, e, w_filename=None): - return wrap_oserror2(space, e, w_filename, - w_exception_class=space.w_IOError) - class W_AbstractStream(Wrappable): """Base class for interp-level objects that expose streams to app-level""" slock = None diff --git a/pypy/module/_io/__init__.py b/pypy/module/_io/__init__.py --- a/pypy/module/_io/__init__.py +++ b/pypy/module/_io/__init__.py @@ -28,6 +28,7 @@ } def init(self, space): + MixedModule.init(self, space) w_UnsupportedOperation = space.call_function( space.w_type, space.wrap('UnsupportedOperation'), @@ -35,3 +36,9 @@ space.newdict()) space.setattr(self, space.wrap('UnsupportedOperation'), w_UnsupportedOperation) + + def shutdown(self, space): + # at shutdown, flush all open streams. Ignore I/O errors. + from pypy.module._io.interp_iobase import get_autoflushher + get_autoflushher(space).flush_all(space) + diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py --- a/pypy/module/_io/interp_iobase.py +++ b/pypy/module/_io/interp_iobase.py @@ -5,6 +5,8 @@ from pypy.interpreter.gateway import interp2app from pypy.interpreter.error import OperationError, operationerrfmt from pypy.rlib.rstring import StringBuilder +from pypy.rlib import rweakref + DEFAULT_BUFFER_SIZE = 8192 @@ -43,6 +45,8 @@ self.space = space self.w_dict = space.newdict() self.__IOBase_closed = False + self.streamholder = None # needed by AutoFlusher + get_autoflushher(space).add(self) def getdict(self, space): return self.w_dict @@ -98,6 +102,7 @@ space.call_method(self, "flush") finally: self.__IOBase_closed = True + get_autoflushher(space).remove(self) def flush_w(self, space): if self._CLOSED(): @@ -303,3 +308,60 @@ read = interp2app(W_RawIOBase.read_w), readall = interp2app(W_RawIOBase.readall_w), ) + + +# ------------------------------------------------------------ +# functions to make sure that all streams are flushed on 
exit +# ------------------------------------------------------------ + +class StreamHolder(object): + + def __init__(self, w_iobase): + self.w_iobase_ref = rweakref.ref(w_iobase) + w_iobase.autoflusher = self + + def autoflush(self, space): + w_iobase = self.w_iobase_ref() + if w_iobase is not None: + try: + space.call_method(w_iobase, 'flush') + except OperationError, e: + # if it's an IOError or ValueError, ignore it (ValueError is + # raised if by chance we are trying to flush a file which has + # already been closed) + if not (e.match(space, space.w_IOError) or + e.match(space, space.w_ValueError)): + raise + + +class AutoFlusher(object): + + def __init__(self, space): + self.streams = {} + + def add(self, w_iobase): + assert w_iobase.streamholder is None + holder = StreamHolder(w_iobase) + w_iobase.streamholder = holder + self.streams[holder] = None + + def remove(self, w_iobase): + holder = w_iobase.streamholder + if holder is not None: + del self.streams[holder] + + def flush_all(self, space): + while self.streams: + for streamholder in self.streams.keys(): + try: + del self.streams[streamholder] + except KeyError: + pass # key was removed in the meantime + else: + streamholder.autoflush(space) + + +def get_autoflushher(space): + return space.fromcache(AutoFlusher) + + diff --git a/pypy/module/_io/test/test_fileio.py b/pypy/module/_io/test/test_fileio.py --- a/pypy/module/_io/test/test_fileio.py +++ b/pypy/module/_io/test/test_fileio.py @@ -160,3 +160,42 @@ f.close() assert repr(f) == "<_io.FileIO [closed]>" +def test_flush_at_exit(): + from pypy import conftest + from pypy.tool.option import make_config, make_objspace + from pypy.tool.udir import udir + + tmpfile = udir.join('test_flush_at_exit') + config = make_config(conftest.option) + space = make_objspace(config) + space.appexec([space.wrap(str(tmpfile))], """(tmpfile): + import io + f = io.open(tmpfile, 'w', encoding='ascii') + f.write('42') + # no flush() and no close() + import sys; 
sys._keepalivesomewhereobscure = f + """) + space.finish() + assert tmpfile.read() == '42' + +def test_flush_at_exit_IOError_and_ValueError(): + from pypy import conftest + from pypy.tool.option import make_config, make_objspace + + config = make_config(conftest.option) + space = make_objspace(config) + space.appexec([], """(): + import io + class MyStream(io.IOBase): + def flush(self): + raise IOError + + class MyStream2(io.IOBase): + def flush(self): + raise ValueError + + s = MyStream() + s2 = MyStream2() + import sys; sys._keepalivesomewhereobscure = s + """) + space.finish() # the IOError has been ignored diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -22,7 +22,7 @@ eci = ExternalCompilationInfo( separate_module_files=[srcdir.join('profiling.c')], export_symbols=['pypy_setup_profiling', 'pypy_teardown_profiling']) - + c_setup_profiling = rffi.llexternal('pypy_setup_profiling', [], lltype.Void, compilation_info = eci) @@ -228,7 +228,7 @@ if w_self.builtins: key = create_spec(space, w_arg) w_self._enter_builtin_call(key) - elif event == 'c_return': + elif event == 'c_return' or event == 'c_exception': if w_self.builtins: key = create_spec(space, w_arg) w_self._enter_builtin_return(key) @@ -237,7 +237,7 @@ pass class W_Profiler(Wrappable): - + def __init__(self, space, w_callable, time_unit, subcalls, builtins): self.subcalls = subcalls self.builtins = builtins diff --git a/pypy/module/_lsprof/test/test_cprofile.py b/pypy/module/_lsprof/test/test_cprofile.py --- a/pypy/module/_lsprof/test/test_cprofile.py +++ b/pypy/module/_lsprof/test/test_cprofile.py @@ -117,6 +117,20 @@ assert 0.9 < subentry.totaltime < 2.9 #assert 0.9 < subentry.inlinetime < 2.9 + def test_builtin_exception(self): + import math + import _lsprof + + prof = _lsprof.Profiler() + prof.enable() + try: + math.sqrt("a") + except TypeError: + pass + prof.disable() + stats 
= prof.getstats() + assert len(stats) == 2 + def test_use_cprofile(self): import sys, os # XXX this is evil trickery to walk around the fact that we don't diff --git a/pypy/module/_md5/test/test_md5.py b/pypy/module/_md5/test/test_md5.py --- a/pypy/module/_md5/test/test_md5.py +++ b/pypy/module/_md5/test/test_md5.py @@ -28,7 +28,7 @@ assert self.md5.digest_size == 16 #assert self.md5.digestsize == 16 -- not on CPython assert self.md5.md5().digest_size == 16 - if sys.version >= (2, 5): + if sys.version_info >= (2, 5): assert self.md5.blocksize == 1 assert self.md5.md5().digestsize == 16 diff --git a/pypy/module/_multiprocessing/test/test_semaphore.py b/pypy/module/_multiprocessing/test/test_semaphore.py --- a/pypy/module/_multiprocessing/test/test_semaphore.py +++ b/pypy/module/_multiprocessing/test/test_semaphore.py @@ -2,6 +2,7 @@ from pypy.module._multiprocessing.interp_semaphore import ( RECURSIVE_MUTEX, SEMAPHORE) + class AppTestSemaphore: def setup_class(cls): space = gettestobjspace(usemodules=('_multiprocessing', 'thread')) diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -2,6 +2,7 @@ import os import py + class AppTestSSL: def setup_class(cls): space = gettestobjspace(usemodules=('_ssl', '_socket')) @@ -29,7 +30,6 @@ assert isinstance(_ssl.SSL_ERROR_EOF, int) assert isinstance(_ssl.SSL_ERROR_INVALID_ERROR_CODE, int) - assert isinstance(_ssl.OPENSSL_VERSION_NUMBER, (int, long)) assert isinstance(_ssl.OPENSSL_VERSION_INFO, tuple) assert len(_ssl.OPENSSL_VERSION_INFO) == 5 assert isinstance(_ssl.OPENSSL_VERSION, str) diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -11,6 +11,7 @@ from pypy.objspace.std.register_all import register_all from pypy.rlib.rarithmetic import ovfcheck from pypy.rlib.unroll import unrolling_iterable +from 
pypy.rlib.objectmodel import specialize from pypy.rpython.lltypesystem import lltype, rffi @@ -159,13 +160,15 @@ def make_array(mytype): + W_ArrayBase = globals()['W_ArrayBase'] + class W_Array(W_ArrayBase): itemsize = mytype.bytes typecode = mytype.typecode @staticmethod def register(typeorder): - typeorder[W_Array] = [] + typeorder[W_Array] = [(W_ArrayBase, None)] def __init__(self, space): self.space = space @@ -583,13 +586,29 @@ raise OperationError(space.w_ValueError, space.wrap(msg)) # Compare methods - def cmp__Array_ANY(space, self, other): - if isinstance(other, W_ArrayBase): - w_lst1 = array_tolist__Array(space, self) - w_lst2 = space.call_method(other, 'tolist') - return space.cmp(w_lst1, w_lst2) - else: - return space.w_NotImplemented + @specialize.arg(3) + def _cmp_impl(space, self, other, space_fn): + w_lst1 = array_tolist__Array(space, self) + w_lst2 = space.call_method(other, 'tolist') + return space_fn(w_lst1, w_lst2) + + def eq__Array_ArrayBase(space, self, other): + return _cmp_impl(space, self, other, space.eq) + + def ne__Array_ArrayBase(space, self, other): + return _cmp_impl(space, self, other, space.ne) + + def lt__Array_ArrayBase(space, self, other): + return _cmp_impl(space, self, other, space.lt) + + def le__Array_ArrayBase(space, self, other): + return _cmp_impl(space, self, other, space.le) + + def gt__Array_ArrayBase(space, self, other): + return _cmp_impl(space, self, other, space.gt) + + def ge__Array_ArrayBase(space, self, other): + return _cmp_impl(space, self, other, space.ge) # Misc methods diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -536,12 +536,6 @@ assert (a >= c) is False assert (c >= a) is True - assert cmp(a, a) == 0 - assert cmp(a, b) == 0 - assert cmp(a, c) < 0 - assert cmp(b, a) == 0 - assert cmp(c, a) > 0 - def test_reduce(self): import pickle a = self.array('i', [1, 2, 3]) @@ -851,8 
+845,11 @@ cls.maxint = sys.maxint class AppTestArray(BaseArrayTests): + OPTIONS = {} + def setup_class(cls): - cls.space = gettestobjspace(usemodules=('array', 'struct', '_rawffi')) + cls.space = gettestobjspace(usemodules=('array', 'struct', '_rawffi'), + **cls.OPTIONS) cls.w_array = cls.space.appexec([], """(): import array return array.array @@ -874,3 +871,7 @@ a = self.array('b', range(4)) a[::-1] = a assert a == self.array('b', [3, 2, 1, 0]) + + +class AppTestArrayBuiltinShortcut(AppTestArray): + OPTIONS = {'objspace.std.builtinshortcut': True} diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -352,6 +352,9 @@ 'PyObject_AsReadBuffer', 'PyObject_AsWriteBuffer', 'PyObject_CheckReadBuffer', 'PyOS_getsig', 'PyOS_setsig', + 'PyThread_create_key', 'PyThread_delete_key', 'PyThread_set_key_value', + 'PyThread_get_key_value', 'PyThread_delete_key_value', + 'PyThread_ReInitTLS', 'PyStructSequence_InitType', 'PyStructSequence_New', ] @@ -385,6 +388,7 @@ "Tuple": "space.w_tuple", "List": "space.w_list", "Set": "space.w_set", + "FrozenSet": "space.w_frozenset", "Int": "space.w_int", "Bool": "space.w_bool", "Float": "space.w_float", @@ -406,7 +410,7 @@ }.items(): GLOBALS['Py%s_Type#' % (cpyname, )] = ('PyTypeObject*', pypyexpr) - for cpyname in 'Method List Int Long Dict Tuple Class'.split(): + for cpyname in 'Method List Long Dict Tuple Class'.split(): FORWARD_DECLS.append('typedef struct { PyObject_HEAD } ' 'Py%sObject' % (cpyname, )) build_exported_objects() @@ -616,6 +620,10 @@ lambda space: init_pycobject(), lambda space: init_capsule(), ]) + from pypy.module.posix.interp_posix import add_fork_hook + reinit_tls = rffi.llexternal('PyThread_ReInitTLS', [], lltype.Void, + compilation_info=eci) + add_fork_hook('child', reinit_tls) def init_function(func): INIT_FUNCTIONS.append(func) @@ -816,6 +824,8 @@ pypy_decls.append("#ifdef __cplusplus") pypy_decls.append("extern \"C\" {") 
pypy_decls.append("#endif\n") + pypy_decls.append('#define Signed long /* xxx temporary fix */\n') + pypy_decls.append('#define Unsigned unsigned long /* xxx temporary fix */\n') for decl in FORWARD_DECLS: pypy_decls.append("%s;" % (decl,)) @@ -847,6 +857,8 @@ typ = 'PyObject*' pypy_decls.append('PyAPI_DATA(%s) %s;' % (typ, name)) + pypy_decls.append('#undef Signed /* xxx temporary fix */\n') + pypy_decls.append('#undef Unsigned /* xxx temporary fix */\n') pypy_decls.append("#ifdef __cplusplus") pypy_decls.append("}") pypy_decls.append("#endif") @@ -925,6 +937,7 @@ source_dir / "structseq.c", source_dir / "capsule.c", source_dir / "pysignals.c", + source_dir / "thread.c", ], separate_module_sources=separate_module_sources, export_symbols=export_symbols_eci, diff --git a/pypy/module/cpyext/eval.py b/pypy/module/cpyext/eval.py --- a/pypy/module/cpyext/eval.py +++ b/pypy/module/cpyext/eval.py @@ -1,16 +1,24 @@ from pypy.interpreter.error import OperationError +from pypy.interpreter.astcompiler import consts from pypy.rpython.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( cpython_api, CANNOT_FAIL, CONST_STRING, FILEP, fread, feof, Py_ssize_tP, cpython_struct) from pypy.module.cpyext.pyobject import PyObject, borrow_from from pypy.module.cpyext.pyerrors import PyErr_SetFromErrno +from pypy.module.cpyext.funcobject import PyCodeObject from pypy.module.__builtin__ import compiling PyCompilerFlags = cpython_struct( - "PyCompilerFlags", ()) + "PyCompilerFlags", (("cf_flags", rffi.INT),)) PyCompilerFlagsPtr = lltype.Ptr(PyCompilerFlags) +PyCF_MASK = (consts.CO_FUTURE_DIVISION | + consts.CO_FUTURE_ABSOLUTE_IMPORT | + consts.CO_FUTURE_WITH_STATEMENT | + consts.CO_FUTURE_PRINT_FUNCTION | + consts.CO_FUTURE_UNICODE_LITERALS) + @cpython_api([PyObject, PyObject, PyObject], PyObject) def PyEval_CallObjectWithKeywords(space, w_obj, w_arg, w_kwds): return space.call(w_obj, w_arg, w_kwds) @@ -48,6 +56,17 @@ return None return borrow_from(None, caller.w_globals) 
+ at cpython_api([PyCodeObject, PyObject, PyObject], PyObject) +def PyEval_EvalCode(space, w_code, w_globals, w_locals): + """This is a simplified interface to PyEval_EvalCodeEx(), with just + the code object, and the dictionaries of global and local variables. + The other arguments are set to NULL.""" + if w_globals is None: + w_globals = space.w_None + if w_locals is None: + w_locals = space.w_None + return compiling.eval(space, w_code, w_globals, w_locals) + @cpython_api([PyObject, PyObject], PyObject) def PyObject_CallObject(space, w_obj, w_arg): """ @@ -74,7 +93,7 @@ Py_file_input = 257 Py_eval_input = 258 -def compile_string(space, source, filename, start): +def compile_string(space, source, filename, start, flags=0): w_source = space.wrap(source) start = rffi.cast(lltype.Signed, start) if start == Py_file_input: @@ -86,7 +105,7 @@ else: raise OperationError(space.w_ValueError, space.wrap( "invalid mode parameter for compilation")) - return compiling.compile(space, w_source, filename, mode) + return compiling.compile(space, w_source, filename, mode, flags) def run_string(space, source, filename, start, w_globals, w_locals): w_code = compile_string(space, source, filename, start) @@ -109,6 +128,24 @@ filename = "" return run_string(space, source, filename, start, w_globals, w_locals) + at cpython_api([rffi.CCHARP, rffi.INT_real, PyObject, PyObject, + PyCompilerFlagsPtr], PyObject) +def PyRun_StringFlags(space, source, start, w_globals, w_locals, flagsptr): + """Execute Python source code from str in the context specified by the + dictionaries globals and locals with the compiler flags specified by + flags. The parameter start specifies the start token that should be used to + parse the source code. 
+ + Returns the result of executing the code as a Python object, or NULL if an + exception was raised.""" + source = rffi.charp2str(source) + if flagsptr: + flags = rffi.cast(lltype.Signed, flagsptr.c_cf_flags) + else: + flags = 0 + w_code = compile_string(space, source, "", start, flags) + return compiling.eval(space, w_code, w_globals, w_locals) + @cpython_api([FILEP, CONST_STRING, rffi.INT_real, PyObject, PyObject], PyObject) def PyRun_File(space, fp, filename, start, w_globals, w_locals): """This is a simplified interface to PyRun_FileExFlags() below, leaving @@ -150,7 +187,7 @@ @cpython_api([rffi.CCHARP, rffi.CCHARP, rffi.INT_real, PyCompilerFlagsPtr], PyObject) -def Py_CompileStringFlags(space, source, filename, start, flags): +def Py_CompileStringFlags(space, source, filename, start, flagsptr): """Parse and compile the Python source code in str, returning the resulting code object. The start token is given by start; this can be used to constrain the code which can be compiled and should @@ -160,7 +197,30 @@ returns NULL if the code cannot be parsed or compiled.""" source = rffi.charp2str(source) filename = rffi.charp2str(filename) - if flags: - raise OperationError(space.w_NotImplementedError, space.wrap( - "cpyext Py_CompileStringFlags does not accept flags")) - return compile_string(space, source, filename, start) + if flagsptr: + flags = rffi.cast(lltype.Signed, flagsptr.c_cf_flags) + else: + flags = 0 + return compile_string(space, source, filename, start, flags) + + at cpython_api([PyCompilerFlagsPtr], rffi.INT_real, error=CANNOT_FAIL) +def PyEval_MergeCompilerFlags(space, cf): + """This function changes the flags of the current evaluation + frame, and returns true on success, false on failure.""" + flags = rffi.cast(lltype.Signed, cf.c_cf_flags) + result = flags != 0 + current_frame = space.getexecutioncontext().gettopframe_nohidden() + if current_frame: + codeflags = current_frame.pycode.co_flags + compilerflags = codeflags & PyCF_MASK + if 
compilerflags: + result = 1 + flags |= compilerflags + # No future keyword at the moment + # if codeflags & CO_GENERATOR_ALLOWED: + # result = 1 + # flags |= CO_GENERATOR_ALLOWED + cf.c_cf_flags = rffi.cast(rffi.INT, flags) + return result + + diff --git a/pypy/module/cpyext/funcobject.py b/pypy/module/cpyext/funcobject.py --- a/pypy/module/cpyext/funcobject.py +++ b/pypy/module/cpyext/funcobject.py @@ -1,6 +1,6 @@ from pypy.rpython.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( - PyObjectFields, generic_cpy_call, CONST_STRING, + PyObjectFields, generic_cpy_call, CONST_STRING, CANNOT_FAIL, cpython_api, bootstrap_function, cpython_struct, build_type_checkers) from pypy.module.cpyext.pyobject import ( PyObject, make_ref, from_ref, Py_DecRef, make_typedescr, borrow_from) @@ -48,6 +48,7 @@ PyFunction_Check, PyFunction_CheckExact = build_type_checkers("Function", Function) PyMethod_Check, PyMethod_CheckExact = build_type_checkers("Method", Method) +PyCode_Check, PyCode_CheckExact = build_type_checkers("Code", PyCode) def function_attach(space, py_obj, w_obj): py_func = rffi.cast(PyFunctionObject, py_obj) @@ -167,3 +168,9 @@ freevars=[], cellvars=[])) + at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) +def PyCode_GetNumFree(space, w_co): + """Return the number of free variables in co.""" + co = space.interp_w(PyCode, w_co) + return len(co.co_freevars) + diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h --- a/pypy/module/cpyext/include/Python.h +++ b/pypy/module/cpyext/include/Python.h @@ -113,6 +113,7 @@ #include "compile.h" #include "frameobject.h" #include "eval.h" +#include "pymath.h" #include "pymem.h" #include "pycobject.h" #include "pycapsule.h" diff --git a/pypy/module/cpyext/include/code.h b/pypy/module/cpyext/include/code.h --- a/pypy/module/cpyext/include/code.h +++ b/pypy/module/cpyext/include/code.h @@ -13,13 +13,19 @@ /* Masks for co_flags above */ /* These values are also in 
funcobject.py */ -#define CO_OPTIMIZED 0x0001 -#define CO_NEWLOCALS 0x0002 -#define CO_VARARGS 0x0004 -#define CO_VARKEYWORDS 0x0008 +#define CO_OPTIMIZED 0x0001 +#define CO_NEWLOCALS 0x0002 +#define CO_VARARGS 0x0004 +#define CO_VARKEYWORDS 0x0008 #define CO_NESTED 0x0010 #define CO_GENERATOR 0x0020 +#define CO_FUTURE_DIVISION 0x02000 +#define CO_FUTURE_ABSOLUTE_IMPORT 0x04000 +#define CO_FUTURE_WITH_STATEMENT 0x08000 +#define CO_FUTURE_PRINT_FUNCTION 0x10000 +#define CO_FUTURE_UNICODE_LITERALS 0x20000 + #ifdef __cplusplus } #endif diff --git a/pypy/module/cpyext/include/intobject.h b/pypy/module/cpyext/include/intobject.h --- a/pypy/module/cpyext/include/intobject.h +++ b/pypy/module/cpyext/include/intobject.h @@ -7,6 +7,11 @@ extern "C" { #endif +typedef struct { + PyObject_HEAD + long ob_ival; +} PyIntObject; + #ifdef __cplusplus } #endif diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h --- a/pypy/module/cpyext/include/object.h +++ b/pypy/module/cpyext/include/object.h @@ -56,6 +56,8 @@ #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) +#define _Py_ForgetReference(ob) /* nothing */ + #define Py_None (&_Py_NoneStruct) /* diff --git a/pypy/module/cpyext/include/pymath.h b/pypy/module/cpyext/include/pymath.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/pymath.h @@ -0,0 +1,20 @@ +#ifndef Py_PYMATH_H +#define Py_PYMATH_H + +/************************************************************************** +Symbols and macros to supply platform-independent interfaces to mathematical +functions and constants +**************************************************************************/ + +/* HUGE_VAL is supposed to expand to a positive double infinity. Python + * uses Py_HUGE_VAL instead because some platforms are broken in this + * respect. We used to embed code in pyport.h to try to worm around that, + * but different platforms are broken in conflicting ways. 
If you're on + * a platform where HUGE_VAL is defined incorrectly, fiddle your Python + * config to #define Py_HUGE_VAL to something that works on your platform. + */ +#ifndef Py_HUGE_VAL +#define Py_HUGE_VAL HUGE_VAL +#endif + +#endif /* Py_PYMATH_H */ diff --git a/pypy/module/cpyext/include/pythonrun.h b/pypy/module/cpyext/include/pythonrun.h --- a/pypy/module/cpyext/include/pythonrun.h +++ b/pypy/module/cpyext/include/pythonrun.h @@ -19,6 +19,14 @@ int cf_flags; /* bitmask of CO_xxx flags relevant to future */ } PyCompilerFlags; +#define PyCF_MASK (CO_FUTURE_DIVISION | CO_FUTURE_ABSOLUTE_IMPORT | \ + CO_FUTURE_WITH_STATEMENT | CO_FUTURE_PRINT_FUNCTION | \ + CO_FUTURE_UNICODE_LITERALS) +#define PyCF_MASK_OBSOLETE (CO_NESTED) +#define PyCF_SOURCE_IS_UTF8 0x0100 +#define PyCF_DONT_IMPLY_DEDENT 0x0200 +#define PyCF_ONLY_AST 0x0400 + #define Py_CompileString(str, filename, start) Py_CompileStringFlags(str, filename, start, NULL) #ifdef __cplusplus diff --git a/pypy/module/cpyext/include/pythread.h b/pypy/module/cpyext/include/pythread.h --- a/pypy/module/cpyext/include/pythread.h +++ b/pypy/module/cpyext/include/pythread.h @@ -3,8 +3,26 @@ #define WITH_THREAD +#ifdef __cplusplus +extern "C" { +#endif + typedef void *PyThread_type_lock; #define WAIT_LOCK 1 #define NOWAIT_LOCK 0 +/* Thread Local Storage (TLS) API */ +PyAPI_FUNC(int) PyThread_create_key(void); +PyAPI_FUNC(void) PyThread_delete_key(int); +PyAPI_FUNC(int) PyThread_set_key_value(int, void *); +PyAPI_FUNC(void *) PyThread_get_key_value(int); +PyAPI_FUNC(void) PyThread_delete_key_value(int key); + +/* Cleanup after a fork */ +PyAPI_FUNC(void) PyThread_ReInitTLS(void); + +#ifdef __cplusplus +} #endif + +#endif diff --git a/pypy/module/cpyext/intobject.py b/pypy/module/cpyext/intobject.py --- a/pypy/module/cpyext/intobject.py +++ b/pypy/module/cpyext/intobject.py @@ -2,11 +2,37 @@ from pypy.rpython.lltypesystem import rffi, lltype from pypy.interpreter.error import OperationError from pypy.module.cpyext.api 
import ( - cpython_api, build_type_checkers, PyObject, - CONST_STRING, CANNOT_FAIL, Py_ssize_t) + cpython_api, cpython_struct, build_type_checkers, bootstrap_function, + PyObject, PyObjectFields, CONST_STRING, CANNOT_FAIL, Py_ssize_t) +from pypy.module.cpyext.pyobject import ( + make_typedescr, track_reference, RefcountState, from_ref) from pypy.rlib.rarithmetic import r_uint, intmask, LONG_TEST +from pypy.objspace.std.intobject import W_IntObject import sys +PyIntObjectStruct = lltype.ForwardReference() +PyIntObject = lltype.Ptr(PyIntObjectStruct) +PyIntObjectFields = PyObjectFields + \ + (("ob_ival", rffi.LONG),) +cpython_struct("PyIntObject", PyIntObjectFields, PyIntObjectStruct) + + at bootstrap_function +def init_intobject(space): + "Type description of PyIntObject" + make_typedescr(space.w_int.instancetypedef, + basestruct=PyIntObject.TO, + realize=int_realize) + +def int_realize(space, obj): + intval = rffi.cast(lltype.Signed, rffi.cast(PyIntObject, obj).c_ob_ival) + w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type)) + w_obj = space.allocate_instance(W_IntObject, w_type) + w_obj.__init__(intval) + track_reference(space, obj, w_obj) + state = space.fromcache(RefcountState) + state.set_lifeline(w_obj, obj) + return w_obj + PyInt_Check, PyInt_CheckExact = build_type_checkers("Int") @cpython_api([], lltype.Signed, error=CANNOT_FAIL) diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -193,7 +193,7 @@ if not obj: PyErr_NoMemory(space) obj.c_ob_type = type - _Py_NewReference(space, obj) + obj.c_ob_refcnt = 1 return obj @cpython_api([PyVarObject, PyTypeObjectPtr, Py_ssize_t], PyObject) diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -17,6 +17,7 @@ class BaseCpyTypedescr(object): basestruct = PyObject.TO + W_BaseObject = W_ObjectObject def get_dealloc(self, 
space): from pypy.module.cpyext.typeobject import subtype_dealloc @@ -51,10 +52,14 @@ def attach(self, space, pyobj, w_obj): pass - def realize(self, space, ref): - # For most types, a reference cannot exist without - # a real interpreter object - raise InvalidPointerException(str(ref)) + def realize(self, space, obj): + w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type)) + w_obj = space.allocate_instance(self.W_BaseObject, w_type) + track_reference(space, obj, w_obj) + if w_type is not space.gettypefor(self.W_BaseObject): + state = space.fromcache(RefcountState) + state.set_lifeline(w_obj, obj) + return w_obj typedescr_cache = {} @@ -369,13 +374,7 @@ obj.c_ob_refcnt = 1 w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type)) assert isinstance(w_type, W_TypeObject) - if w_type.is_cpytype(): - w_obj = space.allocate_instance(W_ObjectObject, w_type) - track_reference(space, obj, w_obj) - state = space.fromcache(RefcountState) - state.set_lifeline(w_obj, obj) - else: - assert False, "Please add more cases in _Py_NewReference()" + get_typedescr(w_type.instancetypedef).realize(space, obj) def _Py_Dealloc(space, obj): from pypy.module.cpyext.api import generic_cpy_call_dont_decref diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -10,7 +10,7 @@ [('next', PyInterpreterState)], PyInterpreterStateStruct) PyThreadState = lltype.Ptr(cpython_struct( - "PyThreadState", + "PyThreadState", [('interp', PyInterpreterState), ('dict', PyObject), ])) @@ -19,12 +19,15 @@ def PyEval_SaveThread(space): """Release the global interpreter lock (if it has been created and thread support is enabled) and reset the thread state to NULL, returning the - previous thread state (which is not NULL except in PyPy). If the lock has been created, + previous thread state. If the lock has been created, the current thread must have acquired it. 
(This function is available even when thread support is disabled at compile time.)""" + state = space.fromcache(InterpreterState) if rffi.aroundstate.before: rffi.aroundstate.before() - return lltype.nullptr(PyThreadState.TO) + tstate = state.swap_thread_state( + space, lltype.nullptr(PyThreadState.TO)) + return tstate @cpython_api([PyThreadState], lltype.Void) def PyEval_RestoreThread(space, tstate): @@ -35,6 +38,8 @@ when thread support is disabled at compile time.)""" if rffi.aroundstate.after: rffi.aroundstate.after() + state = space.fromcache(InterpreterState) + state.swap_thread_state(space, tstate) @cpython_api([], lltype.Void) def PyEval_InitThreads(space): @@ -67,28 +72,91 @@ dealloc=ThreadState_dealloc) from pypy.interpreter.executioncontext import ExecutionContext + +# Keep track of the ThreadStateCapsule for a particular execution context. The +# default is for new execution contexts not to have one; it is allocated on the +# first cpyext-based request for it. ExecutionContext.cpyext_threadstate = ThreadStateCapsule(None) +# Also keep track of whether it has been initialized yet or not (None is a valid +# PyThreadState for an execution context to have, when the GIL has been +# released, so a check against that can't be used to determine the need for +# initialization). +ExecutionContext.cpyext_initialized_threadstate = False + +def cleanup_cpyext_state(self): + try: + del self.cpyext_threadstate + except AttributeError: + pass + self.cpyext_initialized_threadstate = False +ExecutionContext.cleanup_cpyext_state = cleanup_cpyext_state + class InterpreterState(object): def __init__(self, space): self.interpreter_state = lltype.malloc( PyInterpreterState.TO, flavor='raw', zero=True, immortal=True) def new_thread_state(self, space): + """ + Create a new ThreadStateCapsule to hold the PyThreadState for a + particular execution context. + + :param space: A space. 
+ + :returns: A new ThreadStateCapsule holding a newly allocated + PyThreadState and referring to this interpreter state. + """ capsule = ThreadStateCapsule(space) ts = capsule.memory ts.c_interp = self.interpreter_state ts.c_dict = make_ref(space, space.newdict()) return capsule + def get_thread_state(self, space): + """ + Get the current PyThreadState for the current execution context. + + :param space: A space. + + :returns: The current PyThreadState for the current execution context, + or None if it does not have one. + """ ec = space.getexecutioncontext() return self._get_thread_state(space, ec).memory + + def swap_thread_state(self, space, tstate): + """ + Replace the current thread state of the current execution context with a + new thread state. + + :param space: The space. + + :param tstate: The new PyThreadState for the current execution context. + + :returns: The old thread state for the current execution context, either + None or a PyThreadState. + """ + ec = space.getexecutioncontext() + capsule = self._get_thread_state(space, ec) + old_tstate = capsule.memory + capsule.memory = tstate + return old_tstate + def _get_thread_state(self, space, ec): - if ec.cpyext_threadstate.memory == lltype.nullptr(PyThreadState.TO): + """ + Get the ThreadStateCapsule for the given execution context, possibly + creating a new one if it does not already have one. + + :param space: The space. + :param ec: The ExecutionContext of which to get the thread state. + :returns: The ThreadStateCapsule for the given execution context. + """ + if not ec.cpyext_initialized_threadstate: ec.cpyext_threadstate = self.new_thread_state(space) - + ec.cpyext_initialized_threadstate = True return ec.cpyext_threadstate @cpython_api([], PyThreadState, error=CANNOT_FAIL) @@ -105,13 +173,8 @@ def PyThreadState_Swap(space, tstate): """Swap the current thread state with the thread state given by the argument tstate, which may be NULL. 
The global interpreter lock must be held.""" - # All cpyext calls release and acquire the GIL, so this function has no - # side-effects - if tstate: - return lltype.nullptr(PyThreadState.TO) - else: - state = space.fromcache(InterpreterState) - return state.get_thread_state(space) + state = space.fromcache(InterpreterState) + return state.swap_thread_state(space, tstate) @cpython_api([PyThreadState], lltype.Void) def PyEval_AcquireThread(space, tstate): diff --git a/pypy/module/cpyext/src/getargs.c b/pypy/module/cpyext/src/getargs.c --- a/pypy/module/cpyext/src/getargs.c +++ b/pypy/module/cpyext/src/getargs.c @@ -23,16 +23,33 @@ #define FLAG_COMPAT 1 #define FLAG_SIZE_T 2 +typedef int (*destr_t)(PyObject *, void *); + + +/* Keep track of "objects" that have been allocated or initialized and + which will need to be deallocated or cleaned up somehow if overall + parsing fails. +*/ +typedef struct { + void *item; + destr_t destructor; +} freelistentry_t; + +typedef struct { + int first_available; + freelistentry_t *entries; +} freelist_t; + /* Forward */ static int vgetargs1(PyObject *, const char *, va_list *, int); static void seterror(int, const char *, int *, const char *, const char *); static char *convertitem(PyObject *, const char **, va_list *, int, int *, - char *, size_t, PyObject **); + char *, size_t, freelist_t *); static char *converttuple(PyObject *, const char **, va_list *, int, - int *, char *, size_t, int, PyObject **); + int *, char *, size_t, int, freelist_t *); static char *convertsimple(PyObject *, const char **, va_list *, int, char *, - size_t, PyObject **); + size_t, freelist_t *); static Py_ssize_t convertbuffer(PyObject *, void **p, char **); static int getbuffer(PyObject *, Py_buffer *, char**); @@ -129,57 +146,56 @@ /* Handle cleanup of allocated memory in case of exception */ -static void -cleanup_ptr(void *ptr) +static int +cleanup_ptr(PyObject *self, void *ptr) { - PyMem_FREE(ptr); -} - -static void -cleanup_buffer(void *ptr) -{ - 
PyBuffer_Release((Py_buffer *) ptr); + if (ptr) { + PyMem_FREE(ptr); + } + return 0; } static int -addcleanup(void *ptr, PyObject **freelist, void (*destr)(void *)) +cleanup_buffer(PyObject *self, void *ptr) { - PyObject *cobj; - if (!*freelist) { - *freelist = PyList_New(0); - if (!*freelist) { - destr(ptr); - return -1; - } - } - cobj = PyCObject_FromVoidPtr(ptr, destr); - if (!cobj) { - destr(ptr); - return -1; - } - if (PyList_Append(*freelist, cobj)) { - Py_DECREF(cobj); - return -1; - } - Py_DECREF(cobj); - return 0; + Py_buffer *buf = (Py_buffer *)ptr; + if (buf) { + PyBuffer_Release(buf); + } + return 0; } static int -cleanreturn(int retval, PyObject *freelist) +addcleanup(void *ptr, freelist_t *freelist, destr_t destructor) { - if (freelist && retval != 0) { - /* We were successful, reset the destructors so that they - don't get called. */ - Py_ssize_t len = PyList_GET_SIZE(freelist), i; - for (i = 0; i < len; i++) - ((PyCObject *) PyList_GET_ITEM(freelist, i)) - ->destructor = NULL; - } - Py_XDECREF(freelist); - return retval; + int index; + + index = freelist->first_available; + freelist->first_available += 1; + + freelist->entries[index].item = ptr; + freelist->entries[index].destructor = destructor; + + return 0; } +static int +cleanreturn(int retval, freelist_t *freelist) +{ + int index; + + if (retval == 0) { + /* A failure occurred, therefore execute all of the cleanup + functions. 
+ */ + for (index = 0; index < freelist->first_available; ++index) { + freelist->entries[index].destructor(NULL, + freelist->entries[index].item); + } + } + PyMem_Free(freelist->entries); + return retval; +} static int vgetargs1(PyObject *args, const char *format, va_list *p_va, int flags) @@ -195,7 +211,7 @@ const char *formatsave = format; Py_ssize_t i, len; char *msg; - PyObject *freelist = NULL; + freelist_t freelist = {0, NULL}; int compat = flags & FLAG_COMPAT; assert(compat || (args != (PyObject*)NULL)); @@ -251,16 +267,18 @@ format = formatsave; + freelist.entries = PyMem_New(freelistentry_t, max); + if (compat) { if (max == 0) { if (args == NULL) - return 1; + return cleanreturn(1, &freelist); PyOS_snprintf(msgbuf, sizeof(msgbuf), "%.200s%s takes no arguments", fname==NULL ? "function" : fname, fname==NULL ? "" : "()"); PyErr_SetString(PyExc_TypeError, msgbuf); - return 0; + return cleanreturn(0, &freelist); } else if (min == 1 && max == 1) { if (args == NULL) { @@ -269,26 +287,26 @@ fname==NULL ? "function" : fname, fname==NULL ? 
"" : "()"); PyErr_SetString(PyExc_TypeError, msgbuf); - return 0; + return cleanreturn(0, &freelist); } msg = convertitem(args, &format, p_va, flags, levels, msgbuf, sizeof(msgbuf), &freelist); if (msg == NULL) - return cleanreturn(1, freelist); + return cleanreturn(1, &freelist); seterror(levels[0], msg, levels+1, fname, message); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } else { PyErr_SetString(PyExc_SystemError, "old style getargs format uses new features"); - return 0; + return cleanreturn(0, &freelist); } } if (!PyTuple_Check(args)) { PyErr_SetString(PyExc_SystemError, "new style getargs format but argument is not a tuple"); - return 0; + return cleanreturn(0, &freelist); } len = PyTuple_GET_SIZE(args); @@ -308,7 +326,7 @@ message = msgbuf; } PyErr_SetString(PyExc_TypeError, message); - return 0; + return cleanreturn(0, &freelist); } for (i = 0; i < len; i++) { @@ -319,7 +337,7 @@ sizeof(msgbuf), &freelist); if (msg) { seterror(i+1, msg, levels, fname, message); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } } @@ -328,10 +346,10 @@ *format != '|' && *format != ':' && *format != ';') { PyErr_Format(PyExc_SystemError, "bad format string: %.200s", formatsave); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } - return cleanreturn(1, freelist); + return cleanreturn(1, &freelist); } @@ -395,7 +413,7 @@ static char * converttuple(PyObject *arg, const char **p_format, va_list *p_va, int flags, int *levels, char *msgbuf, size_t bufsize, int toplevel, - PyObject **freelist) + freelist_t *freelist) { int level = 0; int n = 0; @@ -472,7 +490,7 @@ static char * convertitem(PyObject *arg, const char **p_format, va_list *p_va, int flags, - int *levels, char *msgbuf, size_t bufsize, PyObject **freelist) + int *levels, char *msgbuf, size_t bufsize, freelist_t *freelist) { char *msg; const char *format = *p_format; @@ -539,7 +557,7 @@ static char * convertsimple(PyObject *arg, const char **p_format, 
va_list *p_va, int flags, - char *msgbuf, size_t bufsize, PyObject **freelist) + char *msgbuf, size_t bufsize, freelist_t *freelist) { /* For # codes */ #define FETCH_SIZE int *q=NULL;Py_ssize_t *q2=NULL;\ @@ -1501,7 +1519,9 @@ const char *fname, *msg, *custom_msg, *keyword; int min = INT_MAX; int i, len, nargs, nkeywords; - PyObject *freelist = NULL, *current_arg; + PyObject *current_arg; + freelist_t freelist = {0, NULL}; + assert(args != NULL && PyTuple_Check(args)); assert(keywords == NULL || PyDict_Check(keywords)); @@ -1525,6 +1545,8 @@ for (len=0; kwlist[len]; len++) continue; + freelist.entries = PyMem_New(freelistentry_t, len); + nargs = PyTuple_GET_SIZE(args); nkeywords = (keywords == NULL) ? 0 : PyDict_Size(keywords); if (nargs + nkeywords > len) { @@ -1535,7 +1557,7 @@ len, (len == 1) ? "" : "s", nargs + nkeywords); - return 0; + return cleanreturn(0, &freelist); } /* convert tuple args and keyword args in same loop, using kwlist to drive process */ @@ -1549,7 +1571,7 @@ PyErr_Format(PyExc_RuntimeError, "More keyword list entries (%d) than " "format specifiers (%d)", len, i); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } current_arg = NULL; if (nkeywords) { @@ -1563,11 +1585,11 @@ "Argument given by name ('%s') " "and position (%d)", keyword, i+1); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } } else if (nkeywords && PyErr_Occurred()) - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); else if (i < nargs) current_arg = PyTuple_GET_ITEM(args, i); @@ -1576,7 +1598,7 @@ levels, msgbuf, sizeof(msgbuf), &freelist); if (msg) { seterror(i+1, msg, levels, fname, custom_msg); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } continue; } @@ -1585,14 +1607,14 @@ PyErr_Format(PyExc_TypeError, "Required argument " "'%s' (pos %d) not found", keyword, i+1); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } /* current code reports success when all 
required args * fulfilled and no keyword args left, with no further * validation. XXX Maybe skip this in debug build ? */ if (!nkeywords) - return cleanreturn(1, freelist); + return cleanreturn(1, &freelist); /* We are into optional args, skip thru to any remaining * keyword args */ @@ -1600,7 +1622,7 @@ if (msg) { PyErr_Format(PyExc_RuntimeError, "%s: '%s'", msg, format); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } } @@ -1608,7 +1630,7 @@ PyErr_Format(PyExc_RuntimeError, "more argument specifiers than keyword list entries " "(remaining format:'%s')", format); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } /* make sure there are no extraneous keyword arguments */ @@ -1621,7 +1643,7 @@ if (!PyString_Check(key)) { PyErr_SetString(PyExc_TypeError, "keywords must be strings"); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } ks = PyString_AsString(key); for (i = 0; i < len; i++) { @@ -1635,12 +1657,12 @@ "'%s' is an invalid keyword " "argument for this function", ks); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } } } - return cleanreturn(1, freelist); + return cleanreturn(1, &freelist); } diff --git a/pypy/module/cpyext/src/thread.c b/pypy/module/cpyext/src/thread.c new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/src/thread.c @@ -0,0 +1,313 @@ +#include +#include "pythread.h" + +/* ------------------------------------------------------------------------ +Per-thread data ("key") support. + +Use PyThread_create_key() to create a new key. This is typically shared +across threads. + +Use PyThread_set_key_value(thekey, value) to associate void* value with +thekey in the current thread. Each thread has a distinct mapping of thekey +to a void* value. Caution: if the current thread already has a mapping +for thekey, value is ignored. + +Use PyThread_get_key_value(thekey) to retrieve the void* value associated +with thekey in the current thread. 
This returns NULL if no value is +associated with thekey in the current thread. + +Use PyThread_delete_key_value(thekey) to forget the current thread's associated +value for thekey. PyThread_delete_key(thekey) forgets the values associated +with thekey across *all* threads. + +While some of these functions have error-return values, none set any +Python exception. + +None of the functions does memory management on behalf of the void* values. +You need to allocate and deallocate them yourself. If the void* values +happen to be PyObject*, these functions don't do refcount operations on +them either. + +The GIL does not need to be held when calling these functions; they supply +their own locking. This isn't true of PyThread_create_key(), though (see +next paragraph). + +There's a hidden assumption that PyThread_create_key() will be called before +any of the other functions are called. There's also a hidden assumption +that calls to PyThread_create_key() are serialized externally. +------------------------------------------------------------------------ */ + +#ifdef MS_WINDOWS +#include + +/* use native Windows TLS functions */ +#define Py_HAVE_NATIVE_TLS + +int +PyThread_create_key(void) +{ + return (int) TlsAlloc(); +} + +void +PyThread_delete_key(int key) +{ + TlsFree(key); +} + +/* We must be careful to emulate the strange semantics implemented in thread.c, + * where the value is only set if it hasn't been set before. + */ +int +PyThread_set_key_value(int key, void *value) +{ + BOOL ok; + void *oldvalue; + + assert(value != NULL); + oldvalue = TlsGetValue(key); + if (oldvalue != NULL) + /* ignore value if already set */ + return 0; + ok = TlsSetValue(key, value); + if (!ok) + return -1; + return 0; +} + +void * +PyThread_get_key_value(int key) +{ + /* because TLS is used in the Py_END_ALLOW_THREAD macro, + * it is necessary to preserve the windows error state, because + * it is assumed to be preserved across the call to the macro. 
+ * Ideally, the macro should be fixed, but it is simpler to + * do it here. + */ + DWORD error = GetLastError(); + void *result = TlsGetValue(key); + SetLastError(error); + return result; +} + +void +PyThread_delete_key_value(int key) +{ + /* NULL is used as "key missing", and it is also the default + * given by TlsGetValue() if nothing has been set yet. + */ + TlsSetValue(key, NULL); +} + +/* reinitialization of TLS is not necessary after fork when using + * the native TLS functions. And forking isn't supported on Windows either. + */ +void +PyThread_ReInitTLS(void) +{} + +#else /* MS_WINDOWS */ + +/* A singly-linked list of struct key objects remembers all the key->value + * associations. File static keyhead heads the list. keymutex is used + * to enforce exclusion internally. + */ +struct key { + /* Next record in the list, or NULL if this is the last record. */ + struct key *next; + + /* The thread id, according to PyThread_get_thread_ident(). */ + long id; + + /* The key and its associated value. */ + int key; + void *value; +}; + +static struct key *keyhead = NULL; +static PyThread_type_lock keymutex = NULL; +static int nkeys = 0; /* PyThread_create_key() hands out nkeys+1 next */ + +/* Internal helper. + * If the current thread has a mapping for key, the appropriate struct key* + * is returned. NB: value is ignored in this case! + * If there is no mapping for key in the current thread, then: + * If value is NULL, NULL is returned. + * Else a mapping of key to value is created for the current thread, + * and a pointer to a new struct key* is returned; except that if + * malloc() can't find room for a new struct key*, NULL is returned. + * So when value==NULL, this acts like a pure lookup routine, and when + * value!=NULL, this acts like dict.setdefault(), returning an existing + * mapping if one exists, else creating a new mapping. + * + * Caution: this used to be too clever, trying to hold keymutex only + * around the "p->next = keyhead; keyhead = p" pair. 
That allowed + * another thread to mutate the list, via key deletion, concurrent with + * find_key() crawling over the list. Hilarity ensued. For example, when + * the for-loop here does "p = p->next", p could end up pointing at a + * record that PyThread_delete_key_value() was concurrently free()'ing. + * That could lead to anything, from failing to find a key that exists, to + * segfaults. Now we lock the whole routine. + */ +static struct key * +find_key(int key, void *value) +{ + struct key *p, *prev_p; + long id = PyThread_get_thread_ident(); + + if (!keymutex) + return NULL; + PyThread_acquire_lock(keymutex, 1); + prev_p = NULL; + for (p = keyhead; p != NULL; p = p->next) { + if (p->id == id && p->key == key) + goto Done; + /* Sanity check. These states should never happen but if + * they do we must abort. Otherwise we'll end up spinning in + * in a tight loop with the lock held. A similar check is done + * in pystate.c tstate_delete_common(). */ + if (p == prev_p) + Py_FatalError("tls find_key: small circular list(!)"); + prev_p = p; + if (p->next == keyhead) + Py_FatalError("tls find_key: circular list(!)"); + } + if (value == NULL) { + assert(p == NULL); + goto Done; + } + p = (struct key *)malloc(sizeof(struct key)); + if (p != NULL) { + p->id = id; + p->key = key; + p->value = value; + p->next = keyhead; + keyhead = p; + } + Done: + PyThread_release_lock(keymutex); + return p; +} + +/* Return a new key. This must be called before any other functions in + * this family, and callers must arrange to serialize calls to this + * function. No violations are detected. + */ +int +PyThread_create_key(void) +{ + /* All parts of this function are wrong if it's called by multiple + * threads simultaneously. + */ + if (keymutex == NULL) + keymutex = PyThread_allocate_lock(); + return ++nkeys; +} + +/* Forget the associations for key across *all* threads. 
*/ +void +PyThread_delete_key(int key) +{ + struct key *p, **q; + + PyThread_acquire_lock(keymutex, 1); + q = &keyhead; + while ((p = *q) != NULL) { + if (p->key == key) { + *q = p->next; + free((void *)p); + /* NB This does *not* free p->value! */ + } + else + q = &p->next; + } + PyThread_release_lock(keymutex); +} + +/* Confusing: If the current thread has an association for key, + * value is ignored, and 0 is returned. Else an attempt is made to create + * an association of key to value for the current thread. 0 is returned + * if that succeeds, but -1 is returned if there's not enough memory + * to create the association. value must not be NULL. + */ +int +PyThread_set_key_value(int key, void *value) +{ + struct key *p; + + assert(value != NULL); + p = find_key(key, value); + if (p == NULL) + return -1; + else + return 0; +} + +/* Retrieve the value associated with key in the current thread, or NULL + * if the current thread doesn't have an association for key. + */ +void * +PyThread_get_key_value(int key) +{ + struct key *p = find_key(key, NULL); + + if (p == NULL) + return NULL; + else + return p->value; +} + +/* Forget the current thread's association for key, if any. */ +void +PyThread_delete_key_value(int key) +{ + long id = PyThread_get_thread_ident(); + struct key *p, **q; + + PyThread_acquire_lock(keymutex, 1); + q = &keyhead; + while ((p = *q) != NULL) { + if (p->key == key && p->id == id) { + *q = p->next; + free((void *)p); + /* NB This does *not* free p->value! */ + break; + } + else + q = &p->next; + } + PyThread_release_lock(keymutex); +} + +/* Forget everything not associated with the current thread id. + * This function is called from PyOS_AfterFork(). It is necessary + * because other thread ids which were in use at the time of the fork + * may be reused for new threads created in the forked process. 
+ */ +void +PyThread_ReInitTLS(void) +{ + long id = PyThread_get_thread_ident(); + struct key *p, **q; + + if (!keymutex) + return; + + /* As with interpreter_lock in PyEval_ReInitThreads() + we just create a new lock without freeing the old one */ + keymutex = PyThread_allocate_lock(); + + /* Delete all keys which do not match the current thread id */ + q = &keyhead; + while ((p = *q) != NULL) { + if (p->id != id) { + *q = p->next; + free((void *)p); + /* NB This does *not* free p->value! */ + } + else + q = &p->next; + } +} + +#endif /* !MS_WINDOWS */ diff --git a/pypy/module/cpyext/stringobject.py b/pypy/module/cpyext/stringobject.py --- a/pypy/module/cpyext/stringobject.py +++ b/pypy/module/cpyext/stringobject.py @@ -130,6 +130,11 @@ @cpython_api([PyObject], rffi.CCHARP, error=0) def PyString_AsString(space, ref): + if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_str: + pass # typecheck returned "ok" without forcing 'ref' at all + elif not PyString_Check(space, ref): # otherwise, use the alternate way + raise OperationError(space.w_TypeError, space.wrap( + "PyString_AsString only support strings")) ref_str = rffi.cast(PyStringObject, ref) if not ref_str.c_buffer: # copy string buffer diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py --- a/pypy/module/cpyext/stubs.py +++ b/pypy/module/cpyext/stubs.py @@ -182,16 +182,6 @@ used as the positional and keyword parameters to the object's constructor.""" raise NotImplementedError - at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) -def PyCode_Check(space, co): - """Return true if co is a code object""" - raise NotImplementedError - - at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) -def PyCode_GetNumFree(space, co): - """Return the number of free variables in co.""" - raise NotImplementedError - @cpython_api([PyObject], rffi.INT_real, error=-1) def PyCodec_Register(space, search_function): """Register a new codec search function. 
@@ -1293,28 +1283,6 @@ that haven't been explicitly destroyed at that point.""" raise NotImplementedError - at cpython_api([rffi.VOIDP], lltype.Void) -def Py_AddPendingCall(space, func): - """Post a notification to the Python main thread. If successful, func will - be called with the argument arg at the earliest convenience. func will be - called having the global interpreter lock held and can thus use the full - Python API and can take any action such as setting object attributes to - signal IO completion. It must return 0 on success, or -1 signalling an - exception. The notification function won't be interrupted to perform another - asynchronous notification recursively, but it can still be interrupted to - switch threads if the global interpreter lock is released, for example, if it - calls back into Python code. - - This function returns 0 on success in which case the notification has been - scheduled. Otherwise, for example if the notification buffer is full, it - returns -1 without setting any exception. - - This function can be called on any thread, be it a Python thread or some - other system thread. If it is a Python thread, it doesn't matter if it holds - the global interpreter lock or not. - """ - raise NotImplementedError - @cpython_api([Py_tracefunc, PyObject], lltype.Void) def PyEval_SetProfile(space, func, obj): """Set the profiler function to func. 
The obj parameter is passed to the @@ -1875,26 +1843,6 @@ """ raise NotImplementedError - at cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) -def Py_UNICODE_ISTITLE(space, ch): - """Return 1 or 0 depending on whether ch is a titlecase character.""" - raise NotImplementedError - - at cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) -def Py_UNICODE_ISDIGIT(space, ch): - """Return 1 or 0 depending on whether ch is a digit character.""" - raise NotImplementedError - - at cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) -def Py_UNICODE_ISNUMERIC(space, ch): - """Return 1 or 0 depending on whether ch is a numeric character.""" - raise NotImplementedError - - at cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) -def Py_UNICODE_ISALPHA(space, ch): - """Return 1 or 0 depending on whether ch is an alphabetic character.""" - raise NotImplementedError - @cpython_api([rffi.CCHARP], PyObject) def PyUnicode_FromFormat(space, format): """Take a C printf()-style format string and a variable number of @@ -2339,17 +2287,6 @@ use the default error handling.""" raise NotImplementedError - at cpython_api([PyObject, PyObject, Py_ssize_t, Py_ssize_t, rffi.INT_real], rffi.INT_real, error=-1) -def PyUnicode_Tailmatch(space, str, substr, start, end, direction): - """Return 1 if substr matches str*[*start:end] at the given tail end - (direction == -1 means to do a prefix match, direction == 1 a suffix match), - 0 otherwise. Return -1 if an error occurred. - - This function used an int type for start and end. 
This - might require changes in your code for properly supporting 64-bit - systems.""" - raise NotImplementedError - @cpython_api([PyObject, PyObject, Py_ssize_t, Py_ssize_t, rffi.INT_real], Py_ssize_t, error=-2) def PyUnicode_Find(space, str, substr, start, end, direction): """Return the first position of substr in str*[*start:end] using the given @@ -2373,16 +2310,6 @@ properly supporting 64-bit systems.""" raise NotImplementedError - at cpython_api([PyObject, PyObject, PyObject, Py_ssize_t], PyObject) -def PyUnicode_Replace(space, str, substr, replstr, maxcount): - """Replace at most maxcount occurrences of substr in str with replstr and - return the resulting Unicode object. maxcount == -1 means replace all - occurrences. - - This function used an int type for maxcount. This might - require changes in your code for properly supporting 64-bit systems.""" - raise NotImplementedError - @cpython_api([PyObject, PyObject, rffi.INT_real], PyObject) def PyUnicode_RichCompare(space, left, right, op): """Rich compare two unicode strings and return one of the following: @@ -2556,17 +2483,6 @@ source code is read from fp instead of an in-memory string.""" raise NotImplementedError - at cpython_api([rffi.CCHARP, rffi.INT_real, PyObject, PyObject, PyCompilerFlags], PyObject) -def PyRun_StringFlags(space, str, start, globals, locals, flags): - """Execute Python source code from str in the context specified by the - dictionaries globals and locals with the compiler flags specified by - flags. The parameter start specifies the start token that should be used to - parse the source code. 
- - Returns the result of executing the code as a Python object, or NULL if an - exception was raised.""" - raise NotImplementedError - @cpython_api([FILE, rffi.CCHARP, rffi.INT_real, PyObject, PyObject, rffi.INT_real], PyObject) def PyRun_FileEx(space, fp, filename, start, globals, locals, closeit): """This is a simplified interface to PyRun_FileExFlags() below, leaving @@ -2587,13 +2503,6 @@ returns.""" raise NotImplementedError - at cpython_api([PyCodeObject, PyObject, PyObject], PyObject) -def PyEval_EvalCode(space, co, globals, locals): - """This is a simplified interface to PyEval_EvalCodeEx(), with just - the code object, and the dictionaries of global and local variables. - The other arguments are set to NULL.""" - raise NotImplementedError - @cpython_api([PyCodeObject, PyObject, PyObject, PyObjectP, rffi.INT_real, PyObjectP, rffi.INT_real, PyObjectP, rffi.INT_real, PyObject], PyObject) def PyEval_EvalCodeEx(space, co, globals, locals, args, argcount, kws, kwcount, defs, defcount, closure): """Evaluate a precompiled code object, given a particular environment for its @@ -2618,12 +2527,6 @@ throw() methods of generator objects.""" raise NotImplementedError - at cpython_api([PyCompilerFlags], rffi.INT_real, error=CANNOT_FAIL) -def PyEval_MergeCompilerFlags(space, cf): - """This function changes the flags of the current evaluation frame, and returns - true on success, false on failure.""" - raise NotImplementedError - @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) def PyWeakref_Check(space, ob): """Return true if ob is either a reference or proxy object. 
diff --git a/pypy/module/cpyext/stubsactive.py b/pypy/module/cpyext/stubsactive.py --- a/pypy/module/cpyext/stubsactive.py +++ b/pypy/module/cpyext/stubsactive.py @@ -38,3 +38,31 @@ def Py_MakePendingCalls(space): return 0 +pending_call = lltype.Ptr(lltype.FuncType([rffi.VOIDP], rffi.INT_real)) + at cpython_api([pending_call, rffi.VOIDP], rffi.INT_real, error=-1) +def Py_AddPendingCall(space, func, arg): + """Post a notification to the Python main thread. If successful, + func will be called with the argument arg at the earliest + convenience. func will be called having the global interpreter + lock held and can thus use the full Python API and can take any + action such as setting object attributes to signal IO completion. + It must return 0 on success, or -1 signalling an exception. The + notification function won't be interrupted to perform another + asynchronous notification recursively, but it can still be + interrupted to switch threads if the global interpreter lock is + released, for example, if it calls back into Python code. + + This function returns 0 on success in which case the notification + has been scheduled. Otherwise, for example if the notification + buffer is full, it returns -1 without setting any exception. + + This function can be called on any thread, be it a Python thread + or some other system thread. If it is a Python thread, it doesn't + matter if it holds the global interpreter lock or not. 
+ """ + return -1 + +thread_func = lltype.Ptr(lltype.FuncType([rffi.VOIDP], lltype.Void)) + at cpython_api([thread_func, rffi.VOIDP], rffi.INT_real, error=-1) +def PyThread_start_new_thread(space, func, arg): + return -1 diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -106,10 +106,7 @@ del obj import gc; gc.collect() - try: - del space.getexecutioncontext().cpyext_threadstate - except AttributeError: - pass + space.getexecutioncontext().cleanup_cpyext_state() for w_obj in state.non_heaptypes_w: Py_DecRef(space, w_obj) diff --git a/pypy/module/cpyext/test/test_eval.py b/pypy/module/cpyext/test/test_eval.py --- a/pypy/module/cpyext/test/test_eval.py +++ b/pypy/module/cpyext/test/test_eval.py @@ -2,9 +2,10 @@ from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.eval import ( - Py_single_input, Py_file_input, Py_eval_input) + Py_single_input, Py_file_input, Py_eval_input, PyCompilerFlags) from pypy.module.cpyext.api import fopen, fclose, fileno, Py_ssize_tP from pypy.interpreter.gateway import interp2app +from pypy.interpreter.astcompiler import consts from pypy.tool.udir import udir import sys, os @@ -63,6 +64,22 @@ assert space.int_w(w_res) == 10 + def test_evalcode(self, space, api): + w_f = space.appexec([], """(): + def f(*args): + assert isinstance(args, tuple) + return len(args) + 8 + return f + """) + + w_t = space.newtuple([space.wrap(1), space.wrap(2)]) + w_globals = space.newdict() + w_locals = space.newdict() + space.setitem(w_locals, space.wrap("args"), w_t) + w_res = api.PyEval_EvalCode(w_f.code, w_globals, w_locals) + + assert space.int_w(w_res) == 10 + def test_run_simple_string(self, space, api): def run(code): buf = rffi.str2charp(code) @@ -96,6 +113,20 @@ assert 42 * 43 == space.unwrap( 
api.PyObject_GetItem(w_globals, space.wrap("a"))) + def test_run_string_flags(self, space, api): + flags = lltype.malloc(PyCompilerFlags, flavor='raw') + flags.c_cf_flags = rffi.cast(rffi.INT, consts.PyCF_SOURCE_IS_UTF8) + w_globals = space.newdict() + buf = rffi.str2charp("a = u'caf\xc3\xa9'") + try: + api.PyRun_StringFlags(buf, Py_single_input, + w_globals, w_globals, flags) + finally: + rffi.free_charp(buf) + w_a = space.getitem(w_globals, space.wrap("a")) + assert space.unwrap(w_a) == u'caf\xe9' + lltype.free(flags, flavor='raw') + def test_run_file(self, space, api): filepath = udir / "cpyext_test_runfile.py" filepath.write("raise ZeroDivisionError") @@ -256,3 +287,21 @@ print dir(mod) print mod.__dict__ assert mod.f(42) == 47 + + def test_merge_compiler_flags(self): + module = self.import_extension('foo', [ + ("get_flags", "METH_NOARGS", + """ + PyCompilerFlags flags; + flags.cf_flags = 0; + int result = PyEval_MergeCompilerFlags(&flags); + return Py_BuildValue("ii", result, flags.cf_flags); + """), + ]) + assert module.get_flags() == (0, 0) + + ns = {'module':module} + exec """from __future__ import division \nif 1: + def nested_flags(): + return module.get_flags()""" in ns + assert ns['nested_flags']() == (1, 0x2000) # CO_FUTURE_DIVISION diff --git a/pypy/module/cpyext/test/test_funcobject.py b/pypy/module/cpyext/test/test_funcobject.py --- a/pypy/module/cpyext/test/test_funcobject.py +++ b/pypy/module/cpyext/test/test_funcobject.py @@ -81,6 +81,14 @@ rffi.free_charp(filename) rffi.free_charp(funcname) + def test_getnumfree(self, space, api): + w_function = space.appexec([], """(): + a = 5 + def method(x): return a, x + return method + """) + assert api.PyCode_GetNumFree(w_function.code) == 1 + def test_classmethod(self, space, api): w_function = space.appexec([], """(): def method(x): return x diff --git a/pypy/module/cpyext/test/test_intobject.py b/pypy/module/cpyext/test/test_intobject.py --- a/pypy/module/cpyext/test/test_intobject.py +++ 
b/pypy/module/cpyext/test/test_intobject.py @@ -65,4 +65,97 @@ values = module.values() types = [type(x) for x in values] assert types == [int, long, int, int] - + + def test_int_subtype(self): + module = self.import_extension( + 'foo', [ + ("newEnum", "METH_VARARGS", + """ + EnumObject *enumObj; + long intval; + PyObject *name; + + if (!PyArg_ParseTuple(args, "Oi", &name, &intval)) + return NULL; + + PyType_Ready(&Enum_Type); + enumObj = PyObject_New(EnumObject, &Enum_Type); + if (!enumObj) { + return NULL; + } + + enumObj->ob_ival = intval; + Py_INCREF(name); + enumObj->ob_name = name; + + return (PyObject *)enumObj; + """), + ], + prologue=""" + typedef struct + { + PyObject_HEAD + long ob_ival; + PyObject* ob_name; + } EnumObject; + + static void + enum_dealloc(EnumObject *op) + { + Py_DECREF(op->ob_name); + Py_TYPE(op)->tp_free((PyObject *)op); + } + + static PyMemberDef enum_members[] = { + {"name", T_OBJECT, offsetof(EnumObject, ob_name), 0, NULL}, + {NULL} /* Sentinel */ + }; + + PyTypeObject Enum_Type = { + PyObject_HEAD_INIT(0) + /*ob_size*/ 0, + /*tp_name*/ "Enum", + /*tp_basicsize*/ sizeof(EnumObject), + /*tp_itemsize*/ 0, + /*tp_dealloc*/ enum_dealloc, + /*tp_print*/ 0, + /*tp_getattr*/ 0, + /*tp_setattr*/ 0, + /*tp_compare*/ 0, + /*tp_repr*/ 0, + /*tp_as_number*/ 0, + /*tp_as_sequence*/ 0, + /*tp_as_mapping*/ 0, + /*tp_hash*/ 0, + /*tp_call*/ 0, + /*tp_str*/ 0, + /*tp_getattro*/ 0, + /*tp_setattro*/ 0, + /*tp_as_buffer*/ 0, + /*tp_flags*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, + /*tp_doc*/ 0, + /*tp_traverse*/ 0, + /*tp_clear*/ 0, + /*tp_richcompare*/ 0, + /*tp_weaklistoffset*/ 0, + /*tp_iter*/ 0, + /*tp_iternext*/ 0, + /*tp_methods*/ 0, + /*tp_members*/ enum_members, + /*tp_getset*/ 0, + /*tp_base*/ &PyInt_Type, + /*tp_dict*/ 0, + /*tp_descr_get*/ 0, + /*tp_descr_set*/ 0, + /*tp_dictoffset*/ 0, + /*tp_init*/ 0, + /*tp_alloc*/ 0, + /*tp_new*/ 0 + }; + """) + + a = module.newEnum("ULTIMATE_ANSWER", 42) + assert type(a).__name__ == "Enum" + assert 
isinstance(a, int) + assert a == int(a) == 42 + assert a.name == "ULTIMATE_ANSWER" diff --git a/pypy/module/cpyext/test/test_longobject.py b/pypy/module/cpyext/test/test_longobject.py --- a/pypy/module/cpyext/test/test_longobject.py +++ b/pypy/module/cpyext/test/test_longobject.py @@ -101,9 +101,9 @@ space.wrap((2, 7)))): py.test.skip("unsupported before Python 2.7") - assert api._PyLong_Sign(space.wrap(0L)) == 0 - assert api._PyLong_Sign(space.wrap(2L)) == 1 - assert api._PyLong_Sign(space.wrap(-2L)) == -1 + assert api._PyLong_Sign(space.wraplong(0L)) == 0 + assert api._PyLong_Sign(space.wraplong(2L)) == 1 + assert api._PyLong_Sign(space.wraplong(-2L)) == -1 assert api._PyLong_NumBits(space.wrap(0)) == 0 assert api._PyLong_NumBits(space.wrap(1)) == 1 diff --git a/pypy/module/cpyext/test/test_number.py b/pypy/module/cpyext/test/test_number.py --- a/pypy/module/cpyext/test/test_number.py +++ b/pypy/module/cpyext/test/test_number.py @@ -6,12 +6,12 @@ class TestIterator(BaseApiTest): def test_check(self, space, api): assert api.PyIndex_Check(space.wrap(12)) - assert api.PyIndex_Check(space.wrap(-12L)) + assert api.PyIndex_Check(space.wraplong(-12L)) assert not api.PyIndex_Check(space.wrap(12.1)) assert not api.PyIndex_Check(space.wrap('12')) assert api.PyNumber_Check(space.wrap(12)) - assert api.PyNumber_Check(space.wrap(-12L)) + assert api.PyNumber_Check(space.wraplong(-12L)) assert api.PyNumber_Check(space.wrap(12.1)) assert not api.PyNumber_Check(space.wrap('12')) assert not api.PyNumber_Check(space.wrap(1+3j)) @@ -21,7 +21,7 @@ assert api.PyLong_CheckExact(w_l) def test_number_int(self, space, api): - w_l = api.PyNumber_Int(space.wrap(123L)) + w_l = api.PyNumber_Int(space.wraplong(123L)) assert api.PyInt_CheckExact(w_l) w_l = api.PyNumber_Int(space.wrap(2 << 65)) assert api.PyLong_CheckExact(w_l) @@ -29,7 +29,7 @@ assert api.PyInt_CheckExact(w_l) def test_number_index(self, space, api): - w_l = api.PyNumber_Index(space.wrap(123L)) + w_l = 
api.PyNumber_Index(space.wraplong(123L)) assert api.PyLong_CheckExact(w_l) w_l = api.PyNumber_Index(space.wrap(42.3)) assert w_l is None diff --git a/pypy/module/cpyext/test/test_pystate.py b/pypy/module/cpyext/test/test_pystate.py --- a/pypy/module/cpyext/test/test_pystate.py +++ b/pypy/module/cpyext/test/test_pystate.py @@ -3,6 +3,10 @@ from pypy.rpython.lltypesystem.lltype import nullptr from pypy.module.cpyext.pystate import PyInterpreterState, PyThreadState from pypy.module.cpyext.pyobject import from_ref +from pypy.rpython.lltypesystem import lltype +from pypy.module.cpyext.test.test_cpyext import LeakCheckingTest, freeze_refcnts +from pypy.module.cpyext.pystate import PyThreadState_Get, PyInterpreterState_Head +from pypy.tool import leakfinder class AppTestThreads(AppTestCpythonExtensionBase): def test_allow_threads(self): @@ -21,6 +25,93 @@ # Should compile at least module.test() + + def test_thread_state_get(self): + module = self.import_extension('foo', [ + ("get", "METH_NOARGS", + """ + PyThreadState *tstate = PyThreadState_Get(); + if (tstate == NULL) { + return PyLong_FromLong(0); + } + if (tstate->interp != PyInterpreterState_Head()) { + return PyLong_FromLong(1); + } + if (tstate->interp->next != NULL) { + return PyLong_FromLong(2); + } + return PyLong_FromLong(3); + """), + ]) + assert module.get() == 3 + + def test_basic_threadstate_dance(self): + module = self.import_extension('foo', [ + ("dance", "METH_NOARGS", + """ + PyThreadState *old_tstate, *new_tstate; + + old_tstate = PyThreadState_Swap(NULL); + if (old_tstate == NULL) { + return PyLong_FromLong(0); + } + + new_tstate = PyThreadState_Get(); + if (new_tstate != NULL) { + return PyLong_FromLong(1); + } + + new_tstate = PyThreadState_Swap(old_tstate); + if (new_tstate != NULL) { + return PyLong_FromLong(2); + } + + new_tstate = PyThreadState_Get(); + if (new_tstate != old_tstate) { + return PyLong_FromLong(3); + } + + return PyLong_FromLong(4); + """), + ]) + assert module.dance() == 4 + + 
def test_threadstate_dict(self): + module = self.import_extension('foo', [ + ("getdict", "METH_NOARGS", + """ + PyObject *dict = PyThreadState_GetDict(); + Py_INCREF(dict); + return dict; + """), + ]) + assert isinstance(module.getdict(), dict) + + def test_savethread(self): + module = self.import_extension('foo', [ + ("bounce", "METH_NOARGS", + """ + PyThreadState *tstate = PyEval_SaveThread(); + if (tstate == NULL) { + return PyLong_FromLong(0); + } + + if (PyThreadState_Get() != NULL) { + return PyLong_FromLong(1); + } + + PyEval_RestoreThread(tstate); + + if (PyThreadState_Get() != tstate) { + return PyLong_FromLong(2); + } + + return PyLong_FromLong(3); + """), + ]) + + + class TestInterpreterState(BaseApiTest): def test_interpreter_head(self, space, api): state = api.PyInterpreterState_Head() @@ -29,31 +120,3 @@ def test_interpreter_next(self, space, api): state = api.PyInterpreterState_Head() assert nullptr(PyInterpreterState.TO) == api.PyInterpreterState_Next(state) - -class TestThreadState(BaseApiTest): - def test_thread_state_get(self, space, api): - ts = api.PyThreadState_Get() - assert ts != nullptr(PyThreadState.TO) - - def test_thread_state_interp(self, space, api): - ts = api.PyThreadState_Get() - assert ts.c_interp == api.PyInterpreterState_Head() - assert ts.c_interp.c_next == nullptr(PyInterpreterState.TO) - - def test_basic_threadstate_dance(self, space, api): - # Let extension modules call these functions, - # Not sure of the semantics in pypy though. 
- # (cpyext always acquires and releases the GIL around calls) - tstate = api.PyThreadState_Swap(None) - assert tstate is not None - assert not api.PyThreadState_Swap(tstate) - - api.PyEval_AcquireThread(tstate) - api.PyEval_ReleaseThread(tstate) - - def test_threadstate_dict(self, space, api): - ts = api.PyThreadState_Get() - ref = ts.c_dict - assert ref == api.PyThreadState_GetDict() - w_obj = from_ref(space, ref) - assert space.isinstance_w(w_obj, space.w_dict) diff --git a/pypy/module/cpyext/test/test_stringobject.py b/pypy/module/cpyext/test/test_stringobject.py --- a/pypy/module/cpyext/test/test_stringobject.py +++ b/pypy/module/cpyext/test/test_stringobject.py @@ -105,6 +105,15 @@ )]) assert module.string_as_string("huheduwe") == "huhe" + def test_py_string_as_string_None(self): + module = self.import_extension('foo', [ + ("string_None", "METH_VARARGS", + ''' + return PyString_AsString(Py_None); + ''' + )]) + raises(TypeError, module.string_None) + def test_AsStringAndSize(self): module = self.import_extension('foo', [ ("getstring", "METH_NOARGS", diff --git a/pypy/module/cpyext/test/test_thread.py b/pypy/module/cpyext/test/test_thread.py --- a/pypy/module/cpyext/test/test_thread.py +++ b/pypy/module/cpyext/test/test_thread.py @@ -5,6 +5,7 @@ from pypy.module.thread.ll_thread import allocate_ll_lock from pypy.module.cpyext.test.test_api import BaseApiTest +from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase class TestPyThread(BaseApiTest): @@ -38,3 +39,51 @@ api.PyThread_release_lock(lock) assert api.PyThread_acquire_lock(lock, 0) == 1 api.PyThread_free_lock(lock) + + +class AppTestThread(AppTestCpythonExtensionBase): + def test_tls(self): + module = self.import_extension('foo', [ + ("create_key", "METH_NOARGS", + """ + return PyInt_FromLong(PyThread_create_key()); + """), + ("test_key", "METH_O", + """ + int key = PyInt_AsLong(args); + if (PyThread_get_key_value(key) != NULL) { + PyErr_SetNone(PyExc_ValueError); + return NULL; + } + 
if (PyThread_set_key_value(key, (void*)123) < 0) { + PyErr_SetNone(PyExc_ValueError); + return NULL; + } + if (PyThread_get_key_value(key) != (void*)123) { + PyErr_SetNone(PyExc_ValueError); + return NULL; + } + Py_RETURN_NONE; + """), + ]) + key = module.create_key() + assert key > 0 + # Test value in main thread. + module.test_key(key) + raises(ValueError, module.test_key, key) + # Same test, in another thread. + result = [] + import thread, time + def in_thread(): + try: + module.test_key(key) + raises(ValueError, module.test_key, key) + except Exception, e: + result.append(e) + else: + result.append(True) + thread.start_new_thread(in_thread, ()) + while not result: + print "." + time.sleep(.5) + assert result == [True] diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py --- a/pypy/module/cpyext/test/test_unicodeobject.py +++ b/pypy/module/cpyext/test/test_unicodeobject.py @@ -204,8 +204,18 @@ assert api.Py_UNICODE_ISSPACE(unichr(char)) assert not api.Py_UNICODE_ISSPACE(u'a') + assert api.Py_UNICODE_ISALPHA(u'a') + assert not api.Py_UNICODE_ISALPHA(u'0') + assert api.Py_UNICODE_ISALNUM(u'a') + assert api.Py_UNICODE_ISALNUM(u'0') + assert not api.Py_UNICODE_ISALNUM(u'+') + assert api.Py_UNICODE_ISDECIMAL(u'\u0660') assert not api.Py_UNICODE_ISDECIMAL(u'a') + assert api.Py_UNICODE_ISDIGIT(u'9') + assert not api.Py_UNICODE_ISDIGIT(u'@') + assert api.Py_UNICODE_ISNUMERIC(u'9') + assert not api.Py_UNICODE_ISNUMERIC(u'@') for char in [0x0a, 0x0d, 0x1c, 0x1d, 0x1e, 0x85, 0x2028, 0x2029]: assert api.Py_UNICODE_ISLINEBREAK(unichr(char)) @@ -216,6 +226,9 @@ assert not api.Py_UNICODE_ISUPPER(u'a') assert not api.Py_UNICODE_ISLOWER(u'�') assert api.Py_UNICODE_ISUPPER(u'�') + assert not api.Py_UNICODE_ISTITLE(u'A') + assert api.Py_UNICODE_ISTITLE( + u'\N{LATIN CAPITAL LETTER L WITH SMALL LETTER J}') def test_TOLOWER(self, space, api): assert api.Py_UNICODE_TOLOWER(u'�') == u'�' @@ -429,3 +442,18 @@ w_char = 
api.PyUnicode_FromOrdinal(0xFFFF) assert space.unwrap(w_char) == u'\uFFFF' + def test_replace(self, space, api): + w_str = space.wrap(u"abababab") + w_substr = space.wrap(u"a") + w_replstr = space.wrap(u"z") + assert u"zbzbabab" == space.unwrap( + api.PyUnicode_Replace(w_str, w_substr, w_replstr, 2)) + assert u"zbzbzbzb" == space.unwrap( + api.PyUnicode_Replace(w_str, w_substr, w_replstr, -1)) + + def test_tailmatch(self, space, api): + w_str = space.wrap(u"abcdef") + assert api.PyUnicode_Tailmatch(w_str, space.wrap("cde"), 2, 10, 1) == 1 + assert api.PyUnicode_Tailmatch(w_str, space.wrap("cde"), 1, 5, -1) == 1 + self.raises(space, api, TypeError, + api.PyUnicode_Tailmatch, w_str, space.wrap(3), 2, 10, 1) diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -12,7 +12,7 @@ make_typedescr, get_typedescr) from pypy.module.cpyext.stringobject import PyString_Check from pypy.module.sys.interp_encoding import setdefaultencoding -from pypy.objspace.std import unicodeobject, unicodetype +from pypy.objspace.std import unicodeobject, unicodetype, stringtype from pypy.rlib import runicode from pypy.tool.sourcetools import func_renamer import sys @@ -89,6 +89,11 @@ return unicodedb.isspace(ord(ch)) @cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) +def Py_UNICODE_ISALPHA(space, ch): + """Return 1 or 0 depending on whether ch is an alphabetic character.""" + return unicodedb.isalpha(ord(ch)) + + at cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) def Py_UNICODE_ISALNUM(space, ch): """Return 1 or 0 depending on whether ch is an alphanumeric character.""" return unicodedb.isalnum(ord(ch)) @@ -104,6 +109,16 @@ return unicodedb.isdecimal(ord(ch)) @cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) +def Py_UNICODE_ISDIGIT(space, ch): + """Return 1 or 0 depending on whether ch is a digit character.""" + return 
unicodedb.isdigit(ord(ch)) + + at cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) +def Py_UNICODE_ISNUMERIC(space, ch): + """Return 1 or 0 depending on whether ch is a numeric character.""" + return unicodedb.isnumeric(ord(ch)) + + at cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) def Py_UNICODE_ISLOWER(space, ch): """Return 1 or 0 depending on whether ch is a lowercase character.""" return unicodedb.islower(ord(ch)) @@ -113,6 +128,11 @@ """Return 1 or 0 depending on whether ch is an uppercase character.""" return unicodedb.isupper(ord(ch)) + at cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) +def Py_UNICODE_ISTITLE(space, ch): + """Return 1 or 0 depending on whether ch is a titlecase character.""" + return unicodedb.istitle(ord(ch)) + @cpython_api([Py_UNICODE], Py_UNICODE, error=CANNOT_FAIL) def Py_UNICODE_TOLOWER(space, ch): """Return the character ch converted to lower case.""" @@ -155,6 +175,11 @@ except KeyError: return -1.0 + at cpython_api([], Py_UNICODE, error=CANNOT_FAIL) +def PyUnicode_GetMax(space): + """Get the maximum ordinal for a Unicode character.""" + return runicode.UNICHR(runicode.MAXUNICODE) + @cpython_api([PyObject], rffi.CCHARP, error=CANNOT_FAIL) def PyUnicode_AS_DATA(space, ref): """Return a pointer to the internal buffer of the object. o has to be a @@ -548,6 +573,28 @@ @cpython_api([PyObject, PyObject], PyObject) def PyUnicode_Join(space, w_sep, w_seq): - """Join a sequence of strings using the given separator and return the resulting - Unicode string.""" + """Join a sequence of strings using the given separator and return + the resulting Unicode string.""" return space.call_method(w_sep, 'join', w_seq) + + at cpython_api([PyObject, PyObject, PyObject, Py_ssize_t], PyObject) +def PyUnicode_Replace(space, w_str, w_substr, w_replstr, maxcount): + """Replace at most maxcount occurrences of substr in str with replstr and + return the resulting Unicode object. 
maxcount == -1 means replace all + occurrences.""" + return space.call_method(w_str, "replace", w_substr, w_replstr, + space.wrap(maxcount)) + + at cpython_api([PyObject, PyObject, Py_ssize_t, Py_ssize_t, rffi.INT_real], + rffi.INT_real, error=-1) +def PyUnicode_Tailmatch(space, w_str, w_substr, start, end, direction): + """Return 1 if substr matches str[start:end] at the given tail end + (direction == -1 means to do a prefix match, direction == 1 a + suffix match), 0 otherwise. Return -1 if an error occurred.""" + str = space.unicode_w(w_str) + substr = space.unicode_w(w_substr) + if rffi.cast(lltype.Signed, direction) >= 0: + return stringtype.stringstartswith(str, substr, start, end) + else: + return stringtype.stringendswith(str, substr, start, end) + diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -1,10 +1,11 @@ from pypy.module.imp import importing from pypy.module._file.interp_file import W_File from pypy.rlib import streamio +from pypy.rlib.streamio import StreamErrors from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.module import Module from pypy.interpreter.gateway import unwrap_spec -from pypy.module._file.interp_stream import StreamErrors, wrap_streamerror +from pypy.interpreter.streamutil import wrap_streamerror def get_suffixes(space): diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -357,7 +357,7 @@ def test_cannot_write_pyc(self): import sys, os - p = os.path.join(sys.path[-1], 'readonly') + p = os.path.join(sys.path[0], 'readonly') try: os.chmod(p, 0555) except: diff --git a/pypy/module/marshal/interp_marshal.py b/pypy/module/marshal/interp_marshal.py --- a/pypy/module/marshal/interp_marshal.py +++ b/pypy/module/marshal/interp_marshal.py @@ -327,8 +327,10 @@ # %r not supported in 
rpython #u.raise_exc('invalid typecode in unmarshal: %r' % tc) c = ord(tc) - if c < 32 or c > 126: - s = '\\x' + hex(c) + if c < 16: + s = '\\x0%x' % c + elif c < 32 or c > 126: + s = '\\x%x' % c elif tc == '\\': s = r'\\' else: diff --git a/pypy/module/marshal/test/make_test_marshal.py b/pypy/module/marshal/test/make_test_marshal.py deleted file mode 100644 --- a/pypy/module/marshal/test/make_test_marshal.py +++ /dev/null @@ -1,78 +0,0 @@ - -TESTCASES = """\ - None - False - True - StopIteration - Ellipsis - 42 - -17 - sys.maxint - -1.25 - -1.25 #2 - 2+5j - 2+5j #2 - 42L - -1234567890123456789012345678901234567890L - hello # not interned - "hello" - () - (1, 2) - [] - [3, 4] - {} - {5: 6, 7: 8} - func.func_code - scopefunc.func_code - u'hello' - set() - set([1, 2]) - frozenset() - frozenset([3, 4]) -""".strip().split('\n') - -def readable(s): - for c, repl in ( - ("'", '_quote_'), ('"', '_Quote_'), (':', '_colon_'), ('.', '_dot_'), - ('[', '_list_'), (']', '_tsil_'), ('{', '_dict_'), ('}', '_tcid_'), - ('-', '_minus_'), ('+', '_plus_'), - (',', '_comma_'), ('(', '_brace_'), (')', '_ecarb_') ): - s = s.replace(c, repl) - lis = list(s) - for i, c in enumerate(lis): - if c.isalnum() or c == '_': - continue - lis[i] = '_' - return ''.join(lis) - -print """class AppTestMarshal: -""" -for line in TESTCASES: - line = line.strip() - name = readable(line) - version = '' - extra = '' - if line.endswith('#2'): - version = ', 2' - extra = '; assert len(s) in (9, 17)' - src = '''\ - def test_%(name)s(self): - import sys - hello = "he" - hello += "llo" - def func(x): - return lambda y: x+y - scopefunc = func(42) - import marshal, StringIO - case = %(line)s - print "case: %%-30s func=%(name)s" %% (case, ) - s = marshal.dumps(case%(version)s)%(extra)s - x = marshal.loads(s) - assert x == case - f = StringIO.StringIO() - marshal.dump(case, f) - f.seek(0) - x = marshal.load(f) - assert x == case -''' % {'name': name, 'line': line, 'version' : version, 'extra': extra} - print src 
diff --git a/pypy/module/marshal/test/test_marshal.py b/pypy/module/marshal/test/test_marshal.py --- a/pypy/module/marshal/test/test_marshal.py +++ b/pypy/module/marshal/test/test_marshal.py @@ -174,6 +174,11 @@ pass raises(ValueError, marshal.dumps, subtype) + def test_bad_typecode(self): + import marshal + exc = raises(ValueError, marshal.loads, chr(1)) + assert r"'\x01'" in exc.value.message + class AppTestRope(AppTestMarshal): def setup_class(cls): diff --git a/pypy/module/math/test/test_direct.py b/pypy/module/math/test/test_direct.py --- a/pypy/module/math/test/test_direct.py +++ b/pypy/module/math/test/test_direct.py @@ -55,6 +55,15 @@ ('frexp', (-1.25,), lambda x: x == (-0.625, 1)), ('modf', (4.25,), lambda x: x == (0.25, 4.0)), ('modf', (-4.25,), lambda x: x == (-0.25, -4.0)), + ('copysign', (1.5, 0.0), 1.5), + ('copysign', (1.5, -0.0), -1.5), + ('copysign', (1.5, INFINITY), 1.5), + ('copysign', (1.5, -INFINITY), -1.5), + ] + if sys.platform != 'win32': # all NaNs seem to be negative there...? 
+ IRREGCASES += [ + ('copysign', (1.5, NAN), 1.5), + ('copysign', (1.75, -NAN), -1.75), # special case for -NAN here ] OVFCASES = [ diff --git a/pypy/module/math/test/test_math.py b/pypy/module/math/test/test_math.py --- a/pypy/module/math/test/test_math.py +++ b/pypy/module/math/test/test_math.py @@ -1,3 +1,4 @@ +from __future__ import with_statement import sys from pypy.conftest import gettestobjspace from pypy.module.math.test import test_direct @@ -268,3 +269,7 @@ def __trunc__(self): return "truncated" assert math.trunc(foo()) == "truncated" + + def test_copysign_nan(self): + import math + assert math.copysign(1.0, float('-nan')) == -1.0 diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -37,26 +37,44 @@ 'True_': 'types.Bool.True', 'False_': 'types.Bool.False', + 'typeinfo': 'interp_dtype.get_dtype_cache(space).w_typeinfo', + 'generic': 'interp_boxes.W_GenericBox', 'number': 'interp_boxes.W_NumberBox', 'integer': 'interp_boxes.W_IntegerBox', 'signedinteger': 'interp_boxes.W_SignedIntegerBox', 'unsignedinteger': 'interp_boxes.W_UnsignedIntegerBox', 'bool_': 'interp_boxes.W_BoolBox', + 'bool8': 'interp_boxes.W_BoolBox', 'int8': 'interp_boxes.W_Int8Box', + 'byte': 'interp_boxes.W_Int8Box', 'uint8': 'interp_boxes.W_UInt8Box', + 'ubyte': 'interp_boxes.W_UInt8Box', 'int16': 'interp_boxes.W_Int16Box', + 'short': 'interp_boxes.W_Int16Box', 'uint16': 'interp_boxes.W_UInt16Box', + 'ushort': 'interp_boxes.W_UInt16Box', 'int32': 'interp_boxes.W_Int32Box', + 'intc': 'interp_boxes.W_Int32Box', 'uint32': 'interp_boxes.W_UInt32Box', + 'uintc': 'interp_boxes.W_UInt32Box', 'int64': 'interp_boxes.W_Int64Box', 'uint64': 'interp_boxes.W_UInt64Box', + 'longlong': 'interp_boxes.W_LongLongBox', + 'ulonglong': 'interp_boxes.W_ULongLongBox', 'int_': 'interp_boxes.W_LongBox', 'inexact': 'interp_boxes.W_InexactBox', 'floating': 'interp_boxes.W_FloatingBox', 'float_': 
'interp_boxes.W_Float64Box', 'float32': 'interp_boxes.W_Float32Box', 'float64': 'interp_boxes.W_Float64Box', + 'intp': 'types.IntP.BoxType', + 'uintp': 'types.UIntP.BoxType', + 'flexible': 'interp_boxes.W_FlexibleBox', + 'character': 'interp_boxes.W_CharacterBox', + 'str_': 'interp_boxes.W_StringBox', + 'unicode_': 'interp_boxes.W_UnicodeBox', + 'void': 'interp_boxes.W_VoidBox', } # ufuncs @@ -67,6 +85,7 @@ ("arccos", "arccos"), ("arcsin", "arcsin"), ("arctan", "arctan"), + ("arctan2", "arctan2"), ("arccosh", "arccosh"), ("arcsinh", "arcsinh"), ("arctanh", "arctanh"), @@ -77,7 +96,10 @@ ("true_divide", "true_divide"), ("equal", "equal"), ("exp", "exp"), + ("exp2", "exp2"), + ("expm1", "expm1"), ("fabs", "fabs"), + ("fmod", "fmod"), ("floor", "floor"), ("ceil", "ceil"), ("greater", "greater"), @@ -89,8 +111,13 @@ ("multiply", "multiply"), ("negative", "negative"), ("not_equal", "not_equal"), + ("radians", "radians"), + ("degrees", "degrees"), + ("deg2rad", "radians"), + ("rad2deg", "degrees"), ("reciprocal", "reciprocal"), ("sign", "sign"), + ("signbit", "signbit"), ("sin", "sin"), ("sinh", "sinh"), ("subtract", "subtract"), @@ -103,10 +130,21 @@ ('bitwise_not', 'invert'), ('isnan', 'isnan'), ('isinf', 'isinf'), + ('isneginf', 'isneginf'), + ('isposinf', 'isposinf'), + ('isfinite', 'isfinite'), ('logical_and', 'logical_and'), ('logical_xor', 'logical_xor'), ('logical_not', 'logical_not'), ('logical_or', 'logical_or'), + ('log', 'log'), + ('log2', 'log2'), + ('log10', 'log10'), + ('log1p', 'log1p'), + ('power', 'power'), + ('floor_divide', 'floor_divide'), + ('logaddexp', 'logaddexp'), + ('logaddexp2', 'logaddexp2'), ]: interpleveldefs[exposed] = "interp_ufuncs.get(space).%s" % impl diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -33,7 +33,7 @@ pass SINGLE_ARG_FUNCTIONS = ["sum", "prod", "max", "min", "all", "any", - "unegative", "flat"] + 
"unegative", "flat", "tostring"] TWO_ARG_FUNCTIONS = ["dot", 'take'] class FakeSpace(object): @@ -51,6 +51,8 @@ w_long = "long" w_tuple = 'tuple' w_slice = "slice" + w_str = "str" + w_unicode = "unicode" def __init__(self): """NOT_RPYTHON""" @@ -91,8 +93,12 @@ return BoolObject(obj) elif isinstance(obj, int): return IntObject(obj) + elif isinstance(obj, long): + return LongObject(obj) elif isinstance(obj, W_Root): return obj + elif isinstance(obj, str): + return StringObject(obj) raise NotImplementedError def newlist(self, items): @@ -120,6 +126,11 @@ return int(w_obj.floatval) raise NotImplementedError + def str_w(self, w_obj): + if isinstance(w_obj, StringObject): + return w_obj.v + raise NotImplementedError + def int(self, w_obj): if isinstance(w_obj, IntObject): return w_obj @@ -151,7 +162,13 @@ return instantiate(klass) def newtuple(self, list_w): - raise ValueError + return ListObject(list_w) + + def newdict(self): + return {} + + def setitem(self, dict, item, value): + dict[item] = value def len_w(self, w_obj): if isinstance(w_obj, ListObject): @@ -178,6 +195,11 @@ def __init__(self, intval): self.intval = intval +class LongObject(W_Root): + tp = FakeSpace.w_long + def __init__(self, intval): + self.intval = intval + class ListObject(W_Root): tp = FakeSpace.w_list def __init__(self, items): @@ -190,6 +212,11 @@ self.stop = stop self.step = step +class StringObject(W_Root): + tp = FakeSpace.w_str + def __init__(self, v): + self.v = v + class InterpreterState(object): def __init__(self, code): self.code = code @@ -407,6 +434,9 @@ w_res = neg.call(interp.space, [arr]) elif self.name == "flat": w_res = arr.descr_get_flatiter(interp.space) + elif self.name == "tostring": + arr.descr_tostring(interp.space) + w_res = None else: assert False # unreachable code elif self.name in TWO_ARG_FUNCTIONS: diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ 
b/pypy/module/micronumpy/interp_boxes.py @@ -1,24 +1,25 @@ from pypy.interpreter.baseobjspace import Wrappable -from pypy.interpreter.error import operationerrfmt -from pypy.interpreter.gateway import interp2app +from pypy.interpreter.error import operationerrfmt, OperationError +from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef from pypy.objspace.std.floattype import float_typedef +from pypy.objspace.std.stringtype import str_typedef +from pypy.objspace.std.unicodetype import unicode_typedef, unicode_from_object from pypy.objspace.std.inttype import int_typedef from pypy.rlib.rarithmetic import LONG_BIT from pypy.tool.sourcetools import func_with_new_name - MIXIN_32 = (int_typedef,) if LONG_BIT == 32 else () MIXIN_64 = (int_typedef,) if LONG_BIT == 64 else () def new_dtype_getter(name): - def get_dtype(space): + def _get_dtype(space): from pypy.module.micronumpy.interp_dtype import get_dtype_cache return getattr(get_dtype_cache(space), "w_%sdtype" % name) def new(space, w_subtype, w_value): - dtype = get_dtype(space) + dtype = _get_dtype(space) return dtype.itemtype.coerce_subtype(space, w_subtype, w_value) - return func_with_new_name(new, name + "_box_new"), staticmethod(get_dtype) + return func_with_new_name(new, name + "_box_new"), staticmethod(_get_dtype) class PrimitiveBox(object): _mixin_ = True @@ -29,7 +30,6 @@ def convert_to(self, dtype): return dtype.box(self.value) - class W_GenericBox(Wrappable): _attrs_ = () @@ -38,19 +38,22 @@ w_subtype.getname(space, '?') ) + def get_dtype(self, space): + return self._get_dtype(space) + def descr_str(self, space): - return self.descr_repr(space) - - def descr_repr(self, space): return space.wrap(self.get_dtype(space).itemtype.str_format(self)) + def descr_format(self, space, w_spec): + return space.format(self.item(space), w_spec) + def descr_int(self, space): - box = self.convert_to(W_LongBox.get_dtype(space)) + box = 
self.convert_to(W_LongBox._get_dtype(space)) assert isinstance(box, W_LongBox) return space.wrap(box.value) def descr_float(self, space): - box = self.convert_to(W_Float64Box.get_dtype(space)) + box = self.convert_to(W_Float64Box._get_dtype(space)) assert isinstance(box, W_Float64Box) return space.wrap(box.value) @@ -81,6 +84,7 @@ descr_mul = _binop_impl("multiply") descr_div = _binop_impl("divide") descr_truediv = _binop_impl("true_divide") + descr_floordiv = _binop_impl("floor_divide") descr_mod = _binop_impl("mod") descr_pow = _binop_impl("power") descr_lshift = _binop_impl("left_shift") @@ -101,6 +105,7 @@ descr_rmul = _binop_right_impl("multiply") descr_rdiv = _binop_right_impl("divide") descr_rtruediv = _binop_right_impl("true_divide") + descr_rfloordiv = _binop_right_impl("floor_divide") descr_rmod = _binop_right_impl("mod") descr_rpow = _binop_right_impl("power") descr_rlshift = _binop_right_impl("left_shift") @@ -129,7 +134,7 @@ class W_BoolBox(W_GenericBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("bool") + descr__new__, _get_dtype = new_dtype_getter("bool") class W_NumberBox(W_GenericBox): _attrs_ = () @@ -145,34 +150,40 @@ pass class W_Int8Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("int8") + descr__new__, _get_dtype = new_dtype_getter("int8") class W_UInt8Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("uint8") + descr__new__, _get_dtype = new_dtype_getter("uint8") class W_Int16Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("int16") + descr__new__, _get_dtype = new_dtype_getter("int16") class W_UInt16Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("uint16") + descr__new__, _get_dtype = new_dtype_getter("uint16") class W_Int32Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("int32") + descr__new__, _get_dtype = new_dtype_getter("int32") class 
W_UInt32Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("uint32") + descr__new__, _get_dtype = new_dtype_getter("uint32") class W_LongBox(W_SignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("long") + descr__new__, _get_dtype = new_dtype_getter("long") class W_ULongBox(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("ulong") + descr__new__, _get_dtype = new_dtype_getter("ulong") class W_Int64Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("int64") + descr__new__, _get_dtype = new_dtype_getter("int64") + +class W_LongLongBox(W_SignedIntegerBox, PrimitiveBox): + descr__new__, _get_dtype = new_dtype_getter('longlong') class W_UInt64Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("uint64") + descr__new__, _get_dtype = new_dtype_getter("uint64") + +class W_ULongLongBox(W_SignedIntegerBox, PrimitiveBox): + descr__new__, _get_dtype = new_dtype_getter('ulonglong') class W_InexactBox(W_NumberBox): _attrs_ = () @@ -181,12 +192,71 @@ _attrs_ = () class W_Float32Box(W_FloatingBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("float32") + descr__new__, _get_dtype = new_dtype_getter("float32") class W_Float64Box(W_FloatingBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("float64") + descr__new__, _get_dtype = new_dtype_getter("float64") +class W_FlexibleBox(W_GenericBox): + def __init__(self, arr, ofs, dtype): + self.arr = arr # we have to keep array alive + self.ofs = ofs + self.dtype = dtype + + def get_dtype(self, space): + return self.arr.dtype + + at unwrap_spec(self=W_GenericBox) +def descr_index(space, self): + return space.index(self.item(space)) + +class W_VoidBox(W_FlexibleBox): + @unwrap_spec(item=str) + def descr_getitem(self, space, item): + try: + ofs, dtype = self.dtype.fields[item] + except KeyError: + raise OperationError(space.w_IndexError, + space.wrap("Field 
%s does not exist" % item)) + return dtype.itemtype.read(self.arr, 1, self.ofs, ofs, dtype) + + @unwrap_spec(item=str) + def descr_setitem(self, space, item, w_value): + try: + ofs, dtype = self.dtype.fields[item] + except KeyError: + raise OperationError(space.w_IndexError, + space.wrap("Field %s does not exist" % item)) + dtype.itemtype.store(self.arr, 1, self.ofs, ofs, + dtype.coerce(space, w_value)) + +class W_CharacterBox(W_FlexibleBox): + pass + +class W_StringBox(W_CharacterBox): + def descr__new__string_box(space, w_subtype, w_arg): + from pypy.module.micronumpy.interp_numarray import W_NDimArray + from pypy.module.micronumpy.interp_dtype import new_string_dtype + + arg = space.str_w(space.str(w_arg)) + arr = W_NDimArray([1], new_string_dtype(space, len(arg))) + for i in range(len(arg)): + arr.storage[i] = arg[i] + return W_StringBox(arr, 0, arr.dtype) + + +class W_UnicodeBox(W_CharacterBox): + def descr__new__unicode_box(space, w_subtype, w_arg): + from pypy.module.micronumpy.interp_numarray import W_NDimArray + from pypy.module.micronumpy.interp_dtype import new_unicode_dtype + + arg = space.unicode_w(unicode_from_object(space, w_arg)) + arr = W_NDimArray([1], new_unicode_dtype(space, len(arg))) + # XXX not this way, we need store + #for i in range(len(arg)): + # arr.storage[i] = arg[i] + return W_UnicodeBox(arr, 0, arr.dtype) W_GenericBox.typedef = TypeDef("generic", __module__ = "numpypy", @@ -194,7 +264,8 @@ __new__ = interp2app(W_GenericBox.descr__new__.im_func), __str__ = interp2app(W_GenericBox.descr_str), - __repr__ = interp2app(W_GenericBox.descr_repr), + __repr__ = interp2app(W_GenericBox.descr_str), + __format__ = interp2app(W_GenericBox.descr_format), __int__ = interp2app(W_GenericBox.descr_int), __float__ = interp2app(W_GenericBox.descr_float), __nonzero__ = interp2app(W_GenericBox.descr_nonzero), @@ -204,6 +275,7 @@ __mul__ = interp2app(W_GenericBox.descr_mul), __div__ = interp2app(W_GenericBox.descr_div), __truediv__ = 
interp2app(W_GenericBox.descr_truediv), + __floordiv__ = interp2app(W_GenericBox.descr_floordiv), __mod__ = interp2app(W_GenericBox.descr_mod), __divmod__ = interp2app(W_GenericBox.descr_divmod), __pow__ = interp2app(W_GenericBox.descr_pow), @@ -218,6 +290,7 @@ __rmul__ = interp2app(W_GenericBox.descr_rmul), __rdiv__ = interp2app(W_GenericBox.descr_rdiv), __rtruediv__ = interp2app(W_GenericBox.descr_rtruediv), + __rfloordiv__ = interp2app(W_GenericBox.descr_rfloordiv), __rmod__ = interp2app(W_GenericBox.descr_rmod), __rdivmod__ = interp2app(W_GenericBox.descr_rdivmod), __rpow__ = interp2app(W_GenericBox.descr_rpow), @@ -245,6 +318,8 @@ W_BoolBox.typedef = TypeDef("bool_", W_GenericBox.typedef, __module__ = "numpypy", __new__ = interp2app(W_BoolBox.descr__new__.im_func), + + __index__ = interp2app(descr_index), ) W_NumberBox.typedef = TypeDef("number", W_GenericBox.typedef, @@ -266,36 +341,43 @@ W_Int8Box.typedef = TypeDef("int8", W_SignedIntegerBox.typedef, __module__ = "numpypy", __new__ = interp2app(W_Int8Box.descr__new__.im_func), + __index__ = interp2app(descr_index), ) W_UInt8Box.typedef = TypeDef("uint8", W_UnsignedIntegerBox.typedef, __module__ = "numpypy", __new__ = interp2app(W_UInt8Box.descr__new__.im_func), + __index__ = interp2app(descr_index), ) W_Int16Box.typedef = TypeDef("int16", W_SignedIntegerBox.typedef, __module__ = "numpypy", __new__ = interp2app(W_Int16Box.descr__new__.im_func), + __index__ = interp2app(descr_index), ) W_UInt16Box.typedef = TypeDef("uint16", W_UnsignedIntegerBox.typedef, __module__ = "numpypy", __new__ = interp2app(W_UInt16Box.descr__new__.im_func), + __index__ = interp2app(descr_index), ) W_Int32Box.typedef = TypeDef("int32", (W_SignedIntegerBox.typedef,) + MIXIN_32, __module__ = "numpypy", __new__ = interp2app(W_Int32Box.descr__new__.im_func), + __index__ = interp2app(descr_index), ) W_UInt32Box.typedef = TypeDef("uint32", W_UnsignedIntegerBox.typedef, __module__ = "numpypy", __new__ = 
interp2app(W_UInt32Box.descr__new__.im_func), + __index__ = interp2app(descr_index), ) W_Int64Box.typedef = TypeDef("int64", (W_SignedIntegerBox.typedef,) + MIXIN_64, __module__ = "numpypy", __new__ = interp2app(W_Int64Box.descr__new__.im_func), + __index__ = interp2app(descr_index), ) if LONG_BIT == 32: @@ -308,6 +390,7 @@ W_UInt64Box.typedef = TypeDef("uint64", W_UnsignedIntegerBox.typedef, __module__ = "numpypy", __new__ = interp2app(W_UInt64Box.descr__new__.im_func), + __index__ = interp2app(descr_index), ) W_InexactBox.typedef = TypeDef("inexact", W_NumberBox.typedef, @@ -330,3 +413,28 @@ __new__ = interp2app(W_Float64Box.descr__new__.im_func), ) + +W_FlexibleBox.typedef = TypeDef("flexible", W_GenericBox.typedef, + __module__ = "numpypy", +) + +W_VoidBox.typedef = TypeDef("void", W_FlexibleBox.typedef, + __module__ = "numpypy", + __getitem__ = interp2app(W_VoidBox.descr_getitem), + __setitem__ = interp2app(W_VoidBox.descr_setitem), +) + +W_CharacterBox.typedef = TypeDef("character", W_FlexibleBox.typedef, + __module__ = "numpypy", +) + +W_StringBox.typedef = TypeDef("string_", (str_typedef, W_CharacterBox.typedef), + __module__ = "numpypy", + __new__ = interp2app(W_StringBox.descr__new__string_box.im_func), +) + +W_UnicodeBox.typedef = TypeDef("unicode_", (unicode_typedef, W_CharacterBox.typedef), + __module__ = "numpypy", + __new__ = interp2app(W_UnicodeBox.descr__new__unicode_box.im_func), +) + diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -1,26 +1,29 @@ + +import sys from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.error import OperationError -from pypy.interpreter.gateway import interp2app +from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import (TypeDef, GetSetProperty, interp_attrproperty, interp_attrproperty_w) -from pypy.module.micronumpy import 
types, signature, interp_boxes +from pypy.module.micronumpy import types, interp_boxes from pypy.rlib.objectmodel import specialize -from pypy.rlib.rarithmetic import LONG_BIT -from pypy.rpython.lltypesystem import lltype, rffi +from pypy.rlib.rarithmetic import LONG_BIT, r_longlong, r_ulonglong UNSIGNEDLTR = "u" SIGNEDLTR = "i" BOOLLTR = "b" FLOATINGLTR = "f" - - -VOID_STORAGE = lltype.Array(lltype.Char, hints={'nolength': True, 'render_as_void': True}) +VOIDLTR = 'V' +STRINGLTR = 'S' +UNICODELTR = 'U' class W_Dtype(Wrappable): _immutable_fields_ = ["itemtype", "num", "kind"] - def __init__(self, itemtype, num, kind, name, char, w_box_type, alternate_constructors=[], aliases=[]): + def __init__(self, itemtype, num, kind, name, char, w_box_type, + alternate_constructors=[], aliases=[], + fields=None, fieldnames=None, native=True): self.itemtype = itemtype self.num = num self.kind = kind @@ -29,53 +32,28 @@ self.w_box_type = w_box_type self.alternate_constructors = alternate_constructors self.aliases = aliases - - def malloc(self, length): - # XXX find out why test_zjit explodes with tracking of allocations - return lltype.malloc(VOID_STORAGE, self.itemtype.get_element_size() * length, - zero=True, flavor="raw", - track_allocation=False, add_memory_pressure=True - ) + self.fields = fields + self.fieldnames = fieldnames + self.native = native @specialize.argtype(1) def box(self, value): return self.itemtype.box(value) def coerce(self, space, w_item): - return self.itemtype.coerce(space, w_item) + return self.itemtype.coerce(space, self, w_item) - def getitem(self, storage, i): - return self.itemtype.read(storage, self.itemtype.get_element_size(), i, 0) + def getitem(self, arr, i): + return self.itemtype.read(arr, 1, i, 0) - def getitem_bool(self, storage, i): - isize = self.itemtype.get_element_size() - return self.itemtype.read_bool(storage, isize, i, 0) + def getitem_bool(self, arr, i): + return self.itemtype.read_bool(arr, 1, i, 0) - def setitem(self, storage, i, 
box): - self.itemtype.store(storage, self.itemtype.get_element_size(), i, 0, box) + def setitem(self, arr, i, box): + self.itemtype.store(arr, 1, i, 0, box) def fill(self, storage, box, start, stop): - self.itemtype.fill(storage, self.itemtype.get_element_size(), box, start, stop, 0) - - def descr__new__(space, w_subtype, w_dtype): - cache = get_dtype_cache(space) - - if space.is_w(w_dtype, space.w_None): - return cache.w_float64dtype - elif space.isinstance_w(w_dtype, w_subtype): - return w_dtype - elif space.isinstance_w(w_dtype, space.w_str): - name = space.str_w(w_dtype) - for dtype in cache.builtin_dtypes: - if dtype.name == name or dtype.char == name or name in dtype.aliases: - return dtype - else: - for dtype in cache.builtin_dtypes: - if w_dtype in dtype.alternate_constructors: - return dtype - if w_dtype is dtype.w_box_type: - return dtype - raise OperationError(space.w_TypeError, space.wrap("data type not understood")) + self.itemtype.fill(storage, self.get_size(), box, start, stop, 0) def descr_str(self, space): return space.wrap(self.name) @@ -86,6 +64,14 @@ def descr_get_itemsize(self, space): return space.wrap(self.itemtype.get_element_size()) + def descr_get_byteorder(self, space): + if self.native: + return space.wrap('=') + return space.wrap(nonnative_byteorder_prefix) + + def descr_get_alignment(self, space): + return space.wrap(self.itemtype.alignment) + def descr_get_shape(self, space): return space.newtuple([]) @@ -99,31 +85,193 @@ def descr_ne(self, space, w_other): return space.wrap(not self.eq(space, w_other)) + def descr_get_fields(self, space): + if self.fields is None: + return space.w_None + w_d = space.newdict() + for name, (offset, subdtype) in self.fields.iteritems(): + space.setitem(w_d, space.wrap(name), space.newtuple([subdtype, + space.wrap(offset)])) + return w_d + + def descr_get_names(self, space): + if self.fieldnames is None: + return space.w_None + return space.newtuple([space.wrap(name) for name in self.fieldnames]) + + 
@unwrap_spec(item=str) + def descr_getitem(self, space, item): + if self.fields is None: + raise OperationError(space.w_KeyError, space.wrap("There are no keys in dtypes %s" % self.name)) + try: + return self.fields[item][1] + except KeyError: + raise OperationError(space.w_KeyError, space.wrap("Field named %s not found" % item)) + def is_int_type(self): return (self.kind == SIGNEDLTR or self.kind == UNSIGNEDLTR or self.kind == BOOLLTR) + def is_signed(self): + return self.kind == SIGNEDLTR + def is_bool_type(self): return self.kind == BOOLLTR + def is_record_type(self): + return self.fields is not None + + def __repr__(self): + if self.fields is not None: + return '' % self.fields + return '' % self.itemtype + + def get_size(self): + return self.itemtype.get_element_size() + +def dtype_from_list(space, w_lst): + lst_w = space.listview(w_lst) + fields = {} + offset = 0 + ofs_and_items = [] + fieldnames = [] + for w_elem in lst_w: + w_fldname, w_flddesc = space.fixedview(w_elem, 2) + subdtype = descr__new__(space, space.gettypefor(W_Dtype), w_flddesc) + fldname = space.str_w(w_fldname) + if fldname in fields: + raise OperationError(space.w_ValueError, space.wrap("two fields with the same name")) + assert isinstance(subdtype, W_Dtype) + fields[fldname] = (offset, subdtype) + ofs_and_items.append((offset, subdtype.itemtype)) + offset += subdtype.itemtype.get_element_size() + fieldnames.append(fldname) + itemtype = types.RecordType(ofs_and_items, offset) + return W_Dtype(itemtype, 20, VOIDLTR, "void" + str(8 * itemtype.get_element_size()), + "V", space.gettypefor(interp_boxes.W_VoidBox), fields=fields, + fieldnames=fieldnames) + +def dtype_from_dict(space, w_dict): + raise OperationError(space.w_NotImplementedError, space.wrap( + "dtype from dict")) + +def variable_dtype(space, name): + if name[0] in '<>=': + name = name[1:] + char = name[0] + if len(name) == 1: + size = 0 + else: + try: + size = int(name[1:]) + except ValueError: + raise 
OperationError(space.w_TypeError, space.wrap("data type not understood")) + if char == 'S': + itemtype = types.StringType(size) + basename = 'string' + num = 18 + w_box_type = space.gettypefor(interp_boxes.W_StringBox) + elif char == 'V': + num = 20 + basename = 'void' + w_box_type = space.gettypefor(interp_boxes.W_VoidBox) + raise OperationError(space.w_NotImplementedError, space.wrap( + "pure void dtype")) + else: + assert char == 'U' + basename = 'unicode' + itemtype = types.UnicodeType(size) + num = 19 + w_box_type = space.gettypefor(interp_boxes.W_UnicodeBox) + return W_Dtype(itemtype, num, char, + basename + str(8 * itemtype.get_element_size()), + char, w_box_type) + +def dtype_from_spec(space, name): + raise OperationError(space.w_NotImplementedError, space.wrap( + "dtype from spec")) + +def descr__new__(space, w_subtype, w_dtype): + cache = get_dtype_cache(space) + + if space.is_w(w_dtype, space.w_None): + return cache.w_float64dtype + elif space.isinstance_w(w_dtype, w_subtype): + return w_dtype + elif space.isinstance_w(w_dtype, space.w_str): + name = space.str_w(w_dtype) + if ',' in name: + return dtype_from_spec(space, name) + try: + return cache.dtypes_by_name[name] + except KeyError: + pass + if name[0] in 'VSU' or name[0] in '<>=' and name[1] in 'VSU': + return variable_dtype(space, name) + elif space.isinstance_w(w_dtype, space.w_list): + return dtype_from_list(space, w_dtype) + elif space.isinstance_w(w_dtype, space.w_dict): + return dtype_from_dict(space, w_dtype) + else: + for dtype in cache.builtin_dtypes: + if w_dtype in dtype.alternate_constructors: + return dtype + if w_dtype is dtype.w_box_type: + return dtype + raise OperationError(space.w_TypeError, space.wrap("data type not understood")) + W_Dtype.typedef = TypeDef("dtype", __module__ = "numpypy", - __new__ = interp2app(W_Dtype.descr__new__.im_func), + __new__ = interp2app(descr__new__), __str__= interp2app(W_Dtype.descr_str), __repr__ = interp2app(W_Dtype.descr_repr), __eq__ = 
interp2app(W_Dtype.descr_eq), __ne__ = interp2app(W_Dtype.descr_ne), + __getitem__ = interp2app(W_Dtype.descr_getitem), num = interp_attrproperty("num", cls=W_Dtype), kind = interp_attrproperty("kind", cls=W_Dtype), + char = interp_attrproperty("char", cls=W_Dtype), type = interp_attrproperty_w("w_box_type", cls=W_Dtype), + byteorder = GetSetProperty(W_Dtype.descr_get_byteorder), itemsize = GetSetProperty(W_Dtype.descr_get_itemsize), + alignment = GetSetProperty(W_Dtype.descr_get_alignment), shape = GetSetProperty(W_Dtype.descr_get_shape), name = interp_attrproperty('name', cls=W_Dtype), + fields = GetSetProperty(W_Dtype.descr_get_fields), + names = GetSetProperty(W_Dtype.descr_get_names), ) W_Dtype.typedef.acceptable_as_base_class = False +if sys.byteorder == 'little': + byteorder_prefix = '<' + nonnative_byteorder_prefix = '>' +else: + byteorder_prefix = '>' + nonnative_byteorder_prefix = '<' + +def new_string_dtype(space, size): + return W_Dtype( + types.StringType(size), + num=18, + kind=STRINGLTR, + name='string', + char='S' + str(size), + w_box_type = space.gettypefor(interp_boxes.W_StringBox), + ) + +def new_unicode_dtype(space, size): + return W_Dtype( + types.UnicodeType(size), + num=19, + kind=UNICODELTR, + name='unicode', + char='U' + str(size), + w_box_type = space.gettypefor(interp_boxes.W_UnicodeBox), + ) + + class DtypeCache(object): def __init__(self, space): self.w_booldtype = W_Dtype( @@ -211,7 +359,6 @@ name="int64", char="q", w_box_type=space.gettypefor(interp_boxes.W_Int64Box), - alternate_constructors=[space.w_long], ) self.w_uint64dtype = W_Dtype( types.UInt64(), @@ -239,18 +386,149 @@ alternate_constructors=[space.w_float], aliases=["float"], ) - + self.w_longlongdtype = W_Dtype( + types.Int64(), + num=9, + kind=SIGNEDLTR, + name='int64', + char='q', + w_box_type = space.gettypefor(interp_boxes.W_LongLongBox), + alternate_constructors=[space.w_long], + ) + self.w_ulonglongdtype = W_Dtype( + types.UInt64(), + num=10, + kind=UNSIGNEDLTR, + 
name='uint64', + char='Q', + w_box_type = space.gettypefor(interp_boxes.W_ULongLongBox), + ) + self.w_stringdtype = W_Dtype( + types.StringType(1), + num=18, + kind=STRINGLTR, + name='string', + char='S', + w_box_type = space.gettypefor(interp_boxes.W_StringBox), + alternate_constructors=[space.w_str], + ) + self.w_unicodedtype = W_Dtype( + types.UnicodeType(1), + num=19, + kind=UNICODELTR, + name='unicode', + char='U', + w_box_type = space.gettypefor(interp_boxes.W_UnicodeBox), + alternate_constructors=[space.w_unicode], + ) + self.w_voiddtype = W_Dtype( + types.VoidType(0), + num=20, + kind=VOIDLTR, + name='void', + char='V', + w_box_type = space.gettypefor(interp_boxes.W_VoidBox), + #alternate_constructors=[space.w_buffer], + # XXX no buffer in space + ) self.builtin_dtypes = [ self.w_booldtype, self.w_int8dtype, self.w_uint8dtype, self.w_int16dtype, self.w_uint16dtype, self.w_int32dtype, self.w_uint32dtype, self.w_longdtype, self.w_ulongdtype, - self.w_int64dtype, self.w_uint64dtype, self.w_float32dtype, - self.w_float64dtype + self.w_longlongdtype, self.w_ulonglongdtype, + self.w_float32dtype, + self.w_float64dtype, self.w_stringdtype, self.w_unicodedtype, + self.w_voiddtype, ] self.dtypes_by_num_bytes = sorted( (dtype.itemtype.get_element_size(), dtype) for dtype in self.builtin_dtypes ) + self.dtypes_by_name = {} + for dtype in self.builtin_dtypes: + self.dtypes_by_name[dtype.name] = dtype + can_name = dtype.kind + str(dtype.itemtype.get_element_size()) + self.dtypes_by_name[can_name] = dtype + self.dtypes_by_name[byteorder_prefix + can_name] = dtype + self.dtypes_by_name['=' + can_name] = dtype + new_name = nonnative_byteorder_prefix + can_name + itemtypename = dtype.itemtype.__class__.__name__ + itemtype = getattr(types, 'NonNative' + itemtypename)() + self.dtypes_by_name[new_name] = W_Dtype( + itemtype, + dtype.num, dtype.kind, new_name, dtype.char, dtype.w_box_type, + native=False) + for alias in dtype.aliases: + self.dtypes_by_name[alias] = dtype + 
self.dtypes_by_name[dtype.char] = dtype + + typeinfo_full = { + 'LONGLONG': self.w_int64dtype, + 'SHORT': self.w_int16dtype, + 'VOID': self.w_voiddtype, + #'LONGDOUBLE':, + 'UBYTE': self.w_uint8dtype, + 'UINTP': self.w_ulongdtype, + 'ULONG': self.w_ulongdtype, + 'LONG': self.w_longdtype, + 'UNICODE': self.w_unicodedtype, + #'OBJECT', + 'ULONGLONG': self.w_ulonglongdtype, + 'STRING': self.w_stringdtype, + #'CDOUBLE', + #'DATETIME', + 'UINT': self.w_uint32dtype, + 'INTP': self.w_longdtype, + #'HALF', + 'BYTE': self.w_int8dtype, + #'CFLOAT': , + #'TIMEDELTA', + 'INT': self.w_int32dtype, + 'DOUBLE': self.w_float64dtype, + 'USHORT': self.w_uint16dtype, + 'FLOAT': self.w_float32dtype, + 'BOOL': self.w_booldtype, + #, 'CLONGDOUBLE'] + } + typeinfo_partial = { + 'Generic': interp_boxes.W_GenericBox, + 'Character': interp_boxes.W_CharacterBox, + 'Flexible': interp_boxes.W_FlexibleBox, + 'Inexact': interp_boxes.W_InexactBox, + 'Integer': interp_boxes.W_IntegerBox, + 'SignedInteger': interp_boxes.W_SignedIntegerBox, + 'UnsignedInteger': interp_boxes.W_UnsignedIntegerBox, + #'ComplexFloating', + 'Number': interp_boxes.W_NumberBox, + 'Floating': interp_boxes.W_FloatingBox + } + w_typeinfo = space.newdict() + for k, v in typeinfo_partial.iteritems(): + space.setitem(w_typeinfo, space.wrap(k), space.gettypefor(v)) + for k, dtype in typeinfo_full.iteritems(): + itemsize = dtype.itemtype.get_element_size() + items_w = [space.wrap(dtype.char), + space.wrap(dtype.num), + space.wrap(itemsize * 8), # in case of changing + # number of bits per byte in the future + space.wrap(itemsize or 1)] + if dtype.is_int_type(): + if dtype.kind == BOOLLTR: + w_maxobj = space.wrap(1) + w_minobj = space.wrap(0) + elif dtype.is_signed(): + w_maxobj = space.wrap(r_longlong((1 << (itemsize*8 - 1)) + - 1)) + w_minobj = space.wrap(r_longlong(-1) << (itemsize*8 - 1)) + else: + w_maxobj = space.wrap(r_ulonglong(1 << (itemsize*8)) - 1) + w_minobj = space.wrap(0) + items_w = items_w + [w_maxobj, w_minobj] + 
items_w = items_w + [dtype.w_box_type] + + w_tuple = space.newtuple(items_w) + space.setitem(w_typeinfo, space.wrap(k), w_tuple) + self.w_typeinfo = w_typeinfo def get_dtype_cache(space): return space.fromcache(DtypeCache) diff --git a/pypy/module/micronumpy/interp_iter.py b/pypy/module/micronumpy/interp_iter.py --- a/pypy/module/micronumpy/interp_iter.py +++ b/pypy/module/micronumpy/interp_iter.py @@ -2,7 +2,7 @@ from pypy.rlib import jit from pypy.rlib.objectmodel import instantiate from pypy.module.micronumpy.strides import calculate_broadcast_strides,\ - calculate_slice_strides, calculate_dot_strides + calculate_slice_strides, calculate_dot_strides, enumerate_chunks """ This is a mini-tutorial on iterators, strides, and memory layout. It assumes you are familiar with the terms, see @@ -42,28 +42,81 @@ we can go faster. All the calculations happen in next() -next_step_x() tries to do the iteration for a number of steps at once, +next_skip_x() tries to do the iteration for a number of steps at once, but then we cannot gaurentee that we only overflow one single shape dimension, perhaps we could overflow times in one big step. 
""" # structures to describe slicing -class Chunk(object): +class BaseChunk(object): + pass + +class RecordChunk(BaseChunk): + def __init__(self, name): + self.name = name + + def apply(self, arr): + from pypy.module.micronumpy.interp_numarray import W_NDimSlice + + arr = arr.get_concrete() + ofs, subdtype = arr.dtype.fields[self.name] + # strides backstrides are identical, ofs only changes start + return W_NDimSlice(arr.start + ofs, arr.strides[:], arr.backstrides[:], + arr.shape[:], arr, subdtype) + +class Chunks(BaseChunk): + def __init__(self, l): + self.l = l + + @jit.unroll_safe + def extend_shape(self, old_shape): + shape = [] + i = -1 + for i, c in enumerate_chunks(self.l): + if c.step != 0: + shape.append(c.lgt) + s = i + 1 + assert s >= 0 + return shape[:] + old_shape[s:] + + def apply(self, arr): + from pypy.module.micronumpy.interp_numarray import W_NDimSlice,\ + VirtualSlice, ConcreteArray + + shape = self.extend_shape(arr.shape) + if not isinstance(arr, ConcreteArray): + return VirtualSlice(arr, self, shape) + r = calculate_slice_strides(arr.shape, arr.start, arr.strides, + arr.backstrides, self.l) + _, start, strides, backstrides = r + return W_NDimSlice(start, strides[:], backstrides[:], + shape[:], arr) + + +class Chunk(BaseChunk): + axis_step = 1 + def __init__(self, start, stop, step, lgt): self.start = start self.stop = stop self.step = step self.lgt = lgt - def extend_shape(self, shape): - if self.step != 0: - shape.append(self.lgt) - def __repr__(self): return 'Chunk(%d, %d, %d, %d)' % (self.start, self.stop, self.step, self.lgt) +class NewAxisChunk(Chunk): + start = 0 + stop = 1 + step = 1 + lgt = 1 + axis_step = 0 + + def __init__(self): + pass + class BaseTransform(object): pass @@ -95,17 +148,19 @@ raise NotImplementedError class ArrayIterator(BaseIterator): - def __init__(self, size): + def __init__(self, size, element_size): self.offset = 0 self.size = size + self.element_size = element_size def next(self, shapelen): return 
self.next_skip_x(1) - def next_skip_x(self, ofs): + def next_skip_x(self, x): arr = instantiate(ArrayIterator) arr.size = self.size - arr.offset = self.offset + ofs + arr.offset = self.offset + x * self.element_size + arr.element_size = self.element_size return arr def next_no_increase(self, shapelen): @@ -152,7 +207,7 @@ elif isinstance(t, ViewTransform): r = calculate_slice_strides(self.res_shape, self.offset, self.strides, - self.backstrides, t.chunks) + self.backstrides, t.chunks.l) return ViewIterator(r[1], r[2], r[3], r[0]) @jit.unroll_safe diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -7,10 +7,10 @@ from pypy.module.micronumpy.appbridge import get_appbridge_cache from pypy.module.micronumpy.dot import multidim_dot, match_dot_shapes from pypy.module.micronumpy.interp_iter import (ArrayIterator, - SkipLastAxisIterator, Chunk, ViewIterator) -from pypy.module.micronumpy.strides import (calculate_slice_strides, - shape_agreement, find_shape_and_elems, get_shape_from_iterable, - calc_new_strides, to_coords) + SkipLastAxisIterator, Chunk, ViewIterator, Chunks, RecordChunk, + NewAxisChunk) +from pypy.module.micronumpy.strides import (shape_agreement, + find_shape_and_elems, get_shape_from_iterable, calc_new_strides, to_coords) from pypy.rlib import jit from pypy.rlib.rstring import StringBuilder from pypy.rpython.lltypesystem import lltype, rffi @@ -47,7 +47,7 @@ ) flat_set_driver = jit.JitDriver( greens=['shapelen', 'base'], - reds=['step', 'ai', 'lngth', 'arr', 'basei'], + reds=['step', 'lngth', 'ri', 'arr', 'basei'], name='numpy_flatset', ) @@ -79,8 +79,8 @@ dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) ) - size, shape = _find_size_and_shape(space, w_size) - return space.wrap(W_NDimArray(size, shape[:], dtype=dtype)) + shape = _find_shape(space, 
w_size) + return space.wrap(W_NDimArray(shape[:], dtype=dtype)) def _unaryop_impl(ufunc_name): def impl(self, space): @@ -102,6 +102,7 @@ descr_mul = _binop_impl("multiply") descr_div = _binop_impl("divide") descr_truediv = _binop_impl("true_divide") + descr_floordiv = _binop_impl("floor_divide") descr_mod = _binop_impl("mod") descr_pow = _binop_impl("power") descr_lshift = _binop_impl("left_shift") @@ -136,6 +137,7 @@ descr_rmul = _binop_right_impl("multiply") descr_rdiv = _binop_right_impl("divide") descr_rtruediv = _binop_right_impl("true_divide") + descr_rfloordiv = _binop_right_impl("floor_divide") descr_rmod = _binop_right_impl("mod") descr_rpow = _binop_right_impl("power") descr_rlshift = _binop_right_impl("left_shift") @@ -223,8 +225,7 @@ return scalar_w(space, dtype, space.wrap(0)) # Do the dims match? out_shape, other_critical_dim = match_dot_shapes(space, self, other) - out_size = support.product(out_shape) - result = W_NDimArray(out_size, out_shape, dtype) + result = W_NDimArray(out_shape, dtype) # This is the place to add fpypy and blas return multidim_dot(space, self.get_concrete(), other.get_concrete(), result, dtype, @@ -243,7 +244,7 @@ return space.wrap(self.find_dtype().itemtype.get_element_size()) def descr_get_nbytes(self, space): - return space.wrap(self.size * self.find_dtype().itemtype.get_element_size()) + return space.wrap(self.size) @jit.unroll_safe def descr_get_shape(self, space): @@ -251,13 +252,16 @@ def descr_set_shape(self, space, w_iterable): new_shape = get_shape_from_iterable(space, - self.size, w_iterable) + support.product(self.shape), w_iterable) if isinstance(self, Scalar): return self.get_concrete().setshape(space, new_shape) def descr_get_size(self, space): - return space.wrap(self.size) + return space.wrap(self.get_size()) + + def get_size(self): + return self.size // self.find_dtype().get_size() def descr_copy(self, space): return self.copy(space) @@ -277,7 +281,7 @@ def empty_copy(self, space, dtype): shape = self.shape - 
return W_NDimArray(support.product(shape), shape[:], dtype, 'C') + return W_NDimArray(shape[:], dtype, 'C') def descr_len(self, space): if len(self.shape): @@ -318,7 +322,16 @@ """ The result of getitem/setitem is a single item if w_idx is a list of scalars that match the size of shape """ + if space.isinstance_w(w_idx, space.w_str): + return False shape_len = len(self.shape) + if space.isinstance_w(w_idx, space.w_tuple): + for w_item in space.fixedview(w_idx): + if (space.isinstance_w(w_item, space.w_slice) or + space.is_w(w_item, space.w_None)): + return False + elif space.is_w(w_idx, space.w_None): + return False if shape_len == 0: raise OperationError(space.w_IndexError, space.wrap( "0-d arrays can't be indexed")) @@ -334,43 +347,55 @@ if lgt > shape_len: raise OperationError(space.w_IndexError, space.wrap("invalid index")) - if lgt < shape_len: - return False - for w_item in space.fixedview(w_idx): - if space.isinstance_w(w_item, space.w_slice): - return False - return True + return lgt == shape_len @jit.unroll_safe def _prepare_slice_args(self, space, w_idx): + if space.isinstance_w(w_idx, space.w_str): + idx = space.str_w(w_idx) + dtype = self.find_dtype() + if not dtype.is_record_type() or idx not in dtype.fields: + raise OperationError(space.w_ValueError, space.wrap( + "field named %s not defined" % idx)) + return RecordChunk(idx) if (space.isinstance_w(w_idx, space.w_int) or space.isinstance_w(w_idx, space.w_slice)): - return [Chunk(*space.decode_index4(w_idx, self.shape[0]))] - return [Chunk(*space.decode_index4(w_item, self.shape[i])) for i, w_item in - enumerate(space.fixedview(w_idx))] + return Chunks([Chunk(*space.decode_index4(w_idx, self.shape[0]))]) + elif space.is_w(w_idx, space.w_None): + return Chunks([NewAxisChunk()]) + result = [] + i = 0 + for w_item in space.fixedview(w_idx): + if space.is_w(w_item, space.w_None): + result.append(NewAxisChunk()) + else: + result.append(Chunk(*space.decode_index4(w_item, + self.shape[i]))) + i += 1 + return 
Chunks(result) - def count_all_true(self, arr): - sig = arr.find_sig() - frame = sig.create_frame(arr) - shapelen = len(arr.shape) + def count_all_true(self): + sig = self.find_sig() + frame = sig.create_frame(self) + shapelen = len(self.shape) s = 0 iter = None while not frame.done(): - count_driver.jit_merge_point(arr=arr, frame=frame, iter=iter, s=s, + count_driver.jit_merge_point(arr=self, frame=frame, iter=iter, s=s, shapelen=shapelen) iter = frame.get_final_iter() - s += arr.dtype.getitem_bool(arr.storage, iter.offset) + s += self.dtype.getitem_bool(self, iter.offset) frame.next(shapelen) return s def getitem_filter(self, space, arr): concr = arr.get_concrete() - if concr.size > self.size: + if concr.get_size() > self.get_size(): raise OperationError(space.w_IndexError, space.wrap("index out of range for array")) - size = self.count_all_true(concr) - res = W_NDimArray(size, [size], self.find_dtype()) - ri = ArrayIterator(size) + size = concr.count_all_true() + res = W_NDimArray([size], self.find_dtype()) + ri = res.create_iter() shapelen = len(self.shape) argi = concr.create_iter() sig = self.find_sig() @@ -380,7 +405,7 @@ filter_driver.jit_merge_point(concr=concr, argi=argi, ri=ri, frame=frame, v=v, res=res, sig=sig, shapelen=shapelen, self=self) - if concr.dtype.getitem_bool(concr.storage, argi.offset): + if concr.dtype.getitem_bool(concr, argi.offset): v = sig.eval(frame, self) res.setitem(ri.offset, v) ri = ri.next(1) @@ -390,23 +415,6 @@ frame.next(shapelen) return res - def setitem_filter(self, space, idx, val): - size = self.count_all_true(idx) - arr = SliceArray([size], self.dtype, self, val) - sig = arr.find_sig() - shapelen = len(self.shape) - frame = sig.create_frame(arr) - idxi = idx.create_iter() - while not frame.done(): - filter_set_driver.jit_merge_point(idx=idx, idxi=idxi, sig=sig, - frame=frame, arr=arr, - shapelen=shapelen) - if idx.dtype.getitem_bool(idx.storage, idxi.offset): - sig.eval(frame, arr) - frame.next_from_second(1) - 
frame.next_first(shapelen) - idxi = idxi.next(shapelen) - def descr_getitem(self, space, w_idx): if (isinstance(w_idx, BaseArray) and w_idx.shape == self.shape and w_idx.find_dtype().is_bool_type()): @@ -416,7 +424,24 @@ item = concrete._index_of_single_item(space, w_idx) return concrete.getitem(item) chunks = self._prepare_slice_args(space, w_idx) - return self.create_slice(chunks) + return chunks.apply(self) + + def setitem_filter(self, space, idx, val): + size = idx.count_all_true() + arr = SliceArray([size], self.dtype, self, val) + sig = arr.find_sig() + shapelen = len(self.shape) + frame = sig.create_frame(arr) + idxi = idx.create_iter() + while not frame.done(): + filter_set_driver.jit_merge_point(idx=idx, idxi=idxi, sig=sig, + frame=frame, arr=arr, + shapelen=shapelen) + if idx.dtype.getitem_bool(idx, idxi.offset): + sig.eval(frame, arr) + frame.next_from_second(1) + frame.next_first(shapelen) + idxi = idxi.next(shapelen) def descr_setitem(self, space, w_idx, w_value): self.invalidated() @@ -434,26 +459,9 @@ if not isinstance(w_value, BaseArray): w_value = convert_to_array(space, w_value) chunks = self._prepare_slice_args(space, w_idx) - view = self.create_slice(chunks).get_concrete() + view = chunks.apply(self).get_concrete() view.setslice(space, w_value) - @jit.unroll_safe - def create_slice(self, chunks): - shape = [] - i = -1 - for i, chunk in enumerate(chunks): - chunk.extend_shape(shape) - s = i + 1 - assert s >= 0 - shape += self.shape[s:] - if not isinstance(self, ConcreteArray): - return VirtualSlice(self, chunks, shape) - r = calculate_slice_strides(self.shape, self.start, self.strides, - self.backstrides, chunks) - _, start, strides, backstrides = r - return W_NDimSlice(start, strides[:], backstrides[:], - shape[:], self) - def descr_reshape(self, space, args_w): """reshape(...) 
a.reshape(shape) @@ -470,13 +478,16 @@ w_shape = args_w[0] else: w_shape = space.newtuple(args_w) - new_shape = get_shape_from_iterable(space, self.size, w_shape) + new_shape = get_shape_from_iterable(space, support.product(self.shape), + w_shape) return self.reshape(space, new_shape) def reshape(self, space, new_shape): concrete = self.get_concrete() # Since we got to here, prod(new_shape) == self.size - new_strides = calc_new_strides(new_shape, concrete.shape, + new_strides = None + if self.size > 0: + new_strides = calc_new_strides(new_shape, concrete.shape, concrete.strides, concrete.order) if new_strides: # We can create a view, strides somehow match up. @@ -506,7 +517,7 @@ def descr_mean(self, space, w_axis=None): if space.is_w(w_axis, space.w_None): w_axis = space.wrap(-1) - w_denom = space.wrap(self.size) + w_denom = space.wrap(support.product(self.shape)) else: dim = space.int_w(w_axis) w_denom = space.wrap(self.shape[dim]) @@ -525,7 +536,7 @@ concr.fill(space, w_value) def descr_nonzero(self, space): - if self.size > 1: + if self.get_size() > 1: raise OperationError(space.w_ValueError, space.wrap( "The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()")) concr = self.get_concrete_or_scalar() @@ -604,8 +615,7 @@ space.wrap("axis unsupported for take")) index_i = index.create_iter() res_shape = index.shape - size = support.product(res_shape) - res = W_NDimArray(size, res_shape[:], concr.dtype, concr.order) + res = W_NDimArray(res_shape[:], concr.dtype, concr.order) res_i = res.create_iter() shapelen = len(index.shape) sig = concr.find_sig() @@ -644,6 +654,11 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "non-int arg not supported")) + def descr_tostring(self, space): + ra = ToStringArray(self) + loop.compute(ra) + return space.wrap(ra.s.build()) + def compute_first_step(self, sig, frame): pass @@ -665,8 +680,7 @@ """ Intermediate class representing a literal. 
""" - size = 1 - _attrs_ = ["dtype", "value", "shape"] + _attrs_ = ["dtype", "value", "shape", "size"] def __init__(self, dtype, value): self.shape = [] @@ -674,6 +688,7 @@ self.dtype = dtype assert isinstance(value, interp_boxes.W_GenericBox) self.value = value + self.size = dtype.get_size() def find_dtype(self): return self.dtype @@ -691,8 +706,7 @@ return self def reshape(self, space, new_shape): - size = support.product(new_shape) - res = W_NDimArray(size, new_shape, self.dtype, 'C') + res = W_NDimArray(new_shape, self.dtype, 'C') res.setitem(0, self.value) return res @@ -705,6 +719,7 @@ self.forced_result = None self.res_dtype = res_dtype self.name = name + self.size = support.product(self.shape) * res_dtype.get_size() def _del_sources(self): # Function for deleting references to source arrays, @@ -712,7 +727,7 @@ raise NotImplementedError def compute(self): - ra = ResultArray(self, self.size, self.shape, self.res_dtype) + ra = ResultArray(self, self.shape, self.res_dtype) loop.compute(ra) return ra.left @@ -740,7 +755,6 @@ def __init__(self, child, chunks, shape): self.child = child self.chunks = chunks - self.size = support.product(shape) VirtualArray.__init__(self, 'slice', shape, child.find_dtype()) def create_sig(self): @@ -752,7 +766,7 @@ def force_if_needed(self): if self.forced_result is None: concr = self.child.get_concrete() - self.forced_result = concr.create_slice(self.chunks) + self.forced_result = self.chunks.apply(concr) def _del_sources(self): self.child = None @@ -779,15 +793,12 @@ """ Intermediate class for performing binary operations. 
""" - _immutable_fields_ = ['left', 'right'] - def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, left, right): VirtualArray.__init__(self, name, shape, res_dtype) self.ufunc = ufunc self.left = left self.right = right self.calc_dtype = calc_dtype - self.size = support.product(self.shape) def _del_sources(self): self.left = None @@ -815,15 +826,30 @@ self.left.create_sig(), self.right.create_sig()) class ResultArray(Call2): - def __init__(self, child, size, shape, dtype, res=None, order='C'): + def __init__(self, child, shape, dtype, res=None, order='C'): if res is None: - res = W_NDimArray(size, shape, dtype, order) + res = W_NDimArray(shape, dtype, order) Call2.__init__(self, None, 'assign', shape, dtype, dtype, res, child) def create_sig(self): return signature.ResultSignature(self.res_dtype, self.left.create_sig(), self.right.create_sig()) +class ToStringArray(Call1): + def __init__(self, child): + dtype = child.find_dtype() + self.item_size = dtype.itemtype.get_element_size() + self.s = StringBuilder(child.size * self.item_size) + Call1.__init__(self, None, 'tostring', child.shape, dtype, dtype, + child) + self.res = W_NDimArray([1], dtype, 'C') + self.res_casted = rffi.cast(rffi.CArrayPtr(lltype.Char), + self.res.storage) + + def create_sig(self): + return signature.ToStringSignature(self.calc_dtype, + self.values.create_sig()) + def done_if_true(dtype, val): return dtype.itemtype.bool(val) @@ -856,8 +882,6 @@ self.right.create_sig(), done_func) class AxisReduce(Call2): - _immutable_fields_ = ['left', 'right'] - def __init__(self, ufunc, name, identity, shape, dtype, left, right, dim): Call2.__init__(self, ufunc, name, shape, dtype, dtype, left, right) @@ -897,13 +921,13 @@ """ _immutable_fields_ = ['storage'] - def __init__(self, size, shape, dtype, order='C', parent=None): - self.size = size + def __init__(self, shape, dtype, order='C', parent=None): self.parent = parent + self.size = support.product(shape) * dtype.get_size() if parent is not 
None: self.storage = parent.storage else: - self.storage = dtype.malloc(size) + self.storage = dtype.itemtype.malloc(self.size) self.order = order self.dtype = dtype From pullrequests-noreply at bitbucket.org Thu Mar 22 16:36:05 2012 From: pullrequests-noreply at bitbucket.org (Michael Blume) Date: Thu, 22 Mar 2012 15:36:05 -0000 Subject: [pypy-commit] [pypy/pypy] add param ndmin to numpypy.array (pull request #65) In-Reply-To: <8550cc4c3d7c8255184469f6db4be5e8@bitbucket.org> References: <8550cc4c3d7c8255184469f6db4be5e8@bitbucket.org> Message-ID: <20120322153605.27130.24030@bitbucket13.managed.contegix.com> New comment on pull request: https://bitbucket.org/pypy/pypy/pull-request/65/add-param-ndmin-to-numpypyarray#comment-4256 Michael Blume (MichaelBlume) said: Can't seem to get NoneNotWrapped to work -- even if I set the default value of w_ndmin to NoneNotWrapped it still winds up being None sometimes inside the function. -- This is a pull request comment notification from bitbucket.org. You are receiving this either because you are participating in a pull request, or you are following it. From noreply at buildbot.pypy.org Thu Mar 22 16:41:16 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Mar 2012 16:41:16 +0100 (CET) Subject: [pypy-commit] pypy default: Update "Remove the GIL". The rest seem still up-to-date. Message-ID: <20120322154116.EA76182438@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r53903:b5f7f04d3b6d Date: 2012-03-22 16:40 +0100 http://bitbucket.org/pypy/pypy/changeset/b5f7f04d3b6d/ Log: Update "Remove the GIL". The rest seem still up-to-date. diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -103,21 +103,13 @@ * A concurrent garbage collector (a lot of work) -Remove the GIL --------------- +STM, a.k.a. "remove the GIL" +---------------------------- -This is a major task that requires lots of thinking. 
However, few subprojects -can be potentially specified, unless a better plan can be thought out: +Removing the GIL --- or more precisely, a GIL-less thread-less solution --- +is `now work in progress.`__ Contributions welcome. -* A thread-aware garbage collector - -* Better RPython primitives for dealing with concurrency - -* JIT passes to remove locks on objects - -* (maybe) implement locking in Python interpreter - -* alternatively, look at Software Transactional Memory +.. __: http://pypy.org/tmdonate.html Introduce new benchmarks ------------------------ From pullrequests-noreply at bitbucket.org Thu Mar 22 16:57:39 2012 From: pullrequests-noreply at bitbucket.org (Michael Blume) Date: Thu, 22 Mar 2012 15:57:39 -0000 Subject: [pypy-commit] [pypy/pypy] add module-level array_equal to numpypy (pull request #48) In-Reply-To: References: Message-ID: <20120322155739.23182.73968@bitbucket01.managed.contegix.com> Pull request #48 has been updated by Michael Blume to include new changes. https://bitbucket.org/pypy/pypy/pull-request/48/add-module-level-array_equal-to-numpypy Title: add module-level array_equal to numpypy Creator: Michael Blume Updated list of changes: 723f22fc5b96 by Michael Blume: "move array_equal tests to test_lib_pypy, as requested" fe9dc13f6b16 by Michael Blume: "add module-level asarray and array_equal" e48874635097 by Michael Blume: "add some array_equal tests" -- This is an issue notification from bitbucket.org. You are receiving this either because you are the participating in a pull request, or you are following it. 
From pullrequests-noreply at bitbucket.org Thu Mar 22 16:58:43 2012 From: pullrequests-noreply at bitbucket.org (Michael Blume) Date: Thu, 22 Mar 2012 15:58:43 -0000 Subject: [pypy-commit] [pypy/pypy] add module-level array_equal to numpypy (pull request #48) In-Reply-To: References: Message-ID: <20120322155843.8577.68344@bitbucket13.managed.contegix.com> New comment on pull request: https://bitbucket.org/pypy/pypy/pull-request/48/add-module-level-array_equal-to-numpypy#comment-4257 Michael Blume (MichaelBlume) said: Done =) -- This is a pull request comment notification from bitbucket.org. You are receiving this either because you are participating in a pull request, or you are following it. From noreply at buildbot.pypy.org Thu Mar 22 17:08:23 2012 From: noreply at buildbot.pypy.org (MichaelBlume) Date: Thu, 22 Mar 2012 17:08:23 +0100 (CET) Subject: [pypy-commit] pypy array_equal: add some array_equal tests Message-ID: <20120322160823.AA3E782438@wyvern.cs.uni-duesseldorf.de> Author: Mike Blume Branch: array_equal Changeset: r53904:e48874635097 Date: 2012-03-12 23:17 -0700 http://bitbucket.org/pypy/pypy/changeset/e48874635097/ Log: add some array_equal tests diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -203,6 +203,44 @@ assert a.shape == (3,) assert a.dtype is dtype(int) + def test_equal(self): + from _numpypy import array + from numpypy import array_equal + + a = [1, 2, 3] + b = [1, 2, 3] + + assert array_equal(a, b) + assert array_equal(a, array(b)) + assert array_equal(array(a), b) + assert array_equal(array(a), array(b)) + + def test_not_equal(self): + from _numpypy import array + from numpypy import array_equal + + a = [1, 2, 3] + b = [1, 2, 4] + + assert not array_equal(a, b) + assert not array_equal(a, array(b)) + assert not array_equal(array(a), b) + assert not array_equal(array(a), array(b)) + 
+ def test_mismatched_shape(self): + from _numpypy import array + from numpypy import array_equal + + a = [1, 2, 3] + b = [[1, 2, 3], [1, 2, 3]] + + assert not array_equal(a, b) + assert not array_equal(a, array(b)) + assert not array_equal(array(a), b) + assert not array_equal(array(a), array(b)) + + + def test_type(self): from _numpypy import array ar = array(range(5)) From noreply at buildbot.pypy.org Thu Mar 22 17:08:24 2012 From: noreply at buildbot.pypy.org (MichaelBlume) Date: Thu, 22 Mar 2012 17:08:24 +0100 (CET) Subject: [pypy-commit] pypy array_equal: add module-level asarray and array_equal Message-ID: <20120322160824.E378982438@wyvern.cs.uni-duesseldorf.de> Author: Mike Blume Branch: array_equal Changeset: r53905:fe9dc13f6b16 Date: 2012-03-12 15:39 -0700 http://bitbucket.org/pypy/pypy/changeset/fe9dc13f6b16/ Log: add module-level asarray and array_equal diff --git a/lib_pypy/numpypy/core/numeric.py b/lib_pypy/numpypy/core/numeric.py --- a/lib_pypy/numpypy/core/numeric.py +++ b/lib_pypy/numpypy/core/numeric.py @@ -306,6 +306,125 @@ else: return multiarray.set_string_function(f, repr) +def array_equal(a1, a2): + """ + True if two arrays have the same shape and elements, False otherwise. + + Parameters + ---------- + a1, a2 : array_like + Input arrays. + + Returns + ------- + b : bool + Returns True if the arrays are equal. + + See Also + -------- + allclose: Returns True if two arrays are element-wise equal within a + tolerance. + array_equiv: Returns True if input arrays are shape consistent and all + elements equal. 
+ + Examples + -------- + >>> np.array_equal([1, 2], [1, 2]) + True + >>> np.array_equal(np.array([1, 2]), np.array([1, 2])) + True + >>> np.array_equal([1, 2], [1, 2, 3]) + False + >>> np.array_equal([1, 2], [1, 4]) + False + + """ + try: + a1, a2 = asarray(a1), asarray(a2) + except: + return False + if a1.shape != a2.shape: + return False + return bool((a1 == a2).all()) + +def asarray(a, dtype=None, order=None, maskna=None, ownmaskna=False): + """ + Convert the input to an array. + + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to an array. This + includes lists, lists of tuples, tuples, tuples of tuples, tuples + of lists and ndarrays. + dtype : data-type, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F'}, optional + Whether to use row-major ('C') or column-major ('F' for FORTRAN) + memory representation. Defaults to 'C'. + maskna : bool or None, optional + If this is set to True, it forces the array to have an NA mask. + If this is set to False, it forces the array to not have an NA + mask. + ownmaskna : bool, optional + If this is set to True, forces the array to have a mask which + it owns. + + Returns + ------- + out : ndarray + Array interpretation of `a`. No copy is performed if the input + is already an ndarray. If `a` is a subclass of ndarray, a base + class ndarray is returned. + + See Also + -------- + asanyarray : Similar function which passes through subclasses. + ascontiguousarray : Convert input to a contiguous array. + asfarray : Convert input to a floating point ndarray. + asfortranarray : Convert input to an ndarray with column-major + memory order. + asarray_chkfinite : Similar function which checks input for NaNs and Infs. + fromiter : Create an array from an iterator. + fromfunction : Construct an array by executing a function on grid + positions. 
+ + Examples + -------- + Convert a list into an array: + + >>> a = [1, 2] + >>> np.asarray(a) + array([1, 2]) + + Existing arrays are not copied: + + >>> a = np.array([1, 2]) + >>> np.asarray(a) is a + True + + If `dtype` is set, array is copied only if dtype does not match: + + >>> a = np.array([1, 2], dtype=np.float32) + >>> np.asarray(a, dtype=np.float32) is a + True + >>> np.asarray(a, dtype=np.float64) is a + False + + Contrary to `asanyarray`, ndarray subclasses are not passed through: + + >>> issubclass(np.matrix, np.ndarray) + True + >>> a = np.matrix([[1, 2]]) + >>> np.asarray(a) is a + False + >>> np.asanyarray(a) is a + True + + """ + return array(a, dtype, copy=False, order=order, + maskna=maskna, ownmaskna=ownmaskna) + set_string_function(array_str, 0) set_string_function(array_repr, 1) @@ -319,4 +438,4 @@ False_ = bool_(False) True_ = bool_(True) e = math.e -pi = math.pi \ No newline at end of file +pi = math.pi From noreply at buildbot.pypy.org Thu Mar 22 17:08:26 2012 From: noreply at buildbot.pypy.org (MichaelBlume) Date: Thu, 22 Mar 2012 17:08:26 +0100 (CET) Subject: [pypy-commit] pypy array_equal: move array_equal tests to test_lib_pypy, as requested Message-ID: <20120322160826.27F2082438@wyvern.cs.uni-duesseldorf.de> Author: Mike Blume Branch: array_equal Changeset: r53906:723f22fc5b96 Date: 2012-03-22 08:49 -0700 http://bitbucket.org/pypy/pypy/changeset/723f22fc5b96/ Log: move array_equal tests to test_lib_pypy, as requested diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -203,44 +203,6 @@ assert a.shape == (3,) assert a.dtype is dtype(int) - def test_equal(self): - from _numpypy import array - from numpypy import array_equal - - a = [1, 2, 3] - b = [1, 2, 3] - - assert array_equal(a, b) - assert array_equal(a, array(b)) - assert array_equal(array(a), b) - assert array_equal(array(a), 
array(b)) - - def test_not_equal(self): - from _numpypy import array - from numpypy import array_equal - - a = [1, 2, 3] - b = [1, 2, 4] - - assert not array_equal(a, b) - assert not array_equal(a, array(b)) - assert not array_equal(array(a), b) - assert not array_equal(array(a), array(b)) - - def test_mismatched_shape(self): - from _numpypy import array - from numpypy import array_equal - - a = [1, 2, 3] - b = [[1, 2, 3], [1, 2, 3]] - - assert not array_equal(a, b) - assert not array_equal(a, array(b)) - assert not array_equal(array(a), b) - assert not array_equal(array(a), array(b)) - - - def test_type(self): from _numpypy import array ar = array(range(5)) diff --git a/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py b/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py --- a/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py +++ b/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py @@ -142,3 +142,39 @@ assert str(b) == "[7 8 9]" b = a[2:1, ] assert str(b) == "[]" + + def test_equal(self): + from _numpypy import array + from numpypy import array_equal + + a = [1, 2, 3] + b = [1, 2, 3] + + assert array_equal(a, b) + assert array_equal(a, array(b)) + assert array_equal(array(a), b) + assert array_equal(array(a), array(b)) + + def test_not_equal(self): + from _numpypy import array + from numpypy import array_equal + + a = [1, 2, 3] + b = [1, 2, 4] + + assert not array_equal(a, b) + assert not array_equal(a, array(b)) + assert not array_equal(array(a), b) + assert not array_equal(array(a), array(b)) + + def test_mismatched_shape(self): + from _numpypy import array + from numpypy import array_equal + + a = [1, 2, 3] + b = [[1, 2, 3], [1, 2, 3]] + + assert not array_equal(a, b) + assert not array_equal(a, array(b)) + assert not array_equal(array(a), b) + assert not array_equal(array(a), array(b)) From noreply at buildbot.pypy.org Thu Mar 22 17:31:57 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 22 Mar 2012 17:31:57 +0100 (CET) Subject: 
[pypy-commit] pypy default: array.py is no longer in lib_pypy, update the test Message-ID: <20120322163157.83B0582438@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r53908:a36850c80a60 Date: 2012-03-22 16:15 +0100 http://bitbucket.org/pypy/pypy/changeset/a36850c80a60/ Log: array.py is no longer in lib_pypy, update the test diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py --- a/pypy/interpreter/test/test_objspace.py +++ b/pypy/interpreter/test/test_objspace.py @@ -312,8 +312,8 @@ mods = space.get_builtinmodule_to_install() assert '__pypy__' in mods # real builtin - assert 'array' not in mods # in lib_pypy - assert 'faked+array' not in mods # in lib_pypy + assert '_functools' not in mods # in lib_pypy + assert 'faked+_functools' not in mods # in lib_pypy assert 'this_doesnt_exist' not in mods # not in lib_pypy assert 'faked+this_doesnt_exist' in mods # not in lib_pypy, but in # ALL_BUILTIN_MODULES From noreply at buildbot.pypy.org Thu Mar 22 17:31:58 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 22 Mar 2012 17:31:58 +0100 (CET) Subject: [pypy-commit] pypy default: these tests require strut because they use pickle Message-ID: <20120322163158.B585E82438@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r53909:5c49aad570ca Date: 2012-03-22 16:19 +0100 http://bitbucket.org/pypy/pypy/changeset/5c49aad570ca/ Log: these tests require strut because they use pickle diff --git a/pypy/interpreter/test/test_zzpickle_and_slow.py b/pypy/interpreter/test/test_zzpickle_and_slow.py --- a/pypy/interpreter/test/test_zzpickle_and_slow.py +++ b/pypy/interpreter/test/test_zzpickle_and_slow.py @@ -75,6 +75,7 @@ class AppTestInterpObjectPickling: pytestmark = py.test.mark.skipif("config.option.runappdirect") def setup_class(cls): + cls.space = gettestobjspace(usemodules=['struct']) _attach_helpers(cls.space) def teardown_class(cls): diff --git a/pypy/module/_ast/test/test_ast.py 
b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -1,9 +1,10 @@ import py - +from pypy.conftest import gettestobjspace class AppTestAST: def setup_class(cls): + cls.space = gettestobjspace(usemodules=['struct']) cls.w_ast = cls.space.appexec([], """(): import _ast return _ast""") From noreply at buildbot.pypy.org Thu Mar 22 17:31:59 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 22 Mar 2012 17:31:59 +0100 (CET) Subject: [pypy-commit] pypy default: these tests require struct as well Message-ID: <20120322163159.E7E0982438@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r53910:b7114e00c645 Date: 2012-03-22 16:24 +0100 http://bitbucket.org/pypy/pypy/changeset/b7114e00c645/ Log: these tests require struct as well diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -4,7 +4,7 @@ class AppTestCodecs: def setup_class(cls): - space = gettestobjspace(usemodules=('unicodedata',)) + space = gettestobjspace(usemodules=('unicodedata', 'struct')) cls.space = space def test_register_noncallable(self): diff --git a/pypy/module/_continuation/test/test_zpickle.py b/pypy/module/_continuation/test/test_zpickle.py --- a/pypy/module/_continuation/test/test_zpickle.py +++ b/pypy/module/_continuation/test/test_zpickle.py @@ -106,7 +106,7 @@ version = 0 def setup_class(cls): - cls.space = gettestobjspace(usemodules=('_continuation',), + cls.space = gettestobjspace(usemodules=('_continuation', 'struct'), CALL_METHOD=True) cls.space.appexec([], """(): global continulet, A, __name__ From noreply at buildbot.pypy.org Thu Mar 22 17:32:01 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 22 Mar 2012 17:32:01 +0100 (CET) Subject: [pypy-commit] pypy default: struct and array are needed for these tests Message-ID: 
<20120322163201.396BD82438@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r53911:dc87a37e0f7f Date: 2012-03-22 16:33 +0100 http://bitbucket.org/pypy/pypy/changeset/dc87a37e0f7f/ Log: struct and array are needed for these tests diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -92,7 +92,8 @@ class AppTestSocketConnection(BaseConnectionTest): def setup_class(cls): - space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal')) + space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal', + 'struct', 'array')) cls.space = space cls.w_connections = space.newlist([]) From noreply at buildbot.pypy.org Thu Mar 22 17:32:02 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 22 Mar 2012 17:32:02 +0100 (CET) Subject: [pypy-commit] pypy default: add struct to even more tests Message-ID: <20120322163202.6E97182438@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r53912:3ba21b027543 Date: 2012-03-22 16:49 +0100 http://bitbucket.org/pypy/pypy/changeset/3ba21b027543/ Log: add struct to even more tests diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -6,7 +6,7 @@ from pypy.rpython.lltypesystem import lltype, rffi def setup_module(mod): - mod.space = gettestobjspace(usemodules=['_socket', 'array']) + mod.space = gettestobjspace(usemodules=['_socket', 'array', 'struct']) global socket import socket mod.w_socket = space.appexec([], "(): import _socket as m; return m") diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -90,7 +90,7 @@ class 
AppTestConnectedSSL: def setup_class(cls): - space = gettestobjspace(usemodules=('_ssl', '_socket')) + space = gettestobjspace(usemodules=('_ssl', '_socket', 'struct')) cls.space = space def setup_method(self, method): @@ -179,7 +179,7 @@ # to exercise the poll() calls def setup_class(cls): - space = gettestobjspace(usemodules=('_ssl', '_socket')) + space = gettestobjspace(usemodules=('_ssl', '_socket', 'struct')) cls.space = space cls.space.appexec([], """(): import socket; socket.setdefaulttimeout(1) From noreply at buildbot.pypy.org Thu Mar 22 17:32:03 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 22 Mar 2012 17:32:03 +0100 (CET) Subject: [pypy-commit] pypy default: use the array modules; and s/binascii/_functools, because binascii is no longer in lib_pypy Message-ID: <20120322163203.A2C8182438@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r53913:f302d6955c86 Date: 2012-03-22 17:03 +0100 http://bitbucket.org/pypy/pypy/changeset/f302d6955c86/ Log: use the array modules; and s/binascii/_functools, because binascii is no longer in lib_pypy diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -166,7 +166,7 @@ class AppTestCpythonExtensionBase(LeakCheckingTest): def setup_class(cls): - cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi']) + cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array']) cls.space.getbuiltinmodule("cpyext") from pypy.module.imp.importing import importhook importhook(cls.space, "os") # warm up reference counts diff --git a/pypy/module/cpyext/test/test_import.py b/pypy/module/cpyext/test/test_import.py --- a/pypy/module/cpyext/test/test_import.py +++ b/pypy/module/cpyext/test/test_import.py @@ -19,7 +19,7 @@ space.wrap('__name__'))) == 'foobar' def test_getmoduledict(self, space, api): - testmod = "binascii" + testmod = "_functools" 
w_pre_dict = api.PyImport_GetModuleDict() assert not space.is_true(space.contains(w_pre_dict, space.wrap(testmod))) From noreply at buildbot.pypy.org Thu Mar 22 17:32:04 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 22 Mar 2012 17:32:04 +0100 (CET) Subject: [pypy-commit] pypy default: we no longer have lib_pypy/binascii.py to get the crc32_tab from. Fish it from module/binascii instead, even if it's suboptimal because rlib should not import from pypy.module :-( Message-ID: <20120322163204.D38B082438@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r53914:82854a176b6c Date: 2012-03-22 17:14 +0100 http://bitbucket.org/pypy/pypy/changeset/82854a176b6c/ Log: we no longer have lib_pypy/binascii.py to get the crc32_tab from. Fish it from module/binascii instead, even if it's suboptimal because rlib should not import from pypy.module :-( diff --git a/pypy/rlib/rzipfile.py b/pypy/rlib/rzipfile.py --- a/pypy/rlib/rzipfile.py +++ b/pypy/rlib/rzipfile.py @@ -12,8 +12,7 @@ rzlib = None # XXX hack to get crc32 to work -from pypy.tool.lib_pypy import import_from_lib_pypy -crc_32_tab = import_from_lib_pypy('binascii').crc_32_tab +from pypy.module.binascii.interp_crc32 import crc_32_tab rcrc_32_tab = [r_uint(i) for i in crc_32_tab] From noreply at buildbot.pypy.org Thu Mar 22 17:32:06 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 22 Mar 2012 17:32:06 +0100 (CET) Subject: [pypy-commit] pypy default: add struct here and there Message-ID: <20120322163206.1429A82438@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r53915:c4cd98ba27c4 Date: 2012-03-22 17:19 +0100 http://bitbucket.org/pypy/pypy/changeset/c4cd98ba27c4/ Log: add struct here and there diff --git a/pypy/module/fcntl/test/test_fcntl.py b/pypy/module/fcntl/test/test_fcntl.py --- a/pypy/module/fcntl/test/test_fcntl.py +++ b/pypy/module/fcntl/test/test_fcntl.py @@ -13,7 +13,7 @@ class AppTestFcntl: def setup_class(cls): - space = 
gettestobjspace(usemodules=('fcntl', 'array')) + space = gettestobjspace(usemodules=('fcntl', 'array', 'struct')) cls.space = space tmpprefix = str(udir.ensure('test_fcntl', dir=1).join('tmp_')) cls.w_tmp = space.wrap(tmpprefix) diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py --- a/pypy/module/itertools/test/test_itertools.py +++ b/pypy/module/itertools/test/test_itertools.py @@ -891,7 +891,7 @@ class AppTestItertools27: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['itertools']) + cls.space = gettestobjspace(usemodules=['itertools', 'struct']) if cls.space.is_true(cls.space.appexec([], """(): import sys; return sys.version_info < (2, 7) """)): diff --git a/pypy/module/math/test/test_math.py b/pypy/module/math/test/test_math.py --- a/pypy/module/math/test/test_math.py +++ b/pypy/module/math/test/test_math.py @@ -6,7 +6,7 @@ class AppTestMath: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['math']) + cls.space = gettestobjspace(usemodules=['math', 'struct']) cls.w_cases = cls.space.wrap(test_direct.MathTests.TESTCASES) cls.w_consistent_host = cls.space.wrap(test_direct.consistent_host) diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -197,7 +197,6 @@ def test_signbit(self): from _numpypy import signbit, copysign - import struct assert (signbit([0, 0.0, 1, 1.0, float('inf'), float('nan')]) == [False, False, False, False, False, False]).all() From noreply at buildbot.pypy.org Thu Mar 22 17:32:07 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 22 Mar 2012 17:32:07 +0100 (CET) Subject: [pypy-commit] pypy default: lib_pypy/binascii.py has gone Message-ID: <20120322163207.51F2982438@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r53916:002ee51fa02f Date: 2012-03-22 17:22 +0100 
http://bitbucket.org/pypy/pypy/changeset/002ee51fa02f/ Log: lib_pypy/binascii.py has gone diff --git a/pypy/module/test_lib_pypy/test_binascii.py b/pypy/module/test_lib_pypy/test_binascii.py deleted file mode 100644 --- a/pypy/module/test_lib_pypy/test_binascii.py +++ /dev/null @@ -1,8 +0,0 @@ - -""" Some more binascii.py tests -""" - -class AppTestBinAscii: - def test_incorrect_padding(self): - import binascii - raises(binascii.Error, "'x'.decode('base64')") From noreply at buildbot.pypy.org Thu Mar 22 17:32:08 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 22 Mar 2012 17:32:08 +0100 (CET) Subject: [pypy-commit] pypy default: binascii is no longer there, use _functools instead Message-ID: <20120322163208.89EAE82438@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r53917:903a3a64b847 Date: 2012-03-22 17:30 +0100 http://bitbucket.org/pypy/pypy/changeset/903a3a64b847/ Log: binascii is no longer there, use _functools instead diff --git a/pypy/tool/test/test_lib_pypy.py b/pypy/tool/test/test_lib_pypy.py --- a/pypy/tool/test/test_lib_pypy.py +++ b/pypy/tool/test/test_lib_pypy.py @@ -11,7 +11,7 @@ assert lib_pypy.LIB_PYTHON_MODIFIED.check(dir=1) def test_import_from_lib_pypy(): - binascii = lib_pypy.import_from_lib_pypy('binascii') - assert type(binascii) is type(lib_pypy) - assert binascii.__name__ == 'lib_pypy.binascii' - assert hasattr(binascii, 'crc_32_tab') + _functools = lib_pypy.import_from_lib_pypy('_functools') + assert type(_functools) is type(lib_pypy) + assert _functools.__name__ == 'lib_pypy._functools' + assert hasattr(_functools, 'partial') From noreply at buildbot.pypy.org Thu Mar 22 17:32:09 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 22 Mar 2012 17:32:09 +0100 (CET) Subject: [pypy-commit] pypy default: add more struct everywhere Message-ID: <20120322163209.BE93382438@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r53918:b1dc88c9996e Date: 2012-03-22 17:30 +0100 
http://bitbucket.org/pypy/pypy/changeset/b1dc88c9996e/ Log: add more struct everywhere diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -14,10 +14,10 @@ def setup_module(mod): if os.name != 'nt': - mod.space = gettestobjspace(usemodules=['posix', 'fcntl']) + mod.space = gettestobjspace(usemodules=['posix', 'fcntl', 'struct']) else: # On windows, os.popen uses the subprocess module - mod.space = gettestobjspace(usemodules=['posix', '_rawffi', 'thread']) + mod.space = gettestobjspace(usemodules=['posix', '_rawffi', 'thread', 'struct']) mod.path = udir.join('posixtestfile.txt') mod.path.write("this is a test") mod.path2 = udir.join('test_posix2-') diff --git a/pypy/module/rctime/test/test_rctime.py b/pypy/module/rctime/test/test_rctime.py --- a/pypy/module/rctime/test/test_rctime.py +++ b/pypy/module/rctime/test/test_rctime.py @@ -3,7 +3,7 @@ class AppTestRCTime: def setup_class(cls): - space = gettestobjspace(usemodules=('rctime',)) + space = gettestobjspace(usemodules=('rctime', 'struct')) cls.space = space def test_attributes(self): diff --git a/pypy/module/zipimport/test/test_undocumented.py b/pypy/module/zipimport/test/test_undocumented.py --- a/pypy/module/zipimport/test/test_undocumented.py +++ b/pypy/module/zipimport/test/test_undocumented.py @@ -19,7 +19,7 @@ class AppTestZipImport: def setup_class(cls): - space = gettestobjspace(usemodules=['zipimport', 'rctime']) + space = gettestobjspace(usemodules=['zipimport', 'rctime', 'struct']) cls.space = space cls.w_created_paths = space.wrap(created_paths) diff --git a/pypy/module/zipimport/test/test_zipimport.py b/pypy/module/zipimport/test/test_zipimport.py --- a/pypy/module/zipimport/test/test_zipimport.py +++ b/pypy/module/zipimport/test/test_zipimport.py @@ -47,9 +47,9 @@ """).compile() if cls.compression == ZIP_DEFLATED: - space = gettestobjspace(usemodules=['zipimport', 'zlib', 
'rctime']) + space = gettestobjspace(usemodules=['zipimport', 'zlib', 'rctime', 'struct']) else: - space = gettestobjspace(usemodules=['zipimport', 'rctime']) + space = gettestobjspace(usemodules=['zipimport', 'rctime', 'struct']) cls.space = space tmpdir = udir.ensure('zipimport_%s' % cls.__name__, dir=1) From noreply at buildbot.pypy.org Thu Mar 22 17:32:11 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 22 Mar 2012 17:32:11 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20120322163211.0040282438@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r53919:7879ba98d7dd Date: 2012-03-22 17:31 +0100 http://bitbucket.org/pypy/pypy/changeset/7879ba98d7dd/ Log: merge heads diff --git a/lib_pypy/numpypy/core/numeric.py b/lib_pypy/numpypy/core/numeric.py --- a/lib_pypy/numpypy/core/numeric.py +++ b/lib_pypy/numpypy/core/numeric.py @@ -306,6 +306,125 @@ else: return multiarray.set_string_function(f, repr) +def array_equal(a1, a2): + """ + True if two arrays have the same shape and elements, False otherwise. + + Parameters + ---------- + a1, a2 : array_like + Input arrays. + + Returns + ------- + b : bool + Returns True if the arrays are equal. + + See Also + -------- + allclose: Returns True if two arrays are element-wise equal within a + tolerance. + array_equiv: Returns True if input arrays are shape consistent and all + elements equal. + + Examples + -------- + >>> np.array_equal([1, 2], [1, 2]) + True + >>> np.array_equal(np.array([1, 2]), np.array([1, 2])) + True + >>> np.array_equal([1, 2], [1, 2, 3]) + False + >>> np.array_equal([1, 2], [1, 4]) + False + + """ + try: + a1, a2 = asarray(a1), asarray(a2) + except: + return False + if a1.shape != a2.shape: + return False + return bool((a1 == a2).all()) + +def asarray(a, dtype=None, order=None, maskna=None, ownmaskna=False): + """ + Convert the input to an array. 
+ + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to an array. This + includes lists, lists of tuples, tuples, tuples of tuples, tuples + of lists and ndarrays. + dtype : data-type, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F'}, optional + Whether to use row-major ('C') or column-major ('F' for FORTRAN) + memory representation. Defaults to 'C'. + maskna : bool or None, optional + If this is set to True, it forces the array to have an NA mask. + If this is set to False, it forces the array to not have an NA + mask. + ownmaskna : bool, optional + If this is set to True, forces the array to have a mask which + it owns. + + Returns + ------- + out : ndarray + Array interpretation of `a`. No copy is performed if the input + is already an ndarray. If `a` is a subclass of ndarray, a base + class ndarray is returned. + + See Also + -------- + asanyarray : Similar function which passes through subclasses. + ascontiguousarray : Convert input to a contiguous array. + asfarray : Convert input to a floating point ndarray. + asfortranarray : Convert input to an ndarray with column-major + memory order. + asarray_chkfinite : Similar function which checks input for NaNs and Infs. + fromiter : Create an array from an iterator. + fromfunction : Construct an array by executing a function on grid + positions. 
+ + Examples + -------- + Convert a list into an array: + + >>> a = [1, 2] + >>> np.asarray(a) + array([1, 2]) + + Existing arrays are not copied: + + >>> a = np.array([1, 2]) + >>> np.asarray(a) is a + True + + If `dtype` is set, array is copied only if dtype does not match: + + >>> a = np.array([1, 2], dtype=np.float32) + >>> np.asarray(a, dtype=np.float32) is a + True + >>> np.asarray(a, dtype=np.float64) is a + False + + Contrary to `asanyarray`, ndarray subclasses are not passed through: + + >>> issubclass(np.matrix, np.ndarray) + True + >>> a = np.matrix([[1, 2]]) + >>> np.asarray(a) is a + False + >>> np.asanyarray(a) is a + True + + """ + return array(a, dtype, copy=False, order=order, + maskna=maskna, ownmaskna=ownmaskna) + set_string_function(array_str, 0) set_string_function(array_repr, 1) diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -103,21 +103,13 @@ * A concurrent garbage collector (a lot of work) -Remove the GIL --------------- +STM, a.k.a. "remove the GIL" +---------------------------- -This is a major task that requires lots of thinking. However, few subprojects -can be potentially specified, unless a better plan can be thought out: +Removing the GIL --- or more precisely, a GIL-less thread-less solution --- +is `now work in progress.`__ Contributions welcome. -* A thread-aware garbage collector - -* Better RPython primitives for dealing with concurrency - -* JIT passes to remove locks on objects - -* (maybe) implement locking in Python interpreter - -* alternatively, look at Software Transactional Memory +.. 
__: http://pypy.org/tmdonate.html Introduce new benchmarks ------------------------ diff --git a/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py b/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py --- a/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py +++ b/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py @@ -142,3 +142,39 @@ assert str(b) == "[7 8 9]" b = a[2:1, ] assert str(b) == "[]" + + def test_equal(self): + from _numpypy import array + from numpypy import array_equal + + a = [1, 2, 3] + b = [1, 2, 3] + + assert array_equal(a, b) + assert array_equal(a, array(b)) + assert array_equal(array(a), b) + assert array_equal(array(a), array(b)) + + def test_not_equal(self): + from _numpypy import array + from numpypy import array_equal + + a = [1, 2, 3] + b = [1, 2, 4] + + assert not array_equal(a, b) + assert not array_equal(a, array(b)) + assert not array_equal(array(a), b) + assert not array_equal(array(a), array(b)) + + def test_mismatched_shape(self): + from _numpypy import array + from numpypy import array_equal + + a = [1, 2, 3] + b = [[1, 2, 3], [1, 2, 3]] + + assert not array_equal(a, b) + assert not array_equal(a, array(b)) + assert not array_equal(array(a), b) + assert not array_equal(array(a), array(b)) From noreply at buildbot.pypy.org Thu Mar 22 17:36:57 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Mar 2012 17:36:57 +0100 (CET) Subject: [pypy-commit] pypy default: Update doc about sandbox. Message-ID: <20120322163657.D4F8582438@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r53920:7cd7f27ea5cc Date: 2012-03-22 17:36 +0100 http://bitbucket.org/pypy/pypy/changeset/7cd7f27ea5cc/ Log: Update doc about sandbox. 
diff --git a/pypy/doc/sandbox.rst b/pypy/doc/sandbox.rst --- a/pypy/doc/sandbox.rst +++ b/pypy/doc/sandbox.rst @@ -82,7 +82,10 @@ In pypy/translator/goal:: - ./translate.py --sandbox targetpypystandalone.py + ./translate.py -O2 --sandbox targetpypystandalone.py + +If you don't have a regular PyPy installed, you should, because it's +faster to translate, but you can also run ``python translate.py`` instead. To run it, use the tools in the pypy/translator/sandbox directory:: From pullrequests-noreply at bitbucket.org Thu Mar 22 17:42:33 2012 From: pullrequests-noreply at bitbucket.org (Maciej Fijalkowski) Date: Thu, 22 Mar 2012 16:42:33 -0000 Subject: [pypy-commit] [pypy/pypy] add param ndmin to numpypy.array (pull request #65) In-Reply-To: <8550cc4c3d7c8255184469f6db4be5e8@bitbucket.org> References: <8550cc4c3d7c8255184469f6db4be5e8@bitbucket.org> Message-ID: <20120322164233.26570.59487@bitbucket13.managed.contegix.com> New comment on pull request: https://bitbucket.org/pypy/pypy/pull-request/65/add-param-ndmin-to-numpypyarray#comment-4258 Maciej Fijalkowski (fijal) said: I doubt you can. Just live with the fact that you can get both :/ -- This is a pull request comment notification from bitbucket.org. You are receiving this either because you are participating in a pull request, or you are following it. 
From noreply at buildbot.pypy.org Thu Mar 22 19:20:12 2012 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 22 Mar 2012 19:20:12 +0100 (CET) Subject: [pypy-commit] pypy default: another one to ignore Message-ID: <20120322182012.DC40982438@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r53921:f18c75ec9e0f Date: 2012-03-22 20:19 +0200 http://bitbucket.org/pypy/pypy/changeset/f18c75ec9e0f/ Log: another one to ignore diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -484,7 +484,7 @@ 'shl', 'shr', 'sal', 'sar', 'rol', 'ror', 'mul', 'imul', 'div', 'idiv', 'bswap', 'bt', 'rdtsc', 'punpck', 'pshufd', 'pcmp', 'pand', 'psllw', 'pslld', 'psllq', - 'paddq', 'pinsr', 'pmul', 'psrl', + 'paddq', 'pinsr', 'pmul', 'psrl', 'vmul', # sign-extending moves should not produce GC pointers 'cbtw', 'cwtl', 'cwtd', 'cltd', 'cltq', 'cqto', # zero-extending moves should not produce GC pointers From pullrequests-noreply at bitbucket.org Thu Mar 22 19:45:41 2012 From: pullrequests-noreply at bitbucket.org (Michael Blume) Date: Thu, 22 Mar 2012 18:45:41 -0000 Subject: [pypy-commit] [pypy/pypy] add param ndmin to numpypy.array (pull request #65) In-Reply-To: <8550cc4c3d7c8255184469f6db4be5e8@bitbucket.org> References: <8550cc4c3d7c8255184469f6db4be5e8@bitbucket.org> Message-ID: <20120322184541.18651.36950@bitbucket13.managed.contegix.com> Pull request #65 has been updated by Michael Blume to include new changes. 
https://bitbucket.org/pypy/pypy/pull-request/65/add-param-ndmin-to-numpypyarray Title: add param ndmin to numpypy.array Creator: Michael Blume Updated list of changes: cbe115c15c1a by Michael Blume: "test the no-op case for ndmin" c22d05cd841d by Michael Blume: "make test pass -- add ndmin param to numpy.array" 67cb9cfe42fe by Michael Blume: "add failing test of ndmin parameter of numpy.array" -- This is an issue notification from bitbucket.org. You are receiving this either because you are the participating in a pull request, or you are following it. From pullrequests-noreply at bitbucket.org Thu Mar 22 20:48:41 2012 From: pullrequests-noreply at bitbucket.org (Michael Blume) Date: Thu, 22 Mar 2012 19:48:41 -0000 Subject: [pypy-commit] [pypy/pypy] add param ndmin to numpypy.array (pull request #65) In-Reply-To: <8550cc4c3d7c8255184469f6db4be5e8@bitbucket.org> References: <8550cc4c3d7c8255184469f6db4be5e8@bitbucket.org> Message-ID: <20120322194841.8661.45948@bitbucket03.managed.contegix.com> New comment on pull request: https://bitbucket.org/pypy/pypy/pull-request/65/add-param-ndmin-to-numpypyarray#comment-4270 Michael Blume (MichaelBlume) said: In that case, can I get a merge? ^_^ -- This is a pull request comment notification from bitbucket.org. You are receiving this either because you are participating in a pull request, or you are following it. 
From noreply at buildbot.pypy.org Thu Mar 22 20:53:58 2012 From: noreply at buildbot.pypy.org (MichaelBlume) Date: Thu, 22 Mar 2012 20:53:58 +0100 (CET) Subject: [pypy-commit] pypy ndmin: add failing test of ndmin parameter of numpy.array Message-ID: <20120322195358.3FB927107FD@wyvern.cs.uni-duesseldorf.de> Author: Mike Blume Branch: ndmin Changeset: r53922:67cb9cfe42fe Date: 2012-03-19 23:41 -0700 http://bitbucket.org/pypy/pypy/changeset/67cb9cfe42fe/ Log: add failing test of ndmin parameter of numpy.array diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -211,6 +211,12 @@ assert a.shape == (3,) assert a.dtype is dtype(int) + def test_ndmin(self): + from _numpypy import array + + arr = array([1], ndmin=3) + assert arr.shape == (1, 1, 1) + def test_type(self): from _numpypy import array ar = array(range(5)) From noreply at buildbot.pypy.org Thu Mar 22 20:53:59 2012 From: noreply at buildbot.pypy.org (MichaelBlume) Date: Thu, 22 Mar 2012 20:53:59 +0100 (CET) Subject: [pypy-commit] pypy ndmin: make test pass -- add ndmin param to numpy.array Message-ID: <20120322195359.DF8C77107FD@wyvern.cs.uni-duesseldorf.de> Author: Mike Blume Branch: ndmin Changeset: r53923:c22d05cd841d Date: 2012-03-19 23:43 -0700 http://bitbucket.org/pypy/pypy/changeset/c22d05cd841d/ Log: make test pass -- add ndmin param to numpy.array diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1125,7 +1125,8 @@ @unwrap_spec(subok=bool, copy=bool, ownmaskna=bool) def array(space, w_item_or_iterable, w_dtype=None, w_order=None, - subok=True, copy=True, w_maskna=None, ownmaskna=False): + subok=True, copy=True, w_maskna=None, ownmaskna=False, + w_ndmin=None): # find scalar if w_maskna is None: w_maskna = 
space.w_None @@ -1170,8 +1171,13 @@ break if dtype is None: dtype = interp_dtype.get_dtype_cache(space).w_float64dtype + shapelen = len(shape) + if w_ndmin is not None and not space.is_w(w_ndmin, space.w_None): + ndmin = space.int_w(w_ndmin) + if ndmin > shapelen: + shape = [1] * (ndmin - shapelen) + shape + shapelen = ndmin arr = W_NDimArray(shape[:], dtype=dtype, order=order) - shapelen = len(shape) arr_iter = arr.create_iter() # XXX we might want to have a jitdriver here for i in range(len(elems_w)): From noreply at buildbot.pypy.org Thu Mar 22 20:54:01 2012 From: noreply at buildbot.pypy.org (MichaelBlume) Date: Thu, 22 Mar 2012 20:54:01 +0100 (CET) Subject: [pypy-commit] pypy ndmin: test the no-op case for ndmin Message-ID: <20120322195401.219F77107FD@wyvern.cs.uni-duesseldorf.de> Author: Mike Blume Branch: ndmin Changeset: r53924:cbe115c15c1a Date: 2012-03-22 18:42 +0000 http://bitbucket.org/pypy/pypy/changeset/cbe115c15c1a/ Log: test the no-op case for ndmin diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -214,6 +214,12 @@ def test_ndmin(self): from _numpypy import array + arr = array([[[1]]], ndmin=1) + assert arr.shape == (1, 1, 1) + + def test_noop_ndmin(self): + from _numpypy import array + arr = array([1], ndmin=3) assert arr.shape == (1, 1, 1) From noreply at buildbot.pypy.org Thu Mar 22 21:07:39 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 22 Mar 2012 21:07:39 +0100 (CET) Subject: [pypy-commit] pypy default: use the array module only in test_arraymodule, else the other tests start failing for obscure reasons Message-ID: <20120322200739.58A8E82438@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r53926:a493491d7aad Date: 2012-03-22 20:57 +0100 http://bitbucket.org/pypy/pypy/changeset/a493491d7aad/ Log: use the array module only in test_arraymodule, else 
the other tests start failing for obscure reasons diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -1,3 +1,4 @@ +from pypy.conftest import gettestobjspace from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase import py @@ -5,6 +6,7 @@ class AppTestArrayModule(AppTestCpythonExtensionBase): enable_leak_checking = False + extra_modules = ['array'] def test_basic(self): module = self.import_module(name='array') diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -165,8 +165,11 @@ return leaking class AppTestCpythonExtensionBase(LeakCheckingTest): + extra_modules = [] + def setup_class(cls): - cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array']) + cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi'] + + cls.extra_modules) cls.space.getbuiltinmodule("cpyext") from pypy.module.imp.importing import importhook importhook(cls.space, "os") # warm up reference counts From noreply at buildbot.pypy.org Thu Mar 22 21:07:40 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 22 Mar 2012 21:07:40 +0100 (CET) Subject: [pypy-commit] pypy default: we need to set continuation=True for these tests to work. No idea how could they work before Message-ID: <20120322200740.D9A6982438@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r53927:4c31c12c5272 Date: 2012-03-22 21:00 +0100 http://bitbucket.org/pypy/pypy/changeset/4c31c12c5272/ Log: we need to set continuation=True for these tests to work. 
No idea how could they work before diff --git a/pypy/module/_continuation/test/test_zpickle.py b/pypy/module/_continuation/test/test_zpickle.py --- a/pypy/module/_continuation/test/test_zpickle.py +++ b/pypy/module/_continuation/test/test_zpickle.py @@ -108,6 +108,7 @@ def setup_class(cls): cls.space = gettestobjspace(usemodules=('_continuation', 'struct'), CALL_METHOD=True) + cls.space.config.translation.continuation = True cls.space.appexec([], """(): global continulet, A, __name__ From noreply at buildbot.pypy.org Thu Mar 22 21:07:42 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 22 Mar 2012 21:07:42 +0100 (CET) Subject: [pypy-commit] pypy default: hopefully the last test for which we need to use the struct module Message-ID: <20120322200742.1C43D82438@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r53928:731a24c3d239 Date: 2012-03-22 21:06 +0100 http://bitbucket.org/pypy/pypy/changeset/731a24c3d239/ Log: hopefully the last test for which we need to use the struct module diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -987,6 +987,10 @@ os.environ['LANG'] = oldlang class AppTestImportHooks(object): + + def setup_class(cls): + cls.space = gettestobjspace(usemodules=('struct',)) + def test_meta_path(self): tried_imports = [] class Importer(object): From noreply at buildbot.pypy.org Thu Mar 22 21:37:43 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 22 Mar 2012 21:37:43 +0100 (CET) Subject: [pypy-commit] pypy default: the 'run' task has been killed, update the test Message-ID: <20120322203743.3D3A982438@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r53929:1750949d36ad Date: 2012-03-22 21:35 +0100 http://bitbucket.org/pypy/pypy/changeset/1750949d36ad/ Log: the 'run' task has been killed, update the test diff --git a/pypy/translator/test/test_driver.py 
b/pypy/translator/test/test_driver.py --- a/pypy/translator/test/test_driver.py +++ b/pypy/translator/test/test_driver.py @@ -6,7 +6,7 @@ def test_ctr(): td = TranslationDriver() expected = ['annotate', 'backendopt', 'llinterpret', 'rtype', 'source', - 'compile', 'run', 'pyjitpl'] + 'compile', 'pyjitpl'] assert set(td.exposed) == set(expected) assert td.backend_select_goals(['compile_c']) == ['compile_c'] @@ -33,7 +33,6 @@ 'rtype_ootype', 'rtype_lltype', 'source_cli', 'source_c', 'compile_cli', 'compile_c', - 'run_c', 'run_cli', 'compile_jvm', 'source_jvm', 'run_jvm', 'pyjitpl_lltype', 'pyjitpl_ootype'] @@ -50,6 +49,6 @@ 'backendopt_lltype'] expected = ['annotate', 'backendopt', 'llinterpret', 'rtype', 'source_c', - 'compile_c', 'run_c', 'pyjitpl'] + 'compile_c', 'pyjitpl'] assert set(td.exposed) == set(expected) From noreply at buildbot.pypy.org Thu Mar 22 21:47:35 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 22 Mar 2012 21:47:35 +0100 (CET) Subject: [pypy-commit] pypy default: move unicode_w from W_StringObject up to W_AbstractStringObject; this way, it is automatically used also by ropes, which fixes test_unicode_join_str_arg_utf8 Message-ID: <20120322204736.005EB7107FD@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r53930:d148511060f8 Date: 2012-03-22 21:47 +0100 http://bitbucket.org/pypy/pypy/changeset/d148511060f8/ Log: move unicode_w from W_StringObject up to W_AbstractStringObject; this way, it is automatically used also by ropes, which fixes test_unicode_join_str_arg_utf8 diff --git a/pypy/objspace/std/ropeobject.py b/pypy/objspace/std/ropeobject.py --- a/pypy/objspace/std/ropeobject.py +++ b/pypy/objspace/std/ropeobject.py @@ -41,11 +41,6 @@ return w_self return W_RopeObject(w_self._node) - def unicode_w(w_self, space): - # XXX should this use the default encoding? 
- from pypy.objspace.std.unicodetype import plain_str2unicode - return plain_str2unicode(space, w_self._node.flatten_string()) - W_RopeObject.EMPTY = W_RopeObject(rope.LiteralStringNode.EMPTY) W_RopeObject.PREBUILT = [W_RopeObject(rope.LiteralStringNode.PREBUILT[i]) for i in range(256)] diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -37,6 +37,20 @@ return None return space.wrap(compute_unique_id(space.str_w(self))) + def unicode_w(w_self, space): + # Use the default encoding. + from pypy.objspace.std.unicodetype import unicode_from_string, \ + decode_object + w_defaultencoding = space.call_function(space.sys.get( + 'getdefaultencoding')) + from pypy.objspace.std.unicodetype import _get_encoding_and_errors, \ + unicode_from_string, decode_object + encoding, errors = _get_encoding_and_errors(space, w_defaultencoding, + space.w_None) + if encoding is None and errors is None: + return space.unicode_w(unicode_from_string(space, w_self)) + return space.unicode_w(decode_object(space, w_self, encoding, errors)) + class W_StringObject(W_AbstractStringObject): from pypy.objspace.std.stringtype import str_typedef as typedef @@ -55,20 +69,6 @@ def str_w(w_self, space): return w_self._value - def unicode_w(w_self, space): - # Use the default encoding. 
- from pypy.objspace.std.unicodetype import unicode_from_string, \ - decode_object - w_defaultencoding = space.call_function(space.sys.get( - 'getdefaultencoding')) - from pypy.objspace.std.unicodetype import _get_encoding_and_errors, \ - unicode_from_string, decode_object - encoding, errors = _get_encoding_and_errors(space, w_defaultencoding, - space.w_None) - if encoding is None and errors is None: - return space.unicode_w(unicode_from_string(space, w_self)) - return space.unicode_w(decode_object(space, w_self, encoding, errors)) - registerimplementation(W_StringObject) W_StringObject.EMPTY = W_StringObject('') From noreply at buildbot.pypy.org Thu Mar 22 22:05:52 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 22 Mar 2012 22:05:52 +0100 (CET) Subject: [pypy-commit] pypy py3k: hg merge default Message-ID: <20120322210552.E231A82438@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53931:878f3390e9c5 Date: 2012-03-22 22:05 +0100 http://bitbucket.org/pypy/pypy/changeset/878f3390e9c5/ Log: hg merge default diff --git a/lib-python/modified-2.7/site.py b/lib-python/modified-2.7/site.py --- a/lib-python/modified-2.7/site.py +++ b/lib-python/modified-2.7/site.py @@ -550,9 +550,18 @@ "'import usercustomize' failed; use -v for traceback" +def import_builtin_stuff(): + """PyPy specific: pre-import a few built-in modules, because + some programs actually rely on them to be in sys.modules :-(""" + import exceptions + if 'zipimport' in sys.builtin_module_names: + import zipimport + + def main(): global ENABLE_USER_SITE + import_builtin_stuff() abs__file__() known_paths = removeduppaths() if (os.name == "posix" and sys.path and diff --git a/lib_pypy/_locale.py b/lib_pypy/_locale.py deleted file mode 100644 --- a/lib_pypy/_locale.py +++ /dev/null @@ -1,337 +0,0 @@ -# ctypes implementation of _locale module by Victor Stinner, 2008-03-27 - -# ------------------------------------------------------------ -# Note that we also have our own 
interp-level implementation -# ------------------------------------------------------------ - -""" -Support for POSIX locales. -""" - -from ctypes import (Structure, POINTER, create_string_buffer, - c_ubyte, c_int, c_char_p, c_wchar_p, c_size_t) -from ctypes_support import standard_c_lib as libc -from ctypes_support import get_errno - -# load the platform-specific cache made by running locale.ctc.py -from ctypes_config_cache._locale_cache import * - -try: from __pypy__ import builtinify -except ImportError: builtinify = lambda f: f - - -# Ubuntu Gusty i386 structure -class lconv(Structure): - _fields_ = ( - # Numeric (non-monetary) information. - ("decimal_point", c_char_p), # Decimal point character. - ("thousands_sep", c_char_p), # Thousands separator. - - # Each element is the number of digits in each group; - # elements with higher indices are farther left. - # An element with value CHAR_MAX means that no further grouping is done. - # An element with value 0 means that the previous element is used - # for all groups farther left. */ - ("grouping", c_char_p), - - # Monetary information. - - # First three chars are a currency symbol from ISO 4217. - # Fourth char is the separator. Fifth char is '\0'. - ("int_curr_symbol", c_char_p), - ("currency_symbol", c_char_p), # Local currency symbol. - ("mon_decimal_point", c_char_p), # Decimal point character. - ("mon_thousands_sep", c_char_p), # Thousands separator. - ("mon_grouping", c_char_p), # Like `grouping' element (above). - ("positive_sign", c_char_p), # Sign for positive values. - ("negative_sign", c_char_p), # Sign for negative values. - ("int_frac_digits", c_ubyte), # Int'l fractional digits. - ("frac_digits", c_ubyte), # Local fractional digits. - # 1 if currency_symbol precedes a positive value, 0 if succeeds. - ("p_cs_precedes", c_ubyte), - # 1 iff a space separates currency_symbol from a positive value. - ("p_sep_by_space", c_ubyte), - # 1 if currency_symbol precedes a negative value, 0 if succeeds. 
- ("n_cs_precedes", c_ubyte), - # 1 iff a space separates currency_symbol from a negative value. - ("n_sep_by_space", c_ubyte), - - # Positive and negative sign positions: - # 0 Parentheses surround the quantity and currency_symbol. - # 1 The sign string precedes the quantity and currency_symbol. - # 2 The sign string follows the quantity and currency_symbol. - # 3 The sign string immediately precedes the currency_symbol. - # 4 The sign string immediately follows the currency_symbol. - ("p_sign_posn", c_ubyte), - ("n_sign_posn", c_ubyte), - # 1 if int_curr_symbol precedes a positive value, 0 if succeeds. - ("int_p_cs_precedes", c_ubyte), - # 1 iff a space separates int_curr_symbol from a positive value. - ("int_p_sep_by_space", c_ubyte), - # 1 if int_curr_symbol precedes a negative value, 0 if succeeds. - ("int_n_cs_precedes", c_ubyte), - # 1 iff a space separates int_curr_symbol from a negative value. - ("int_n_sep_by_space", c_ubyte), - # Positive and negative sign positions: - # 0 Parentheses surround the quantity and int_curr_symbol. - # 1 The sign string precedes the quantity and int_curr_symbol. - # 2 The sign string follows the quantity and int_curr_symbol. - # 3 The sign string immediately precedes the int_curr_symbol. - # 4 The sign string immediately follows the int_curr_symbol. 
- ("int_p_sign_posn", c_ubyte), - ("int_n_sign_posn", c_ubyte), - ) - -_setlocale = libc.setlocale -_setlocale.argtypes = (c_int, c_char_p) -_setlocale.restype = c_char_p - -_localeconv = libc.localeconv -_localeconv.argtypes = None -_localeconv.restype = POINTER(lconv) - -_strcoll = libc.strcoll -_strcoll.argtypes = (c_char_p, c_char_p) -_strcoll.restype = c_int - -_wcscoll = libc.wcscoll -_wcscoll.argtypes = (c_wchar_p, c_wchar_p) -_wcscoll.restype = c_int - -_strxfrm = libc.strxfrm -_strxfrm.argtypes = (c_char_p, c_char_p, c_size_t) -_strxfrm.restype = c_size_t - -HAS_LIBINTL = hasattr(libc, 'gettext') -if HAS_LIBINTL: - _gettext = libc.gettext - _gettext.argtypes = (c_char_p,) - _gettext.restype = c_char_p - - _dgettext = libc.dgettext - _dgettext.argtypes = (c_char_p, c_char_p) - _dgettext.restype = c_char_p - - _dcgettext = libc.dcgettext - _dcgettext.argtypes = (c_char_p, c_char_p, c_int) - _dcgettext.restype = c_char_p - - _textdomain = libc.textdomain - _textdomain.argtypes = (c_char_p,) - _textdomain.restype = c_char_p - - _bindtextdomain = libc.bindtextdomain - _bindtextdomain.argtypes = (c_char_p, c_char_p) - _bindtextdomain.restype = c_char_p - - HAS_BIND_TEXTDOMAIN_CODESET = hasattr(libc, 'bindtextdomain_codeset') - if HAS_BIND_TEXTDOMAIN_CODESET: - _bind_textdomain_codeset = libc.bindtextdomain_codeset - _bind_textdomain_codeset.argtypes = (c_char_p, c_char_p) - _bind_textdomain_codeset.restype = c_char_p - -class Error(Exception): - pass - -def fixup_ulcase(): - import string - #import strop - - # create uppercase map string - ul = [] - for c in xrange(256): - c = chr(c) - if c.isupper(): - ul.append(c) - ul = ''.join(ul) - string.uppercase = ul - #strop.uppercase = ul - - # create lowercase string - ul = [] - for c in xrange(256): - c = chr(c) - if c.islower(): - ul.append(c) - ul = ''.join(ul) - string.lowercase = ul - #strop.lowercase = ul - - # create letters string - ul = [] - for c in xrange(256): - c = chr(c) - if c.isalpha(): - ul.append(c) 
- ul = ''.join(ul) - string.letters = ul - - at builtinify -def setlocale(category, locale=None): - "(integer,string=None) -> string. Activates/queries locale processing." - if locale: - # set locale - result = _setlocale(category, locale) - if not result: - raise Error("unsupported locale setting") - - # record changes to LC_CTYPE - if category in (LC_CTYPE, LC_ALL): - fixup_ulcase() - else: - # get locale - result = _setlocale(category, None) - if not result: - raise Error("locale query failed") - return result - -def _copy_grouping(text): - groups = [ ord(group) for group in text ] - if groups: - groups.append(0) - return groups - - at builtinify -def localeconv(): - "() -> dict. Returns numeric and monetary locale-specific parameters." - - # if LC_NUMERIC is different in the C library, use saved value - lp = _localeconv() - l = lp.contents - - # hopefully, the localeconv result survives the C library calls - # involved herein - - # Numeric information - result = { - "decimal_point": l.decimal_point, - "thousands_sep": l.thousands_sep, - "grouping": _copy_grouping(l.grouping), - "int_curr_symbol": l.int_curr_symbol, - "currency_symbol": l.currency_symbol, - "mon_decimal_point": l.mon_decimal_point, - "mon_thousands_sep": l.mon_thousands_sep, - "mon_grouping": _copy_grouping(l.mon_grouping), - "positive_sign": l.positive_sign, - "negative_sign": l.negative_sign, - "int_frac_digits": l.int_frac_digits, - "frac_digits": l.frac_digits, - "p_cs_precedes": l.p_cs_precedes, - "p_sep_by_space": l.p_sep_by_space, - "n_cs_precedes": l.n_cs_precedes, - "n_sep_by_space": l.n_sep_by_space, - "p_sign_posn": l.p_sign_posn, - "n_sign_posn": l.n_sign_posn, - } - return result - - at builtinify -def strcoll(s1, s2): - "string,string -> int. Compares two strings according to the locale." - - # If both arguments are byte strings, use strcoll. - if isinstance(s1, str) and isinstance(s2, str): - return _strcoll(s1, s2) - - # If neither argument is unicode, it's an error. 
- if not isinstance(s1, unicode) and not isinstance(s2, unicode): - raise ValueError("strcoll arguments must be strings") - - # Convert the non-unicode argument to unicode. - s1 = unicode(s1) - s2 = unicode(s2) - - # Collate the strings. - return _wcscoll(s1, s2) - - at builtinify -def strxfrm(s): - "string -> string. Returns a string that behaves for cmp locale-aware." - - # assume no change in size, first - n1 = len(s) + 1 - buf = create_string_buffer(n1) - n2 = _strxfrm(buf, s, n1) + 1 - if n2 > n1: - # more space needed - buf = create_string_buffer(n2) - _strxfrm(buf, s, n2) - return buf.value - - at builtinify -def getdefaultlocale(): - # TODO: Port code from CPython for Windows and Mac OS - raise NotImplementedError() - -if HAS_LANGINFO: - _nl_langinfo = libc.nl_langinfo - _nl_langinfo.argtypes = (nl_item,) - _nl_langinfo.restype = c_char_p - - def nl_langinfo(key): - """nl_langinfo(key) -> string - Return the value for the locale information associated with key.""" - # Check whether this is a supported constant. GNU libc sometimes - # returns numeric values in the char* return value, which would - # crash PyString_FromString. 
- result = _nl_langinfo(key) - if result is not None: - return result - raise ValueError("unsupported langinfo constant") - -if HAS_LIBINTL: - @builtinify - def gettext(msg): - """gettext(msg) -> string - Return translation of msg.""" - return _gettext(msg) - - @builtinify - def dgettext(domain, msg): - """dgettext(domain, msg) -> string - Return translation of msg in domain.""" - return _dgettext(domain, msg) - - @builtinify - def dcgettext(domain, msg, category): - """dcgettext(domain, msg, category) -> string - Return translation of msg in domain and category.""" - return _dcgettext(domain, msg, category) - - @builtinify - def textdomain(domain): - """textdomain(domain) -> string - Set the C library's textdomain to domain, returning the new domain.""" - return _textdomain(domain) - - @builtinify - def bindtextdomain(domain, dir): - """bindtextdomain(domain, dir) -> string - Bind the C library's domain to dir.""" - dirname = _bindtextdomain(domain, dir) - if not dirname: - errno = get_errno() - raise OSError(errno) - return dirname - - if HAS_BIND_TEXTDOMAIN_CODESET: - @builtinify - def bind_textdomain_codeset(domain, codeset): - """bind_textdomain_codeset(domain, codeset) -> string - Bind the C library's domain to codeset.""" - codeset = _bind_textdomain_codeset(domain, codeset) - if codeset: - return codeset - return None - -__all__ = ( - 'Error', - 'setlocale', 'localeconv', 'strxfrm', 'strcoll', -) + ALL_CONSTANTS -if HAS_LIBINTL: - __all__ += ('gettext', 'dgettext', 'dcgettext', 'textdomain', - 'bindtextdomain') - if HAS_BIND_TEXTDOMAIN_CODESET: - __all__ += ('bind_textdomain_codeset',) -if HAS_LANGINFO: - __all__ += ('nl_langinfo',) diff --git a/lib_pypy/array.py b/lib_pypy/array.py deleted file mode 100644 --- a/lib_pypy/array.py +++ /dev/null @@ -1,530 +0,0 @@ -"""This module defines an object type which can efficiently represent -an array of basic values: characters, integers, floating point -numbers. 
Arrays are sequence types and behave very much like lists, -except that the type of objects stored in them is constrained. The -type is specified at object creation time by using a type code, which -is a single character. The following type codes are defined: - - Type code C Type Minimum size in bytes - 'c' character 1 - 'b' signed integer 1 - 'B' unsigned integer 1 - 'u' Unicode character 2 - 'h' signed integer 2 - 'H' unsigned integer 2 - 'i' signed integer 2 - 'I' unsigned integer 2 - 'l' signed integer 4 - 'L' unsigned integer 4 - 'f' floating point 4 - 'd' floating point 8 - -The constructor is: - -array(typecode [, initializer]) -- create a new array -""" - -from struct import calcsize, pack, pack_into, unpack_from -import operator - -# the buffer-like object to use internally: trying from -# various places in order... -try: - import _rawffi # a reasonable implementation based - _RAWARRAY = _rawffi.Array('c') # on raw_malloc, and providing a - def bytebuffer(size): # real address - return _RAWARRAY(size, autofree=True) - def getbufaddress(buf): - return buf.buffer -except ImportError: - try: - from __pypy__ import bytebuffer # a reasonable implementation - def getbufaddress(buf): # compatible with oo backends, - return 0 # but no address - except ImportError: - # not running on PyPy. Fall back to ctypes... - import ctypes - bytebuffer = ctypes.create_string_buffer - def getbufaddress(buf): - voidp = ctypes.cast(ctypes.pointer(buf), ctypes.c_void_p) - return voidp.value - -# ____________________________________________________________ - -TYPECODES = "cbBuhHiIlLfd" - -class array(object): - """array(typecode [, initializer]) -> array - - Return a new array whose items are restricted by typecode, and - initialized from the optional initializer value, which must be a list, - string. or iterable over elements of the appropriate type. - - Arrays represent basic values and behave very much like lists, except - the type of objects stored in them is constrained. 
- - Methods: - - append() -- append a new item to the end of the array - buffer_info() -- return information giving the current memory info - byteswap() -- byteswap all the items of the array - count() -- return number of occurences of an object - extend() -- extend array by appending multiple elements from an iterable - fromfile() -- read items from a file object - fromlist() -- append items from the list - fromstring() -- append items from the string - index() -- return index of first occurence of an object - insert() -- insert a new item into the array at a provided position - pop() -- remove and return item (default last) - read() -- DEPRECATED, use fromfile() - remove() -- remove first occurence of an object - reverse() -- reverse the order of the items in the array - tofile() -- write all items to a file object - tolist() -- return the array converted to an ordinary list - tostring() -- return the array converted to a string - write() -- DEPRECATED, use tofile() - - Attributes: - - typecode -- the typecode character used to create the array - itemsize -- the length in bytes of one array item - """ - __slots__ = ["typecode", "itemsize", "_data", "_descriptor", "__weakref__"] - - def __new__(cls, typecode, initializer=[], **extrakwds): - self = object.__new__(cls) - if cls is array and extrakwds: - raise TypeError("array() does not take keyword arguments") - if not isinstance(typecode, str) or len(typecode) != 1: - raise TypeError( - "array() argument 1 must be char, not %s" % type(typecode)) - if typecode not in TYPECODES: - raise ValueError( - "bad typecode (must be one of %s)" % ', '.join(TYPECODES)) - self._data = bytebuffer(0) - self.typecode = typecode - self.itemsize = calcsize(typecode) - if isinstance(initializer, list): - self.fromlist(initializer) - elif isinstance(initializer, bytes): - self.fromstring(initializer) - elif isinstance(initializer, str) and self.typecode == "u": - self.fromunicode(initializer) - else: - self.extend(initializer) - 
return self - - def _clear(self): - self._data = bytebuffer(0) - - ##### array-specific operations - - def fromfile(self, f, n): - """Read n objects from the file object f and append them to the end of - the array. Also called as read.""" - if not isinstance(f, file): - raise TypeError("arg1 must be open file") - size = self.itemsize * n - item = f.read(size) - if len(item) < size: - raise EOFError("not enough items in file") - self.fromstring(item) - - def fromlist(self, l): - """Append items to array from list.""" - if not isinstance(l, list): - raise TypeError("arg must be list") - self._fromiterable(l) - - def fromstring(self, s): - """Appends items from the string, interpreting it as an array of machine - values, as if it had been read from a file using the fromfile() - method.""" - self._frombuffer(s) - - def _frombuffer(self, s): - length = len(s) - if length % self.itemsize != 0: - raise ValueError("string length not a multiple of item size") - boundary = len(self._data) - newdata = bytebuffer(boundary + length) - newdata[:boundary] = self._data - newdata[boundary:] = s - self._data = newdata - - def fromunicode(self, ustr): - """Extends this array with data from the unicode string ustr. The array - must be a type 'u' array; otherwise a ValueError is raised. Use - array.fromstring(ustr.encode(...)) to append Unicode data to an array of - some other type.""" - if not self.typecode == "u": - raise ValueError( - "fromunicode() may only be called on type 'u' arrays") - # XXX the following probable bug is not emulated: - # CPython accepts a non-unicode string or a buffer, and then - # behaves just like fromstring(), except that it strangely truncates - # string arguments at multiples of the unicode byte size. - # Let's only accept unicode arguments for now. 
- if not isinstance(ustr, unicode): - raise TypeError("fromunicode() argument should probably be " - "a unicode string") - # _frombuffer() does the currect thing using - # the buffer behavior of unicode objects - self._frombuffer(buffer(ustr)) - - def tofile(self, f): - """Write all items (as machine values) to the file object f. Also - called as write.""" - if not isinstance(f, file): - raise TypeError("arg must be open file") - f.write(self.tostring()) - - def tolist(self): - """Convert array to an ordinary list with the same items.""" - count = len(self._data) // self.itemsize - return list(unpack_from('%d%s' % (count, self.typecode), self._data)) - - def tostring(self): - return self._data[:] - tobytes = tostring - - def __buffer__(self): - return buffer(self._data) - - def tounicode(self): - """Convert the array to a unicode string. The array must be a type 'u' - array; otherwise a ValueError is raised. Use array.tostring().decode() - to obtain a unicode string from an array of some other type.""" - if self.typecode != "u": - raise ValueError("tounicode() may only be called on type 'u' arrays") - # XXX performance is not too good - return "".join(self.tolist()) - - def byteswap(self): - """Byteswap all items of the array. If the items in the array are not - 1, 2, 4, or 8 bytes in size, RuntimeError is raised.""" - if self.itemsize not in [1, 2, 4, 8]: - raise RuntimeError("byteswap not supported for this array") - # XXX slowish - itemsize = self.itemsize - bytes = self._data - for start in range(0, len(bytes), itemsize): - stop = start + itemsize - bytes[start:stop] = bytes[start:stop][::-1] - - def buffer_info(self): - """Return a tuple (address, length) giving the current memory address - and the length in items of the buffer used to hold array's contents. The - length should be multiplied by the itemsize attribute to calculate the - buffer length in bytes. 
On PyPy the address might be meaningless - (returned as 0), depending on the available modules.""" - return (getbufaddress(self._data), len(self)) - - read = fromfile - - write = tofile - - ##### general object protocol - - def __repr__(self): - if len(self._data) == 0: - return "array('%s')" % self.typecode - elif self.typecode == "c": - return "array('%s', %s)" % (self.typecode, repr(self.tostring())) - elif self.typecode == "u": - return "array('%s', %s)" % (self.typecode, repr(self.tounicode())) - else: - return "array('%s', %s)" % (self.typecode, repr(self.tolist())) - - def __copy__(self): - a = array(self.typecode) - a._data = bytebuffer(len(self._data)) - a._data[:] = self._data - return a - - def __eq__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) == buffer(other._data) - else: - return self.tolist() == other.tolist() - - def __ne__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) != buffer(other._data) - else: - return self.tolist() != other.tolist() - - def __lt__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) < buffer(other._data) - else: - return self.tolist() < other.tolist() - - def __gt__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) > buffer(other._data) - else: - return self.tolist() > other.tolist() - - def __le__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) <= buffer(other._data) - else: - return self.tolist() <= other.tolist() - - def __ge__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) >= buffer(other._data) - else: - return self.tolist() >= other.tolist() - - 
def __reduce__(self): - dict = getattr(self, '__dict__', None) - data = self.tostring() - if data: - initargs = (self.typecode, data) - else: - initargs = (self.typecode,) - return (type(self), initargs, dict) - - ##### list methods - - def append(self, x): - """Append new value x to the end of the array.""" - self._frombuffer(pack(self.typecode, x)) - - def count(self, x): - """Return number of occurences of x in the array.""" - return operator.countOf(self, x) - - def extend(self, iterable): - """Append items to the end of the array.""" - if isinstance(iterable, array) \ - and not self.typecode == iterable.typecode: - raise TypeError("can only extend with array of same kind") - self._fromiterable(iterable) - - def index(self, x): - """Return index of first occurence of x in the array.""" - return operator.indexOf(self, x) - - def insert(self, i, x): - """Insert a new item x into the array before position i.""" - seqlength = len(self) - if i < 0: - i += seqlength - if i < 0: - i = 0 - elif i > seqlength: - i = seqlength - boundary = i * self.itemsize - data = pack(self.typecode, x) - newdata = bytebuffer(len(self._data) + len(data)) - newdata[:boundary] = self._data[:boundary] - newdata[boundary:boundary+self.itemsize] = data - newdata[boundary+self.itemsize:] = self._data[boundary:] - self._data = newdata - - def pop(self, i=-1): - """Return the i-th element and delete it from the array. 
i defaults to - -1.""" - seqlength = len(self) - if i < 0: - i += seqlength - if not (0 <= i < seqlength): - raise IndexError(i) - boundary = i * self.itemsize - result = unpack_from(self.typecode, self._data, boundary)[0] - newdata = bytebuffer(len(self._data) - self.itemsize) - newdata[:boundary] = self._data[:boundary] - newdata[boundary:] = self._data[boundary+self.itemsize:] - self._data = newdata - return result - - def remove(self, x): - """Remove the first occurence of x in the array.""" - self.pop(self.index(x)) - - def reverse(self): - """Reverse the order of the items in the array.""" - lst = self.tolist() - lst.reverse() - self._clear() - self.fromlist(lst) - - ##### list protocol - - def __len__(self): - return len(self._data) // self.itemsize - - def __add__(self, other): - if not isinstance(other, array): - raise TypeError("can only append array to array") - if self.typecode != other.typecode: - raise TypeError("bad argument type for built-in operation") - return array(self.typecode, buffer(self._data) + buffer(other._data)) - - def __mul__(self, repeat): - return array(self.typecode, buffer(self._data) * repeat) - - __rmul__ = __mul__ - - def __getitem__(self, i): - seqlength = len(self) - if isinstance(i, slice): - start, stop, step = i.indices(seqlength) - if step != 1: - sublist = self.tolist()[i] # fall-back - return array(self.typecode, sublist) - if start < 0: - start = 0 - if stop < start: - stop = start - assert stop <= seqlength - return array(self.typecode, self._data[start * self.itemsize : - stop * self.itemsize]) - else: - if i < 0: - i += seqlength - if self.typecode == 'c': # speed trick - return self._data[i] - if not (0 <= i < seqlength): - raise IndexError(i) - boundary = i * self.itemsize - return unpack_from(self.typecode, self._data, boundary)[0] - - def __getslice__(self, i, j): - return self.__getitem__(slice(i, j)) - - def __setitem__(self, i, x): - if isinstance(i, slice): - if (not isinstance(x, array) - or self.typecode != 
x.typecode): - raise TypeError("can only assign array of same kind" - " to array slice") - seqlength = len(self) - start, stop, step = i.indices(seqlength) - if step != 1: - sublist = self.tolist() # fall-back - sublist[i] = x.tolist() - self._clear() - self.fromlist(sublist) - return - if start < 0: - start = 0 - if stop < start: - stop = start - assert stop <= seqlength - boundary1 = start * self.itemsize - boundary2 = stop * self.itemsize - boundary2new = boundary1 + len(x._data) - if boundary2 == boundary2new: - self._data[boundary1:boundary2] = x._data - else: - newdata = bytebuffer(len(self._data) + boundary2new-boundary2) - newdata[:boundary1] = self._data[:boundary1] - newdata[boundary1:boundary2new] = x._data - newdata[boundary2new:] = self._data[boundary2:] - self._data = newdata - else: - seqlength = len(self) - if i < 0: - i += seqlength - if self.typecode == 'c': # speed trick - self._data[i] = x - return - if not (0 <= i < seqlength): - raise IndexError(i) - boundary = i * self.itemsize - pack_into(self.typecode, self._data, boundary, x) - - def __setslice__(self, i, j, x): - self.__setitem__(slice(i, j), x) - - def __delitem__(self, i): - if isinstance(i, slice): - seqlength = len(self) - start, stop, step = i.indices(seqlength) - if start < 0: - start = 0 - if stop < start: - stop = start - assert stop <= seqlength - if step != 1: - sublist = self.tolist() # fall-back - del sublist[i] - self._clear() - self.fromlist(sublist) - return - dellength = stop - start - boundary1 = start * self.itemsize - boundary2 = stop * self.itemsize - newdata = bytebuffer(len(self._data) - (boundary2-boundary1)) - newdata[:boundary1] = self._data[:boundary1] - newdata[boundary1:] = self._data[boundary2:] - self._data = newdata - else: - seqlength = len(self) - if i < 0: - i += seqlength - if not (0 <= i < seqlength): - raise IndexError(i) - boundary = i * self.itemsize - newdata = bytebuffer(len(self._data) - self.itemsize) - newdata[:boundary] = self._data[:boundary] 
- newdata[boundary:] = self._data[boundary+self.itemsize:] - self._data = newdata - - def __delslice__(self, i, j): - self.__delitem__(slice(i, j)) - - def __contains__(self, item): - for x in self: - if x == item: - return True - return False - - def __iadd__(self, other): - if not isinstance(other, array): - raise TypeError("can only extend array with array") - self.extend(other) - return self - - def __imul__(self, repeat): - newdata = buffer(self._data) * repeat - self._data = bytebuffer(len(newdata)) - self._data[:] = newdata - return self - - def __iter__(self): - p = 0 - typecode = self.typecode - itemsize = self.itemsize - while p < len(self._data): - yield unpack_from(typecode, self._data, p)[0] - p += itemsize - - ##### internal methods - - def _fromiterable(self, iterable): - iterable = tuple(iterable) - n = len(iterable) - boundary = len(self._data) - newdata = bytebuffer(boundary + n * self.itemsize) - newdata[:boundary] = self._data - pack_into('%d%s' % (n, self.typecode), newdata, boundary, *iterable) - self._data = newdata - -ArrayType = array diff --git a/lib_pypy/binascii.py b/lib_pypy/binascii.py deleted file mode 100644 --- a/lib_pypy/binascii.py +++ /dev/null @@ -1,720 +0,0 @@ -"""A pure Python implementation of binascii. - -Rather slow and buggy in corner cases. -PyPy provides an RPython version too. 
-""" - -class Error(Exception): - pass - -class Done(Exception): - pass - -class Incomplete(Exception): - pass - -def a2b_uu(s): - if not s: - return '' - - length = (ord(s[0]) - 0x20) % 64 - - def quadruplets_gen(s): - while s: - try: - yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3]) - except IndexError: - s += ' ' - yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3]) - return - s = s[4:] - - try: - result = [''.join( - [chr((A - 0x20) << 2 | (((B - 0x20) >> 4) & 0x3)), - chr(((B - 0x20) & 0xf) << 4 | (((C - 0x20) >> 2) & 0xf)), - chr(((C - 0x20) & 0x3) << 6 | ((D - 0x20) & 0x3f)) - ]) for A, B, C, D in quadruplets_gen(s[1:].rstrip())] - except ValueError: - raise Error('Illegal char') - result = ''.join(result) - trailingdata = result[length:] - if trailingdata.strip('\x00'): - raise Error('Trailing garbage') - result = result[:length] - if len(result) < length: - result += ((length - len(result)) * '\x00') - return result - - -def b2a_uu(s): - length = len(s) - if length > 45: - raise Error('At most 45 bytes at once') - - def triples_gen(s): - while s: - try: - yield ord(s[0]), ord(s[1]), ord(s[2]) - except IndexError: - s += '\0\0' - yield ord(s[0]), ord(s[1]), ord(s[2]) - return - s = s[3:] - - result = [''.join( - [chr(0x20 + (( A >> 2 ) & 0x3F)), - chr(0x20 + (((A << 4) | ((B >> 4) & 0xF)) & 0x3F)), - chr(0x20 + (((B << 2) | ((C >> 6) & 0x3)) & 0x3F)), - chr(0x20 + (( C ) & 0x3F))]) - for A, B, C in triples_gen(s)] - return chr(ord(' ') + (length & 0o77)) + ''.join(result) + '\n' - - -table_a2b_base64 = { - 'A': 0, - 'B': 1, - 'C': 2, - 'D': 3, - 'E': 4, - 'F': 5, - 'G': 6, - 'H': 7, - 'I': 8, - 'J': 9, - 'K': 10, - 'L': 11, - 'M': 12, - 'N': 13, - 'O': 14, - 'P': 15, - 'Q': 16, - 'R': 17, - 'S': 18, - 'T': 19, - 'U': 20, - 'V': 21, - 'W': 22, - 'X': 23, - 'Y': 24, - 'Z': 25, - 'a': 26, - 'b': 27, - 'c': 28, - 'd': 29, - 'e': 30, - 'f': 31, - 'g': 32, - 'h': 33, - 'i': 34, - 'j': 35, - 'k': 36, - 'l': 37, - 'm': 38, - 'n': 39, - 'o': 40, - 'p': 41, - 'q': 42, 
- 'r': 43, - 's': 44, - 't': 45, - 'u': 46, - 'v': 47, - 'w': 48, - 'x': 49, - 'y': 50, - 'z': 51, - '0': 52, - '1': 53, - '2': 54, - '3': 55, - '4': 56, - '5': 57, - '6': 58, - '7': 59, - '8': 60, - '9': 61, - '+': 62, - '/': 63, - '=': 0, -} - - -def a2b_base64(s): - if not isinstance(s, (str, unicode)): - raise TypeError("expected string or unicode, got %r" % (s,)) - s = s.rstrip() - # clean out all invalid characters, this also strips the final '=' padding - # check for correct padding - - def next_valid_char(s, pos): - for i in range(pos + 1, len(s)): - c = s[i] - if c < '\x7f': - try: - table_a2b_base64[c] - return c - except KeyError: - pass - return None - - quad_pos = 0 - leftbits = 0 - leftchar = 0 - res = [] - for i, c in enumerate(s): - if c > '\x7f' or c == '\n' or c == '\r' or c == ' ': - continue - if c == '=': - if quad_pos < 2 or (quad_pos == 2 and next_valid_char(s, i) != '='): - continue - else: - leftbits = 0 - break - try: - next_c = table_a2b_base64[c] - except KeyError: - continue - quad_pos = (quad_pos + 1) & 0x03 - leftchar = (leftchar << 6) | next_c - leftbits += 6 - if leftbits >= 8: - leftbits -= 8 - res.append((leftchar >> leftbits & 0xff)) - leftchar &= ((1 << leftbits) - 1) - if leftbits != 0: - raise Error('Incorrect padding') - - return ''.join([chr(i) for i in res]) - -table_b2a_base64 = \ -"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" - -def b2a_base64(s): - length = len(s) - final_length = length % 3 - - def triples_gen(s): - while s: - try: - yield ord(s[0]), ord(s[1]), ord(s[2]) - except IndexError: - s += '\0\0' - yield ord(s[0]), ord(s[1]), ord(s[2]) - return - s = s[3:] - - - a = triples_gen(s[ :length - final_length]) - - result = [''.join( - [table_b2a_base64[( A >> 2 ) & 0x3F], - table_b2a_base64[((A << 4) | ((B >> 4) & 0xF)) & 0x3F], - table_b2a_base64[((B << 2) | ((C >> 6) & 0x3)) & 0x3F], - table_b2a_base64[( C ) & 0x3F]]) - for A, B, C in a] - - final = s[length - final_length:] - if final_length 
== 0: - snippet = '' - elif final_length == 1: - a = ord(final[0]) - snippet = table_b2a_base64[(a >> 2 ) & 0x3F] + \ - table_b2a_base64[(a << 4 ) & 0x3F] + '==' - else: - a = ord(final[0]) - b = ord(final[1]) - snippet = table_b2a_base64[(a >> 2) & 0x3F] + \ - table_b2a_base64[((a << 4) | (b >> 4) & 0xF) & 0x3F] + \ - table_b2a_base64[(b << 2) & 0x3F] + '=' - return ''.join(result) + snippet + '\n' - -def a2b_qp(s, header=False): - inp = 0 - odata = [] - while inp < len(s): - if s[inp] == '=': - inp += 1 - if inp >= len(s): - break - # Soft line breaks - if (s[inp] == '\n') or (s[inp] == '\r'): - if s[inp] != '\n': - while inp < len(s) and s[inp] != '\n': - inp += 1 - if inp < len(s): - inp += 1 - elif s[inp] == '=': - # broken case from broken python qp - odata.append('=') - inp += 1 - elif s[inp] in hex_numbers and s[inp + 1] in hex_numbers: - ch = chr(int(s[inp:inp+2], 16)) - inp += 2 - odata.append(ch) - else: - odata.append('=') - elif header and s[inp] == '_': - odata.append(' ') - inp += 1 - else: - odata.append(s[inp]) - inp += 1 - return ''.join(odata) - -def b2a_qp(data, quotetabs=False, istext=True, header=False): - """quotetabs=True means that tab and space characters are always - quoted. - istext=False means that \r and \n are treated as regular characters - header=True encodes space characters with '_' and requires - real '_' characters to be quoted. - """ - MAXLINESIZE = 76 - - # See if this string is using CRLF line ends - lf = data.find('\n') - crlf = lf > 0 and data[lf-1] == '\r' - - inp = 0 - linelen = 0 - odata = [] - while inp < len(data): - c = data[inp] - if (c > '~' or - c == '=' or - (header and c == '_') or - (c == '.' 
and linelen == 0 and (inp+1 == len(data) or - data[inp+1] == '\n' or - data[inp+1] == '\r')) or - (not istext and (c == '\r' or c == '\n')) or - ((c == '\t' or c == ' ') and (inp + 1 == len(data))) or - (c <= ' ' and c != '\r' and c != '\n' and - (quotetabs or (not quotetabs and (c != '\t' and c != ' '))))): - linelen += 3 - if linelen >= MAXLINESIZE: - odata.append('=') - if crlf: odata.append('\r') - odata.append('\n') - linelen = 3 - odata.append('=' + two_hex_digits(ord(c))) - inp += 1 - else: - if (istext and - (c == '\n' or (inp+1 < len(data) and c == '\r' and - data[inp+1] == '\n'))): - linelen = 0 - # Protect against whitespace on end of line - if (len(odata) > 0 and - (odata[-1] == ' ' or odata[-1] == '\t')): - ch = ord(odata[-1]) - odata[-1] = '=' - odata.append(two_hex_digits(ch)) - - if crlf: odata.append('\r') - odata.append('\n') - if c == '\r': - inp += 2 - else: - inp += 1 - else: - if (inp + 1 < len(data) and - data[inp+1] != '\n' and - (linelen + 1) >= MAXLINESIZE): - odata.append('=') - if crlf: odata.append('\r') - odata.append('\n') - linelen = 0 - - linelen += 1 - if header and c == ' ': - c = '_' - odata.append(c) - inp += 1 - return ''.join(odata) - -hex_numbers = '0123456789ABCDEF' -def hex(n): - if n == 0: - return '0' - - if n < 0: - n = -n - sign = '-' - else: - sign = '' - arr = [] - - def hex_gen(n): - """ Yield a nibble at a time. 
""" - while n: - yield n % 0x10 - n = n / 0x10 - - for nibble in hex_gen(n): - arr = [hex_numbers[nibble]] + arr - return sign + ''.join(arr) - -def two_hex_digits(n): - return hex_numbers[n / 0x10] + hex_numbers[n % 0x10] - - -def strhex_to_int(s): - i = 0 - for c in s: - i = i * 0x10 + hex_numbers.index(c) - return i - -hqx_encoding = '!"#$%&\'()*+,-012345689 at ABCDEFGHIJKLMNPQRSTUVXYZ[`abcdefhijklmpqr' - -DONE = 0x7f -SKIP = 0x7e -FAIL = 0x7d - -table_a2b_hqx = [ - #^@ ^A ^B ^C ^D ^E ^F ^G - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - #\b \t \n ^K ^L \r ^N ^O - FAIL, FAIL, SKIP, FAIL, FAIL, SKIP, FAIL, FAIL, - #^P ^Q ^R ^S ^T ^U ^V ^W - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - #^X ^Y ^Z ^[ ^\ ^] ^^ ^_ - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - # ! " # $ % & ' - FAIL, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, - #( ) * + , - . / - 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, FAIL, FAIL, - #0 1 2 3 4 5 6 7 - 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, FAIL, - #8 9 : ; < = > ? - 0x14, 0x15, DONE, FAIL, FAIL, FAIL, FAIL, FAIL, - #@ A B C D E F G - 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, - #H I J K L M N O - 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, FAIL, - #P Q R S T U V W - 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, FAIL, - #X Y Z [ \ ] ^ _ - 0x2C, 0x2D, 0x2E, 0x2F, FAIL, FAIL, FAIL, FAIL, - #` a b c d e f g - 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, FAIL, - #h i j k l m n o - 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, FAIL, FAIL, - #p q r s t u v w - 0x3D, 0x3E, 0x3F, FAIL, FAIL, FAIL, FAIL, FAIL, - #x y z { | } ~ ^? 
- FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, -] - -def a2b_hqx(s): - result = [] - - def quadruples_gen(s): - t = [] - for c in s: - res = table_a2b_hqx[ord(c)] - if res == SKIP: - continue - elif res == FAIL: - raise Error('Illegal character') - elif res == DONE: - yield t - raise Done - else: - t.append(res) - if len(t) == 4: - yield t - t = [] - yield t - - done = 0 - try: - for snippet in quadruples_gen(s): - length = len(snippet) - if length == 4: - result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4))) - result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2))) - result.append(chr(((snippet[2] & 0x03) << 6) | (snippet[3]))) - elif length == 3: - result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4))) - result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2))) - elif length == 2: - result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4))) - except Done: - done = 1 - except Error: - raise - return (''.join(result), done) - -def b2a_hqx(s): - result =[] - - def triples_gen(s): - while s: - try: - yield ord(s[0]), ord(s[1]), ord(s[2]) - except IndexError: - yield tuple([ord(c) for c in s]) - s = s[3:] - - for snippet in 
triples_gen(s): - length = len(snippet) - if length == 3: - result.append( - hqx_encoding[(snippet[0] & 0xfc) >> 2]) - result.append(hqx_encoding[ - ((snippet[0] & 0x03) << 4) | ((snippet[1] & 0xf0) >> 4)]) - result.append(hqx_encoding[ - (snippet[1] & 0x0f) << 2 | ((snippet[2] & 0xc0) >> 6)]) - result.append(hqx_encoding[snippet[2] & 0x3f]) - elif length == 2: - result.append( - hqx_encoding[(snippet[0] & 0xfc) >> 2]) - result.append(hqx_encoding[ - ((snippet[0] & 0x03) << 4) | ((snippet[1] & 0xf0) >> 4)]) - result.append(hqx_encoding[ - (snippet[1] & 0x0f) << 2]) - elif length == 1: - result.append( - hqx_encoding[(snippet[0] & 0xfc) >> 2]) - result.append(hqx_encoding[ - ((snippet[0] & 0x03) << 4)]) - return ''.join(result) - -crctab_hqx = [ - 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, - 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, - 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, - 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, - 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, - 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, - 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, - 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, - 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, - 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, - 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12, - 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, - 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, - 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, - 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, - 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, - 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, - 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, - 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 
0xe37f, 0xf35e, - 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, - 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, - 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, - 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, - 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, - 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, - 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, - 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, - 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, - 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, - 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, - 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, - 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0, -] - -def crc_hqx(s, crc): - for c in s: - crc = ((crc << 8) & 0xff00) ^ crctab_hqx[((crc >> 8) & 0xff) ^ ord(c)] - - return crc - -def rlecode_hqx(s): - """ - Run length encoding for binhex4. - The CPython implementation does not do run length encoding - of \x90 characters. This implementation does. - """ - if not s: - return '' - result = [] - prev = s[0] - count = 1 - # Add a dummy character to get the loop to go one extra round. - # The dummy must be different from the last character of s. - # In the same step we remove the first character, which has - # already been stored in prev. - if s[-1] == '!': - s = s[1:] + '?' - else: - s = s[1:] + '!' 
- - for c in s: - if c == prev and count < 255: - count += 1 - else: - if count == 1: - if prev != '\x90': - result.append(prev) - else: - result.extend(['\x90', '\x00']) - elif count < 4: - if prev != '\x90': - result.extend([prev] * count) - else: - result.extend(['\x90', '\x00'] * count) - else: - if prev != '\x90': - result.extend([prev, '\x90', chr(count)]) - else: - result.extend(['\x90', '\x00', '\x90', chr(count)]) - count = 1 - prev = c - - return ''.join(result) - -def rledecode_hqx(s): - s = s.split('\x90') - result = [s[0]] - prev = s[0] - for snippet in s[1:]: - count = ord(snippet[0]) - if count > 0: - result.append(prev[-1] * (count-1)) - prev = snippet - else: - result. append('\x90') - prev = '\x90' - result.append(snippet[1:]) - - return ''.join(result) - -crc_32_tab = [ - 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, - 0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, - 0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, - 0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, - 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856, - 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9, - 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, - 0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, - 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, - 0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a, - 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599, - 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, - 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, - 0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, - 0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e, - 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01, - 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed, - 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950, - 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, - 0xfbd44c65, 
0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, - 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, - 0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5, - 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010, - 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f, - 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, - 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, - 0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615, - 0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, - 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344, - 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb, - 0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, - 0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, - 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, - 0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c, - 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef, - 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, - 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, - 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, - 0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c, - 0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713, - 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b, - 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242, - 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, - 0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, - 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278, - 0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7, - 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66, - 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, - 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, - 0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, - 0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, - 0x2d02ef8d -] - -def 
crc32(s, crc=0): - result = 0 - crc = ~int(crc) & 0xffffffff - for c in s: - crc = crc_32_tab[(crc ^ c) & 0xff] ^ (crc >> 8) - #/* Note: (crc >> 8) MUST zero fill on left - - result = crc ^ 0xffffffff - - if result > 2**31: - result = ((result + 2**31) % 2**32) - 2**31 - - return result - -def b2a_hex(s): - result = [] - for char in s: - c = (char >> 4) & 0xf - if c > 9: - c = c + ord('a') - 10 - else: - c = c + ord('0') - result.append(chr(c)) - c = char & 0xf - if c > 9: - c = c + ord('a') - 10 - else: - c = c + ord('0') - result.append(c) - return bytes(result) - -hexlify = b2a_hex - -table_hex = [ - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,-1,-1, -1,-1,-1,-1, - -1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1 -] - - -def a2b_hex(t): - result = [] - - def pairs_gen(s): - while s: - try: - yield table_hex[s[0]], table_hex[s[1]] - except IndexError: - if len(s): - raise TypeError('Odd-length string') - return - s = s[2:] - - for a, b in pairs_gen(t): - if a < 0 or b < 0: - raise TypeError('Non-hexadecimal digit found') - result.append((a << 4) + b) - return bytes(result) - - -unhexlify = a2b_hex diff --git a/lib_pypy/numpypy/core/numeric.py b/lib_pypy/numpypy/core/numeric.py --- a/lib_pypy/numpypy/core/numeric.py +++ b/lib_pypy/numpypy/core/numeric.py @@ -306,6 +306,125 @@ else: return multiarray.set_string_function(f, repr) +def array_equal(a1, a2): + """ + True if two arrays have the same shape and elements, False otherwise. + + Parameters + ---------- + a1, a2 : array_like + Input arrays. + + Returns + ------- + b : bool + Returns True if the arrays are equal. 
+ + See Also + -------- + allclose: Returns True if two arrays are element-wise equal within a + tolerance. + array_equiv: Returns True if input arrays are shape consistent and all + elements equal. + + Examples + -------- + >>> np.array_equal([1, 2], [1, 2]) + True + >>> np.array_equal(np.array([1, 2]), np.array([1, 2])) + True + >>> np.array_equal([1, 2], [1, 2, 3]) + False + >>> np.array_equal([1, 2], [1, 4]) + False + + """ + try: + a1, a2 = asarray(a1), asarray(a2) + except: + return False + if a1.shape != a2.shape: + return False + return bool((a1 == a2).all()) + +def asarray(a, dtype=None, order=None, maskna=None, ownmaskna=False): + """ + Convert the input to an array. + + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to an array. This + includes lists, lists of tuples, tuples, tuples of tuples, tuples + of lists and ndarrays. + dtype : data-type, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F'}, optional + Whether to use row-major ('C') or column-major ('F' for FORTRAN) + memory representation. Defaults to 'C'. + maskna : bool or None, optional + If this is set to True, it forces the array to have an NA mask. + If this is set to False, it forces the array to not have an NA + mask. + ownmaskna : bool, optional + If this is set to True, forces the array to have a mask which + it owns. + + Returns + ------- + out : ndarray + Array interpretation of `a`. No copy is performed if the input + is already an ndarray. If `a` is a subclass of ndarray, a base + class ndarray is returned. + + See Also + -------- + asanyarray : Similar function which passes through subclasses. + ascontiguousarray : Convert input to a contiguous array. + asfarray : Convert input to a floating point ndarray. + asfortranarray : Convert input to an ndarray with column-major + memory order. + asarray_chkfinite : Similar function which checks input for NaNs and Infs. 
+ fromiter : Create an array from an iterator. + fromfunction : Construct an array by executing a function on grid + positions. + + Examples + -------- + Convert a list into an array: + + >>> a = [1, 2] + >>> np.asarray(a) + array([1, 2]) + + Existing arrays are not copied: + + >>> a = np.array([1, 2]) + >>> np.asarray(a) is a + True + + If `dtype` is set, array is copied only if dtype does not match: + + >>> a = np.array([1, 2], dtype=np.float32) + >>> np.asarray(a, dtype=np.float32) is a + True + >>> np.asarray(a, dtype=np.float64) is a + False + + Contrary to `asanyarray`, ndarray subclasses are not passed through: + + >>> issubclass(np.matrix, np.ndarray) + True + >>> a = np.matrix([[1, 2]]) + >>> np.asarray(a) is a + False + >>> np.asanyarray(a) is a + True + + """ + return array(a, dtype, copy=False, order=order, + maskna=maskna, ownmaskna=ownmaskna) + set_string_function(array_str, 0) set_string_function(array_repr, 1) diff --git a/lib_pypy/pypy_test/test_binascii.py b/lib_pypy/pypy_test/test_binascii.py deleted file mode 100644 --- a/lib_pypy/pypy_test/test_binascii.py +++ /dev/null @@ -1,168 +0,0 @@ -from __future__ import absolute_import -import py -from lib_pypy import binascii - -# Create binary test data -data = "The quick brown fox jumps over the lazy dog.\r\n" -# Be slow so we don't depend on other modules -data += "".join(map(chr, xrange(256))) -data += "\r\nHello world.\n" - -def test_exceptions(): - # Check module exceptions - assert issubclass(binascii.Error, Exception) - assert issubclass(binascii.Incomplete, Exception) - -def test_functions(): - # Check presence of all functions - funcs = [] - for suffix in "base64", "hqx", "uu", "hex": - prefixes = ["a2b_", "b2a_"] - if suffix == "hqx": - prefixes.extend(["crc_", "rlecode_", "rledecode_"]) - for prefix in prefixes: - name = prefix + suffix - assert callable(getattr(binascii, name)) - py.test.raises(TypeError, getattr(binascii, name)) - for name in ("hexlify", "unhexlify"): - assert 
callable(getattr(binascii, name)) - py.test.raises(TypeError, getattr(binascii, name)) - -def test_base64valid(): - # Test base64 with valid data - MAX_BASE64 = 57 - lines = [] - for i in range(0, len(data), MAX_BASE64): - b = data[i:i+MAX_BASE64] - a = binascii.b2a_base64(b) - lines.append(a) - res = "" - for line in lines: - b = binascii.a2b_base64(line) - res = res + b - assert res == data - -def test_base64invalid(): - # Test base64 with random invalid characters sprinkled throughout - # (This requires a new version of binascii.) - MAX_BASE64 = 57 - lines = [] - for i in range(0, len(data), MAX_BASE64): - b = data[i:i+MAX_BASE64] - a = binascii.b2a_base64(b) - lines.append(a) - - fillers = "" - valid = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+/" - for i in xrange(256): - c = chr(i) - if c not in valid: - fillers += c - def addnoise(line): - noise = fillers - ratio = len(line) // len(noise) - res = "" - while line and noise: - if len(line) // len(noise) > ratio: - c, line = line[0], line[1:] - else: - c, noise = noise[0], noise[1:] - res += c - return res + noise + line - res = "" - for line in map(addnoise, lines): - b = binascii.a2b_base64(line) - res += b - assert res == data - - # Test base64 with just invalid characters, which should return - # empty strings. TBD: shouldn't it raise an exception instead ? 
- assert binascii.a2b_base64(fillers) == '' - -def test_uu(): - MAX_UU = 45 - lines = [] - for i in range(0, len(data), MAX_UU): - b = data[i:i+MAX_UU] - a = binascii.b2a_uu(b) - lines.append(a) - res = "" - for line in lines: - b = binascii.a2b_uu(line) - res += b - assert res == data - - assert binascii.a2b_uu("\x7f") == "\x00"*31 - assert binascii.a2b_uu("\x80") == "\x00"*32 - assert binascii.a2b_uu("\xff") == "\x00"*31 - py.test.raises(binascii.Error, binascii.a2b_uu, "\xff\x00") - py.test.raises(binascii.Error, binascii.a2b_uu, "!!!!") - - py.test.raises(binascii.Error, binascii.b2a_uu, 46*"!") - -def test_crc32(): - crc = binascii.crc32("Test the CRC-32 of") - crc = binascii.crc32(" this string.", crc) - assert crc == 1571220330 - - crc = binascii.crc32('frotz\n', 0) - assert crc == -372923920 - - py.test.raises(TypeError, binascii.crc32) - -def test_hex(): - # test hexlification - s = '{s\005\000\000\000worldi\002\000\000\000s\005\000\000\000helloi\001\000\000\0000' - t = binascii.b2a_hex(s) - u = binascii.a2b_hex(t) - assert s == u - py.test.raises(TypeError, binascii.a2b_hex, t[:-1]) - py.test.raises(TypeError, binascii.a2b_hex, t[:-1] + 'q') - - # Verify the treatment of Unicode strings - assert binascii.hexlify(unicode('a', 'ascii')) == '61' - -def test_qp(): - # A test for SF bug 534347 (segfaults without the proper fix) - try: - binascii.a2b_qp("", **{1:1}) - except TypeError: - pass - else: - fail("binascii.a2b_qp(**{1:1}) didn't raise TypeError") - assert binascii.a2b_qp("= ") == "= " - assert binascii.a2b_qp("==") == "=" - assert binascii.a2b_qp("=AX") == "=AX" - py.test.raises(TypeError, binascii.b2a_qp, foo="bar") - assert binascii.a2b_qp("=00\r\n=00") == "\x00\r\n\x00" - assert binascii.b2a_qp("\xff\r\n\xff\n\xff") == "=FF\r\n=FF\r\n=FF" - target = "0"*75+"=\r\n=FF\r\n=FF\r\n=FF" - assert binascii.b2a_qp("0"*75+"\xff\r\n\xff\r\n\xff") == target - -def test_empty_string(): - # A test for SF bug #1022953. Make sure SystemError is not raised. 
- for n in ['b2a_qp', 'a2b_hex', 'b2a_base64', 'a2b_uu', 'a2b_qp', - 'b2a_hex', 'unhexlify', 'hexlify', 'crc32', 'b2a_hqx', - 'a2b_hqx', 'a2b_base64', 'rlecode_hqx', 'b2a_uu', - 'rledecode_hqx']: - f = getattr(binascii, n) - f('') - binascii.crc_hqx('', 0) - -def test_qp_bug_case(): - assert binascii.b2a_qp('y'*77, False, False) == 'y'*75 + '=\nyy' - assert binascii.b2a_qp(' '*77, False, False) == ' '*75 + '=\n =20' - assert binascii.b2a_qp('y'*76, False, False) == 'y'*76 - assert binascii.b2a_qp(' '*76, False, False) == ' '*75 + '=\n=20' - -def test_wrong_padding(): - s = 'CSixpLDtKSC/7Liuvsax4iC6uLmwMcijIKHaILzSwd/H0SC8+LCjwLsgv7W/+Mj3IQ' - py.test.raises(binascii.Error, binascii.a2b_base64, s) - -def test_crap_after_padding(): - s = 'xxx=axxxx' - assert binascii.a2b_base64(s) == '\xc7\x1c' - -def test_wrong_args(): - # this should grow as a way longer list - py.test.raises(TypeError, binascii.a2b_base64, 42) diff --git a/lib_pypy/pypy_test/test_locale.py b/lib_pypy/pypy_test/test_locale.py deleted file mode 100644 --- a/lib_pypy/pypy_test/test_locale.py +++ /dev/null @@ -1,79 +0,0 @@ -from __future__ import absolute_import -import py -import sys - -from lib_pypy.ctypes_config_cache import rebuild -rebuild.rebuild_one('locale.ctc.py') - -from lib_pypy import _locale - - -def setup_module(mod): - if sys.platform == 'darwin': - py.test.skip("Locale support on MacOSX is minimal and cannot be tested") - -class TestLocale: - def setup_class(cls): - cls.oldlocale = _locale.setlocale(_locale.LC_NUMERIC) - if sys.platform.startswith("win"): - cls.tloc = "en" - elif sys.platform.startswith("freebsd"): - cls.tloc = "en_US.US-ASCII" - else: - cls.tloc = "en_US.UTF8" - try: - _locale.setlocale(_locale.LC_NUMERIC, cls.tloc) - except _locale.Error: - py.test.skip("test locale %s not supported" % cls.tloc) - - def teardown_class(cls): - _locale.setlocale(_locale.LC_NUMERIC, cls.oldlocale) - - def test_format(self): - py.test.skip("XXX fix or kill me") - - def 
testformat(formatstr, value, grouping = 0, output=None): - if output: - print "%s %% %s =? %s ..." %\ - (repr(formatstr), repr(value), repr(output)), - else: - print "%s %% %s works? ..." % (repr(formatstr), repr(value)), - result = locale.format(formatstr, value, grouping = grouping) - assert result == output - - testformat("%f", 1024, grouping=1, output='1,024.000000') - testformat("%f", 102, grouping=1, output='102.000000') - testformat("%f", -42, grouping=1, output='-42.000000') - testformat("%+f", -42, grouping=1, output='-42.000000') - testformat("%20.f", -42, grouping=1, output=' -42') - testformat("%+10.f", -4200, grouping=1, output=' -4,200') - testformat("%-10.f", 4200, grouping=1, output='4,200 ') - - def test_getpreferredencoding(self): - py.test.skip("XXX fix or kill me") - # Invoke getpreferredencoding to make sure it does not cause exceptions - _locale.getpreferredencoding() - - # Test BSD Rune locale's bug for isctype functions. - def test_bsd_bug(self): - def teststrop(s, method, output): - print "%s.%s() =? %s ..." 
% (repr(s), method, repr(output)), - result = getattr(s, method)() - assert result == output - - oldlocale = _locale.setlocale(_locale.LC_CTYPE) - _locale.setlocale(_locale.LC_CTYPE, self.tloc) - try: - teststrop('\x20', 'isspace', True) - teststrop('\xa0', 'isspace', False) - teststrop('\xa1', 'isspace', False) - teststrop('\xc0', 'isalpha', False) - teststrop('\xc0', 'isalnum', False) - teststrop('\xc0', 'isupper', False) - teststrop('\xc0', 'islower', False) - teststrop('\xec\xa0\xbc', 'split', ['\xec\xa0\xbc']) - teststrop('\xed\x95\xa0', 'strip', '\xed\x95\xa0') - teststrop('\xcc\x85', 'lower', '\xcc\x85') - teststrop('\xed\x95\xa0', 'upper', '\xed\x95\xa0') - finally: - _locale.setlocale(_locale.LC_CTYPE, oldlocale) diff --git a/lib_pypy/pypy_test/test_site_extra.py b/lib_pypy/pypy_test/test_site_extra.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pypy_test/test_site_extra.py @@ -0,0 +1,13 @@ +import sys, os + + +def test_preimported_modules(): + lst = ['__builtin__', '_codecs', '_warnings', 'codecs', 'encodings', + 'exceptions', 'signal', 'sys', 'zipimport'] + g = os.popen("'%s' -c 'import sys; print sorted(sys.modules)'" % + (sys.executable,)) + real_data = g.read() + g.close() + for name in lst: + quoted_name = repr(name) + assert quoted_name in real_data diff --git a/lib_pypy/pypy_test/test_struct_extra.py b/lib_pypy/pypy_test/test_struct_extra.py deleted file mode 100644 --- a/lib_pypy/pypy_test/test_struct_extra.py +++ /dev/null @@ -1,25 +0,0 @@ -from __future__ import absolute_import -from lib_pypy import struct - -def test_simple(): - morezeros = '\x00' * (struct.calcsize('l')-4) - assert struct.pack(' %i0 - int_return %i0 - """) + convert_float_bytes_to_longlong %%f0 -> %(result_var)s + %(return_op)s %(result_var)s + """ % {"result_var": result_var, "return_op": return_op}) def check_force_cast(FROM, TO, operations, value): diff --git a/pypy/module/__builtin__/interp_memoryview.py b/pypy/module/__builtin__/interp_memoryview.py --- 
a/pypy/module/__builtin__/interp_memoryview.py +++ b/pypy/module/__builtin__/interp_memoryview.py @@ -62,6 +62,10 @@ return W_MemoryView(buf) def descr_buffer(self, space): + """Note that memoryview() objects in PyPy support buffer(), whereas + not in CPython; but CPython supports passing memoryview() to most + built-in functions that accept buffers, with the notable exception + of the buffer() built-in.""" return space.wrap(self.buf) def descr_tobytes(self, space): diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -1,9 +1,10 @@ import py - +from pypy.conftest import gettestobjspace class AppTestAST: def setup_class(cls): + cls.space = gettestobjspace(usemodules=['struct']) cls.w_ast = cls.space.appexec([], """(): import _ast return _ast""") diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -4,7 +4,7 @@ class AppTestCodecs: def setup_class(cls): - space = gettestobjspace(usemodules=('unicodedata',)) + space = gettestobjspace(usemodules=('unicodedata', 'struct')) cls.space = space def test_register_noncallable(self): diff --git a/pypy/module/_continuation/test/test_zpickle.py b/pypy/module/_continuation/test/test_zpickle.py --- a/pypy/module/_continuation/test/test_zpickle.py +++ b/pypy/module/_continuation/test/test_zpickle.py @@ -106,8 +106,9 @@ version = 0 def setup_class(cls): - cls.space = gettestobjspace(usemodules=('_continuation',), + cls.space = gettestobjspace(usemodules=('_continuation', 'struct'), CALL_METHOD=True) + cls.space.config.translation.continuation = True cls.space.appexec([], """(): global continulet, A, __name__ diff --git a/pypy/module/_hashlib/test/test_hashlib.py b/pypy/module/_hashlib/test/test_hashlib.py --- a/pypy/module/_hashlib/test/test_hashlib.py +++ 
b/pypy/module/_hashlib/test/test_hashlib.py @@ -3,7 +3,7 @@ class AppTestHashlib: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['_hashlib']) + cls.space = gettestobjspace(usemodules=['_hashlib', 'array', 'struct']) def test_method_names(self): import _hashlib diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -92,7 +92,8 @@ class AppTestSocketConnection(BaseConnectionTest): def setup_class(cls): - space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal')) + space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal', + 'struct', 'array')) cls.space = space cls.w_connections = space.newlist([]) diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -6,7 +6,7 @@ from pypy.rpython.lltypesystem import lltype, rffi def setup_module(mod): - mod.space = gettestobjspace(usemodules=['_socket', 'array']) + mod.space = gettestobjspace(usemodules=['_socket', 'array', 'struct']) global socket import socket mod.w_socket = space.appexec([], "(): import _socket as m; return m") @@ -374,10 +374,9 @@ def test_socket_connect(self): import _socket, os s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM, 0) - # XXX temporarily we use python.org to test, will have more robust tests - # in the absence of a network connection later when more parts of the - # socket API are implemented. Currently skip the test if there is no - # connection. + # it would be nice to have a test which works even if there is no + # network connection. However, this one is "good enough" for now. Skip + # it if there is no connection. 
try: s.connect(("www.python.org", 80)) except _socket.gaierror as ex: diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -72,7 +72,7 @@ class AppTestConnectedSSL: def setup_class(cls): - space = gettestobjspace(usemodules=('_ssl', '_socket')) + space = gettestobjspace(usemodules=('_ssl', '_socket', 'struct')) cls.space = space def setup_method(self, method): @@ -145,7 +145,7 @@ # to exercise the poll() calls def setup_class(cls): - space = gettestobjspace(usemodules=('_ssl', '_socket')) + space = gettestobjspace(usemodules=('_ssl', '_socket', 'struct')) cls.space = space cls.space.appexec([], """(): import socket; socket.setdefaulttimeout(1) diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -1,3 +1,4 @@ +from pypy.conftest import gettestobjspace from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase import py @@ -5,6 +6,7 @@ class AppTestArrayModule(AppTestCpythonExtensionBase): enable_leak_checking = False + extra_modules = ['array'] def test_basic(self): module = self.import_module(name='array') diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -165,8 +165,11 @@ return leaking class AppTestCpythonExtensionBase(LeakCheckingTest): + extra_modules = [] + def setup_class(cls): - cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi']) + cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi'] + + cls.extra_modules) cls.space.getbuiltinmodule("cpyext") from pypy.module.imp.importing import importhook importhook(cls.space, "os") # warm up reference counts diff --git a/pypy/module/cpyext/test/test_import.py 
b/pypy/module/cpyext/test/test_import.py --- a/pypy/module/cpyext/test/test_import.py +++ b/pypy/module/cpyext/test/test_import.py @@ -19,7 +19,7 @@ space.wrap('__name__'))) == 'foobar' def test_getmoduledict(self, space, api): - testmod = "binascii" + testmod = "_functools" w_pre_dict = api.PyImport_GetModuleDict() assert not space.is_true(space.contains(w_pre_dict, space.wrap(testmod))) diff --git a/pypy/module/fcntl/test/test_fcntl.py b/pypy/module/fcntl/test/test_fcntl.py --- a/pypy/module/fcntl/test/test_fcntl.py +++ b/pypy/module/fcntl/test/test_fcntl.py @@ -13,7 +13,7 @@ class AppTestFcntl: def setup_class(cls): - space = gettestobjspace(usemodules=('fcntl', 'array')) + space = gettestobjspace(usemodules=('fcntl', 'array', 'struct')) cls.space = space tmpprefix = str(udir.ensure('test_fcntl', dir=1).join('tmp_')) cls.w_tmp = space.wrap(tmpprefix) diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -1050,7 +1050,11 @@ os.environ['LANG'] = oldlang class AppTestImportHooks(object): - def test_meta_path_1(self): + + def setup_class(cls): + cls.space = gettestobjspace(usemodules=('struct',)) + + def test_meta_path(self): tried_imports = [] class Importer(object): def find_module(self, fullname, path=None): diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py --- a/pypy/module/itertools/test/test_itertools.py +++ b/pypy/module/itertools/test/test_itertools.py @@ -842,7 +842,7 @@ class AppTestItertools27: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['itertools']) + cls.space = gettestobjspace(usemodules=['itertools', 'struct']) if cls.space.is_true(cls.space.appexec([], """(): import sys; return sys.version_info < (2, 7) """)): diff --git a/pypy/module/marshal/test/make_test_marshal.py b/pypy/module/marshal/test/make_test_marshal.py deleted file mode 100644 --- 
a/pypy/module/marshal/test/make_test_marshal.py +++ /dev/null @@ -1,78 +0,0 @@ - -TESTCASES = """\ - None - False - True - StopIteration - Ellipsis - 42 - -17 - sys.maxint - -1.25 - -1.25 #2 - 2+5j - 2+5j #2 - 42L - -1234567890123456789012345678901234567890L - hello # not interned - "hello" - () - (1, 2) - [] - [3, 4] - {} - {5: 6, 7: 8} - func.func_code - scopefunc.func_code - u'hello' - set() - set([1, 2]) - frozenset() - frozenset([3, 4]) -""".strip().split('\n') - -def readable(s): - for c, repl in ( - ("'", '_quote_'), ('"', '_Quote_'), (':', '_colon_'), ('.', '_dot_'), - ('[', '_list_'), (']', '_tsil_'), ('{', '_dict_'), ('}', '_tcid_'), - ('-', '_minus_'), ('+', '_plus_'), - (',', '_comma_'), ('(', '_brace_'), (')', '_ecarb_') ): - s = s.replace(c, repl) - lis = list(s) - for i, c in enumerate(lis): - if c.isalnum() or c == '_': - continue - lis[i] = '_' - return ''.join(lis) - -print """class AppTestMarshal: -""" -for line in TESTCASES: - line = line.strip() - name = readable(line) - version = '' - extra = '' - if line.endswith('#2'): - version = ', 2' - extra = '; assert len(s) in (9, 17)' - src = '''\ - def test_%(name)s(self): - import sys - hello = "he" - hello += "llo" - def func(x): - return lambda y: x+y - scopefunc = func(42) - import marshal, StringIO - case = %(line)s - print "case: %%-30s func=%(name)s" %% (case, ) - s = marshal.dumps(case%(version)s)%(extra)s - x = marshal.loads(s) - assert x == case - f = StringIO.StringIO() - marshal.dump(case, f) - f.seek(0) - x = marshal.load(f) - assert x == case -''' % {'name': name, 'line': line, 'version' : version, 'extra': extra} - print src diff --git a/pypy/module/math/test/test_math.py b/pypy/module/math/test/test_math.py --- a/pypy/module/math/test/test_math.py +++ b/pypy/module/math/test/test_math.py @@ -6,7 +6,7 @@ class AppTestMath: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['math']) + cls.space = gettestobjspace(usemodules=['math', 'struct']) cls.w_cases = 
cls.space.wrap(test_direct.MathTests.TESTCASES) cls.w_consistent_host = cls.space.wrap(test_direct.consistent_host) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1125,7 +1125,8 @@ @unwrap_spec(subok=bool, copy=bool, ownmaskna=bool) def array(space, w_item_or_iterable, w_dtype=None, w_order=None, - subok=True, copy=True, w_maskna=None, ownmaskna=False): + subok=True, copy=True, w_maskna=None, ownmaskna=False, + w_ndmin=None): # find scalar if w_maskna is None: w_maskna = space.w_None @@ -1170,8 +1171,13 @@ break if dtype is None: dtype = interp_dtype.get_dtype_cache(space).w_float64dtype + shapelen = len(shape) + if w_ndmin is not None and not space.is_w(w_ndmin, space.w_None): + ndmin = space.int_w(w_ndmin) + if ndmin > shapelen: + shape = [1] * (ndmin - shapelen) + shape + shapelen = ndmin arr = W_NDimArray(shape[:], dtype=dtype, order=order) - shapelen = len(shape) arr_iter = arr.create_iter() # XXX we might want to have a jitdriver here for i in range(len(elems_w)): diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -211,6 +211,18 @@ assert a.shape == (3,) assert a.dtype is dtype(int) + def test_ndmin(self): + from _numpypy import array + + arr = array([[[1]]], ndmin=1) + assert arr.shape == (1, 1, 1) + + def test_noop_ndmin(self): + from _numpypy import array + + arr = array([1], ndmin=3) + assert arr.shape == (1, 1, 1) + def test_type(self): from _numpypy import array ar = array(range(5)) diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -197,7 +197,6 @@ def test_signbit(self): from _numpypy import 
signbit, copysign - import struct assert (signbit([0, 0.0, 1, 1.0, float('inf'), float('nan')]) == [False, False, False, False, False, False]).all() diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -14,10 +14,10 @@ def setup_module(mod): if os.name != 'nt': - mod.space = gettestobjspace(usemodules=['posix', 'fcntl']) + mod.space = gettestobjspace(usemodules=['posix', 'fcntl', 'struct']) else: # On windows, os.popen uses the subprocess module - mod.space = gettestobjspace(usemodules=['posix', '_rawffi', 'thread']) + mod.space = gettestobjspace(usemodules=['posix', '_rawffi', 'thread', 'struct']) mod.path = udir.join('posixtestfile.txt') mod.path.write("this is a test") mod.path2 = udir.join('test_posix2-') diff --git a/pypy/module/rctime/test/test_rctime.py b/pypy/module/rctime/test/test_rctime.py --- a/pypy/module/rctime/test/test_rctime.py +++ b/pypy/module/rctime/test/test_rctime.py @@ -3,7 +3,7 @@ class AppTestRCTime: def setup_class(cls): - space = gettestobjspace(usemodules=('rctime',)) + space = gettestobjspace(usemodules=('rctime', 'struct')) cls.space = space def test_attributes(self): diff --git a/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py b/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py --- a/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py +++ b/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py @@ -142,3 +142,39 @@ assert str(b) == "[7 8 9]" b = a[2:1, ] assert str(b) == "[]" + + def test_equal(self): + from _numpypy import array + from numpypy import array_equal + + a = [1, 2, 3] + b = [1, 2, 3] + + assert array_equal(a, b) + assert array_equal(a, array(b)) + assert array_equal(array(a), b) + assert array_equal(array(a), array(b)) + + def test_not_equal(self): + from _numpypy import array + from numpypy import array_equal + + a = [1, 2, 3] + b = [1, 2, 4] + + assert not array_equal(a, b) + 
assert not array_equal(a, array(b)) + assert not array_equal(array(a), b) + assert not array_equal(array(a), array(b)) + + def test_mismatched_shape(self): + from _numpypy import array + from numpypy import array_equal + + a = [1, 2, 3] + b = [[1, 2, 3], [1, 2, 3]] + + assert not array_equal(a, b) + assert not array_equal(a, array(b)) + assert not array_equal(array(a), b) + assert not array_equal(array(a), array(b)) diff --git a/pypy/module/test_lib_pypy/test_binascii.py b/pypy/module/test_lib_pypy/test_binascii.py deleted file mode 100644 --- a/pypy/module/test_lib_pypy/test_binascii.py +++ /dev/null @@ -1,8 +0,0 @@ - -""" Some more binascii.py tests -""" - -class AppTestBinAscii: - def test_incorrect_padding(self): - import binascii - raises(binascii.Error, "'x'.decode('base64')") diff --git a/pypy/objspace/std/ropeobject.py b/pypy/objspace/std/ropeobject.py --- a/pypy/objspace/std/ropeobject.py +++ b/pypy/objspace/std/ropeobject.py @@ -39,11 +39,6 @@ return w_self return W_RopeObject(w_self._node) - def unicode_w(w_self, space): - # XXX should this use the default encoding? - from pypy.objspace.std.unicodetype import plain_str2unicode - return plain_str2unicode(space, w_self._node.flatten_string()) - W_RopeObject.EMPTY = W_RopeObject(rope.LiteralStringNode.EMPTY) W_RopeObject.PREBUILT = [W_RopeObject(rope.LiteralStringNode.PREBUILT[i]) for i in range(256)] diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -36,6 +36,20 @@ return None return space.wrap(compute_unique_id(space.bytes_w(self))) + def unicode_w(w_self, space): + # Use the default encoding. 
+ from pypy.objspace.std.unicodetype import unicode_from_string, \ + decode_object + w_defaultencoding = space.call_function(space.sys.get( + 'getdefaultencoding')) + from pypy.objspace.std.unicodetype import _get_encoding_and_errors, \ + unicode_from_string, decode_object + encoding, errors = _get_encoding_and_errors(space, w_defaultencoding, + space.w_None) + if encoding is None and errors is None: + return space.unicode_w(unicode_from_string(space, w_self)) + return space.unicode_w(decode_object(space, w_self, encoding, errors)) + class W_StringObject(W_AbstractStringObject): from pypy.objspace.std.stringtype import str_typedef as typedef diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py --- a/pypy/objspace/std/test/test_obj.py +++ b/pypy/objspace/std/test/test_obj.py @@ -256,4 +256,3 @@ # the fast path, and tries to call type() (which is set to None just # above) space.isinstance_w(w_a, space.w_unicode) # does not crash - diff --git a/pypy/rlib/rzipfile.py b/pypy/rlib/rzipfile.py --- a/pypy/rlib/rzipfile.py +++ b/pypy/rlib/rzipfile.py @@ -12,8 +12,7 @@ rzlib = None # XXX hack to get crc32 to work -from pypy.tool.lib_pypy import import_from_lib_pypy -crc_32_tab = import_from_lib_pypy('binascii').crc_32_tab +from pypy.module.binascii.interp_crc32 import crc_32_tab rcrc_32_tab = [r_uint(i) for i in crc_32_tab] diff --git a/pypy/rpython/llinterp.py b/pypy/rpython/llinterp.py --- a/pypy/rpython/llinterp.py +++ b/pypy/rpython/llinterp.py @@ -770,6 +770,10 @@ checkadr(adr) return llmemory.cast_adr_to_int(adr, mode) + def op_convert_float_bytes_to_longlong(self, f): + from pypy.rlib import longlong2float + return longlong2float.float2longlong(f) + def op_weakref_create(self, v_obj): def objgetter(): # special support for gcwrapper.py return self.getval(v_obj) diff --git a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py --- a/pypy/rpython/lltypesystem/lloperation.py +++ 
b/pypy/rpython/lltypesystem/lloperation.py @@ -349,6 +349,7 @@ 'cast_float_to_ulonglong':LLOp(canfold=True), 'truncate_longlong_to_int':LLOp(canfold=True), 'force_cast': LLOp(sideeffects=False), # only for rffi.cast() + 'convert_float_bytes_to_longlong': LLOp(canfold=True), # __________ pointer operations __________ diff --git a/pypy/tool/test/test_lib_pypy.py b/pypy/tool/test/test_lib_pypy.py --- a/pypy/tool/test/test_lib_pypy.py +++ b/pypy/tool/test/test_lib_pypy.py @@ -11,7 +11,7 @@ assert lib_pypy.LIB_PYTHON_MODIFIED.check(dir=1) def test_import_from_lib_pypy(): - binascii = lib_pypy.import_from_lib_pypy('binascii') - assert type(binascii) is type(lib_pypy) - assert binascii.__name__ == 'lib_pypy.binascii' - assert hasattr(binascii, 'crc_32_tab') + _functools = lib_pypy.import_from_lib_pypy('_functools') + assert type(_functools) is type(lib_pypy) + assert _functools.__name__ == 'lib_pypy._functools' + assert hasattr(_functools, 'partial') diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -484,7 +484,7 @@ 'shl', 'shr', 'sal', 'sar', 'rol', 'ror', 'mul', 'imul', 'div', 'idiv', 'bswap', 'bt', 'rdtsc', 'punpck', 'pshufd', 'pcmp', 'pand', 'psllw', 'pslld', 'psllq', - 'paddq', 'pinsr', 'pmul', 'psrl', + 'paddq', 'pinsr', 'pmul', 'psrl', 'vmul', # sign-extending moves should not produce GC pointers 'cbtw', 'cwtl', 'cwtd', 'cltd', 'cltq', 'cqto', # zero-extending moves should not produce GC pointers diff --git a/pypy/translator/driver.py b/pypy/translator/driver.py --- a/pypy/translator/driver.py +++ b/pypy/translator/driver.py @@ -585,22 +585,6 @@ # task_compile_c = taskdef(task_compile_c, ['source_c'], "Compiling c source") - def backend_run(self, backend): - c_entryp = self.c_entryp - standalone = self.standalone - if standalone: - os.system(c_entryp) - else: - runner = self.extra.get('run', lambda f: f()) - runner(c_entryp) - - def 
task_run_c(self): - self.backend_run('c') - # - task_run_c = taskdef(task_run_c, ['compile_c'], - "Running compiled c source", - idemp=True) - def task_llinterpret_lltype(self): from pypy.rpython.llinterp import LLInterpreter py.log.setconsumer("llinterp operation", None) @@ -710,11 +694,6 @@ shutil.copy(main_exe, '.') self.log.info("Copied to %s" % os.path.join(os.getcwd(), dllname)) - def task_run_cli(self): - pass - task_run_cli = taskdef(task_run_cli, ['compile_cli'], - 'XXX') - def task_source_jvm(self): from pypy.translator.jvm.genjvm import GenJvm from pypy.translator.jvm.node import EntryPoint diff --git a/pypy/translator/goal/translate.py b/pypy/translator/goal/translate.py --- a/pypy/translator/goal/translate.py +++ b/pypy/translator/goal/translate.py @@ -31,7 +31,6 @@ ("backendopt", "do backend optimizations", "--backendopt", ""), ("source", "create source", "-s --source", ""), ("compile", "compile", "-c --compile", " (default goal)"), - ("run", "run the resulting binary", "--run", ""), ("llinterpret", "interpret the rtyped flow graphs", "--llinterpret", ""), ] def goal_options(): @@ -78,7 +77,7 @@ defaultfactory=list), # xxx default goals ['annotate', 'rtype', 'backendopt', 'source', 'compile'] ArbitraryOption("skipped_goals", "XXX", - defaultfactory=lambda: ['run']), + defaultfactory=list), OptionDescription("goal_options", "Goals that should be reached during translation", goal_options()), diff --git a/pypy/translator/jvm/opcodes.py b/pypy/translator/jvm/opcodes.py --- a/pypy/translator/jvm/opcodes.py +++ b/pypy/translator/jvm/opcodes.py @@ -241,4 +241,6 @@ 'cast_ulonglong_to_float': jvm.PYPYULONGTODOUBLE, 'cast_primitive': [PushAllArgs, CastPrimitive, StoreResult], 'force_cast': [PushAllArgs, CastPrimitive, StoreResult], + + 'convert_float_bytes_to_longlong': jvm.PYPYDOUBLEBYTESTOLONG, }) diff --git a/pypy/translator/jvm/typesystem.py b/pypy/translator/jvm/typesystem.py --- a/pypy/translator/jvm/typesystem.py +++ b/pypy/translator/jvm/typesystem.py 
@@ -941,6 +941,7 @@ PYPYDOUBLETOULONG = Method.s(jPyPy, 'double_to_ulong', (jDouble,), jLong) PYPYULONGTODOUBLE = Method.s(jPyPy, 'ulong_to_double', (jLong,), jDouble) PYPYLONGBITWISENEGATE = Method.v(jPyPy, 'long_bitwise_negate', (jLong,), jLong) +PYPYDOUBLEBYTESTOLONG = Method.v(jPyPy, 'pypy__float2longlong', (jDouble,), jLong) PYPYSTRTOINT = Method.v(jPyPy, 'str_to_int', (jString,), jInt) PYPYSTRTOUINT = Method.v(jPyPy, 'str_to_uint', (jString,), jInt) PYPYSTRTOLONG = Method.v(jPyPy, 'str_to_long', (jString,), jLong) diff --git a/pypy/translator/test/test_driver.py b/pypy/translator/test/test_driver.py --- a/pypy/translator/test/test_driver.py +++ b/pypy/translator/test/test_driver.py @@ -6,7 +6,7 @@ def test_ctr(): td = TranslationDriver() expected = ['annotate', 'backendopt', 'llinterpret', 'rtype', 'source', - 'compile', 'run', 'pyjitpl'] + 'compile', 'pyjitpl'] assert set(td.exposed) == set(expected) assert td.backend_select_goals(['compile_c']) == ['compile_c'] @@ -33,7 +33,6 @@ 'rtype_ootype', 'rtype_lltype', 'source_cli', 'source_c', 'compile_cli', 'compile_c', - 'run_c', 'run_cli', 'compile_jvm', 'source_jvm', 'run_jvm', 'pyjitpl_lltype', 'pyjitpl_ootype'] @@ -50,6 +49,6 @@ 'backendopt_lltype'] expected = ['annotate', 'backendopt', 'llinterpret', 'rtype', 'source_c', - 'compile_c', 'run_c', 'pyjitpl'] + 'compile_c', 'pyjitpl'] assert set(td.exposed) == set(expected) From noreply at buildbot.pypy.org Thu Mar 22 23:22:22 2012 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 22 Mar 2012 23:22:22 +0100 (CET) Subject: [pypy-commit] pypy numpypy-out: merge from default Message-ID: <20120322222222.6798282438@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: numpypy-out Changeset: r53932:592aa2194480 Date: 2012-03-22 22:39 +0200 http://bitbucket.org/pypy/pypy/changeset/592aa2194480/ Log: merge from default diff --git a/lib-python/modified-2.7/site.py b/lib-python/modified-2.7/site.py --- a/lib-python/modified-2.7/site.py +++ 
b/lib-python/modified-2.7/site.py @@ -550,9 +550,18 @@ "'import usercustomize' failed; use -v for traceback" +def import_builtin_stuff(): + """PyPy specific: pre-import a few built-in modules, because + some programs actually rely on them to be in sys.modules :-(""" + import exceptions + if 'zipimport' in sys.builtin_module_names: + import zipimport + + def main(): global ENABLE_USER_SITE + import_builtin_stuff() abs__file__() known_paths = removeduppaths() if (os.name == "posix" and sys.path and diff --git a/lib_pypy/_locale.py b/lib_pypy/_locale.py deleted file mode 100644 --- a/lib_pypy/_locale.py +++ /dev/null @@ -1,337 +0,0 @@ -# ctypes implementation of _locale module by Victor Stinner, 2008-03-27 - -# ------------------------------------------------------------ -# Note that we also have our own interp-level implementation -# ------------------------------------------------------------ - -""" -Support for POSIX locales. -""" - -from ctypes import (Structure, POINTER, create_string_buffer, - c_ubyte, c_int, c_char_p, c_wchar_p, c_size_t) -from ctypes_support import standard_c_lib as libc -from ctypes_support import get_errno - -# load the platform-specific cache made by running locale.ctc.py -from ctypes_config_cache._locale_cache import * - -try: from __pypy__ import builtinify -except ImportError: builtinify = lambda f: f - - -# Ubuntu Gusty i386 structure -class lconv(Structure): - _fields_ = ( - # Numeric (non-monetary) information. - ("decimal_point", c_char_p), # Decimal point character. - ("thousands_sep", c_char_p), # Thousands separator. - - # Each element is the number of digits in each group; - # elements with higher indices are farther left. - # An element with value CHAR_MAX means that no further grouping is done. - # An element with value 0 means that the previous element is used - # for all groups farther left. */ - ("grouping", c_char_p), - - # Monetary information. - - # First three chars are a currency symbol from ISO 4217. 
- # Fourth char is the separator. Fifth char is '\0'. - ("int_curr_symbol", c_char_p), - ("currency_symbol", c_char_p), # Local currency symbol. - ("mon_decimal_point", c_char_p), # Decimal point character. - ("mon_thousands_sep", c_char_p), # Thousands separator. - ("mon_grouping", c_char_p), # Like `grouping' element (above). - ("positive_sign", c_char_p), # Sign for positive values. - ("negative_sign", c_char_p), # Sign for negative values. - ("int_frac_digits", c_ubyte), # Int'l fractional digits. - ("frac_digits", c_ubyte), # Local fractional digits. - # 1 if currency_symbol precedes a positive value, 0 if succeeds. - ("p_cs_precedes", c_ubyte), - # 1 iff a space separates currency_symbol from a positive value. - ("p_sep_by_space", c_ubyte), - # 1 if currency_symbol precedes a negative value, 0 if succeeds. - ("n_cs_precedes", c_ubyte), - # 1 iff a space separates currency_symbol from a negative value. - ("n_sep_by_space", c_ubyte), - - # Positive and negative sign positions: - # 0 Parentheses surround the quantity and currency_symbol. - # 1 The sign string precedes the quantity and currency_symbol. - # 2 The sign string follows the quantity and currency_symbol. - # 3 The sign string immediately precedes the currency_symbol. - # 4 The sign string immediately follows the currency_symbol. - ("p_sign_posn", c_ubyte), - ("n_sign_posn", c_ubyte), - # 1 if int_curr_symbol precedes a positive value, 0 if succeeds. - ("int_p_cs_precedes", c_ubyte), - # 1 iff a space separates int_curr_symbol from a positive value. - ("int_p_sep_by_space", c_ubyte), - # 1 if int_curr_symbol precedes a negative value, 0 if succeeds. - ("int_n_cs_precedes", c_ubyte), - # 1 iff a space separates int_curr_symbol from a negative value. - ("int_n_sep_by_space", c_ubyte), - # Positive and negative sign positions: - # 0 Parentheses surround the quantity and int_curr_symbol. - # 1 The sign string precedes the quantity and int_curr_symbol. 
- # 2 The sign string follows the quantity and int_curr_symbol. - # 3 The sign string immediately precedes the int_curr_symbol. - # 4 The sign string immediately follows the int_curr_symbol. - ("int_p_sign_posn", c_ubyte), - ("int_n_sign_posn", c_ubyte), - ) - -_setlocale = libc.setlocale -_setlocale.argtypes = (c_int, c_char_p) -_setlocale.restype = c_char_p - -_localeconv = libc.localeconv -_localeconv.argtypes = None -_localeconv.restype = POINTER(lconv) - -_strcoll = libc.strcoll -_strcoll.argtypes = (c_char_p, c_char_p) -_strcoll.restype = c_int - -_wcscoll = libc.wcscoll -_wcscoll.argtypes = (c_wchar_p, c_wchar_p) -_wcscoll.restype = c_int - -_strxfrm = libc.strxfrm -_strxfrm.argtypes = (c_char_p, c_char_p, c_size_t) -_strxfrm.restype = c_size_t - -HAS_LIBINTL = hasattr(libc, 'gettext') -if HAS_LIBINTL: - _gettext = libc.gettext - _gettext.argtypes = (c_char_p,) - _gettext.restype = c_char_p - - _dgettext = libc.dgettext - _dgettext.argtypes = (c_char_p, c_char_p) - _dgettext.restype = c_char_p - - _dcgettext = libc.dcgettext - _dcgettext.argtypes = (c_char_p, c_char_p, c_int) - _dcgettext.restype = c_char_p - - _textdomain = libc.textdomain - _textdomain.argtypes = (c_char_p,) - _textdomain.restype = c_char_p - - _bindtextdomain = libc.bindtextdomain - _bindtextdomain.argtypes = (c_char_p, c_char_p) - _bindtextdomain.restype = c_char_p - - HAS_BIND_TEXTDOMAIN_CODESET = hasattr(libc, 'bindtextdomain_codeset') - if HAS_BIND_TEXTDOMAIN_CODESET: - _bind_textdomain_codeset = libc.bindtextdomain_codeset - _bind_textdomain_codeset.argtypes = (c_char_p, c_char_p) - _bind_textdomain_codeset.restype = c_char_p - -class Error(Exception): - pass - -def fixup_ulcase(): - import string - #import strop - - # create uppercase map string - ul = [] - for c in xrange(256): - c = chr(c) - if c.isupper(): - ul.append(c) - ul = ''.join(ul) - string.uppercase = ul - #strop.uppercase = ul - - # create lowercase string - ul = [] - for c in xrange(256): - c = chr(c) - if c.islower(): 
- ul.append(c) - ul = ''.join(ul) - string.lowercase = ul - #strop.lowercase = ul - - # create letters string - ul = [] - for c in xrange(256): - c = chr(c) - if c.isalpha(): - ul.append(c) - ul = ''.join(ul) - string.letters = ul - - at builtinify -def setlocale(category, locale=None): - "(integer,string=None) -> string. Activates/queries locale processing." - if locale: - # set locale - result = _setlocale(category, locale) - if not result: - raise Error("unsupported locale setting") - - # record changes to LC_CTYPE - if category in (LC_CTYPE, LC_ALL): - fixup_ulcase() - else: - # get locale - result = _setlocale(category, None) - if not result: - raise Error("locale query failed") - return result - -def _copy_grouping(text): - groups = [ ord(group) for group in text ] - if groups: - groups.append(0) - return groups - - at builtinify -def localeconv(): - "() -> dict. Returns numeric and monetary locale-specific parameters." - - # if LC_NUMERIC is different in the C library, use saved value - lp = _localeconv() - l = lp.contents - - # hopefully, the localeconv result survives the C library calls - # involved herein - - # Numeric information - result = { - "decimal_point": l.decimal_point, - "thousands_sep": l.thousands_sep, - "grouping": _copy_grouping(l.grouping), - "int_curr_symbol": l.int_curr_symbol, - "currency_symbol": l.currency_symbol, - "mon_decimal_point": l.mon_decimal_point, - "mon_thousands_sep": l.mon_thousands_sep, - "mon_grouping": _copy_grouping(l.mon_grouping), - "positive_sign": l.positive_sign, - "negative_sign": l.negative_sign, - "int_frac_digits": l.int_frac_digits, - "frac_digits": l.frac_digits, - "p_cs_precedes": l.p_cs_precedes, - "p_sep_by_space": l.p_sep_by_space, - "n_cs_precedes": l.n_cs_precedes, - "n_sep_by_space": l.n_sep_by_space, - "p_sign_posn": l.p_sign_posn, - "n_sign_posn": l.n_sign_posn, - } - return result - - at builtinify -def strcoll(s1, s2): - "string,string -> int. Compares two strings according to the locale." 
- - # If both arguments are byte strings, use strcoll. - if isinstance(s1, str) and isinstance(s2, str): - return _strcoll(s1, s2) - - # If neither argument is unicode, it's an error. - if not isinstance(s1, unicode) and not isinstance(s2, unicode): - raise ValueError("strcoll arguments must be strings") - - # Convert the non-unicode argument to unicode. - s1 = unicode(s1) - s2 = unicode(s2) - - # Collate the strings. - return _wcscoll(s1, s2) - - at builtinify -def strxfrm(s): - "string -> string. Returns a string that behaves for cmp locale-aware." - - # assume no change in size, first - n1 = len(s) + 1 - buf = create_string_buffer(n1) - n2 = _strxfrm(buf, s, n1) + 1 - if n2 > n1: - # more space needed - buf = create_string_buffer(n2) - _strxfrm(buf, s, n2) - return buf.value - - at builtinify -def getdefaultlocale(): - # TODO: Port code from CPython for Windows and Mac OS - raise NotImplementedError() - -if HAS_LANGINFO: - _nl_langinfo = libc.nl_langinfo - _nl_langinfo.argtypes = (nl_item,) - _nl_langinfo.restype = c_char_p - - def nl_langinfo(key): - """nl_langinfo(key) -> string - Return the value for the locale information associated with key.""" - # Check whether this is a supported constant. GNU libc sometimes - # returns numeric values in the char* return value, which would - # crash PyString_FromString. 
- result = _nl_langinfo(key) - if result is not None: - return result - raise ValueError("unsupported langinfo constant") - -if HAS_LIBINTL: - @builtinify - def gettext(msg): - """gettext(msg) -> string - Return translation of msg.""" - return _gettext(msg) - - @builtinify - def dgettext(domain, msg): - """dgettext(domain, msg) -> string - Return translation of msg in domain.""" - return _dgettext(domain, msg) - - @builtinify - def dcgettext(domain, msg, category): - """dcgettext(domain, msg, category) -> string - Return translation of msg in domain and category.""" - return _dcgettext(domain, msg, category) - - @builtinify - def textdomain(domain): - """textdomain(domain) -> string - Set the C library's textdomain to domain, returning the new domain.""" - return _textdomain(domain) - - @builtinify - def bindtextdomain(domain, dir): - """bindtextdomain(domain, dir) -> string - Bind the C library's domain to dir.""" - dirname = _bindtextdomain(domain, dir) - if not dirname: - errno = get_errno() - raise OSError(errno) - return dirname - - if HAS_BIND_TEXTDOMAIN_CODESET: - @builtinify - def bind_textdomain_codeset(domain, codeset): - """bind_textdomain_codeset(domain, codeset) -> string - Bind the C library's domain to codeset.""" - codeset = _bind_textdomain_codeset(domain, codeset) - if codeset: - return codeset - return None - -__all__ = ( - 'Error', - 'setlocale', 'localeconv', 'strxfrm', 'strcoll', -) + ALL_CONSTANTS -if HAS_LIBINTL: - __all__ += ('gettext', 'dgettext', 'dcgettext', 'textdomain', - 'bindtextdomain') - if HAS_BIND_TEXTDOMAIN_CODESET: - __all__ += ('bind_textdomain_codeset',) -if HAS_LANGINFO: - __all__ += ('nl_langinfo',) diff --git a/lib_pypy/array.py b/lib_pypy/array.py deleted file mode 100644 --- a/lib_pypy/array.py +++ /dev/null @@ -1,531 +0,0 @@ -"""This module defines an object type which can efficiently represent -an array of basic values: characters, integers, floating point -numbers. 
Arrays are sequence types and behave very much like lists, -except that the type of objects stored in them is constrained. The -type is specified at object creation time by using a type code, which -is a single character. The following type codes are defined: - - Type code C Type Minimum size in bytes - 'c' character 1 - 'b' signed integer 1 - 'B' unsigned integer 1 - 'u' Unicode character 2 - 'h' signed integer 2 - 'H' unsigned integer 2 - 'i' signed integer 2 - 'I' unsigned integer 2 - 'l' signed integer 4 - 'L' unsigned integer 4 - 'f' floating point 4 - 'd' floating point 8 - -The constructor is: - -array(typecode [, initializer]) -- create a new array -""" - -from struct import calcsize, pack, pack_into, unpack_from -import operator - -# the buffer-like object to use internally: trying from -# various places in order... -try: - import _rawffi # a reasonable implementation based - _RAWARRAY = _rawffi.Array('c') # on raw_malloc, and providing a - def bytebuffer(size): # real address - return _RAWARRAY(size, autofree=True) - def getbufaddress(buf): - return buf.buffer -except ImportError: - try: - from __pypy__ import bytebuffer # a reasonable implementation - def getbufaddress(buf): # compatible with oo backends, - return 0 # but no address - except ImportError: - # not running on PyPy. Fall back to ctypes... - import ctypes - bytebuffer = ctypes.create_string_buffer - def getbufaddress(buf): - voidp = ctypes.cast(ctypes.pointer(buf), ctypes.c_void_p) - return voidp.value - -# ____________________________________________________________ - -TYPECODES = "cbBuhHiIlLfd" - -class array(object): - """array(typecode [, initializer]) -> array - - Return a new array whose items are restricted by typecode, and - initialized from the optional initializer value, which must be a list, - string. or iterable over elements of the appropriate type. - - Arrays represent basic values and behave very much like lists, except - the type of objects stored in them is constrained. 
- - Methods: - - append() -- append a new item to the end of the array - buffer_info() -- return information giving the current memory info - byteswap() -- byteswap all the items of the array - count() -- return number of occurences of an object - extend() -- extend array by appending multiple elements from an iterable - fromfile() -- read items from a file object - fromlist() -- append items from the list - fromstring() -- append items from the string - index() -- return index of first occurence of an object - insert() -- insert a new item into the array at a provided position - pop() -- remove and return item (default last) - read() -- DEPRECATED, use fromfile() - remove() -- remove first occurence of an object - reverse() -- reverse the order of the items in the array - tofile() -- write all items to a file object - tolist() -- return the array converted to an ordinary list - tostring() -- return the array converted to a string - write() -- DEPRECATED, use tofile() - - Attributes: - - typecode -- the typecode character used to create the array - itemsize -- the length in bytes of one array item - """ - __slots__ = ["typecode", "itemsize", "_data", "_descriptor", "__weakref__"] - - def __new__(cls, typecode, initializer=[], **extrakwds): - self = object.__new__(cls) - if cls is array and extrakwds: - raise TypeError("array() does not take keyword arguments") - if not isinstance(typecode, str) or len(typecode) != 1: - raise TypeError( - "array() argument 1 must be char, not %s" % type(typecode)) - if typecode not in TYPECODES: - raise ValueError( - "bad typecode (must be one of %s)" % ', '.join(TYPECODES)) - self._data = bytebuffer(0) - self.typecode = typecode - self.itemsize = calcsize(typecode) - if isinstance(initializer, list): - self.fromlist(initializer) - elif isinstance(initializer, str): - self.fromstring(initializer) - elif isinstance(initializer, unicode) and self.typecode == "u": - self.fromunicode(initializer) - else: - self.extend(initializer) - 
return self - - def _clear(self): - self._data = bytebuffer(0) - - ##### array-specific operations - - def fromfile(self, f, n): - """Read n objects from the file object f and append them to the end of - the array. Also called as read.""" - if not isinstance(f, file): - raise TypeError("arg1 must be open file") - size = self.itemsize * n - item = f.read(size) - if len(item) < size: - raise EOFError("not enough items in file") - self.fromstring(item) - - def fromlist(self, l): - """Append items to array from list.""" - if not isinstance(l, list): - raise TypeError("arg must be list") - self._fromiterable(l) - - def fromstring(self, s): - """Appends items from the string, interpreting it as an array of machine - values, as if it had been read from a file using the fromfile() - method.""" - if isinstance(s, unicode): - s = str(s) - self._frombuffer(s) - - def _frombuffer(self, s): - length = len(s) - if length % self.itemsize != 0: - raise ValueError("string length not a multiple of item size") - boundary = len(self._data) - newdata = bytebuffer(boundary + length) - newdata[:boundary] = self._data - newdata[boundary:] = s - self._data = newdata - - def fromunicode(self, ustr): - """Extends this array with data from the unicode string ustr. The array - must be a type 'u' array; otherwise a ValueError is raised. Use - array.fromstring(ustr.encode(...)) to append Unicode data to an array of - some other type.""" - if not self.typecode == "u": - raise ValueError( - "fromunicode() may only be called on type 'u' arrays") - # XXX the following probable bug is not emulated: - # CPython accepts a non-unicode string or a buffer, and then - # behaves just like fromstring(), except that it strangely truncates - # string arguments at multiples of the unicode byte size. - # Let's only accept unicode arguments for now. 
- if not isinstance(ustr, unicode): - raise TypeError("fromunicode() argument should probably be " - "a unicode string") - # _frombuffer() does the currect thing using - # the buffer behavior of unicode objects - self._frombuffer(buffer(ustr)) - - def tofile(self, f): - """Write all items (as machine values) to the file object f. Also - called as write.""" - if not isinstance(f, file): - raise TypeError("arg must be open file") - f.write(self.tostring()) - - def tolist(self): - """Convert array to an ordinary list with the same items.""" - count = len(self._data) // self.itemsize - return list(unpack_from('%d%s' % (count, self.typecode), self._data)) - - def tostring(self): - return self._data[:] - - def __buffer__(self): - return buffer(self._data) - - def tounicode(self): - """Convert the array to a unicode string. The array must be a type 'u' - array; otherwise a ValueError is raised. Use array.tostring().decode() - to obtain a unicode string from an array of some other type.""" - if self.typecode != "u": - raise ValueError("tounicode() may only be called on type 'u' arrays") - # XXX performance is not too good - return u"".join(self.tolist()) - - def byteswap(self): - """Byteswap all items of the array. If the items in the array are not - 1, 2, 4, or 8 bytes in size, RuntimeError is raised.""" - if self.itemsize not in [1, 2, 4, 8]: - raise RuntimeError("byteswap not supported for this array") - # XXX slowish - itemsize = self.itemsize - bytes = self._data - for start in range(0, len(bytes), itemsize): - stop = start + itemsize - bytes[start:stop] = bytes[start:stop][::-1] - - def buffer_info(self): - """Return a tuple (address, length) giving the current memory address - and the length in items of the buffer used to hold array's contents. The - length should be multiplied by the itemsize attribute to calculate the - buffer length in bytes. 
On PyPy the address might be meaningless - (returned as 0), depending on the available modules.""" - return (getbufaddress(self._data), len(self)) - - read = fromfile - - write = tofile - - ##### general object protocol - - def __repr__(self): - if len(self._data) == 0: - return "array('%s')" % self.typecode - elif self.typecode == "c": - return "array('%s', %s)" % (self.typecode, repr(self.tostring())) - elif self.typecode == "u": - return "array('%s', %s)" % (self.typecode, repr(self.tounicode())) - else: - return "array('%s', %s)" % (self.typecode, repr(self.tolist())) - - def __copy__(self): - a = array(self.typecode) - a._data = bytebuffer(len(self._data)) - a._data[:] = self._data - return a - - def __eq__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) == buffer(other._data) - else: - return self.tolist() == other.tolist() - - def __ne__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) != buffer(other._data) - else: - return self.tolist() != other.tolist() - - def __lt__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) < buffer(other._data) - else: - return self.tolist() < other.tolist() - - def __gt__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) > buffer(other._data) - else: - return self.tolist() > other.tolist() - - def __le__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) <= buffer(other._data) - else: - return self.tolist() <= other.tolist() - - def __ge__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) >= buffer(other._data) - else: - return self.tolist() >= other.tolist() - - 
def __reduce__(self): - dict = getattr(self, '__dict__', None) - data = self.tostring() - if data: - initargs = (self.typecode, data) - else: - initargs = (self.typecode,) - return (type(self), initargs, dict) - - ##### list methods - - def append(self, x): - """Append new value x to the end of the array.""" - self._frombuffer(pack(self.typecode, x)) - - def count(self, x): - """Return number of occurences of x in the array.""" - return operator.countOf(self, x) - - def extend(self, iterable): - """Append items to the end of the array.""" - if isinstance(iterable, array) \ - and not self.typecode == iterable.typecode: - raise TypeError("can only extend with array of same kind") - self._fromiterable(iterable) - - def index(self, x): - """Return index of first occurence of x in the array.""" - return operator.indexOf(self, x) - - def insert(self, i, x): - """Insert a new item x into the array before position i.""" - seqlength = len(self) - if i < 0: - i += seqlength - if i < 0: - i = 0 - elif i > seqlength: - i = seqlength - boundary = i * self.itemsize - data = pack(self.typecode, x) - newdata = bytebuffer(len(self._data) + len(data)) - newdata[:boundary] = self._data[:boundary] - newdata[boundary:boundary+self.itemsize] = data - newdata[boundary+self.itemsize:] = self._data[boundary:] - self._data = newdata - - def pop(self, i=-1): - """Return the i-th element and delete it from the array. 
i defaults to - -1.""" - seqlength = len(self) - if i < 0: - i += seqlength - if not (0 <= i < seqlength): - raise IndexError(i) - boundary = i * self.itemsize - result = unpack_from(self.typecode, self._data, boundary)[0] - newdata = bytebuffer(len(self._data) - self.itemsize) - newdata[:boundary] = self._data[:boundary] - newdata[boundary:] = self._data[boundary+self.itemsize:] - self._data = newdata - return result - - def remove(self, x): - """Remove the first occurence of x in the array.""" - self.pop(self.index(x)) - - def reverse(self): - """Reverse the order of the items in the array.""" - lst = self.tolist() - lst.reverse() - self._clear() - self.fromlist(lst) - - ##### list protocol - - def __len__(self): - return len(self._data) // self.itemsize - - def __add__(self, other): - if not isinstance(other, array): - raise TypeError("can only append array to array") - if self.typecode != other.typecode: - raise TypeError("bad argument type for built-in operation") - return array(self.typecode, buffer(self._data) + buffer(other._data)) - - def __mul__(self, repeat): - return array(self.typecode, buffer(self._data) * repeat) - - __rmul__ = __mul__ - - def __getitem__(self, i): - seqlength = len(self) - if isinstance(i, slice): - start, stop, step = i.indices(seqlength) - if step != 1: - sublist = self.tolist()[i] # fall-back - return array(self.typecode, sublist) - if start < 0: - start = 0 - if stop < start: - stop = start - assert stop <= seqlength - return array(self.typecode, self._data[start * self.itemsize : - stop * self.itemsize]) - else: - if i < 0: - i += seqlength - if self.typecode == 'c': # speed trick - return self._data[i] - if not (0 <= i < seqlength): - raise IndexError(i) - boundary = i * self.itemsize - return unpack_from(self.typecode, self._data, boundary)[0] - - def __getslice__(self, i, j): - return self.__getitem__(slice(i, j)) - - def __setitem__(self, i, x): - if isinstance(i, slice): - if (not isinstance(x, array) - or self.typecode != 
x.typecode): - raise TypeError("can only assign array of same kind" - " to array slice") - seqlength = len(self) - start, stop, step = i.indices(seqlength) - if step != 1: - sublist = self.tolist() # fall-back - sublist[i] = x.tolist() - self._clear() - self.fromlist(sublist) - return - if start < 0: - start = 0 - if stop < start: - stop = start - assert stop <= seqlength - boundary1 = start * self.itemsize - boundary2 = stop * self.itemsize - boundary2new = boundary1 + len(x._data) - if boundary2 == boundary2new: - self._data[boundary1:boundary2] = x._data - else: - newdata = bytebuffer(len(self._data) + boundary2new-boundary2) - newdata[:boundary1] = self._data[:boundary1] - newdata[boundary1:boundary2new] = x._data - newdata[boundary2new:] = self._data[boundary2:] - self._data = newdata - else: - seqlength = len(self) - if i < 0: - i += seqlength - if self.typecode == 'c': # speed trick - self._data[i] = x - return - if not (0 <= i < seqlength): - raise IndexError(i) - boundary = i * self.itemsize - pack_into(self.typecode, self._data, boundary, x) - - def __setslice__(self, i, j, x): - self.__setitem__(slice(i, j), x) - - def __delitem__(self, i): - if isinstance(i, slice): - seqlength = len(self) - start, stop, step = i.indices(seqlength) - if start < 0: - start = 0 - if stop < start: - stop = start - assert stop <= seqlength - if step != 1: - sublist = self.tolist() # fall-back - del sublist[i] - self._clear() - self.fromlist(sublist) - return - dellength = stop - start - boundary1 = start * self.itemsize - boundary2 = stop * self.itemsize - newdata = bytebuffer(len(self._data) - (boundary2-boundary1)) - newdata[:boundary1] = self._data[:boundary1] - newdata[boundary1:] = self._data[boundary2:] - self._data = newdata - else: - seqlength = len(self) - if i < 0: - i += seqlength - if not (0 <= i < seqlength): - raise IndexError(i) - boundary = i * self.itemsize - newdata = bytebuffer(len(self._data) - self.itemsize) - newdata[:boundary] = self._data[:boundary] 
- newdata[boundary:] = self._data[boundary+self.itemsize:] - self._data = newdata - - def __delslice__(self, i, j): - self.__delitem__(slice(i, j)) - - def __contains__(self, item): - for x in self: - if x == item: - return True - return False - - def __iadd__(self, other): - if not isinstance(other, array): - raise TypeError("can only extend array with array") - self.extend(other) - return self - - def __imul__(self, repeat): - newdata = buffer(self._data) * repeat - self._data = bytebuffer(len(newdata)) - self._data[:] = newdata - return self - - def __iter__(self): - p = 0 - typecode = self.typecode - itemsize = self.itemsize - while p < len(self._data): - yield unpack_from(typecode, self._data, p)[0] - p += itemsize - - ##### internal methods - - def _fromiterable(self, iterable): - iterable = tuple(iterable) - n = len(iterable) - boundary = len(self._data) - newdata = bytebuffer(boundary + n * self.itemsize) - newdata[:boundary] = self._data - pack_into('%d%s' % (n, self.typecode), newdata, boundary, *iterable) - self._data = newdata - -ArrayType = array diff --git a/lib_pypy/binascii.py b/lib_pypy/binascii.py deleted file mode 100644 --- a/lib_pypy/binascii.py +++ /dev/null @@ -1,720 +0,0 @@ -"""A pure Python implementation of binascii. - -Rather slow and buggy in corner cases. -PyPy provides an RPython version too. 
-""" - -class Error(Exception): - pass - -class Done(Exception): - pass - -class Incomplete(Exception): - pass - -def a2b_uu(s): - if not s: - return '' - - length = (ord(s[0]) - 0x20) % 64 - - def quadruplets_gen(s): - while s: - try: - yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3]) - except IndexError: - s += ' ' - yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3]) - return - s = s[4:] - - try: - result = [''.join( - [chr((A - 0x20) << 2 | (((B - 0x20) >> 4) & 0x3)), - chr(((B - 0x20) & 0xf) << 4 | (((C - 0x20) >> 2) & 0xf)), - chr(((C - 0x20) & 0x3) << 6 | ((D - 0x20) & 0x3f)) - ]) for A, B, C, D in quadruplets_gen(s[1:].rstrip())] - except ValueError: - raise Error('Illegal char') - result = ''.join(result) - trailingdata = result[length:] - if trailingdata.strip('\x00'): - raise Error('Trailing garbage') - result = result[:length] - if len(result) < length: - result += ((length - len(result)) * '\x00') - return result - - -def b2a_uu(s): - length = len(s) - if length > 45: - raise Error('At most 45 bytes at once') - - def triples_gen(s): - while s: - try: - yield ord(s[0]), ord(s[1]), ord(s[2]) - except IndexError: - s += '\0\0' - yield ord(s[0]), ord(s[1]), ord(s[2]) - return - s = s[3:] - - result = [''.join( - [chr(0x20 + (( A >> 2 ) & 0x3F)), - chr(0x20 + (((A << 4) | ((B >> 4) & 0xF)) & 0x3F)), - chr(0x20 + (((B << 2) | ((C >> 6) & 0x3)) & 0x3F)), - chr(0x20 + (( C ) & 0x3F))]) - for A, B, C in triples_gen(s)] - return chr(ord(' ') + (length & 077)) + ''.join(result) + '\n' - - -table_a2b_base64 = { - 'A': 0, - 'B': 1, - 'C': 2, - 'D': 3, - 'E': 4, - 'F': 5, - 'G': 6, - 'H': 7, - 'I': 8, - 'J': 9, - 'K': 10, - 'L': 11, - 'M': 12, - 'N': 13, - 'O': 14, - 'P': 15, - 'Q': 16, - 'R': 17, - 'S': 18, - 'T': 19, - 'U': 20, - 'V': 21, - 'W': 22, - 'X': 23, - 'Y': 24, - 'Z': 25, - 'a': 26, - 'b': 27, - 'c': 28, - 'd': 29, - 'e': 30, - 'f': 31, - 'g': 32, - 'h': 33, - 'i': 34, - 'j': 35, - 'k': 36, - 'l': 37, - 'm': 38, - 'n': 39, - 'o': 40, - 'p': 41, - 'q': 42, - 
'r': 43, - 's': 44, - 't': 45, - 'u': 46, - 'v': 47, - 'w': 48, - 'x': 49, - 'y': 50, - 'z': 51, - '0': 52, - '1': 53, - '2': 54, - '3': 55, - '4': 56, - '5': 57, - '6': 58, - '7': 59, - '8': 60, - '9': 61, - '+': 62, - '/': 63, - '=': 0, -} - - -def a2b_base64(s): - if not isinstance(s, (str, unicode)): - raise TypeError("expected string or unicode, got %r" % (s,)) - s = s.rstrip() - # clean out all invalid characters, this also strips the final '=' padding - # check for correct padding - - def next_valid_char(s, pos): - for i in range(pos + 1, len(s)): - c = s[i] - if c < '\x7f': - try: - table_a2b_base64[c] - return c - except KeyError: - pass - return None - - quad_pos = 0 - leftbits = 0 - leftchar = 0 - res = [] - for i, c in enumerate(s): - if c > '\x7f' or c == '\n' or c == '\r' or c == ' ': - continue - if c == '=': - if quad_pos < 2 or (quad_pos == 2 and next_valid_char(s, i) != '='): - continue - else: - leftbits = 0 - break - try: - next_c = table_a2b_base64[c] - except KeyError: - continue - quad_pos = (quad_pos + 1) & 0x03 - leftchar = (leftchar << 6) | next_c - leftbits += 6 - if leftbits >= 8: - leftbits -= 8 - res.append((leftchar >> leftbits & 0xff)) - leftchar &= ((1 << leftbits) - 1) - if leftbits != 0: - raise Error('Incorrect padding') - - return ''.join([chr(i) for i in res]) - -table_b2a_base64 = \ -"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" - -def b2a_base64(s): - length = len(s) - final_length = length % 3 - - def triples_gen(s): - while s: - try: - yield ord(s[0]), ord(s[1]), ord(s[2]) - except IndexError: - s += '\0\0' - yield ord(s[0]), ord(s[1]), ord(s[2]) - return - s = s[3:] - - - a = triples_gen(s[ :length - final_length]) - - result = [''.join( - [table_b2a_base64[( A >> 2 ) & 0x3F], - table_b2a_base64[((A << 4) | ((B >> 4) & 0xF)) & 0x3F], - table_b2a_base64[((B << 2) | ((C >> 6) & 0x3)) & 0x3F], - table_b2a_base64[( C ) & 0x3F]]) - for A, B, C in a] - - final = s[length - final_length:] - if final_length == 
0: - snippet = '' - elif final_length == 1: - a = ord(final[0]) - snippet = table_b2a_base64[(a >> 2 ) & 0x3F] + \ - table_b2a_base64[(a << 4 ) & 0x3F] + '==' - else: - a = ord(final[0]) - b = ord(final[1]) - snippet = table_b2a_base64[(a >> 2) & 0x3F] + \ - table_b2a_base64[((a << 4) | (b >> 4) & 0xF) & 0x3F] + \ - table_b2a_base64[(b << 2) & 0x3F] + '=' - return ''.join(result) + snippet + '\n' - -def a2b_qp(s, header=False): - inp = 0 - odata = [] - while inp < len(s): - if s[inp] == '=': - inp += 1 - if inp >= len(s): - break - # Soft line breaks - if (s[inp] == '\n') or (s[inp] == '\r'): - if s[inp] != '\n': - while inp < len(s) and s[inp] != '\n': - inp += 1 - if inp < len(s): - inp += 1 - elif s[inp] == '=': - # broken case from broken python qp - odata.append('=') - inp += 1 - elif s[inp] in hex_numbers and s[inp + 1] in hex_numbers: - ch = chr(int(s[inp:inp+2], 16)) - inp += 2 - odata.append(ch) - else: - odata.append('=') - elif header and s[inp] == '_': - odata.append(' ') - inp += 1 - else: - odata.append(s[inp]) - inp += 1 - return ''.join(odata) - -def b2a_qp(data, quotetabs=False, istext=True, header=False): - """quotetabs=True means that tab and space characters are always - quoted. - istext=False means that \r and \n are treated as regular characters - header=True encodes space characters with '_' and requires - real '_' characters to be quoted. - """ - MAXLINESIZE = 76 - - # See if this string is using CRLF line ends - lf = data.find('\n') - crlf = lf > 0 and data[lf-1] == '\r' - - inp = 0 - linelen = 0 - odata = [] - while inp < len(data): - c = data[inp] - if (c > '~' or - c == '=' or - (header and c == '_') or - (c == '.' 
and linelen == 0 and (inp+1 == len(data) or - data[inp+1] == '\n' or - data[inp+1] == '\r')) or - (not istext and (c == '\r' or c == '\n')) or - ((c == '\t' or c == ' ') and (inp + 1 == len(data))) or - (c <= ' ' and c != '\r' and c != '\n' and - (quotetabs or (not quotetabs and (c != '\t' and c != ' '))))): - linelen += 3 - if linelen >= MAXLINESIZE: - odata.append('=') - if crlf: odata.append('\r') - odata.append('\n') - linelen = 3 - odata.append('=' + two_hex_digits(ord(c))) - inp += 1 - else: - if (istext and - (c == '\n' or (inp+1 < len(data) and c == '\r' and - data[inp+1] == '\n'))): - linelen = 0 - # Protect against whitespace on end of line - if (len(odata) > 0 and - (odata[-1] == ' ' or odata[-1] == '\t')): - ch = ord(odata[-1]) - odata[-1] = '=' - odata.append(two_hex_digits(ch)) - - if crlf: odata.append('\r') - odata.append('\n') - if c == '\r': - inp += 2 - else: - inp += 1 - else: - if (inp + 1 < len(data) and - data[inp+1] != '\n' and - (linelen + 1) >= MAXLINESIZE): - odata.append('=') - if crlf: odata.append('\r') - odata.append('\n') - linelen = 0 - - linelen += 1 - if header and c == ' ': - c = '_' - odata.append(c) - inp += 1 - return ''.join(odata) - -hex_numbers = '0123456789ABCDEF' -def hex(n): - if n == 0: - return '0' - - if n < 0: - n = -n - sign = '-' - else: - sign = '' - arr = [] - - def hex_gen(n): - """ Yield a nibble at a time. 
""" - while n: - yield n % 0x10 - n = n / 0x10 - - for nibble in hex_gen(n): - arr = [hex_numbers[nibble]] + arr - return sign + ''.join(arr) - -def two_hex_digits(n): - return hex_numbers[n / 0x10] + hex_numbers[n % 0x10] - - -def strhex_to_int(s): - i = 0 - for c in s: - i = i * 0x10 + hex_numbers.index(c) - return i - -hqx_encoding = '!"#$%&\'()*+,-012345689 at ABCDEFGHIJKLMNPQRSTUVXYZ[`abcdefhijklmpqr' - -DONE = 0x7f -SKIP = 0x7e -FAIL = 0x7d - -table_a2b_hqx = [ - #^@ ^A ^B ^C ^D ^E ^F ^G - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - #\b \t \n ^K ^L \r ^N ^O - FAIL, FAIL, SKIP, FAIL, FAIL, SKIP, FAIL, FAIL, - #^P ^Q ^R ^S ^T ^U ^V ^W - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - #^X ^Y ^Z ^[ ^\ ^] ^^ ^_ - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - # ! " # $ % & ' - FAIL, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, - #( ) * + , - . / - 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, FAIL, FAIL, - #0 1 2 3 4 5 6 7 - 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, FAIL, - #8 9 : ; < = > ? - 0x14, 0x15, DONE, FAIL, FAIL, FAIL, FAIL, FAIL, - #@ A B C D E F G - 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, - #H I J K L M N O - 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, FAIL, - #P Q R S T U V W - 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, FAIL, - #X Y Z [ \ ] ^ _ - 0x2C, 0x2D, 0x2E, 0x2F, FAIL, FAIL, FAIL, FAIL, - #` a b c d e f g - 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, FAIL, - #h i j k l m n o - 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, FAIL, FAIL, - #p q r s t u v w - 0x3D, 0x3E, 0x3F, FAIL, FAIL, FAIL, FAIL, FAIL, - #x y z { | } ~ ^? 
- FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, -] - -def a2b_hqx(s): - result = [] - - def quadruples_gen(s): - t = [] - for c in s: - res = table_a2b_hqx[ord(c)] - if res == SKIP: - continue - elif res == FAIL: - raise Error('Illegal character') - elif res == DONE: - yield t - raise Done - else: - t.append(res) - if len(t) == 4: - yield t - t = [] - yield t - - done = 0 - try: - for snippet in quadruples_gen(s): - length = len(snippet) - if length == 4: - result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4))) - result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2))) - result.append(chr(((snippet[2] & 0x03) << 6) | (snippet[3]))) - elif length == 3: - result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4))) - result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2))) - elif length == 2: - result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4))) - except Done: - done = 1 - except Error: - raise - return (''.join(result), done) - -def b2a_hqx(s): - result =[] - - def triples_gen(s): - while s: - try: - yield ord(s[0]), ord(s[1]), ord(s[2]) - except IndexError: - yield tuple([ord(c) for c in s]) - s = s[3:] - - for snippet in 
triples_gen(s): - length = len(snippet) - if length == 3: - result.append( - hqx_encoding[(snippet[0] & 0xfc) >> 2]) - result.append(hqx_encoding[ - ((snippet[0] & 0x03) << 4) | ((snippet[1] & 0xf0) >> 4)]) - result.append(hqx_encoding[ - (snippet[1] & 0x0f) << 2 | ((snippet[2] & 0xc0) >> 6)]) - result.append(hqx_encoding[snippet[2] & 0x3f]) - elif length == 2: - result.append( - hqx_encoding[(snippet[0] & 0xfc) >> 2]) - result.append(hqx_encoding[ - ((snippet[0] & 0x03) << 4) | ((snippet[1] & 0xf0) >> 4)]) - result.append(hqx_encoding[ - (snippet[1] & 0x0f) << 2]) - elif length == 1: - result.append( - hqx_encoding[(snippet[0] & 0xfc) >> 2]) - result.append(hqx_encoding[ - ((snippet[0] & 0x03) << 4)]) - return ''.join(result) - -crctab_hqx = [ - 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, - 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, - 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, - 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, - 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, - 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, - 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, - 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, - 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, - 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, - 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12, - 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, - 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, - 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, - 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, - 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, - 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, - 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, - 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 
0xe37f, 0xf35e, - 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, - 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, - 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, - 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, - 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, - 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, - 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, - 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, - 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, - 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, - 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, - 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, - 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0, -] - -def crc_hqx(s, crc): - for c in s: - crc = ((crc << 8) & 0xff00) ^ crctab_hqx[((crc >> 8) & 0xff) ^ ord(c)] - - return crc - -def rlecode_hqx(s): - """ - Run length encoding for binhex4. - The CPython implementation does not do run length encoding - of \x90 characters. This implementation does. - """ - if not s: - return '' - result = [] - prev = s[0] - count = 1 - # Add a dummy character to get the loop to go one extra round. - # The dummy must be different from the last character of s. - # In the same step we remove the first character, which has - # already been stored in prev. - if s[-1] == '!': - s = s[1:] + '?' - else: - s = s[1:] + '!' 
- - for c in s: - if c == prev and count < 255: - count += 1 - else: - if count == 1: - if prev != '\x90': - result.append(prev) - else: - result.extend(['\x90', '\x00']) - elif count < 4: - if prev != '\x90': - result.extend([prev] * count) - else: - result.extend(['\x90', '\x00'] * count) - else: - if prev != '\x90': - result.extend([prev, '\x90', chr(count)]) - else: - result.extend(['\x90', '\x00', '\x90', chr(count)]) - count = 1 - prev = c - - return ''.join(result) - -def rledecode_hqx(s): - s = s.split('\x90') - result = [s[0]] - prev = s[0] - for snippet in s[1:]: - count = ord(snippet[0]) - if count > 0: - result.append(prev[-1] * (count-1)) - prev = snippet - else: - result. append('\x90') - prev = '\x90' - result.append(snippet[1:]) - - return ''.join(result) - -crc_32_tab = [ - 0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L, - 0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L, - 0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L, - 0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL, - 0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L, - 0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L, - 0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L, - 0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL, - 0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L, - 0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL, - 0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L, - 0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L, - 0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L, - 0x01db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x06b6b51fL, - 0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0x0f00f934L, 0x9609a88eL, - 0xe10e9818L, 0x7f6a0dbbL, 0x086d3d2dL, 0x91646c97L, 0xe6635c01L, - 0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL, - 0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 
0x12b7e950L, - 0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L, - 0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L, - 0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL, - 0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L, - 0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L, - 0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL, - 0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L, - 0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L, - 0x03b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x04db2615L, - 0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0x0d6d6a3eL, 0x7a6a5aa8L, - 0xe40ecf0bL, 0x9309ff9dL, 0x0a00ae27L, 0x7d079eb1L, 0xf00f9344L, - 0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL, - 0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL, - 0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L, - 0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L, - 0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL, - 0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL, - 0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L, - 0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL, - 0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L, - 0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL, - 0x026d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x05005713L, - 0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0x0cb61b38L, 0x92d28e9bL, - 0xe5d5be0dL, 0x7cdcefb7L, 0x0bdbdf21L, 0x86d3d2d4L, 0xf1d4e242L, - 0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L, - 0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL, - 0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L, - 0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L, - 0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L, - 0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 
0x47b2cf7fL, 0x30b5ffe9L, - 0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L, - 0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L, - 0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL, - 0x2d02ef8dL -] - -def crc32(s, crc=0): - result = 0 - crc = ~long(crc) & 0xffffffffL - for c in s: - crc = crc_32_tab[(crc ^ long(ord(c))) & 0xffL] ^ (crc >> 8) - #/* Note: (crc >> 8) MUST zero fill on left - - result = crc ^ 0xffffffffL - - if result > 2**31: - result = ((result + 2**31) % 2**32) - 2**31 - - return result - -def b2a_hex(s): - result = [] - for char in s: - c = (ord(char) >> 4) & 0xf - if c > 9: - c = c + ord('a') - 10 - else: - c = c + ord('0') - result.append(chr(c)) - c = ord(char) & 0xf - if c > 9: - c = c + ord('a') - 10 - else: - c = c + ord('0') - result.append(chr(c)) - return ''.join(result) - -hexlify = b2a_hex - -table_hex = [ - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,-1,-1, -1,-1,-1,-1, - -1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1 -] - - -def a2b_hex(t): - result = [] - - def pairs_gen(s): - while s: - try: - yield table_hex[ord(s[0])], table_hex[ord(s[1])] - except IndexError: - if len(s): - raise TypeError('Odd-length string') - return - s = s[2:] - - for a, b in pairs_gen(t): - if a < 0 or b < 0: - raise TypeError('Non-hexadecimal digit found') - result.append(chr((a << 4) + b)) - return ''.join(result) - - -unhexlify = a2b_hex diff --git a/lib_pypy/numpypy/core/numeric.py b/lib_pypy/numpypy/core/numeric.py --- a/lib_pypy/numpypy/core/numeric.py +++ b/lib_pypy/numpypy/core/numeric.py @@ -6,7 +6,7 @@ import _numpypy as multiarray # ARGH from numpypy.core.arrayprint import array2string - +newaxis 
= None def asanyarray(a, dtype=None, order=None, maskna=None, ownmaskna=False): """ @@ -306,6 +306,125 @@ else: return multiarray.set_string_function(f, repr) +def array_equal(a1, a2): + """ + True if two arrays have the same shape and elements, False otherwise. + + Parameters + ---------- + a1, a2 : array_like + Input arrays. + + Returns + ------- + b : bool + Returns True if the arrays are equal. + + See Also + -------- + allclose: Returns True if two arrays are element-wise equal within a + tolerance. + array_equiv: Returns True if input arrays are shape consistent and all + elements equal. + + Examples + -------- + >>> np.array_equal([1, 2], [1, 2]) + True + >>> np.array_equal(np.array([1, 2]), np.array([1, 2])) + True + >>> np.array_equal([1, 2], [1, 2, 3]) + False + >>> np.array_equal([1, 2], [1, 4]) + False + + """ + try: + a1, a2 = asarray(a1), asarray(a2) + except: + return False + if a1.shape != a2.shape: + return False + return bool((a1 == a2).all()) + +def asarray(a, dtype=None, order=None, maskna=None, ownmaskna=False): + """ + Convert the input to an array. + + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to an array. This + includes lists, lists of tuples, tuples, tuples of tuples, tuples + of lists and ndarrays. + dtype : data-type, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F'}, optional + Whether to use row-major ('C') or column-major ('F' for FORTRAN) + memory representation. Defaults to 'C'. + maskna : bool or None, optional + If this is set to True, it forces the array to have an NA mask. + If this is set to False, it forces the array to not have an NA + mask. + ownmaskna : bool, optional + If this is set to True, forces the array to have a mask which + it owns. + + Returns + ------- + out : ndarray + Array interpretation of `a`. No copy is performed if the input + is already an ndarray. If `a` is a subclass of ndarray, a base + class ndarray is returned. 
+ + See Also + -------- + asanyarray : Similar function which passes through subclasses. + ascontiguousarray : Convert input to a contiguous array. + asfarray : Convert input to a floating point ndarray. + asfortranarray : Convert input to an ndarray with column-major + memory order. + asarray_chkfinite : Similar function which checks input for NaNs and Infs. + fromiter : Create an array from an iterator. + fromfunction : Construct an array by executing a function on grid + positions. + + Examples + -------- + Convert a list into an array: + + >>> a = [1, 2] + >>> np.asarray(a) + array([1, 2]) + + Existing arrays are not copied: + + >>> a = np.array([1, 2]) + >>> np.asarray(a) is a + True + + If `dtype` is set, array is copied only if dtype does not match: + + >>> a = np.array([1, 2], dtype=np.float32) + >>> np.asarray(a, dtype=np.float32) is a + True + >>> np.asarray(a, dtype=np.float64) is a + False + + Contrary to `asanyarray`, ndarray subclasses are not passed through: + + >>> issubclass(np.matrix, np.ndarray) + True + >>> a = np.matrix([[1, 2]]) + >>> np.asarray(a) is a + False + >>> np.asanyarray(a) is a + True + + """ + return array(a, dtype, copy=False, order=order, + maskna=maskna, ownmaskna=ownmaskna) + set_string_function(array_str, 0) set_string_function(array_repr, 1) @@ -319,4 +438,4 @@ False_ = bool_(False) True_ = bool_(True) e = math.e -pi = math.pi \ No newline at end of file +pi = math.pi diff --git a/lib_pypy/pypy_test/test_binascii.py b/lib_pypy/pypy_test/test_binascii.py deleted file mode 100644 --- a/lib_pypy/pypy_test/test_binascii.py +++ /dev/null @@ -1,168 +0,0 @@ -from __future__ import absolute_import -import py -from lib_pypy import binascii - -# Create binary test data -data = "The quick brown fox jumps over the lazy dog.\r\n" -# Be slow so we don't depend on other modules -data += "".join(map(chr, xrange(256))) -data += "\r\nHello world.\n" - -def test_exceptions(): - # Check module exceptions - assert issubclass(binascii.Error, 
Exception) - assert issubclass(binascii.Incomplete, Exception) - -def test_functions(): - # Check presence of all functions - funcs = [] - for suffix in "base64", "hqx", "uu", "hex": - prefixes = ["a2b_", "b2a_"] - if suffix == "hqx": - prefixes.extend(["crc_", "rlecode_", "rledecode_"]) - for prefix in prefixes: - name = prefix + suffix - assert callable(getattr(binascii, name)) - py.test.raises(TypeError, getattr(binascii, name)) - for name in ("hexlify", "unhexlify"): - assert callable(getattr(binascii, name)) - py.test.raises(TypeError, getattr(binascii, name)) - -def test_base64valid(): - # Test base64 with valid data - MAX_BASE64 = 57 - lines = [] - for i in range(0, len(data), MAX_BASE64): - b = data[i:i+MAX_BASE64] - a = binascii.b2a_base64(b) - lines.append(a) - res = "" - for line in lines: - b = binascii.a2b_base64(line) - res = res + b - assert res == data - -def test_base64invalid(): - # Test base64 with random invalid characters sprinkled throughout - # (This requires a new version of binascii.) - MAX_BASE64 = 57 - lines = [] - for i in range(0, len(data), MAX_BASE64): - b = data[i:i+MAX_BASE64] - a = binascii.b2a_base64(b) - lines.append(a) - - fillers = "" - valid = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+/" - for i in xrange(256): - c = chr(i) - if c not in valid: - fillers += c - def addnoise(line): - noise = fillers - ratio = len(line) // len(noise) - res = "" - while line and noise: - if len(line) // len(noise) > ratio: - c, line = line[0], line[1:] - else: - c, noise = noise[0], noise[1:] - res += c - return res + noise + line - res = "" - for line in map(addnoise, lines): - b = binascii.a2b_base64(line) - res += b - assert res == data - - # Test base64 with just invalid characters, which should return - # empty strings. TBD: shouldn't it raise an exception instead ? 
- assert binascii.a2b_base64(fillers) == '' - -def test_uu(): - MAX_UU = 45 - lines = [] - for i in range(0, len(data), MAX_UU): - b = data[i:i+MAX_UU] - a = binascii.b2a_uu(b) - lines.append(a) - res = "" - for line in lines: - b = binascii.a2b_uu(line) - res += b - assert res == data - - assert binascii.a2b_uu("\x7f") == "\x00"*31 - assert binascii.a2b_uu("\x80") == "\x00"*32 - assert binascii.a2b_uu("\xff") == "\x00"*31 - py.test.raises(binascii.Error, binascii.a2b_uu, "\xff\x00") - py.test.raises(binascii.Error, binascii.a2b_uu, "!!!!") - - py.test.raises(binascii.Error, binascii.b2a_uu, 46*"!") - -def test_crc32(): - crc = binascii.crc32("Test the CRC-32 of") - crc = binascii.crc32(" this string.", crc) - assert crc == 1571220330 - - crc = binascii.crc32('frotz\n', 0) - assert crc == -372923920 - - py.test.raises(TypeError, binascii.crc32) - -def test_hex(): - # test hexlification - s = '{s\005\000\000\000worldi\002\000\000\000s\005\000\000\000helloi\001\000\000\0000' - t = binascii.b2a_hex(s) - u = binascii.a2b_hex(t) - assert s == u - py.test.raises(TypeError, binascii.a2b_hex, t[:-1]) - py.test.raises(TypeError, binascii.a2b_hex, t[:-1] + 'q') - - # Verify the treatment of Unicode strings - assert binascii.hexlify(unicode('a', 'ascii')) == '61' - -def test_qp(): - # A test for SF bug 534347 (segfaults without the proper fix) - try: - binascii.a2b_qp("", **{1:1}) - except TypeError: - pass - else: - fail("binascii.a2b_qp(**{1:1}) didn't raise TypeError") - assert binascii.a2b_qp("= ") == "= " - assert binascii.a2b_qp("==") == "=" - assert binascii.a2b_qp("=AX") == "=AX" - py.test.raises(TypeError, binascii.b2a_qp, foo="bar") - assert binascii.a2b_qp("=00\r\n=00") == "\x00\r\n\x00" - assert binascii.b2a_qp("\xff\r\n\xff\n\xff") == "=FF\r\n=FF\r\n=FF" - target = "0"*75+"=\r\n=FF\r\n=FF\r\n=FF" - assert binascii.b2a_qp("0"*75+"\xff\r\n\xff\r\n\xff") == target - -def test_empty_string(): - # A test for SF bug #1022953. Make sure SystemError is not raised. 
- for n in ['b2a_qp', 'a2b_hex', 'b2a_base64', 'a2b_uu', 'a2b_qp', - 'b2a_hex', 'unhexlify', 'hexlify', 'crc32', 'b2a_hqx', - 'a2b_hqx', 'a2b_base64', 'rlecode_hqx', 'b2a_uu', - 'rledecode_hqx']: - f = getattr(binascii, n) - f('') - binascii.crc_hqx('', 0) - -def test_qp_bug_case(): - assert binascii.b2a_qp('y'*77, False, False) == 'y'*75 + '=\nyy' - assert binascii.b2a_qp(' '*77, False, False) == ' '*75 + '=\n =20' - assert binascii.b2a_qp('y'*76, False, False) == 'y'*76 - assert binascii.b2a_qp(' '*76, False, False) == ' '*75 + '=\n=20' - -def test_wrong_padding(): - s = 'CSixpLDtKSC/7Liuvsax4iC6uLmwMcijIKHaILzSwd/H0SC8+LCjwLsgv7W/+Mj3IQ' - py.test.raises(binascii.Error, binascii.a2b_base64, s) - -def test_crap_after_padding(): - s = 'xxx=axxxx' - assert binascii.a2b_base64(s) == '\xc7\x1c' - -def test_wrong_args(): - # this should grow as a way longer list - py.test.raises(TypeError, binascii.a2b_base64, 42) diff --git a/lib_pypy/pypy_test/test_locale.py b/lib_pypy/pypy_test/test_locale.py deleted file mode 100644 --- a/lib_pypy/pypy_test/test_locale.py +++ /dev/null @@ -1,79 +0,0 @@ -from __future__ import absolute_import -import py -import sys - -from lib_pypy.ctypes_config_cache import rebuild -rebuild.rebuild_one('locale.ctc.py') - -from lib_pypy import _locale - - -def setup_module(mod): - if sys.platform == 'darwin': - py.test.skip("Locale support on MacOSX is minimal and cannot be tested") - -class TestLocale: - def setup_class(cls): - cls.oldlocale = _locale.setlocale(_locale.LC_NUMERIC) - if sys.platform.startswith("win"): - cls.tloc = "en" - elif sys.platform.startswith("freebsd"): - cls.tloc = "en_US.US-ASCII" - else: - cls.tloc = "en_US.UTF8" - try: - _locale.setlocale(_locale.LC_NUMERIC, cls.tloc) - except _locale.Error: - py.test.skip("test locale %s not supported" % cls.tloc) - - def teardown_class(cls): - _locale.setlocale(_locale.LC_NUMERIC, cls.oldlocale) - - def test_format(self): - py.test.skip("XXX fix or kill me") - - def 
testformat(formatstr, value, grouping = 0, output=None): - if output: - print "%s %% %s =? %s ..." %\ - (repr(formatstr), repr(value), repr(output)), - else: - print "%s %% %s works? ..." % (repr(formatstr), repr(value)), - result = locale.format(formatstr, value, grouping = grouping) - assert result == output - - testformat("%f", 1024, grouping=1, output='1,024.000000') - testformat("%f", 102, grouping=1, output='102.000000') - testformat("%f", -42, grouping=1, output='-42.000000') - testformat("%+f", -42, grouping=1, output='-42.000000') - testformat("%20.f", -42, grouping=1, output=' -42') - testformat("%+10.f", -4200, grouping=1, output=' -4,200') - testformat("%-10.f", 4200, grouping=1, output='4,200 ') - - def test_getpreferredencoding(self): - py.test.skip("XXX fix or kill me") - # Invoke getpreferredencoding to make sure it does not cause exceptions - _locale.getpreferredencoding() - - # Test BSD Rune locale's bug for isctype functions. - def test_bsd_bug(self): - def teststrop(s, method, output): - print "%s.%s() =? %s ..." 
% (repr(s), method, repr(output)), - result = getattr(s, method)() - assert result == output - - oldlocale = _locale.setlocale(_locale.LC_CTYPE) - _locale.setlocale(_locale.LC_CTYPE, self.tloc) - try: - teststrop('\x20', 'isspace', True) - teststrop('\xa0', 'isspace', False) - teststrop('\xa1', 'isspace', False) - teststrop('\xc0', 'isalpha', False) - teststrop('\xc0', 'isalnum', False) - teststrop('\xc0', 'isupper', False) - teststrop('\xc0', 'islower', False) - teststrop('\xec\xa0\xbc', 'split', ['\xec\xa0\xbc']) - teststrop('\xed\x95\xa0', 'strip', '\xed\x95\xa0') - teststrop('\xcc\x85', 'lower', '\xcc\x85') - teststrop('\xed\x95\xa0', 'upper', '\xed\x95\xa0') - finally: - _locale.setlocale(_locale.LC_CTYPE, oldlocale) diff --git a/lib_pypy/pypy_test/test_site_extra.py b/lib_pypy/pypy_test/test_site_extra.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pypy_test/test_site_extra.py @@ -0,0 +1,13 @@ +import sys, os + + +def test_preimported_modules(): + lst = ['__builtin__', '_codecs', '_warnings', 'codecs', 'encodings', + 'exceptions', 'signal', 'sys', 'zipimport'] + g = os.popen("'%s' -c 'import sys; print sorted(sys.modules)'" % + (sys.executable,)) + real_data = g.read() + g.close() + for name in lst: + quoted_name = repr(name) + assert quoted_name in real_data diff --git a/lib_pypy/pypy_test/test_struct_extra.py b/lib_pypy/pypy_test/test_struct_extra.py deleted file mode 100644 --- a/lib_pypy/pypy_test/test_struct_extra.py +++ /dev/null @@ -1,25 +0,0 @@ -from __future__ import absolute_import -from lib_pypy import struct - -def test_simple(): - morezeros = '\x00' * (struct.calcsize('l')-4) - assert struct.pack(': big-endian, std. 
size & alignment - !: same as > - -The remaining chars indicate types of args and must match exactly; -these can be preceded by a decimal repeat count: - x: pad byte (no data); - c:char; - b:signed byte; - B:unsigned byte; - h:short; - H:unsigned short; - i:int; - I:unsigned int; - l:long; - L:unsigned long; - f:float; - d:double. -Special cases (preceding decimal count indicates length): - s:string (array of char); p: pascal string (with count byte). -Special case (only available in native format): - P:an integer type that is wide enough to hold a pointer. -Special case (not in native mode unless 'long long' in platform C): - q:long long; - Q:unsigned long long -Whitespace between formats is ignored. - -The variable struct.error is an exception raised on errors.""" - -import math, sys - -# TODO: XXX Find a way to get information on native sizes and alignments -class StructError(Exception): - pass -error = StructError -def unpack_int(data,index,size,le): - bytes = [ord(b) for b in data[index:index+size]] - if le == 'little': - bytes.reverse() - number = 0L - for b in bytes: - number = number << 8 | b - return int(number) - -def unpack_signed_int(data,index,size,le): - number = unpack_int(data,index,size,le) - max = 2**(size*8) - if number > 2**(size*8 - 1) - 1: - number = int(-1*(max - number)) - return number - -INFINITY = 1e200 * 1e200 -NAN = INFINITY / INFINITY - -def unpack_char(data,index,size,le): - return data[index:index+size] - -def pack_int(number,size,le): - x=number - res=[] - for i in range(size): - res.append(chr(x&0xff)) - x >>= 8 - if le == 'big': - res.reverse() - return ''.join(res) - -def pack_signed_int(number,size,le): - if not isinstance(number, (int,long)): - raise StructError,"argument for i,I,l,L,q,Q,h,H must be integer" - if number > 2**(8*size-1)-1 or number < -1*2**(8*size-1): - raise OverflowError,"Number:%i too large to convert" % number - return pack_int(number,size,le) - -def pack_unsigned_int(number,size,le): - if not 
isinstance(number, (int,long)): - raise StructError,"argument for i,I,l,L,q,Q,h,H must be integer" - if number < 0: - raise TypeError,"can't convert negative long to unsigned" - if number > 2**(8*size)-1: - raise OverflowError,"Number:%i too large to convert" % number - return pack_int(number,size,le) - -def pack_char(char,size,le): - return str(char) - -def isinf(x): - return x != 0.0 and x / 2 == x -def isnan(v): - return v != v*1.0 or (v == 1.0 and v == 2.0) - -def pack_float(x, size, le): - unsigned = float_pack(x, size) - result = [] - for i in range(8): - result.append(chr((unsigned >> (i * 8)) & 0xFF)) - if le == "big": - result.reverse() - return ''.join(result) - -def unpack_float(data, index, size, le): - binary = [data[i] for i in range(index, index + 8)] - if le == "big": - binary.reverse() - unsigned = 0 - for i in range(8): - unsigned |= ord(binary[i]) << (i * 8) - return float_unpack(unsigned, size, le) - -def round_to_nearest(x): - """Python 3 style round: round a float x to the nearest int, but - unlike the builtin Python 2.x round function: - - - return an int, not a float - - do round-half-to-even, not round-half-away-from-zero. - - We assume that x is finite and nonnegative; except wrong results - if you use this for negative x. 
- - """ - int_part = int(x) - frac_part = x - int_part - if frac_part > 0.5 or frac_part == 0.5 and int_part & 1 == 1: - int_part += 1 - return int_part - -def float_unpack(Q, size, le): - """Convert a 32-bit or 64-bit integer created - by float_pack into a Python float.""" - - if size == 8: - MIN_EXP = -1021 # = sys.float_info.min_exp - MAX_EXP = 1024 # = sys.float_info.max_exp - MANT_DIG = 53 # = sys.float_info.mant_dig - BITS = 64 - elif size == 4: - MIN_EXP = -125 # C's FLT_MIN_EXP - MAX_EXP = 128 # FLT_MAX_EXP - MANT_DIG = 24 # FLT_MANT_DIG - BITS = 32 - else: - raise ValueError("invalid size value") - - if Q >> BITS: - raise ValueError("input out of range") - - # extract pieces - sign = Q >> BITS - 1 - exp = (Q & ((1 << BITS - 1) - (1 << MANT_DIG - 1))) >> MANT_DIG - 1 - mant = Q & ((1 << MANT_DIG - 1) - 1) - - if exp == MAX_EXP - MIN_EXP + 2: - # nan or infinity - result = float('nan') if mant else float('inf') - elif exp == 0: - # subnormal or zero - result = math.ldexp(float(mant), MIN_EXP - MANT_DIG) - else: - # normal - mant += 1 << MANT_DIG - 1 - result = math.ldexp(float(mant), exp + MIN_EXP - MANT_DIG - 1) - return -result if sign else result - - -def float_pack(x, size): - """Convert a Python float x into a 64-bit unsigned integer - with the same byte representation.""" - - if size == 8: - MIN_EXP = -1021 # = sys.float_info.min_exp - MAX_EXP = 1024 # = sys.float_info.max_exp - MANT_DIG = 53 # = sys.float_info.mant_dig - BITS = 64 - elif size == 4: - MIN_EXP = -125 # C's FLT_MIN_EXP - MAX_EXP = 128 # FLT_MAX_EXP - MANT_DIG = 24 # FLT_MANT_DIG - BITS = 32 - else: - raise ValueError("invalid size value") - - sign = math.copysign(1.0, x) < 0.0 - if math.isinf(x): - mant = 0 - exp = MAX_EXP - MIN_EXP + 2 - elif math.isnan(x): - mant = 1 << (MANT_DIG-2) # other values possible - exp = MAX_EXP - MIN_EXP + 2 - elif x == 0.0: - mant = 0 - exp = 0 - else: - m, e = math.frexp(abs(x)) # abs(x) == m * 2**e - exp = e - (MIN_EXP - 1) - if exp > 0: - # Normal case. 
- mant = round_to_nearest(m * (1 << MANT_DIG)) - mant -= 1 << MANT_DIG - 1 - else: - # Subnormal case. - if exp + MANT_DIG - 1 >= 0: - mant = round_to_nearest(m * (1 << exp + MANT_DIG - 1)) - else: - mant = 0 - exp = 0 - - # Special case: rounding produced a MANT_DIG-bit mantissa. - assert 0 <= mant <= 1 << MANT_DIG - 1 - if mant == 1 << MANT_DIG - 1: - mant = 0 - exp += 1 - - # Raise on overflow (in some circumstances, may want to return - # infinity instead). - if exp >= MAX_EXP - MIN_EXP + 2: - raise OverflowError("float too large to pack in this format") - - # check constraints - assert 0 <= mant < 1 << MANT_DIG - 1 - assert 0 <= exp <= MAX_EXP - MIN_EXP + 2 - assert 0 <= sign <= 1 - return ((sign << BITS - 1) | (exp << MANT_DIG - 1)) | mant - - -big_endian_format = { - 'x':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None}, - 'b':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'B':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'c':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_char, 'unpack' : unpack_char}, - 's':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None}, - 'p':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None}, - 'h':{ 'size' : 2, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'H':{ 'size' : 2, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'i':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'I':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'l':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'L':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'q':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'Q':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'f':{ 
'size' : 4, 'alignment' : 0, 'pack' : pack_float, 'unpack' : unpack_float}, - 'd':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_float, 'unpack' : unpack_float}, - } -default = big_endian_format -formatmode={ '<' : (default, 'little'), - '>' : (default, 'big'), - '!' : (default, 'big'), - '=' : (default, sys.byteorder), - '@' : (default, sys.byteorder) - } - -def getmode(fmt): - try: - formatdef,endianness = formatmode[fmt[0]] - index = 1 - except KeyError: - formatdef,endianness = formatmode['@'] - index = 0 - return formatdef,endianness,index -def getNum(fmt,i): - num=None - cur = fmt[i] - while ('0'<= cur ) and ( cur <= '9'): - if num == None: - num = int(cur) - else: - num = 10*num + int(cur) - i += 1 - cur = fmt[i] - return num,i - -def calcsize(fmt): - """calcsize(fmt) -> int - Return size of C struct described by format string fmt. - See struct.__doc__ for more on format strings.""" - - formatdef,endianness,i = getmode(fmt) - num = 0 - result = 0 - while i string - Return string containing values v1, v2, ... packed according to fmt. - See struct.__doc__ for more on format strings.""" - formatdef,endianness,i = getmode(fmt) - args = list(args) - n_args = len(args) - result = [] - while i 0: - result += [chr(len(args[0])) + args[0][:num-1] + '\0'*padding] - else: - if num<255: - result += [chr(num-1) + args[0][:num-1]] - else: - result += [chr(255) + args[0][:num-1]] - args.pop(0) - else: - raise StructError,"arg for string format not a string" - - else: - if len(args) < num: - raise StructError,"insufficient arguments to pack" - for var in args[:num]: - result += [format['pack'](var,format['size'],endianness)] - args=args[num:] - num = None - i += 1 - if len(args) != 0: - raise StructError,"too many arguments for pack format" - return ''.join(result) - -def unpack(fmt,data): - """unpack(fmt, string) -> (v1, v2, ...) - Unpack the string, containing packed C structure data, according - to fmt. Requires len(string)==calcsize(fmt). 
- See struct.__doc__ for more on format strings.""" - formatdef,endianness,i = getmode(fmt) - j = 0 - num = 0 - result = [] - length= calcsize(fmt) - if length != len (data): - raise StructError,"unpack str size does not match format" - while i= num: - n = num-1 - result.append(data[j+1:j+n+1]) - j += num - else: - for n in range(num): - result += [format['unpack'](data,j,format['size'],endianness)] - j += format['size'] - - return tuple(result) - -def pack_into(fmt, buf, offset, *args): - data = pack(fmt, *args) - buffer(buf)[offset:offset+len(data)] = data - -def unpack_from(fmt, buf, offset=0): - size = calcsize(fmt) - data = buffer(buf)[offset:offset+size] - if len(data) != size: - raise error("unpack_from requires a buffer of at least %d bytes" - % (size,)) - return unpack(fmt, data) diff --git a/pypy/__init__.py b/pypy/__init__.py --- a/pypy/__init__.py +++ b/pypy/__init__.py @@ -1,1 +1,16 @@ # Empty + +# XXX Should be empty again, soon. +# XXX hack for win64: +# This patch must stay here until the END OF STAGE 1 +# When all tests work, this branch will be merged +# and the branch stage 2 is started, where we remove this patch. +import sys +if hasattr(sys, "maxsize"): + if sys.maxint != sys.maxsize: + sys.maxint = sys.maxsize + import warnings + warnings.warn("""\n +---> This win64 port is now in stage 1: sys.maxint was modified. +---> When pypy/__init__.py becomes empty again, we have reached stage 2. +""") diff --git a/pypy/annotation/classdef.py b/pypy/annotation/classdef.py --- a/pypy/annotation/classdef.py +++ b/pypy/annotation/classdef.py @@ -148,7 +148,6 @@ "the attribute here; the list of read locations is:\n" + '\n'.join([str(loc[0]) for loc in self.read_locations])) - class ClassDef(object): "Wraps a user class." 
diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -103,21 +103,13 @@ * A concurrent garbage collector (a lot of work) -Remove the GIL --------------- +STM, a.k.a. "remove the GIL" +---------------------------- -This is a major task that requires lots of thinking. However, few subprojects -can be potentially specified, unless a better plan can be thought out: +Removing the GIL --- or more precisely, a GIL-less thread-less solution --- +is `now work in progress.`__ Contributions welcome. -* A thread-aware garbage collector - -* Better RPython primitives for dealing with concurrency - -* JIT passes to remove locks on objects - -* (maybe) implement locking in Python interpreter - -* alternatively, look at Software Transactional Memory +.. __: http://pypy.org/tmdonate.html Introduce new benchmarks ------------------------ diff --git a/pypy/doc/sandbox.rst b/pypy/doc/sandbox.rst --- a/pypy/doc/sandbox.rst +++ b/pypy/doc/sandbox.rst @@ -82,7 +82,10 @@ In pypy/translator/goal:: - ./translate.py --sandbox targetpypystandalone.py + ./translate.py -O2 --sandbox targetpypystandalone.py + +If you don't have a regular PyPy installed, you should, because it's +faster to translate, but you can also run ``python translate.py`` instead. 
To run it, use the tools in the pypy/translator/sandbox directory:: diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1336,7 +1336,7 @@ if not self.is_true(self.isinstance(w_obj, self.w_str)): raise OperationError(self.w_TypeError, self.wrap('argument must be a string')) - return self.str_w(w_obj) + return self.str_w(w_obj) def unicode_w(self, w_obj): return w_obj.unicode_w(self) diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -901,15 +901,17 @@ def __init__(self, source, filename=None, modname='__builtin__'): # HAAACK (but a good one) + self.filename = filename + self.source = str(py.code.Source(source).deindent()) + self.modname = modname if filename is None: f = sys._getframe(1) filename = '<%s:%d>' % (f.f_code.co_filename, f.f_lineno) + if not os.path.exists(filename): + # make source code available for tracebacks + lines = [x + "\n" for x in source.split("\n")] + py.std.linecache.cache[filename] = (1, None, lines, filename) self.filename = filename - self.source = str(py.code.Source(source).deindent()) - self.modname = modname - # make source code available for tracebacks - lines = [x + "\n" for x in source.split("\n")] - py.std.linecache.cache[filename] = (1, None, lines, filename) def __repr__(self): return "" % (self.filename,) diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py --- a/pypy/interpreter/test/test_objspace.py +++ b/pypy/interpreter/test/test_objspace.py @@ -312,8 +312,8 @@ mods = space.get_builtinmodule_to_install() assert '__pypy__' in mods # real builtin - assert 'array' not in mods # in lib_pypy - assert 'faked+array' not in mods # in lib_pypy + assert '_functools' not in mods # in lib_pypy + assert 'faked+_functools' not in mods # in lib_pypy assert 'this_doesnt_exist' not in mods # not in 
lib_pypy assert 'faked+this_doesnt_exist' in mods # not in lib_pypy, but in # ALL_BUILTIN_MODULES diff --git a/pypy/interpreter/test/test_zzpickle_and_slow.py b/pypy/interpreter/test/test_zzpickle_and_slow.py --- a/pypy/interpreter/test/test_zzpickle_and_slow.py +++ b/pypy/interpreter/test/test_zzpickle_and_slow.py @@ -75,6 +75,7 @@ class AppTestInterpObjectPickling: pytestmark = py.test.mark.skipif("config.option.runappdirect") def setup_class(cls): + cls.space = gettestobjspace(usemodules=['struct']) _attach_helpers(cls.space) def teardown_class(cls): diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -16,9 +16,11 @@ from pypy.rpython.annlowlevel import llhelper from pypy.rpython.llinterp import LLException from pypy.jit.codewriter import heaptracker, longlong +from pypy.rlib import longlong2float from pypy.rlib.rarithmetic import intmask, is_valid_int from pypy.jit.backend.detect_cpu import autodetect_main_model_and_size + def boxfloat(x): return BoxFloat(longlong.getfloatstorage(x)) @@ -1496,13 +1498,30 @@ c_nest, c_nest], 'void') def test_read_timestamp(self): + if sys.platform == 'win32': + # windows quite often is very inexact (like the old Intel 8259 PIC), + # so we stretch the time a little bit. + # On my virtual Parallels machine in a 2GHz Core i7 Mac Mini, + # the test starts working at delay == 21670 and stops at 20600000. + # We take the geometric mean value. 
+ from math import log, exp + delay_min = 21670 + delay_max = 20600000 + delay = int(exp((log(delay_min)+log(delay_max))/2)) + def wait_a_bit(): + for i in xrange(delay): pass + else: + def wait_a_bit(): + pass if longlong.is_64_bit: got1 = self.execute_operation(rop.READ_TIMESTAMP, [], 'int') + wait_a_bit() got2 = self.execute_operation(rop.READ_TIMESTAMP, [], 'int') res1 = got1.getint() res2 = got2.getint() else: got1 = self.execute_operation(rop.READ_TIMESTAMP, [], 'float') + wait_a_bit() got2 = self.execute_operation(rop.READ_TIMESTAMP, [], 'float') res1 = got1.getlonglong() res2 = got2.getlonglong() @@ -1598,6 +1617,12 @@ [BoxPtr(x)], 'int').value assert res == -19 + def test_convert_float_bytes(self): + t = 'int' if longlong.is_64_bit else 'float' + res = self.execute_operation(rop.CONVERT_FLOAT_BYTES_TO_LONGLONG, + [boxfloat(2.5)], t).value + assert res == longlong2float.float2longlong(2.5) + def test_ooops_non_gc(self): x = lltype.malloc(lltype.Struct('x'), flavor='raw') v = heaptracker.adr2int(llmemory.cast_ptr_to_adr(x)) diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py --- a/pypy/jit/backend/test/test_random.py +++ b/pypy/jit/backend/test/test_random.py @@ -449,6 +449,7 @@ OPERATIONS.append(CastFloatToIntOperation(rop.CAST_FLOAT_TO_INT)) OPERATIONS.append(CastIntToFloatOperation(rop.CAST_INT_TO_FLOAT)) +OPERATIONS.append(CastFloatToIntOperation(rop.CONVERT_FLOAT_BYTES_TO_LONGLONG)) OperationBuilder.OPERATIONS = OPERATIONS @@ -502,11 +503,11 @@ else: assert 0, "unknown backend %r" % pytest.config.option.backend -# ____________________________________________________________ +# ____________________________________________________________ class RandomLoop(object): dont_generate_more = False - + def __init__(self, cpu, builder_factory, r, startvars=None): self.cpu = cpu if startvars is None: diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ 
b/pypy/jit/backend/x86/assembler.py @@ -606,7 +606,7 @@ else: assert token struct.number = compute_unique_id(token) - self.loop_run_counters.append(struct) + self.loop_run_counters.append(struct) return struct def _find_failure_recovery_bytecode(self, faildescr): @@ -665,7 +665,7 @@ ResOperation(rop.SETFIELD_RAW, [c_adr, box2], None, descr=self.debug_counter_descr)] operations.extend(ops) - + @specialize.argtype(1) def _inject_debugging_code(self, looptoken, operations, tp, number): if self._debug: @@ -836,8 +836,8 @@ self.mc.MOVSD_sx(0, loc.value) elif WORD == 4 and isinstance(loc, StackLoc) and loc.get_width() == 8: # XXX evil trick - self.mc.PUSH_b(get_ebp_ofs(loc.position)) - self.mc.PUSH_b(get_ebp_ofs(loc.position + 1)) + self.mc.PUSH_b(loc.value + 4) + self.mc.PUSH_b(loc.value) else: self.mc.PUSH(loc) @@ -847,8 +847,8 @@ self.mc.ADD_ri(esp.value, 8) # = size of doubles elif WORD == 4 and isinstance(loc, StackLoc) and loc.get_width() == 8: # XXX evil trick - self.mc.POP_b(get_ebp_ofs(loc.position + 1)) - self.mc.POP_b(get_ebp_ofs(loc.position)) + self.mc.POP_b(loc.value) + self.mc.POP_b(loc.value + 4) else: self.mc.POP(loc) @@ -1242,6 +1242,15 @@ self.mc.MOVD_xr(resloc.value, loc0.value) self.mc.CVTSS2SD_xx(resloc.value, resloc.value) + def genop_convert_float_bytes_to_longlong(self, op, arglocs, resloc): + loc0, = arglocs + if longlong.is_64_bit: + assert isinstance(resloc, RegLoc) + assert isinstance(loc0, RegLoc) + self.mc.MOVD(resloc, loc0) + else: + self.mov(loc0, resloc) + def genop_guard_int_is_true(self, op, guard_op, guard_token, arglocs, resloc): guard_opnum = guard_op.getopnum() self.mc.CMP(arglocs[0], imm0) @@ -1954,8 +1963,6 @@ mc.PUSH_r(ebx.value) elif IS_X86_64: mc.MOV_rr(edi.value, ebx.value) - # XXX: Correct to only align the stack on 64-bit? - mc.AND_ri(esp.value, -16) else: raise AssertionError("Shouldn't happen") @@ -2117,9 +2124,12 @@ # First, we need to save away the registers listed in # 'save_registers' that are not callee-save. 
XXX We assume that # the XMM registers won't be modified. We store them in - # [ESP+4], [ESP+8], etc., leaving enough room in [ESP] for the - # single argument to closestack_addr below. - p = WORD + # [ESP+4], [ESP+8], etc.; on x86-32 we leave enough room in [ESP] + # for the single argument to closestack_addr below. + if IS_X86_32: + p = WORD + elif IS_X86_64: + p = 0 for reg in self._regalloc.rm.save_around_call_regs: if reg in save_registers: self.mc.MOV_sr(p, reg.value) @@ -2174,7 +2184,10 @@ # self._emit_call(-1, imm(self.releasegil_addr), args) # Finally, restore the registers saved above. - p = WORD + if IS_X86_32: + p = WORD + elif IS_X86_64: + p = 0 for reg in self._regalloc.rm.save_around_call_regs: if reg in save_registers: self.mc.MOV_rs(reg.value, p) diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -766,6 +766,18 @@ consider_cast_singlefloat_to_float = consider_cast_int_to_float + def consider_convert_float_bytes_to_longlong(self, op): + if longlong.is_64_bit: + loc0 = self.xrm.make_sure_var_in_reg(op.getarg(0)) + loc1 = self.rm.force_allocate_reg(op.result) + self.Perform(op, [loc0], loc1) + self.xrm.possibly_free_var(op.getarg(0)) + else: + loc0 = self.xrm.loc(op.getarg(0)) + loc1 = self.xrm.force_allocate_reg(op.result) + self.Perform(op, [loc0], loc1) + self.xrm.possibly_free_var(op.getarg(0)) + def _consider_llong_binop_xx(self, op): # must force both arguments into xmm registers, because we don't # know if they will be suitably aligned. 
Exception: if the second diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py --- a/pypy/jit/backend/x86/rx86.py +++ b/pypy/jit/backend/x86/rx86.py @@ -601,9 +601,10 @@ CVTSS2SD_xb = xmminsn('\xF3', rex_nw, '\x0F\x5A', register(1, 8), stack_bp(2)) - MOVD_rx = xmminsn('\x66', rex_nw, '\x0F\x7E', register(2, 8), register(1), '\xC0') - MOVD_xr = xmminsn('\x66', rex_nw, '\x0F\x6E', register(1, 8), register(2), '\xC0') - MOVD_xb = xmminsn('\x66', rex_nw, '\x0F\x6E', register(1, 8), stack_bp(2)) + # These work on machine sized registers. + MOVD_rx = xmminsn('\x66', rex_w, '\x0F\x7E', register(2, 8), register(1), '\xC0') + MOVD_xr = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), register(2), '\xC0') + MOVD_xb = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), stack_bp(2)) PSRAD_xi = xmminsn('\x66', rex_nw, '\x0F\x72', register(1), '\xE0', immediate(2, 'b')) diff --git a/pypy/jit/backend/x86/test/test_zmath.py b/pypy/jit/backend/x86/test/test_zmath.py --- a/pypy/jit/backend/x86/test/test_zmath.py +++ b/pypy/jit/backend/x86/test/test_zmath.py @@ -6,6 +6,8 @@ from pypy.translator.c.test.test_genc import compile from pypy.jit.backend.x86.support import ensure_sse2_floats from pypy.rlib import rfloat +from pypy.rlib.unroll import unrolling_iterable +from pypy.rlib.debug import debug_print def get_test_case((fnname, args, expected)): @@ -16,16 +18,32 @@ expect_valueerror = (expected == ValueError) expect_overflowerror = (expected == OverflowError) check = test_direct.get_tester(expected) + unroll_args = unrolling_iterable(args) # def testfn(): + debug_print('calling', fnname, 'with arguments:') + for arg in unroll_args: + debug_print('\t', arg) try: got = fn(*args) except ValueError: - return expect_valueerror + if expect_valueerror: + return True + else: + debug_print('unexpected ValueError!') + return False except OverflowError: - return expect_overflowerror + if expect_overflowerror: + return True + else: + debug_print('unexpected OverflowError!') + 
return False else: - return check(got) + if check(got): + return True + else: + debug_print('unexpected result:', got) + return False # testfn.func_name = 'test_' + fnname return testfn diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -291,6 +291,11 @@ op1 = SpaceOperation('-live-', [], None) return [op, op1] + def _noop_rewrite(self, op): + return op + + rewrite_op_convert_float_bytes_to_longlong = _noop_rewrite + # ---------- # Various kinds of calls diff --git a/pypy/jit/codewriter/test/test_flatten.py b/pypy/jit/codewriter/test/test_flatten.py --- a/pypy/jit/codewriter/test/test_flatten.py +++ b/pypy/jit/codewriter/test/test_flatten.py @@ -968,6 +968,21 @@ int_return %i2 """, transform=True) + def test_convert_float_bytes_to_int(self): + from pypy.rlib.longlong2float import float2longlong + def f(x): + return float2longlong(x) + if longlong.is_64_bit: + result_var = "%i0" + return_op = "int_return" + else: + result_var = "%f1" + return_op = "float_return" + self.encoding_test(f, [25.0], """ + convert_float_bytes_to_longlong %%f0 -> %(result_var)s + %(return_op)s %(result_var)s + """ % {"result_var": result_var, "return_op": return_op}) + def check_force_cast(FROM, TO, operations, value): """Check that the test is correctly written...""" diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -1,15 +1,16 @@ +from pypy.jit.codewriter import heaptracker, longlong +from pypy.jit.codewriter.jitcode import JitCode, SwitchDictDescr +from pypy.jit.metainterp.compile import ResumeAtPositionDescr +from pypy.jit.metainterp.jitexc import JitException, get_llexception, reraise +from pypy.rlib import longlong2float +from pypy.rlib.debug import debug_start, debug_stop, ll_assert, make_sure_not_resized +from pypy.rlib.objectmodel import we_are_translated 
+from pypy.rlib.rarithmetic import intmask, LONG_BIT, r_uint, ovfcheck +from pypy.rlib.rtimer import read_timestamp from pypy.rlib.unroll import unrolling_iterable -from pypy.rlib.rtimer import read_timestamp -from pypy.rlib.rarithmetic import intmask, LONG_BIT, r_uint, ovfcheck -from pypy.rlib.objectmodel import we_are_translated -from pypy.rlib.debug import debug_start, debug_stop, ll_assert -from pypy.rlib.debug import make_sure_not_resized from pypy.rpython.lltypesystem import lltype, llmemory, rclass from pypy.rpython.lltypesystem.lloperation import llop -from pypy.jit.codewriter.jitcode import JitCode, SwitchDictDescr -from pypy.jit.codewriter import heaptracker, longlong -from pypy.jit.metainterp.jitexc import JitException, get_llexception, reraise -from pypy.jit.metainterp.compile import ResumeAtPositionDescr + def arguments(*argtypes, **kwds): resulttype = kwds.pop('returns', None) @@ -20,6 +21,9 @@ return function return decorate +LONGLONG_TYPECODE = 'i' if longlong.is_64_bit else 'f' + + class LeaveFrame(JitException): pass @@ -663,6 +667,11 @@ a = float(a) return longlong.getfloatstorage(a) + @arguments("f", returns=LONGLONG_TYPECODE) + def bhimpl_convert_float_bytes_to_longlong(a): + a = longlong.getrealfloat(a) + return longlong2float.float2longlong(a) + # ---------- # control flow operations @@ -1309,7 +1318,7 @@ def bhimpl_copyunicodecontent(cpu, src, dst, srcstart, dststart, length): cpu.bh_copyunicodecontent(src, dst, srcstart, dststart, length) - @arguments(returns=(longlong.is_64_bit and "i" or "f")) + @arguments(returns=LONGLONG_TYPECODE) def bhimpl_ll_read_timestamp(): return read_timestamp() diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -223,6 +223,7 @@ 'cast_float_to_singlefloat', 'cast_singlefloat_to_float', 'float_neg', 'float_abs', 'cast_ptr_to_int', 'cast_int_to_ptr', + 'convert_float_bytes_to_longlong', ]: exec py.code.Source(''' 
@arguments("box") diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -419,6 +419,7 @@ 'CAST_INT_TO_FLOAT/1', # need some messy code in the backend 'CAST_FLOAT_TO_SINGLEFLOAT/1', 'CAST_SINGLEFLOAT_TO_FLOAT/1', + 'CONVERT_FLOAT_BYTES_TO_LONGLONG/1', # 'INT_LT/2b', 'INT_LE/2b', diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -3,6 +3,7 @@ import py from pypy import conftest +from pypy.jit.codewriter import longlong from pypy.jit.codewriter.policy import JitPolicy, StopAtXPolicy from pypy.jit.metainterp import pyjitpl, history from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT @@ -14,6 +15,7 @@ loop_invariant, elidable, promote, jit_debug, assert_green, AssertGreenFailed, unroll_safe, current_trace_length, look_inside_iff, isconstant, isvirtual, promote_string, set_param, record_known_class) +from pypy.rlib.longlong2float import float2longlong from pypy.rlib.rarithmetic import ovfcheck, is_valid_int from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rpython.ootypesystem import ootype @@ -292,7 +294,7 @@ assert res == f(6, sys.maxint, 32, 48) res = self.meta_interp(f, [sys.maxint, 6, 32, 48]) assert res == f(sys.maxint, 6, 32, 48) - + def test_loop_invariant_intbox(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) @@ -953,7 +955,7 @@ self.meta_interp(f, [20], repeat=7) # the loop and the entry path as a single trace self.check_jitcell_token_count(1) - + # we get: # ENTER - compile the new loop and the entry bridge # ENTER - compile the leaving path @@ -1470,7 +1472,7 @@ assert res == f(299) self.check_resops(guard_class=0, guard_nonnull=4, guard_nonnull_class=4, guard_isnull=2) - + def test_merge_guardnonnull_guardvalue(self): from pypy.rlib.objectmodel import instantiate 
@@ -1499,7 +1501,7 @@ assert res == f(299) self.check_resops(guard_value=4, guard_class=0, guard_nonnull=4, guard_nonnull_class=0, guard_isnull=2) - + def test_merge_guardnonnull_guardvalue_2(self): from pypy.rlib.objectmodel import instantiate @@ -1528,7 +1530,7 @@ assert res == f(299) self.check_resops(guard_value=4, guard_class=0, guard_nonnull=4, guard_nonnull_class=0, guard_isnull=2) - + def test_merge_guardnonnull_guardclass_guardvalue(self): from pypy.rlib.objectmodel import instantiate @@ -2636,7 +2638,7 @@ return sa assert self.meta_interp(f, [20]) == f(20) self.check_resops(int_lt=6, int_le=2, int_ge=4, int_gt=3) - + def test_intbounds_not_generalized2(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'node']) @@ -2677,7 +2679,7 @@ assert self.meta_interp(f, [20, 3]) == f(20, 3) self.check_jitcell_token_count(1) self.check_target_token_count(5) - + def test_max_retrace_guards(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'a']) @@ -2815,7 +2817,7 @@ for cell in get_stats().get_all_jitcell_tokens(): # Initialal trace with two labels and 5 retraces assert len(cell.target_tokens) <= 7 - + def test_nested_retrace(self): myjitdriver = JitDriver(greens = ['pc'], reds = ['n', 'a', 'i', 'j', 'sa']) @@ -3793,6 +3795,16 @@ res = self.interp_operations(g, [1]) assert res == 3 + def test_float2longlong(self): + def f(n): + return float2longlong(n) + + for x in [2.5, float("nan"), -2.5, float("inf")]: + # There are tests elsewhere to verify the correctness of this. 
+ expected = float2longlong(x) + res = self.interp_operations(f, [x]) + assert longlong.getfloatstorage(res) == expected + class TestLLtype(BaseLLtypeTests, LLJitMixin): def test_tagged(self): diff --git a/pypy/module/__builtin__/interp_memoryview.py b/pypy/module/__builtin__/interp_memoryview.py --- a/pypy/module/__builtin__/interp_memoryview.py +++ b/pypy/module/__builtin__/interp_memoryview.py @@ -69,6 +69,10 @@ return W_MemoryView(buf) def descr_buffer(self, space): + """Note that memoryview() objects in PyPy support buffer(), whereas + not in CPython; but CPython supports passing memoryview() to most + built-in functions that accept buffers, with the notable exception + of the buffer() built-in.""" return space.wrap(self.buf) def descr_tobytes(self, space): diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -1,9 +1,10 @@ import py - +from pypy.conftest import gettestobjspace class AppTestAST: def setup_class(cls): + cls.space = gettestobjspace(usemodules=['struct']) cls.w_ast = cls.space.appexec([], """(): import _ast return _ast""") diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -4,7 +4,7 @@ class AppTestCodecs: def setup_class(cls): - space = gettestobjspace(usemodules=('unicodedata',)) + space = gettestobjspace(usemodules=('unicodedata', 'struct')) cls.space = space def test_register_noncallable(self): diff --git a/pypy/module/_continuation/test/test_zpickle.py b/pypy/module/_continuation/test/test_zpickle.py --- a/pypy/module/_continuation/test/test_zpickle.py +++ b/pypy/module/_continuation/test/test_zpickle.py @@ -106,7 +106,7 @@ version = 0 def setup_class(cls): - cls.space = gettestobjspace(usemodules=('_continuation',), + cls.space = gettestobjspace(usemodules=('_continuation', 'struct'), 
CALL_METHOD=True) cls.space.appexec([], """(): global continulet, A, __name__ diff --git a/pypy/module/_hashlib/test/test_hashlib.py b/pypy/module/_hashlib/test/test_hashlib.py --- a/pypy/module/_hashlib/test/test_hashlib.py +++ b/pypy/module/_hashlib/test/test_hashlib.py @@ -3,7 +3,7 @@ class AppTestHashlib: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['_hashlib']) + cls.space = gettestobjspace(usemodules=['_hashlib', 'array', 'struct']) def test_simple(self): import _hashlib diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py --- a/pypy/module/_io/test/test_io.py +++ b/pypy/module/_io/test/test_io.py @@ -158,7 +158,7 @@ class AppTestOpen: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['_io', '_locale']) + cls.space = gettestobjspace(usemodules=['_io', '_locale', 'array', 'struct']) tmpfile = udir.join('tmpfile').ensure() cls.w_tmpfile = cls.space.wrap(str(tmpfile)) diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -92,7 +92,8 @@ class AppTestSocketConnection(BaseConnectionTest): def setup_class(cls): - space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal')) + space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal', + 'struct', 'array')) cls.space = space cls.w_connections = space.newlist([]) diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -6,7 +6,7 @@ from pypy.rpython.lltypesystem import lltype, rffi def setup_module(mod): - mod.space = gettestobjspace(usemodules=['_socket', 'array']) + mod.space = gettestobjspace(usemodules=['_socket', 'array', 'struct']) global socket import socket mod.w_socket = space.appexec([], "(): 
import _socket as m; return m") @@ -372,10 +372,9 @@ def test_socket_connect(self): import _socket, os s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM, 0) - # XXX temporarily we use python.org to test, will have more robust tests - # in the absence of a network connection later when more parts of the - # socket API are implemented. Currently skip the test if there is no - # connection. + # it would be nice to have a test which works even if there is no + # network connection. However, this one is "good enough" for now. Skip + # it if there is no connection. try: s.connect(("www.python.org", 80)) except _socket.gaierror, ex: diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -1,7 +1,6 @@ from pypy.conftest import gettestobjspace import os import py -from pypy.rlib.rarithmetic import is_valid_int class AppTestSSL: @@ -31,7 +30,6 @@ assert isinstance(_ssl.SSL_ERROR_EOF, int) assert isinstance(_ssl.SSL_ERROR_INVALID_ERROR_CODE, int) - assert is_valid_int(_ssl.OPENSSL_VERSION_NUMBER) assert isinstance(_ssl.OPENSSL_VERSION_INFO, tuple) assert len(_ssl.OPENSSL_VERSION_INFO) == 5 assert isinstance(_ssl.OPENSSL_VERSION, str) @@ -92,7 +90,7 @@ class AppTestConnectedSSL: def setup_class(cls): - space = gettestobjspace(usemodules=('_ssl', '_socket')) + space = gettestobjspace(usemodules=('_ssl', '_socket', 'struct')) cls.space = space def setup_method(self, method): @@ -181,7 +179,7 @@ # to exercise the poll() calls def setup_class(cls): - space = gettestobjspace(usemodules=('_ssl', '_socket')) + space = gettestobjspace(usemodules=('_ssl', '_socket', 'struct')) cls.space = space cls.space.appexec([], """(): import socket; socket.setdefaulttimeout(1) diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -10,7 +10,7 @@ [('next', PyInterpreterState)], 
PyInterpreterStateStruct) PyThreadState = lltype.Ptr(cpython_struct( - "PyThreadState", + "PyThreadState", [('interp', PyInterpreterState), ('dict', PyObject), ])) @@ -19,12 +19,15 @@ def PyEval_SaveThread(space): """Release the global interpreter lock (if it has been created and thread support is enabled) and reset the thread state to NULL, returning the - previous thread state (which is not NULL except in PyPy). If the lock has been created, + previous thread state. If the lock has been created, the current thread must have acquired it. (This function is available even when thread support is disabled at compile time.)""" + state = space.fromcache(InterpreterState) if rffi.aroundstate.before: rffi.aroundstate.before() - return lltype.nullptr(PyThreadState.TO) + tstate = state.swap_thread_state( + space, lltype.nullptr(PyThreadState.TO)) + return tstate @cpython_api([PyThreadState], lltype.Void) def PyEval_RestoreThread(space, tstate): @@ -35,6 +38,8 @@ when thread support is disabled at compile time.)""" if rffi.aroundstate.after: rffi.aroundstate.after() + state = space.fromcache(InterpreterState) + state.swap_thread_state(space, tstate) @cpython_api([], lltype.Void) def PyEval_InitThreads(space): @@ -67,28 +72,91 @@ dealloc=ThreadState_dealloc) from pypy.interpreter.executioncontext import ExecutionContext + +# Keep track of the ThreadStateCapsule for a particular execution context. The +# default is for new execution contexts not to have one; it is allocated on the +# first cpyext-based request for it. ExecutionContext.cpyext_threadstate = ThreadStateCapsule(None) +# Also keep track of whether it has been initialized yet or not (None is a valid +# PyThreadState for an execution context to have, when the GIL has been +# released, so a check against that can't be used to determine the need for +# initialization). 
+ExecutionContext.cpyext_initialized_threadstate = False + +def cleanup_cpyext_state(self): + try: + del self.cpyext_threadstate + except AttributeError: + pass + self.cpyext_initialized_threadstate = False +ExecutionContext.cleanup_cpyext_state = cleanup_cpyext_state + class InterpreterState(object): def __init__(self, space): self.interpreter_state = lltype.malloc( PyInterpreterState.TO, flavor='raw', zero=True, immortal=True) def new_thread_state(self, space): + """ + Create a new ThreadStateCapsule to hold the PyThreadState for a + particular execution context. + + :param space: A space. + + :returns: A new ThreadStateCapsule holding a newly allocated + PyThreadState and referring to this interpreter state. + """ capsule = ThreadStateCapsule(space) ts = capsule.memory ts.c_interp = self.interpreter_state ts.c_dict = make_ref(space, space.newdict()) return capsule + def get_thread_state(self, space): + """ + Get the current PyThreadState for the current execution context. + + :param space: A space. + + :returns: The current PyThreadState for the current execution context, + or None if it does not have one. + """ ec = space.getexecutioncontext() return self._get_thread_state(space, ec).memory + + def swap_thread_state(self, space, tstate): + """ + Replace the current thread state of the current execution context with a + new thread state. + + :param space: The space. + + :param tstate: The new PyThreadState for the current execution context. + + :returns: The old thread state for the current execution context, either + None or a PyThreadState. + """ + ec = space.getexecutioncontext() + capsule = self._get_thread_state(space, ec) + old_tstate = capsule.memory + capsule.memory = tstate + return old_tstate + def _get_thread_state(self, space, ec): - if ec.cpyext_threadstate.memory == lltype.nullptr(PyThreadState.TO): + """ + Get the ThreadStateCapsule for the given execution context, possibly + creating a new one if it does not already have one. 
+ + :param space: The space. + :param ec: The ExecutionContext of which to get the thread state. + :returns: The ThreadStateCapsule for the given execution context. + """ + if not ec.cpyext_initialized_threadstate: ec.cpyext_threadstate = self.new_thread_state(space) - + ec.cpyext_initialized_threadstate = True return ec.cpyext_threadstate @cpython_api([], PyThreadState, error=CANNOT_FAIL) @@ -105,13 +173,8 @@ def PyThreadState_Swap(space, tstate): """Swap the current thread state with the thread state given by the argument tstate, which may be NULL. The global interpreter lock must be held.""" - # All cpyext calls release and acquire the GIL, so this function has no - # side-effects - if tstate: - return lltype.nullptr(PyThreadState.TO) - else: - state = space.fromcache(InterpreterState) - return state.get_thread_state(space) + state = space.fromcache(InterpreterState) + return state.swap_thread_state(space, tstate) @cpython_api([PyThreadState], lltype.Void) def PyEval_AcquireThread(space, tstate): diff --git a/pypy/module/cpyext/src/getargs.c b/pypy/module/cpyext/src/getargs.c --- a/pypy/module/cpyext/src/getargs.c +++ b/pypy/module/cpyext/src/getargs.c @@ -23,16 +23,33 @@ #define FLAG_COMPAT 1 #define FLAG_SIZE_T 2 +typedef int (*destr_t)(PyObject *, void *); + + +/* Keep track of "objects" that have been allocated or initialized and + which will need to be deallocated or cleaned up somehow if overall + parsing fails. 
+*/ +typedef struct { + void *item; + destr_t destructor; +} freelistentry_t; + +typedef struct { + int first_available; + freelistentry_t *entries; +} freelist_t; + /* Forward */ static int vgetargs1(PyObject *, const char *, va_list *, int); static void seterror(int, const char *, int *, const char *, const char *); static char *convertitem(PyObject *, const char **, va_list *, int, int *, - char *, size_t, PyObject **); + char *, size_t, freelist_t *); static char *converttuple(PyObject *, const char **, va_list *, int, - int *, char *, size_t, int, PyObject **); + int *, char *, size_t, int, freelist_t *); static char *convertsimple(PyObject *, const char **, va_list *, int, char *, - size_t, PyObject **); + size_t, freelist_t *); static Py_ssize_t convertbuffer(PyObject *, void **p, char **); static int getbuffer(PyObject *, Py_buffer *, char**); @@ -129,57 +146,56 @@ /* Handle cleanup of allocated memory in case of exception */ -static void -cleanup_ptr(void *ptr) +static int +cleanup_ptr(PyObject *self, void *ptr) { - PyMem_FREE(ptr); -} - -static void -cleanup_buffer(void *ptr) -{ - PyBuffer_Release((Py_buffer *) ptr); + if (ptr) { + PyMem_FREE(ptr); + } + return 0; } static int -addcleanup(void *ptr, PyObject **freelist, void (*destr)(void *)) +cleanup_buffer(PyObject *self, void *ptr) { - PyObject *cobj; - if (!*freelist) { - *freelist = PyList_New(0); - if (!*freelist) { - destr(ptr); - return -1; - } - } - cobj = PyCObject_FromVoidPtr(ptr, destr); - if (!cobj) { - destr(ptr); - return -1; - } - if (PyList_Append(*freelist, cobj)) { - Py_DECREF(cobj); - return -1; - } - Py_DECREF(cobj); - return 0; + Py_buffer *buf = (Py_buffer *)ptr; + if (buf) { + PyBuffer_Release(buf); + } + return 0; } static int -cleanreturn(int retval, PyObject *freelist) +addcleanup(void *ptr, freelist_t *freelist, destr_t destructor) { - if (freelist && retval != 0) { - /* We were successful, reset the destructors so that they - don't get called. 
*/ - Py_ssize_t len = PyList_GET_SIZE(freelist), i; - for (i = 0; i < len; i++) - ((PyCObject *) PyList_GET_ITEM(freelist, i)) - ->destructor = NULL; - } - Py_XDECREF(freelist); - return retval; + int index; + + index = freelist->first_available; + freelist->first_available += 1; + + freelist->entries[index].item = ptr; + freelist->entries[index].destructor = destructor; + + return 0; } +static int +cleanreturn(int retval, freelist_t *freelist) +{ + int index; + + if (retval == 0) { + /* A failure occurred, therefore execute all of the cleanup + functions. + */ + for (index = 0; index < freelist->first_available; ++index) { + freelist->entries[index].destructor(NULL, + freelist->entries[index].item); + } + } + PyMem_Free(freelist->entries); + return retval; +} static int vgetargs1(PyObject *args, const char *format, va_list *p_va, int flags) @@ -195,7 +211,7 @@ const char *formatsave = format; Py_ssize_t i, len; char *msg; - PyObject *freelist = NULL; + freelist_t freelist = {0, NULL}; int compat = flags & FLAG_COMPAT; assert(compat || (args != (PyObject*)NULL)); @@ -251,16 +267,18 @@ format = formatsave; + freelist.entries = PyMem_New(freelistentry_t, max); + if (compat) { if (max == 0) { if (args == NULL) - return 1; + return cleanreturn(1, &freelist); PyOS_snprintf(msgbuf, sizeof(msgbuf), "%.200s%s takes no arguments", fname==NULL ? "function" : fname, fname==NULL ? "" : "()"); PyErr_SetString(PyExc_TypeError, msgbuf); - return 0; + return cleanreturn(0, &freelist); } else if (min == 1 && max == 1) { if (args == NULL) { @@ -269,26 +287,26 @@ fname==NULL ? "function" : fname, fname==NULL ? 
"" : "()"); PyErr_SetString(PyExc_TypeError, msgbuf); - return 0; + return cleanreturn(0, &freelist); } msg = convertitem(args, &format, p_va, flags, levels, msgbuf, sizeof(msgbuf), &freelist); if (msg == NULL) - return cleanreturn(1, freelist); + return cleanreturn(1, &freelist); seterror(levels[0], msg, levels+1, fname, message); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } else { PyErr_SetString(PyExc_SystemError, "old style getargs format uses new features"); - return 0; + return cleanreturn(0, &freelist); } } if (!PyTuple_Check(args)) { PyErr_SetString(PyExc_SystemError, "new style getargs format but argument is not a tuple"); - return 0; + return cleanreturn(0, &freelist); } len = PyTuple_GET_SIZE(args); @@ -308,7 +326,7 @@ message = msgbuf; } PyErr_SetString(PyExc_TypeError, message); - return 0; + return cleanreturn(0, &freelist); } for (i = 0; i < len; i++) { @@ -319,7 +337,7 @@ sizeof(msgbuf), &freelist); if (msg) { seterror(i+1, msg, levels, fname, message); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } } @@ -328,10 +346,10 @@ *format != '|' && *format != ':' && *format != ';') { PyErr_Format(PyExc_SystemError, "bad format string: %.200s", formatsave); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } - return cleanreturn(1, freelist); + return cleanreturn(1, &freelist); } @@ -395,7 +413,7 @@ static char * converttuple(PyObject *arg, const char **p_format, va_list *p_va, int flags, int *levels, char *msgbuf, size_t bufsize, int toplevel, - PyObject **freelist) + freelist_t *freelist) { int level = 0; int n = 0; @@ -472,7 +490,7 @@ static char * convertitem(PyObject *arg, const char **p_format, va_list *p_va, int flags, - int *levels, char *msgbuf, size_t bufsize, PyObject **freelist) + int *levels, char *msgbuf, size_t bufsize, freelist_t *freelist) { char *msg; const char *format = *p_format; @@ -539,7 +557,7 @@ static char * convertsimple(PyObject *arg, const char **p_format, 
va_list *p_va, int flags, - char *msgbuf, size_t bufsize, PyObject **freelist) + char *msgbuf, size_t bufsize, freelist_t *freelist) { /* For # codes */ #define FETCH_SIZE int *q=NULL;Py_ssize_t *q2=NULL;\ @@ -1501,7 +1519,9 @@ const char *fname, *msg, *custom_msg, *keyword; int min = INT_MAX; int i, len, nargs, nkeywords; - PyObject *freelist = NULL, *current_arg; + PyObject *current_arg; + freelist_t freelist = {0, NULL}; + assert(args != NULL && PyTuple_Check(args)); assert(keywords == NULL || PyDict_Check(keywords)); @@ -1525,6 +1545,8 @@ for (len=0; kwlist[len]; len++) continue; + freelist.entries = PyMem_New(freelistentry_t, len); + nargs = PyTuple_GET_SIZE(args); nkeywords = (keywords == NULL) ? 0 : PyDict_Size(keywords); if (nargs + nkeywords > len) { @@ -1535,7 +1557,7 @@ len, (len == 1) ? "" : "s", nargs + nkeywords); - return 0; + return cleanreturn(0, &freelist); } /* convert tuple args and keyword args in same loop, using kwlist to drive process */ @@ -1549,7 +1571,7 @@ PyErr_Format(PyExc_RuntimeError, "More keyword list entries (%d) than " "format specifiers (%d)", len, i); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } current_arg = NULL; if (nkeywords) { @@ -1563,11 +1585,11 @@ "Argument given by name ('%s') " "and position (%d)", keyword, i+1); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } } else if (nkeywords && PyErr_Occurred()) - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); else if (i < nargs) current_arg = PyTuple_GET_ITEM(args, i); @@ -1576,7 +1598,7 @@ levels, msgbuf, sizeof(msgbuf), &freelist); if (msg) { seterror(i+1, msg, levels, fname, custom_msg); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } continue; } @@ -1585,14 +1607,14 @@ PyErr_Format(PyExc_TypeError, "Required argument " "'%s' (pos %d) not found", keyword, i+1); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } /* current code reports success when all 
required args * fulfilled and no keyword args left, with no further * validation. XXX Maybe skip this in debug build ? */ if (!nkeywords) - return cleanreturn(1, freelist); + return cleanreturn(1, &freelist); /* We are into optional args, skip thru to any remaining * keyword args */ @@ -1600,7 +1622,7 @@ if (msg) { PyErr_Format(PyExc_RuntimeError, "%s: '%s'", msg, format); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } } @@ -1608,7 +1630,7 @@ PyErr_Format(PyExc_RuntimeError, "more argument specifiers than keyword list entries " "(remaining format:'%s')", format); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } /* make sure there are no extraneous keyword arguments */ @@ -1621,7 +1643,7 @@ if (!PyString_Check(key)) { PyErr_SetString(PyExc_TypeError, "keywords must be strings"); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } ks = PyString_AsString(key); for (i = 0; i < len; i++) { @@ -1635,12 +1657,12 @@ "'%s' is an invalid keyword " "argument for this function", ks); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } } } - return cleanreturn(1, freelist); + return cleanreturn(1, &freelist); } diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -106,10 +106,7 @@ del obj import gc; gc.collect() - try: - del space.getexecutioncontext().cpyext_threadstate - except AttributeError: - pass + space.getexecutioncontext().cleanup_cpyext_state() for w_obj in state.non_heaptypes_w: Py_DecRef(space, w_obj) @@ -169,7 +166,7 @@ class AppTestCpythonExtensionBase(LeakCheckingTest): def setup_class(cls): - cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi']) + cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array']) cls.space.getbuiltinmodule("cpyext") from pypy.module.imp.importing import importhook 
importhook(cls.space, "os") # warm up reference counts diff --git a/pypy/module/cpyext/test/test_import.py b/pypy/module/cpyext/test/test_import.py --- a/pypy/module/cpyext/test/test_import.py +++ b/pypy/module/cpyext/test/test_import.py @@ -19,7 +19,7 @@ space.wrap('__name__'))) == 'foobar' def test_getmoduledict(self, space, api): - testmod = "binascii" + testmod = "_functools" w_pre_dict = api.PyImport_GetModuleDict() assert not space.is_true(space.contains(w_pre_dict, space.wrap(testmod))) diff --git a/pypy/module/cpyext/test/test_pystate.py b/pypy/module/cpyext/test/test_pystate.py --- a/pypy/module/cpyext/test/test_pystate.py +++ b/pypy/module/cpyext/test/test_pystate.py @@ -3,6 +3,10 @@ from pypy.rpython.lltypesystem.lltype import nullptr from pypy.module.cpyext.pystate import PyInterpreterState, PyThreadState from pypy.module.cpyext.pyobject import from_ref +from pypy.rpython.lltypesystem import lltype +from pypy.module.cpyext.test.test_cpyext import LeakCheckingTest, freeze_refcnts +from pypy.module.cpyext.pystate import PyThreadState_Get, PyInterpreterState_Head +from pypy.tool import leakfinder class AppTestThreads(AppTestCpythonExtensionBase): def test_allow_threads(self): @@ -21,6 +25,93 @@ # Should compile at least module.test() + + def test_thread_state_get(self): + module = self.import_extension('foo', [ + ("get", "METH_NOARGS", + """ + PyThreadState *tstate = PyThreadState_Get(); + if (tstate == NULL) { + return PyLong_FromLong(0); + } + if (tstate->interp != PyInterpreterState_Head()) { + return PyLong_FromLong(1); + } + if (tstate->interp->next != NULL) { + return PyLong_FromLong(2); + } + return PyLong_FromLong(3); + """), + ]) + assert module.get() == 3 + + def test_basic_threadstate_dance(self): + module = self.import_extension('foo', [ + ("dance", "METH_NOARGS", + """ + PyThreadState *old_tstate, *new_tstate; + + old_tstate = PyThreadState_Swap(NULL); + if (old_tstate == NULL) { + return PyLong_FromLong(0); + } + + new_tstate = 
PyThreadState_Get(); + if (new_tstate != NULL) { + return PyLong_FromLong(1); + } + + new_tstate = PyThreadState_Swap(old_tstate); + if (new_tstate != NULL) { + return PyLong_FromLong(2); + } + + new_tstate = PyThreadState_Get(); + if (new_tstate != old_tstate) { + return PyLong_FromLong(3); + } + + return PyLong_FromLong(4); + """), + ]) + assert module.dance() == 4 + + def test_threadstate_dict(self): + module = self.import_extension('foo', [ + ("getdict", "METH_NOARGS", + """ + PyObject *dict = PyThreadState_GetDict(); + Py_INCREF(dict); + return dict; + """), + ]) + assert isinstance(module.getdict(), dict) + + def test_savethread(self): + module = self.import_extension('foo', [ + ("bounce", "METH_NOARGS", + """ + PyThreadState *tstate = PyEval_SaveThread(); + if (tstate == NULL) { + return PyLong_FromLong(0); + } + + if (PyThreadState_Get() != NULL) { + return PyLong_FromLong(1); + } + + PyEval_RestoreThread(tstate); + + if (PyThreadState_Get() != tstate) { + return PyLong_FromLong(2); + } + + return PyLong_FromLong(3); + """), + ]) + + + class TestInterpreterState(BaseApiTest): def test_interpreter_head(self, space, api): state = api.PyInterpreterState_Head() @@ -29,31 +120,3 @@ def test_interpreter_next(self, space, api): state = api.PyInterpreterState_Head() assert nullptr(PyInterpreterState.TO) == api.PyInterpreterState_Next(state) - -class TestThreadState(BaseApiTest): - def test_thread_state_get(self, space, api): - ts = api.PyThreadState_Get() - assert ts != nullptr(PyThreadState.TO) - - def test_thread_state_interp(self, space, api): - ts = api.PyThreadState_Get() - assert ts.c_interp == api.PyInterpreterState_Head() - assert ts.c_interp.c_next == nullptr(PyInterpreterState.TO) - - def test_basic_threadstate_dance(self, space, api): - # Let extension modules call these functions, - # Not sure of the semantics in pypy though. 
- # (cpyext always acquires and releases the GIL around calls) - tstate = api.PyThreadState_Swap(None) - assert tstate is not None - assert not api.PyThreadState_Swap(tstate) - - api.PyEval_AcquireThread(tstate) - api.PyEval_ReleaseThread(tstate) - - def test_threadstate_dict(self, space, api): - ts = api.PyThreadState_Get() - ref = ts.c_dict - assert ref == api.PyThreadState_GetDict() - w_obj = from_ref(space, ref) - assert space.isinstance_w(w_obj, space.w_dict) diff --git a/pypy/module/fcntl/test/test_fcntl.py b/pypy/module/fcntl/test/test_fcntl.py --- a/pypy/module/fcntl/test/test_fcntl.py +++ b/pypy/module/fcntl/test/test_fcntl.py @@ -13,7 +13,7 @@ class AppTestFcntl: def setup_class(cls): - space = gettestobjspace(usemodules=('fcntl', 'array')) + space = gettestobjspace(usemodules=('fcntl', 'array', 'struct')) cls.space = space tmpprefix = str(udir.ensure('test_fcntl', dir=1).join('tmp_')) cls.w_tmp = space.wrap(tmpprefix) diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py --- a/pypy/module/itertools/test/test_itertools.py +++ b/pypy/module/itertools/test/test_itertools.py @@ -891,7 +891,7 @@ class AppTestItertools27: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['itertools']) + cls.space = gettestobjspace(usemodules=['itertools', 'struct']) if cls.space.is_true(cls.space.appexec([], """(): import sys; return sys.version_info < (2, 7) """)): diff --git a/pypy/module/marshal/test/make_test_marshal.py b/pypy/module/marshal/test/make_test_marshal.py deleted file mode 100644 --- a/pypy/module/marshal/test/make_test_marshal.py +++ /dev/null @@ -1,78 +0,0 @@ - -TESTCASES = """\ - None - False - True - StopIteration - Ellipsis - 42 - -17 - sys.maxint - -1.25 - -1.25 #2 - 2+5j - 2+5j #2 - 42L - -1234567890123456789012345678901234567890L - hello # not interned - "hello" - () - (1, 2) - [] - [3, 4] - {} - {5: 6, 7: 8} - func.func_code - scopefunc.func_code - u'hello' - set() - set([1, 2]) - 
frozenset() - frozenset([3, 4]) -""".strip().split('\n') - -def readable(s): - for c, repl in ( - ("'", '_quote_'), ('"', '_Quote_'), (':', '_colon_'), ('.', '_dot_'), - ('[', '_list_'), (']', '_tsil_'), ('{', '_dict_'), ('}', '_tcid_'), - ('-', '_minus_'), ('+', '_plus_'), - (',', '_comma_'), ('(', '_brace_'), (')', '_ecarb_') ): - s = s.replace(c, repl) - lis = list(s) - for i, c in enumerate(lis): - if c.isalnum() or c == '_': - continue - lis[i] = '_' - return ''.join(lis) - -print """class AppTestMarshal: -""" -for line in TESTCASES: - line = line.strip() - name = readable(line) - version = '' - extra = '' - if line.endswith('#2'): - version = ', 2' - extra = '; assert len(s) in (9, 17)' - src = '''\ - def test_%(name)s(self): - import sys - hello = "he" - hello += "llo" - def func(x): - return lambda y: x+y - scopefunc = func(42) - import marshal, StringIO - case = %(line)s - print "case: %%-30s func=%(name)s" %% (case, ) - s = marshal.dumps(case%(version)s)%(extra)s - x = marshal.loads(s) - assert x == case - f = StringIO.StringIO() - marshal.dump(case, f) - f.seek(0) - x = marshal.load(f) - assert x == case -''' % {'name': name, 'line': line, 'version' : version, 'extra': extra} - print src diff --git a/pypy/module/math/test/test_direct.py b/pypy/module/math/test/test_direct.py --- a/pypy/module/math/test/test_direct.py +++ b/pypy/module/math/test/test_direct.py @@ -59,6 +59,9 @@ ('copysign', (1.5, -0.0), -1.5), ('copysign', (1.5, INFINITY), 1.5), ('copysign', (1.5, -INFINITY), -1.5), + ] + if sys.platform != 'win32': # all NaNs seem to be negative there...? 
+ IRREGCASES += [ ('copysign', (1.5, NAN), 1.5), ('copysign', (1.75, -NAN), -1.75), # special case for -NAN here ] diff --git a/pypy/module/math/test/test_math.py b/pypy/module/math/test/test_math.py --- a/pypy/module/math/test/test_math.py +++ b/pypy/module/math/test/test_math.py @@ -6,7 +6,7 @@ class AppTestMath: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['math']) + cls.space = gettestobjspace(usemodules=['math', 'struct']) cls.w_cases = cls.space.wrap(test_direct.MathTests.TESTCASES) cls.w_consistent_host = cls.space.wrap(test_direct.consistent_host) diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -37,26 +37,44 @@ 'True_': 'types.Bool.True', 'False_': 'types.Bool.False', + 'typeinfo': 'interp_dtype.get_dtype_cache(space).w_typeinfo', + 'generic': 'interp_boxes.W_GenericBox', 'number': 'interp_boxes.W_NumberBox', 'integer': 'interp_boxes.W_IntegerBox', 'signedinteger': 'interp_boxes.W_SignedIntegerBox', 'unsignedinteger': 'interp_boxes.W_UnsignedIntegerBox', 'bool_': 'interp_boxes.W_BoolBox', + 'bool8': 'interp_boxes.W_BoolBox', 'int8': 'interp_boxes.W_Int8Box', + 'byte': 'interp_boxes.W_Int8Box', 'uint8': 'interp_boxes.W_UInt8Box', + 'ubyte': 'interp_boxes.W_UInt8Box', 'int16': 'interp_boxes.W_Int16Box', + 'short': 'interp_boxes.W_Int16Box', 'uint16': 'interp_boxes.W_UInt16Box', + 'ushort': 'interp_boxes.W_UInt16Box', 'int32': 'interp_boxes.W_Int32Box', + 'intc': 'interp_boxes.W_Int32Box', 'uint32': 'interp_boxes.W_UInt32Box', + 'uintc': 'interp_boxes.W_UInt32Box', 'int64': 'interp_boxes.W_Int64Box', 'uint64': 'interp_boxes.W_UInt64Box', + 'longlong': 'interp_boxes.W_LongLongBox', + 'ulonglong': 'interp_boxes.W_ULongLongBox', 'int_': 'interp_boxes.W_LongBox', 'inexact': 'interp_boxes.W_InexactBox', 'floating': 'interp_boxes.W_FloatingBox', 'float_': 'interp_boxes.W_Float64Box', 'float32': 'interp_boxes.W_Float32Box', 
'float64': 'interp_boxes.W_Float64Box', + 'intp': 'types.IntP.BoxType', + 'uintp': 'types.UIntP.BoxType', + 'flexible': 'interp_boxes.W_FlexibleBox', + 'character': 'interp_boxes.W_CharacterBox', + 'str_': 'interp_boxes.W_StringBox', + 'unicode_': 'interp_boxes.W_UnicodeBox', + 'void': 'interp_boxes.W_VoidBox', } # ufuncs @@ -67,6 +85,7 @@ ("arccos", "arccos"), ("arcsin", "arcsin"), ("arctan", "arctan"), + ("arctan2", "arctan2"), ("arccosh", "arccosh"), ("arcsinh", "arcsinh"), ("arctanh", "arctanh"), @@ -77,7 +96,10 @@ ("true_divide", "true_divide"), ("equal", "equal"), ("exp", "exp"), + ("exp2", "exp2"), + ("expm1", "expm1"), ("fabs", "fabs"), + ("fmod", "fmod"), ("floor", "floor"), ("ceil", "ceil"), ("greater", "greater"), @@ -92,8 +114,10 @@ ("radians", "radians"), ("degrees", "degrees"), ("deg2rad", "radians"), + ("rad2deg", "degrees"), ("reciprocal", "reciprocal"), ("sign", "sign"), + ("signbit", "signbit"), ("sin", "sin"), ("sinh", "sinh"), ("subtract", "subtract"), @@ -106,6 +130,9 @@ ('bitwise_not', 'invert'), ('isnan', 'isnan'), ('isinf', 'isinf'), + ('isneginf', 'isneginf'), + ('isposinf', 'isposinf'), + ('isfinite', 'isfinite'), ('logical_and', 'logical_and'), ('logical_xor', 'logical_xor'), ('logical_not', 'logical_not'), @@ -116,6 +143,8 @@ ('log1p', 'log1p'), ('power', 'power'), ('floor_divide', 'floor_divide'), + ('logaddexp', 'logaddexp'), + ('logaddexp2', 'logaddexp2'), ]: interpleveldefs[exposed] = "interp_ufuncs.get(space).%s" % impl diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -33,7 +33,7 @@ pass SINGLE_ARG_FUNCTIONS = ["sum", "prod", "max", "min", "all", "any", - "unegative", "flat"] + "unegative", "flat", "tostring"] TWO_ARG_FUNCTIONS = ["dot", 'take'] class FakeSpace(object): @@ -51,6 +51,8 @@ w_long = "long" w_tuple = 'tuple' w_slice = "slice" + w_str = "str" + w_unicode = "unicode" def __init__(self): """NOT_RPYTHON""" @@ 
-91,8 +93,12 @@ return BoolObject(obj) elif isinstance(obj, int): return IntObject(obj) + elif isinstance(obj, long): + return LongObject(obj) elif isinstance(obj, W_Root): return obj + elif isinstance(obj, str): + return StringObject(obj) raise NotImplementedError def newlist(self, items): @@ -120,6 +126,11 @@ return int(w_obj.floatval) raise NotImplementedError + def str_w(self, w_obj): + if isinstance(w_obj, StringObject): + return w_obj.v + raise NotImplementedError + def int(self, w_obj): if isinstance(w_obj, IntObject): return w_obj @@ -151,7 +162,13 @@ return instantiate(klass) def newtuple(self, list_w): - raise ValueError + return ListObject(list_w) + + def newdict(self): + return {} + + def setitem(self, dict, item, value): + dict[item] = value def len_w(self, w_obj): if isinstance(w_obj, ListObject): @@ -178,6 +195,11 @@ def __init__(self, intval): self.intval = intval +class LongObject(W_Root): + tp = FakeSpace.w_long + def __init__(self, intval): + self.intval = intval + class ListObject(W_Root): tp = FakeSpace.w_list def __init__(self, items): @@ -190,6 +212,11 @@ self.stop = stop self.step = step +class StringObject(W_Root): + tp = FakeSpace.w_str + def __init__(self, v): + self.v = v + class InterpreterState(object): def __init__(self, code): self.code = code @@ -407,6 +434,9 @@ w_res = neg.call(interp.space, [arr]) elif self.name == "flat": w_res = arr.descr_get_flatiter(interp.space) + elif self.name == "tostring": + arr.descr_tostring(interp.space) + w_res = None else: assert False # unreachable code elif self.name in TWO_ARG_FUNCTIONS: diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -1,24 +1,25 @@ from pypy.interpreter.baseobjspace import Wrappable -from pypy.interpreter.error import operationerrfmt +from pypy.interpreter.error import operationerrfmt, OperationError from pypy.interpreter.gateway import 
interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef from pypy.objspace.std.floattype import float_typedef +from pypy.objspace.std.stringtype import str_typedef +from pypy.objspace.std.unicodetype import unicode_typedef, unicode_from_object from pypy.objspace.std.inttype import int_typedef from pypy.rlib.rarithmetic import LONG_BIT from pypy.tool.sourcetools import func_with_new_name - MIXIN_32 = (int_typedef,) if LONG_BIT == 32 else () MIXIN_64 = (int_typedef,) if LONG_BIT == 64 else () def new_dtype_getter(name): - def get_dtype(space): + def _get_dtype(space): from pypy.module.micronumpy.interp_dtype import get_dtype_cache return getattr(get_dtype_cache(space), "w_%sdtype" % name) def new(space, w_subtype, w_value): - dtype = get_dtype(space) + dtype = _get_dtype(space) return dtype.itemtype.coerce_subtype(space, w_subtype, w_value) - return func_with_new_name(new, name + "_box_new"), staticmethod(get_dtype) + return func_with_new_name(new, name + "_box_new"), staticmethod(_get_dtype) class PrimitiveBox(object): _mixin_ = True @@ -37,6 +38,9 @@ w_subtype.getname(space, '?') ) + def get_dtype(self, space): + return self._get_dtype(space) + def descr_str(self, space): return space.wrap(self.get_dtype(space).itemtype.str_format(self)) @@ -44,12 +48,12 @@ return space.format(self.item(space), w_spec) def descr_int(self, space): - box = self.convert_to(W_LongBox.get_dtype(space)) + box = self.convert_to(W_LongBox._get_dtype(space)) assert isinstance(box, W_LongBox) return space.wrap(box.value) def descr_float(self, space): - box = self.convert_to(W_Float64Box.get_dtype(space)) + box = self.convert_to(W_Float64Box._get_dtype(space)) assert isinstance(box, W_Float64Box) return space.wrap(box.value) @@ -133,7 +137,7 @@ class W_BoolBox(W_GenericBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("bool") + descr__new__, _get_dtype = new_dtype_getter("bool") class W_NumberBox(W_GenericBox): _attrs_ = () @@ -149,34 +153,40 @@ pass class 
W_Int8Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("int8") + descr__new__, _get_dtype = new_dtype_getter("int8") class W_UInt8Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("uint8") + descr__new__, _get_dtype = new_dtype_getter("uint8") class W_Int16Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("int16") + descr__new__, _get_dtype = new_dtype_getter("int16") class W_UInt16Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("uint16") + descr__new__, _get_dtype = new_dtype_getter("uint16") class W_Int32Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("int32") + descr__new__, _get_dtype = new_dtype_getter("int32") class W_UInt32Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("uint32") + descr__new__, _get_dtype = new_dtype_getter("uint32") class W_LongBox(W_SignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("long") + descr__new__, _get_dtype = new_dtype_getter("long") class W_ULongBox(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("ulong") + descr__new__, _get_dtype = new_dtype_getter("ulong") class W_Int64Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("int64") + descr__new__, _get_dtype = new_dtype_getter("int64") + +class W_LongLongBox(W_SignedIntegerBox, PrimitiveBox): + descr__new__, _get_dtype = new_dtype_getter('longlong') class W_UInt64Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("uint64") + descr__new__, _get_dtype = new_dtype_getter("uint64") + +class W_ULongLongBox(W_SignedIntegerBox, PrimitiveBox): + descr__new__, _get_dtype = new_dtype_getter('ulonglong') class W_InexactBox(W_NumberBox): _attrs_ = () @@ -185,16 +195,71 @@ _attrs_ = () class W_Float32Box(W_FloatingBox, PrimitiveBox): - descr__new__, 
get_dtype = new_dtype_getter("float32") + descr__new__, _get_dtype = new_dtype_getter("float32") class W_Float64Box(W_FloatingBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("float64") + descr__new__, _get_dtype = new_dtype_getter("float64") +class W_FlexibleBox(W_GenericBox): + def __init__(self, arr, ofs, dtype): + self.arr = arr # we have to keep array alive + self.ofs = ofs + self.dtype = dtype + + def get_dtype(self, space): + return self.arr.dtype + @unwrap_spec(self=W_GenericBox) def descr_index(space, self): return space.index(self.item(space)) +class W_VoidBox(W_FlexibleBox): + @unwrap_spec(item=str) + def descr_getitem(self, space, item): + try: + ofs, dtype = self.dtype.fields[item] + except KeyError: + raise OperationError(space.w_IndexError, + space.wrap("Field %s does not exist" % item)) + return dtype.itemtype.read(self.arr, 1, self.ofs, ofs, dtype) + + @unwrap_spec(item=str) + def descr_setitem(self, space, item, w_value): + try: + ofs, dtype = self.dtype.fields[item] + except KeyError: + raise OperationError(space.w_IndexError, + space.wrap("Field %s does not exist" % item)) + dtype.itemtype.store(self.arr, 1, self.ofs, ofs, + dtype.coerce(space, w_value)) + +class W_CharacterBox(W_FlexibleBox): + pass + +class W_StringBox(W_CharacterBox): + def descr__new__string_box(space, w_subtype, w_arg): + from pypy.module.micronumpy.interp_numarray import W_NDimArray + from pypy.module.micronumpy.interp_dtype import new_string_dtype + + arg = space.str_w(space.str(w_arg)) + arr = W_NDimArray([1], new_string_dtype(space, len(arg))) + for i in range(len(arg)): + arr.storage[i] = arg[i] + return W_StringBox(arr, 0, arr.dtype) + + +class W_UnicodeBox(W_CharacterBox): + def descr__new__unicode_box(space, w_subtype, w_arg): + from pypy.module.micronumpy.interp_numarray import W_NDimArray + from pypy.module.micronumpy.interp_dtype import new_unicode_dtype + + arg = space.unicode_w(unicode_from_object(space, w_arg)) + arr = W_NDimArray([1], 
new_unicode_dtype(space, len(arg))) + # XXX not this way, we need store + #for i in range(len(arg)): + # arr.storage[i] = arg[i] + return W_UnicodeBox(arr, 0, arr.dtype) W_GenericBox.typedef = TypeDef("generic", __module__ = "numpypy", @@ -351,3 +416,28 @@ __new__ = interp2app(W_Float64Box.descr__new__.im_func), ) + +W_FlexibleBox.typedef = TypeDef("flexible", W_GenericBox.typedef, + __module__ = "numpypy", +) + +W_VoidBox.typedef = TypeDef("void", W_FlexibleBox.typedef, + __module__ = "numpypy", + __getitem__ = interp2app(W_VoidBox.descr_getitem), + __setitem__ = interp2app(W_VoidBox.descr_setitem), +) + +W_CharacterBox.typedef = TypeDef("character", W_FlexibleBox.typedef, + __module__ = "numpypy", +) + +W_StringBox.typedef = TypeDef("string_", (str_typedef, W_CharacterBox.typedef), + __module__ = "numpypy", + __new__ = interp2app(W_StringBox.descr__new__string_box.im_func), +) + +W_UnicodeBox.typedef = TypeDef("unicode_", (unicode_typedef, W_CharacterBox.typedef), + __module__ = "numpypy", + __new__ = interp2app(W_UnicodeBox.descr__new__unicode_box.im_func), +) + diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -1,26 +1,29 @@ + +import sys from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.error import OperationError -from pypy.interpreter.gateway import interp2app +from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import (TypeDef, GetSetProperty, interp_attrproperty, interp_attrproperty_w) -from pypy.module.micronumpy import types, signature, interp_boxes +from pypy.module.micronumpy import types, interp_boxes from pypy.rlib.objectmodel import specialize -from pypy.rlib.rarithmetic import LONG_BIT -from pypy.rpython.lltypesystem import lltype, rffi +from pypy.rlib.rarithmetic import LONG_BIT, r_longlong, r_ulonglong UNSIGNEDLTR = "u" SIGNEDLTR = "i" BOOLLTR = "b" 
FLOATINGLTR = "f" - - -VOID_STORAGE = lltype.Array(lltype.Char, hints={'nolength': True, 'render_as_void': True}) +VOIDLTR = 'V' +STRINGLTR = 'S' +UNICODELTR = 'U' class W_Dtype(Wrappable): _immutable_fields_ = ["itemtype", "num", "kind"] - def __init__(self, itemtype, num, kind, name, char, w_box_type, alternate_constructors=[], aliases=[]): + def __init__(self, itemtype, num, kind, name, char, w_box_type, + alternate_constructors=[], aliases=[], + fields=None, fieldnames=None, native=True): self.itemtype = itemtype self.num = num self.kind = kind @@ -29,53 +32,28 @@ self.w_box_type = w_box_type self.alternate_constructors = alternate_constructors self.aliases = aliases - - def malloc(self, length): - # XXX find out why test_zjit explodes with tracking of allocations - return lltype.malloc(VOID_STORAGE, self.itemtype.get_element_size() * length, - zero=True, flavor="raw", - track_allocation=False, add_memory_pressure=True - ) + self.fields = fields + self.fieldnames = fieldnames + self.native = native @specialize.argtype(1) def box(self, value): return self.itemtype.box(value) def coerce(self, space, w_item): - return self.itemtype.coerce(space, w_item) + return self.itemtype.coerce(space, self, w_item) - def getitem(self, storage, i): - return self.itemtype.read(storage, self.itemtype.get_element_size(), i, 0) + def getitem(self, arr, i): + return self.itemtype.read(arr, 1, i, 0) - def getitem_bool(self, storage, i): - isize = self.itemtype.get_element_size() - return self.itemtype.read_bool(storage, isize, i, 0) + def getitem_bool(self, arr, i): + return self.itemtype.read_bool(arr, 1, i, 0) - def setitem(self, storage, i, box): - self.itemtype.store(storage, self.itemtype.get_element_size(), i, 0, box) + def setitem(self, arr, i, box): + self.itemtype.store(arr, 1, i, 0, box) def fill(self, storage, box, start, stop): - self.itemtype.fill(storage, self.itemtype.get_element_size(), box, start, stop, 0) - - def descr__new__(space, w_subtype, w_dtype): - cache = 
get_dtype_cache(space) - - if space.is_w(w_dtype, space.w_None): - return cache.w_float64dtype - elif space.isinstance_w(w_dtype, w_subtype): - return w_dtype - elif space.isinstance_w(w_dtype, space.w_str): - name = space.str_w(w_dtype) - for dtype in cache.builtin_dtypes: - if dtype.name == name or dtype.char == name or name in dtype.aliases: - return dtype - else: - for dtype in cache.builtin_dtypes: - if w_dtype in dtype.alternate_constructors: - return dtype - if w_dtype is dtype.w_box_type: - return dtype - raise OperationError(space.w_TypeError, space.wrap("data type not understood")) + self.itemtype.fill(storage, self.get_size(), box, start, stop, 0) def descr_str(self, space): return space.wrap(self.name) @@ -86,6 +64,14 @@ def descr_get_itemsize(self, space): return space.wrap(self.itemtype.get_element_size()) + def descr_get_byteorder(self, space): + if self.native: + return space.wrap('=') + return space.wrap(nonnative_byteorder_prefix) + + def descr_get_alignment(self, space): + return space.wrap(self.itemtype.alignment) + def descr_get_shape(self, space): return space.newtuple([]) @@ -99,31 +85,193 @@ def descr_ne(self, space, w_other): return space.wrap(not self.eq(space, w_other)) + def descr_get_fields(self, space): + if self.fields is None: + return space.w_None + w_d = space.newdict() + for name, (offset, subdtype) in self.fields.iteritems(): + space.setitem(w_d, space.wrap(name), space.newtuple([subdtype, + space.wrap(offset)])) + return w_d + + def descr_get_names(self, space): + if self.fieldnames is None: + return space.w_None + return space.newtuple([space.wrap(name) for name in self.fieldnames]) + + @unwrap_spec(item=str) + def descr_getitem(self, space, item): + if self.fields is None: + raise OperationError(space.w_KeyError, space.wrap("There are no keys in dtypes %s" % self.name)) + try: + return self.fields[item][1] + except KeyError: + raise OperationError(space.w_KeyError, space.wrap("Field named %s not found" % item)) + def 
is_int_type(self): return (self.kind == SIGNEDLTR or self.kind == UNSIGNEDLTR or self.kind == BOOLLTR) + def is_signed(self): + return self.kind == SIGNEDLTR + def is_bool_type(self): return self.kind == BOOLLTR + def is_record_type(self): + return self.fields is not None + + def __repr__(self): + if self.fields is not None: + return '' % self.fields + return '' % self.itemtype + + def get_size(self): + return self.itemtype.get_element_size() + +def dtype_from_list(space, w_lst): + lst_w = space.listview(w_lst) + fields = {} + offset = 0 + ofs_and_items = [] + fieldnames = [] + for w_elem in lst_w: + w_fldname, w_flddesc = space.fixedview(w_elem, 2) + subdtype = descr__new__(space, space.gettypefor(W_Dtype), w_flddesc) + fldname = space.str_w(w_fldname) + if fldname in fields: + raise OperationError(space.w_ValueError, space.wrap("two fields with the same name")) + assert isinstance(subdtype, W_Dtype) + fields[fldname] = (offset, subdtype) + ofs_and_items.append((offset, subdtype.itemtype)) + offset += subdtype.itemtype.get_element_size() + fieldnames.append(fldname) + itemtype = types.RecordType(ofs_and_items, offset) + return W_Dtype(itemtype, 20, VOIDLTR, "void" + str(8 * itemtype.get_element_size()), + "V", space.gettypefor(interp_boxes.W_VoidBox), fields=fields, + fieldnames=fieldnames) + +def dtype_from_dict(space, w_dict): + raise OperationError(space.w_NotImplementedError, space.wrap( + "dtype from dict")) + +def variable_dtype(space, name): + if name[0] in '<>=': + name = name[1:] + char = name[0] + if len(name) == 1: + size = 0 + else: + try: + size = int(name[1:]) + except ValueError: + raise OperationError(space.w_TypeError, space.wrap("data type not understood")) + if char == 'S': + itemtype = types.StringType(size) + basename = 'string' + num = 18 + w_box_type = space.gettypefor(interp_boxes.W_StringBox) + elif char == 'V': + num = 20 + basename = 'void' + w_box_type = space.gettypefor(interp_boxes.W_VoidBox) + raise 
OperationError(space.w_NotImplementedError, space.wrap( + "pure void dtype")) + else: + assert char == 'U' + basename = 'unicode' + itemtype = types.UnicodeType(size) + num = 19 + w_box_type = space.gettypefor(interp_boxes.W_UnicodeBox) + return W_Dtype(itemtype, num, char, + basename + str(8 * itemtype.get_element_size()), + char, w_box_type) + +def dtype_from_spec(space, name): + raise OperationError(space.w_NotImplementedError, space.wrap( + "dtype from spec")) + +def descr__new__(space, w_subtype, w_dtype): + cache = get_dtype_cache(space) + + if space.is_w(w_dtype, space.w_None): + return cache.w_float64dtype + elif space.isinstance_w(w_dtype, w_subtype): + return w_dtype + elif space.isinstance_w(w_dtype, space.w_str): + name = space.str_w(w_dtype) + if ',' in name: + return dtype_from_spec(space, name) + try: + return cache.dtypes_by_name[name] + except KeyError: + pass + if name[0] in 'VSU' or name[0] in '<>=' and name[1] in 'VSU': + return variable_dtype(space, name) + elif space.isinstance_w(w_dtype, space.w_list): + return dtype_from_list(space, w_dtype) + elif space.isinstance_w(w_dtype, space.w_dict): + return dtype_from_dict(space, w_dtype) + else: + for dtype in cache.builtin_dtypes: + if w_dtype in dtype.alternate_constructors: + return dtype + if w_dtype is dtype.w_box_type: + return dtype + raise OperationError(space.w_TypeError, space.wrap("data type not understood")) + W_Dtype.typedef = TypeDef("dtype", __module__ = "numpypy", - __new__ = interp2app(W_Dtype.descr__new__.im_func), + __new__ = interp2app(descr__new__), __str__= interp2app(W_Dtype.descr_str), __repr__ = interp2app(W_Dtype.descr_repr), __eq__ = interp2app(W_Dtype.descr_eq), __ne__ = interp2app(W_Dtype.descr_ne), + __getitem__ = interp2app(W_Dtype.descr_getitem), num = interp_attrproperty("num", cls=W_Dtype), kind = interp_attrproperty("kind", cls=W_Dtype), + char = interp_attrproperty("char", cls=W_Dtype), type = interp_attrproperty_w("w_box_type", cls=W_Dtype), + byteorder = 
GetSetProperty(W_Dtype.descr_get_byteorder), itemsize = GetSetProperty(W_Dtype.descr_get_itemsize), + alignment = GetSetProperty(W_Dtype.descr_get_alignment), shape = GetSetProperty(W_Dtype.descr_get_shape), name = interp_attrproperty('name', cls=W_Dtype), + fields = GetSetProperty(W_Dtype.descr_get_fields), + names = GetSetProperty(W_Dtype.descr_get_names), ) W_Dtype.typedef.acceptable_as_base_class = False +if sys.byteorder == 'little': + byteorder_prefix = '<' + nonnative_byteorder_prefix = '>' +else: + byteorder_prefix = '>' + nonnative_byteorder_prefix = '<' + +def new_string_dtype(space, size): + return W_Dtype( + types.StringType(size), + num=18, + kind=STRINGLTR, + name='string', + char='S' + str(size), + w_box_type = space.gettypefor(interp_boxes.W_StringBox), + ) + +def new_unicode_dtype(space, size): + return W_Dtype( + types.UnicodeType(size), + num=19, + kind=UNICODELTR, + name='unicode', + char='U' + str(size), + w_box_type = space.gettypefor(interp_boxes.W_UnicodeBox), + ) + + class DtypeCache(object): def __init__(self, space): self.w_booldtype = W_Dtype( @@ -211,7 +359,6 @@ name="int64", char="q", w_box_type=space.gettypefor(interp_boxes.W_Int64Box), - alternate_constructors=[space.w_long], ) self.w_uint64dtype = W_Dtype( types.UInt64(), @@ -239,18 +386,149 @@ alternate_constructors=[space.w_float], aliases=["float"], ) - + self.w_longlongdtype = W_Dtype( + types.Int64(), + num=9, + kind=SIGNEDLTR, + name='int64', + char='q', + w_box_type = space.gettypefor(interp_boxes.W_LongLongBox), + alternate_constructors=[space.w_long], + ) + self.w_ulonglongdtype = W_Dtype( + types.UInt64(), + num=10, + kind=UNSIGNEDLTR, + name='uint64', + char='Q', + w_box_type = space.gettypefor(interp_boxes.W_ULongLongBox), + ) + self.w_stringdtype = W_Dtype( + types.StringType(1), + num=18, + kind=STRINGLTR, + name='string', + char='S', + w_box_type = space.gettypefor(interp_boxes.W_StringBox), + alternate_constructors=[space.w_str], + ) + self.w_unicodedtype = W_Dtype( 
+ types.UnicodeType(1), + num=19, + kind=UNICODELTR, + name='unicode', + char='U', + w_box_type = space.gettypefor(interp_boxes.W_UnicodeBox), + alternate_constructors=[space.w_unicode], + ) + self.w_voiddtype = W_Dtype( + types.VoidType(0), + num=20, + kind=VOIDLTR, + name='void', + char='V', + w_box_type = space.gettypefor(interp_boxes.W_VoidBox), + #alternate_constructors=[space.w_buffer], + # XXX no buffer in space + ) self.builtin_dtypes = [ self.w_booldtype, self.w_int8dtype, self.w_uint8dtype, self.w_int16dtype, self.w_uint16dtype, self.w_int32dtype, self.w_uint32dtype, self.w_longdtype, self.w_ulongdtype, - self.w_int64dtype, self.w_uint64dtype, self.w_float32dtype, - self.w_float64dtype + self.w_longlongdtype, self.w_ulonglongdtype, + self.w_float32dtype, + self.w_float64dtype, self.w_stringdtype, self.w_unicodedtype, + self.w_voiddtype, ] self.dtypes_by_num_bytes = sorted( (dtype.itemtype.get_element_size(), dtype) for dtype in self.builtin_dtypes ) + self.dtypes_by_name = {} + for dtype in self.builtin_dtypes: + self.dtypes_by_name[dtype.name] = dtype + can_name = dtype.kind + str(dtype.itemtype.get_element_size()) + self.dtypes_by_name[can_name] = dtype + self.dtypes_by_name[byteorder_prefix + can_name] = dtype + self.dtypes_by_name['=' + can_name] = dtype + new_name = nonnative_byteorder_prefix + can_name + itemtypename = dtype.itemtype.__class__.__name__ + itemtype = getattr(types, 'NonNative' + itemtypename)() + self.dtypes_by_name[new_name] = W_Dtype( + itemtype, + dtype.num, dtype.kind, new_name, dtype.char, dtype.w_box_type, + native=False) + for alias in dtype.aliases: + self.dtypes_by_name[alias] = dtype + self.dtypes_by_name[dtype.char] = dtype + + typeinfo_full = { + 'LONGLONG': self.w_int64dtype, + 'SHORT': self.w_int16dtype, + 'VOID': self.w_voiddtype, + #'LONGDOUBLE':, + 'UBYTE': self.w_uint8dtype, + 'UINTP': self.w_ulongdtype, + 'ULONG': self.w_ulongdtype, + 'LONG': self.w_longdtype, + 'UNICODE': self.w_unicodedtype, + #'OBJECT', + 
'ULONGLONG': self.w_ulonglongdtype, + 'STRING': self.w_stringdtype, + #'CDOUBLE', + #'DATETIME', + 'UINT': self.w_uint32dtype, + 'INTP': self.w_longdtype, + #'HALF', + 'BYTE': self.w_int8dtype, + #'CFLOAT': , + #'TIMEDELTA', + 'INT': self.w_int32dtype, + 'DOUBLE': self.w_float64dtype, + 'USHORT': self.w_uint16dtype, + 'FLOAT': self.w_float32dtype, + 'BOOL': self.w_booldtype, + #, 'CLONGDOUBLE'] + } + typeinfo_partial = { + 'Generic': interp_boxes.W_GenericBox, + 'Character': interp_boxes.W_CharacterBox, + 'Flexible': interp_boxes.W_FlexibleBox, + 'Inexact': interp_boxes.W_InexactBox, + 'Integer': interp_boxes.W_IntegerBox, + 'SignedInteger': interp_boxes.W_SignedIntegerBox, + 'UnsignedInteger': interp_boxes.W_UnsignedIntegerBox, + #'ComplexFloating', + 'Number': interp_boxes.W_NumberBox, + 'Floating': interp_boxes.W_FloatingBox + } + w_typeinfo = space.newdict() + for k, v in typeinfo_partial.iteritems(): + space.setitem(w_typeinfo, space.wrap(k), space.gettypefor(v)) + for k, dtype in typeinfo_full.iteritems(): + itemsize = dtype.itemtype.get_element_size() + items_w = [space.wrap(dtype.char), + space.wrap(dtype.num), + space.wrap(itemsize * 8), # in case of changing + # number of bits per byte in the future + space.wrap(itemsize or 1)] + if dtype.is_int_type(): + if dtype.kind == BOOLLTR: + w_maxobj = space.wrap(1) + w_minobj = space.wrap(0) + elif dtype.is_signed(): + w_maxobj = space.wrap(r_longlong((1 << (itemsize*8 - 1)) + - 1)) + w_minobj = space.wrap(r_longlong(-1) << (itemsize*8 - 1)) + else: + w_maxobj = space.wrap(r_ulonglong(1 << (itemsize*8)) - 1) + w_minobj = space.wrap(0) + items_w = items_w + [w_maxobj, w_minobj] + items_w = items_w + [dtype.w_box_type] + + w_tuple = space.newtuple(items_w) + space.setitem(w_typeinfo, space.wrap(k), w_tuple) + self.w_typeinfo = w_typeinfo def get_dtype_cache(space): return space.fromcache(DtypeCache) diff --git a/pypy/module/micronumpy/interp_iter.py b/pypy/module/micronumpy/interp_iter.py --- 
a/pypy/module/micronumpy/interp_iter.py +++ b/pypy/module/micronumpy/interp_iter.py @@ -2,7 +2,7 @@ from pypy.rlib import jit from pypy.rlib.objectmodel import instantiate from pypy.module.micronumpy.strides import calculate_broadcast_strides,\ - calculate_slice_strides, calculate_dot_strides + calculate_slice_strides, calculate_dot_strides, enumerate_chunks """ This is a mini-tutorial on iterators, strides, and memory layout. It assumes you are familiar with the terms, see @@ -42,28 +42,81 @@ we can go faster. All the calculations happen in next() -next_step_x() tries to do the iteration for a number of steps at once, +next_skip_x() tries to do the iteration for a number of steps at once, but then we cannot gaurentee that we only overflow one single shape dimension, perhaps we could overflow times in one big step. """ # structures to describe slicing -class Chunk(object): +class BaseChunk(object): + pass + +class RecordChunk(BaseChunk): + def __init__(self, name): + self.name = name + + def apply(self, arr): + from pypy.module.micronumpy.interp_numarray import W_NDimSlice + + arr = arr.get_concrete() + ofs, subdtype = arr.dtype.fields[self.name] + # strides backstrides are identical, ofs only changes start + return W_NDimSlice(arr.start + ofs, arr.strides[:], arr.backstrides[:], + arr.shape[:], arr, subdtype) + +class Chunks(BaseChunk): + def __init__(self, l): + self.l = l + + @jit.unroll_safe + def extend_shape(self, old_shape): + shape = [] + i = -1 + for i, c in enumerate_chunks(self.l): + if c.step != 0: + shape.append(c.lgt) + s = i + 1 + assert s >= 0 + return shape[:] + old_shape[s:] + + def apply(self, arr): + from pypy.module.micronumpy.interp_numarray import W_NDimSlice,\ + VirtualSlice, ConcreteArray + + shape = self.extend_shape(arr.shape) + if not isinstance(arr, ConcreteArray): + return VirtualSlice(arr, self, shape) + r = calculate_slice_strides(arr.shape, arr.start, arr.strides, + arr.backstrides, self.l) + _, start, strides, backstrides = r + 
return W_NDimSlice(start, strides[:], backstrides[:], + shape[:], arr) + + +class Chunk(BaseChunk): + axis_step = 1 + def __init__(self, start, stop, step, lgt): self.start = start self.stop = stop self.step = step self.lgt = lgt - def extend_shape(self, shape): - if self.step != 0: - shape.append(self.lgt) - def __repr__(self): return 'Chunk(%d, %d, %d, %d)' % (self.start, self.stop, self.step, self.lgt) +class NewAxisChunk(Chunk): + start = 0 + stop = 1 + step = 1 + lgt = 1 + axis_step = 0 + + def __init__(self): + pass + class BaseTransform(object): pass @@ -95,17 +148,19 @@ raise NotImplementedError class ArrayIterator(BaseIterator): - def __init__(self, size): + def __init__(self, size, element_size): self.offset = 0 self.size = size + self.element_size = element_size def next(self, shapelen): return self.next_skip_x(1) - def next_skip_x(self, ofs): + def next_skip_x(self, x): arr = instantiate(ArrayIterator) arr.size = self.size - arr.offset = self.offset + ofs + arr.offset = self.offset + x * self.element_size + arr.element_size = self.element_size return arr def next_no_increase(self, shapelen): @@ -152,7 +207,7 @@ elif isinstance(t, ViewTransform): r = calculate_slice_strides(self.res_shape, self.offset, self.strides, - self.backstrides, t.chunks) + self.backstrides, t.chunks.l) return ViewIterator(r[1], r[2], r[3], r[0]) @jit.unroll_safe diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -7,10 +7,10 @@ from pypy.module.micronumpy.appbridge import get_appbridge_cache from pypy.module.micronumpy.dot import multidim_dot, match_dot_shapes from pypy.module.micronumpy.interp_iter import (ArrayIterator, - SkipLastAxisIterator, Chunk, ViewIterator) -from pypy.module.micronumpy.strides import (calculate_slice_strides, - shape_agreement, find_shape_and_elems, get_shape_from_iterable, - calc_new_strides, to_coords) + 
SkipLastAxisIterator, Chunk, ViewIterator, Chunks, RecordChunk, + NewAxisChunk) +from pypy.module.micronumpy.strides import (shape_agreement, + find_shape_and_elems, get_shape_from_iterable, calc_new_strides, to_coords) from pypy.rlib import jit from pypy.rlib.rstring import StringBuilder from pypy.rpython.lltypesystem import lltype, rffi @@ -47,7 +47,7 @@ ) flat_set_driver = jit.JitDriver( greens=['shapelen', 'base'], - reds=['step', 'ai', 'lngth', 'arr', 'basei'], + reds=['step', 'lngth', 'ri', 'arr', 'basei'], name='numpy_flatset', ) @@ -79,8 +79,8 @@ dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) ) - size, shape = _find_size_and_shape(space, w_size) - return space.wrap(W_NDimArray(size, shape[:], dtype=dtype)) + shape = _find_shape(space, w_size) + return space.wrap(W_NDimArray(shape[:], dtype=dtype)) def _unaryop_impl(ufunc_name): def impl(self, space, w_out=None): @@ -235,8 +235,7 @@ return scalar_w(space, dtype, space.wrap(0)) # Do the dims match? 
out_shape, other_critical_dim = match_dot_shapes(space, self, other) - out_size = support.product(out_shape) - result = W_NDimArray(out_size, out_shape, dtype) + result = W_NDimArray(out_shape, dtype) # This is the place to add fpypy and blas return multidim_dot(space, self.get_concrete(), other.get_concrete(), result, dtype, @@ -255,7 +254,7 @@ return space.wrap(self.find_dtype().itemtype.get_element_size()) def descr_get_nbytes(self, space): - return space.wrap(self.size * self.find_dtype().itemtype.get_element_size()) + return space.wrap(self.size) @jit.unroll_safe def descr_get_shape(self, space): @@ -263,13 +262,16 @@ def descr_set_shape(self, space, w_iterable): new_shape = get_shape_from_iterable(space, - self.size, w_iterable) + support.product(self.shape), w_iterable) if isinstance(self, Scalar): return self.get_concrete().setshape(space, new_shape) def descr_get_size(self, space): - return space.wrap(self.size) + return space.wrap(self.get_size()) + + def get_size(self): + return self.size // self.find_dtype().get_size() def descr_copy(self, space): return self.copy(space) @@ -289,7 +291,7 @@ def empty_copy(self, space, dtype): shape = self.shape - return W_NDimArray(support.product(shape), shape[:], dtype, 'C') + return W_NDimArray(shape[:], dtype, 'C') def descr_len(self, space): if len(self.shape): @@ -330,7 +332,16 @@ """ The result of getitem/setitem is a single item if w_idx is a list of scalars that match the size of shape """ + if space.isinstance_w(w_idx, space.w_str): + return False shape_len = len(self.shape) + if space.isinstance_w(w_idx, space.w_tuple): + for w_item in space.fixedview(w_idx): + if (space.isinstance_w(w_item, space.w_slice) or + space.is_w(w_item, space.w_None)): + return False + elif space.is_w(w_idx, space.w_None): + return False if shape_len == 0: raise OperationError(space.w_IndexError, space.wrap( "0-d arrays can't be indexed")) @@ -346,43 +357,55 @@ if lgt > shape_len: raise OperationError(space.w_IndexError, 
space.wrap("invalid index")) - if lgt < shape_len: - return False - for w_item in space.fixedview(w_idx): - if space.isinstance_w(w_item, space.w_slice): - return False - return True + return lgt == shape_len @jit.unroll_safe def _prepare_slice_args(self, space, w_idx): + if space.isinstance_w(w_idx, space.w_str): + idx = space.str_w(w_idx) + dtype = self.find_dtype() + if not dtype.is_record_type() or idx not in dtype.fields: + raise OperationError(space.w_ValueError, space.wrap( + "field named %s not defined" % idx)) + return RecordChunk(idx) if (space.isinstance_w(w_idx, space.w_int) or space.isinstance_w(w_idx, space.w_slice)): - return [Chunk(*space.decode_index4(w_idx, self.shape[0]))] - return [Chunk(*space.decode_index4(w_item, self.shape[i])) for i, w_item in - enumerate(space.fixedview(w_idx))] + return Chunks([Chunk(*space.decode_index4(w_idx, self.shape[0]))]) + elif space.is_w(w_idx, space.w_None): + return Chunks([NewAxisChunk()]) + result = [] + i = 0 + for w_item in space.fixedview(w_idx): + if space.is_w(w_item, space.w_None): + result.append(NewAxisChunk()) + else: + result.append(Chunk(*space.decode_index4(w_item, + self.shape[i]))) + i += 1 + return Chunks(result) - def count_all_true(self, arr): - sig = arr.find_sig() - frame = sig.create_frame(arr) - shapelen = len(arr.shape) + def count_all_true(self): + sig = self.find_sig() + frame = sig.create_frame(self) + shapelen = len(self.shape) s = 0 iter = None while not frame.done(): - count_driver.jit_merge_point(arr=arr, frame=frame, iter=iter, s=s, + count_driver.jit_merge_point(arr=self, frame=frame, iter=iter, s=s, shapelen=shapelen) iter = frame.get_final_iter() - s += arr.dtype.getitem_bool(arr.storage, iter.offset) + s += self.dtype.getitem_bool(self, iter.offset) frame.next(shapelen) return s def getitem_filter(self, space, arr): concr = arr.get_concrete() - if concr.size > self.size: + if concr.get_size() > self.get_size(): raise OperationError(space.w_IndexError, space.wrap("index out of 
range for array")) - size = self.count_all_true(concr) - res = W_NDimArray(size, [size], self.find_dtype()) - ri = ArrayIterator(size) + size = concr.count_all_true() + res = W_NDimArray([size], self.find_dtype()) + ri = res.create_iter() shapelen = len(self.shape) argi = concr.create_iter() sig = self.find_sig() @@ -392,7 +415,7 @@ filter_driver.jit_merge_point(concr=concr, argi=argi, ri=ri, frame=frame, v=v, res=res, sig=sig, shapelen=shapelen, self=self) - if concr.dtype.getitem_bool(concr.storage, argi.offset): + if concr.dtype.getitem_bool(concr, argi.offset): v = sig.eval(frame, self) res.setitem(ri.offset, v) ri = ri.next(1) @@ -402,23 +425,6 @@ frame.next(shapelen) return res - def setitem_filter(self, space, idx, val): - size = self.count_all_true(idx) - arr = SliceArray([size], self.dtype, self, val) - sig = arr.find_sig() - shapelen = len(self.shape) - frame = sig.create_frame(arr) - idxi = idx.create_iter() - while not frame.done(): - filter_set_driver.jit_merge_point(idx=idx, idxi=idxi, sig=sig, - frame=frame, arr=arr, - shapelen=shapelen) - if idx.dtype.getitem_bool(idx.storage, idxi.offset): - sig.eval(frame, arr) - frame.next_from_second(1) - frame.next_first(shapelen) - idxi = idxi.next(shapelen) - def descr_getitem(self, space, w_idx): if (isinstance(w_idx, BaseArray) and w_idx.shape == self.shape and w_idx.find_dtype().is_bool_type()): @@ -428,7 +434,24 @@ item = concrete._index_of_single_item(space, w_idx) return concrete.getitem(item) chunks = self._prepare_slice_args(space, w_idx) - return self.create_slice(chunks) + return chunks.apply(self) + + def setitem_filter(self, space, idx, val): + size = idx.count_all_true() + arr = SliceArray([size], self.dtype, self, val) + sig = arr.find_sig() + shapelen = len(self.shape) + frame = sig.create_frame(arr) + idxi = idx.create_iter() + while not frame.done(): + filter_set_driver.jit_merge_point(idx=idx, idxi=idxi, sig=sig, + frame=frame, arr=arr, + shapelen=shapelen) + if idx.dtype.getitem_bool(idx, 
idxi.offset): + sig.eval(frame, arr) + frame.next_from_second(1) + frame.next_first(shapelen) + idxi = idxi.next(shapelen) def descr_setitem(self, space, w_idx, w_value): self.invalidated() @@ -446,26 +469,9 @@ if not isinstance(w_value, BaseArray): w_value = convert_to_array(space, w_value) chunks = self._prepare_slice_args(space, w_idx) - view = self.create_slice(chunks).get_concrete() + view = chunks.apply(self).get_concrete() view.setslice(space, w_value) - @jit.unroll_safe - def create_slice(self, chunks): - shape = [] - i = -1 - for i, chunk in enumerate(chunks): - chunk.extend_shape(shape) - s = i + 1 - assert s >= 0 - shape += self.shape[s:] - if not isinstance(self, ConcreteArray): - return VirtualSlice(self, chunks, shape) - r = calculate_slice_strides(self.shape, self.start, self.strides, - self.backstrides, chunks) - _, start, strides, backstrides = r - return W_NDimSlice(start, strides[:], backstrides[:], - shape[:], self) - def descr_reshape(self, space, args_w): """reshape(...) a.reshape(shape) @@ -482,13 +488,16 @@ w_shape = args_w[0] else: w_shape = space.newtuple(args_w) - new_shape = get_shape_from_iterable(space, self.size, w_shape) + new_shape = get_shape_from_iterable(space, support.product(self.shape), + w_shape) return self.reshape(space, new_shape) def reshape(self, space, new_shape): concrete = self.get_concrete() # Since we got to here, prod(new_shape) == self.size - new_strides = calc_new_strides(new_shape, concrete.shape, + new_strides = None + if self.size > 0: + new_strides = calc_new_strides(new_shape, concrete.shape, concrete.strides, concrete.order) if new_strides: # We can create a view, strides somehow match up. 
@@ -518,7 +527,7 @@ def descr_mean(self, space, w_axis=None, w_out=None): if space.is_w(w_axis, space.w_None): w_axis = space.wrap(-1) - w_denom = space.wrap(self.size) + w_denom = space.wrap(support.product(self.shape)) else: dim = space.int_w(w_axis) w_denom = space.wrap(self.shape[dim]) @@ -537,7 +546,7 @@ concr.fill(space, w_value) def descr_nonzero(self, space): - if self.size > 1: + if self.get_size() > 1: raise OperationError(space.w_ValueError, space.wrap( "The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()")) concr = self.get_concrete_or_scalar() @@ -616,8 +625,7 @@ space.wrap("axis unsupported for take")) index_i = index.create_iter() res_shape = index.shape - size = support.product(res_shape) - res = W_NDimArray(size, res_shape[:], concr.dtype, concr.order) + res = W_NDimArray(res_shape[:], concr.dtype, concr.order) res_i = res.create_iter() shapelen = len(index.shape) sig = concr.find_sig() @@ -656,6 +664,11 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "non-int arg not supported")) + def descr_tostring(self, space): + ra = ToStringArray(self) + loop.compute(ra) + return space.wrap(ra.s.build()) + def compute_first_step(self, sig, frame): pass @@ -677,8 +690,7 @@ """ Intermediate class representing a literal. 
""" - size = 1 - _attrs_ = ["dtype", "value", "shape"] + _attrs_ = ["dtype", "value", "shape", "size"] def __init__(self, dtype, value): self.shape = [] @@ -686,6 +698,7 @@ self.dtype = dtype assert isinstance(value, interp_boxes.W_GenericBox) self.value = value + self.size = dtype.get_size() def find_dtype(self): return self.dtype @@ -703,8 +716,7 @@ return self def reshape(self, space, new_shape): - size = support.product(new_shape) - res = W_NDimArray(size, new_shape, self.dtype, 'C') + res = W_NDimArray(new_shape, self.dtype, 'C') res.setitem(0, self.value) return res @@ -718,6 +730,7 @@ self.res_dtype = res_dtype self.name = name self.res = out_arg + self.size = support.product(self.shape) * res_dtype.get_size() def _del_sources(self): # Function for deleting references to source arrays, @@ -725,8 +738,7 @@ raise NotImplementedError def compute(self): - ra = ResultArray(self, self.size, self.shape, self.res_dtype, - self.res) + ra = ResultArray(self, self.shape, self.res_dtype, self.res) loop.compute(ra) if self.res: broadcast_dims = len(self.res.shape) - len(self.shape) @@ -759,7 +771,6 @@ def __init__(self, child, chunks, shape): self.child = child self.chunks = chunks - self.size = support.product(shape) VirtualArray.__init__(self, 'slice', shape, child.find_dtype()) def create_sig(self): @@ -771,7 +782,7 @@ def force_if_needed(self): if self.forced_result is None: concr = self.child.get_concrete() - self.forced_result = concr.create_slice(self.chunks) + self.forced_result = self.chunks.apply(concr) def _del_sources(self): self.child = None @@ -805,8 +816,6 @@ """ Intermediate class for performing binary operations. 
""" - _immutable_fields_ = ['left', 'right'] - def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, left, right, out_arg=None): VirtualArray.__init__(self, name, shape, res_dtype, out_arg) @@ -814,7 +823,6 @@ self.left = left self.right = right self.calc_dtype = calc_dtype - self.size = support.product(self.shape) def _del_sources(self): self.left = None @@ -823,8 +831,6 @@ def create_sig(self): if self.forced_result is not None: return self.forced_result.create_sig() - assert isinstance(self.left, BaseArray) - assert isinstance(self.right, BaseArray) if self.shape != self.left.shape and self.shape != self.right.shape: return signature.BroadcastBoth(self.ufunc, self.name, self.calc_dtype, @@ -844,13 +850,9 @@ self.left.create_sig(), self.right.create_sig()) class ResultArray(Call2): - def __init__(self, child, size, shape, dtype, res=None, order='C'): + def __init__(self, child, shape, dtype, res=None, order='C'): if res is None: - res = W_NDimArray(size, shape, dtype, order) - else: - assert isinstance(res, BaseArray) - #Make sure it is not a virtual array i.e. 
out=a+a - res = res.get_concrete() + res = W_NDimArray(shape, dtype, order) Call2.__init__(self, None, 'assign', shape, dtype, dtype, res, child) def create_sig(self): @@ -862,6 +864,21 @@ self.left.create_sig(), self.right.create_sig()) return sig +class ToStringArray(Call1): + def __init__(self, child): + dtype = child.find_dtype() + self.item_size = dtype.itemtype.get_element_size() + self.s = StringBuilder(child.size * self.item_size) + Call1.__init__(self, None, 'tostring', child.shape, dtype, dtype, + child) + self.res = W_NDimArray([1], dtype, 'C') + self.res_casted = rffi.cast(rffi.CArrayPtr(lltype.Char), + self.res.storage) + + def create_sig(self): + return signature.ToStringSignature(self.calc_dtype, + self.values.create_sig()) + def done_if_true(dtype, val): return dtype.itemtype.bool(val) @@ -894,8 +911,6 @@ self.right.create_sig(), done_func) class AxisReduce(Call2): - _immutable_fields_ = ['left', 'right'] - def __init__(self, ufunc, name, identity, shape, dtype, left, right, dim): Call2.__init__(self, ufunc, name, shape, dtype, dtype, left, right) @@ -935,13 +950,13 @@ """ _immutable_fields_ = ['storage'] - def __init__(self, size, shape, dtype, order='C', parent=None): - self.size = size + def __init__(self, shape, dtype, order='C', parent=None): self.parent = parent + self.size = support.product(shape) * dtype.get_size() if parent is not None: self.storage = parent.storage else: - self.storage = dtype.malloc(size) + self.storage = dtype.itemtype.malloc(self.size) self.order = order self.dtype = dtype if self.strides is None: @@ -960,13 +975,14 @@ return self.dtype def getitem(self, item): - return self.dtype.getitem(self.storage, item) + return self.dtype.getitem(self, item) def setitem(self, item, value): self.invalidated() - self.dtype.setitem(self.storage, item, value.convert_to(self.dtype)) + self.dtype.setitem(self, item, value) def calc_strides(self, shape): + dtype = self.find_dtype() strides = [] backstrides = [] s = 1 @@ -974,8 +990,8 @@ 
if self.order == 'C': shape_rev.reverse() for sh in shape_rev: - strides.append(s) - backstrides.append(s * (sh - 1)) + strides.append(s * dtype.get_size()) + backstrides.append(s * (sh - 1) * dtype.get_size()) s *= sh if self.order == 'C': strides.reverse() @@ -1023,9 +1039,9 @@ shapelen = len(self.shape) if shapelen == 1: rffi.c_memcpy( - rffi.ptradd(self.storage, self.start * itemsize), - rffi.ptradd(w_value.storage, w_value.start * itemsize), - self.size * itemsize + rffi.ptradd(self.storage, self.start), + rffi.ptradd(w_value.storage, w_value.start), + self.size ) else: dest = SkipLastAxisIterator(self) @@ -1040,7 +1056,7 @@ dest.next() def copy(self, space): - array = W_NDimArray(self.size, self.shape[:], self.dtype, self.order) + array = W_NDimArray(self.shape[:], self.dtype, self.order) array.setslice(space, self) return array @@ -1054,14 +1070,15 @@ class W_NDimSlice(ViewArray): - def __init__(self, start, strides, backstrides, shape, parent): + def __init__(self, start, strides, backstrides, shape, parent, dtype=None): assert isinstance(parent, ConcreteArray) if isinstance(parent, W_NDimSlice): parent = parent.parent self.strides = strides self.backstrides = backstrides - ViewArray.__init__(self, support.product(shape), shape, parent.dtype, - parent.order, parent) + if dtype is None: + dtype = parent.dtype + ViewArray.__init__(self, shape, dtype, parent.order, parent) self.start = start def create_iter(self, transforms=None): @@ -1071,18 +1088,19 @@ def setshape(self, space, new_shape): if len(self.shape) < 1: return - elif len(self.shape) < 2: + elif len(self.shape) < 2 or self.size < 1: # TODO: this code could be refactored into calc_strides # but then calc_strides would have to accept a stepping factor strides = [] backstrides = [] - s = self.strides[0] + dtype = self.find_dtype() + s = self.strides[0] // dtype.get_size() if self.order == 'C': new_shape.reverse() for sh in new_shape: - strides.append(s) - backstrides.append(s * (sh - 1)) - s *= sh + 
strides.append(s * dtype.get_size()) + backstrides.append(s * (sh - 1) * dtype.get_size()) + s *= max(1, sh) if self.order == 'C': strides.reverse() backstrides.reverse() @@ -1109,14 +1127,16 @@ """ def setitem(self, item, value): self.invalidated() - self.dtype.setitem(self.storage, item, value) + self.dtype.setitem(self, item, value) def setshape(self, space, new_shape): self.shape = new_shape self.calc_strides(new_shape) def create_iter(self, transforms=None): - return ArrayIterator(self.size).apply_transformations(self, transforms) + esize = self.find_dtype().get_size() + return ArrayIterator(self.size, esize).apply_transformations(self, + transforms) def create_sig(self): return signature.ArraySignature(self.dtype) @@ -1124,18 +1144,13 @@ def __del__(self): lltype.free(self.storage, flavor='raw', track_allocation=False) -def _find_size_and_shape(space, w_size): +def _find_shape(space, w_size): if space.isinstance_w(w_size, space.w_int): - size = space.int_w(w_size) - shape = [size] - else: - size = 1 - shape = [] - for w_item in space.fixedview(w_size): - item = space.int_w(w_item) - size *= item - shape.append(item) - return size, shape + return [space.int_w(w_size)] + shape = [] + for w_item in space.fixedview(w_size): + shape.append(space.int_w(w_item)) + return shape @unwrap_spec(subok=bool, copy=bool, ownmaskna=bool) def array(space, w_item_or_iterable, w_dtype=None, w_order=None, @@ -1169,28 +1184,28 @@ if copy: return w_item_or_iterable.copy(space) return w_item_or_iterable - shape, elems_w = find_shape_and_elems(space, w_item_or_iterable) + if w_dtype is None or space.is_w(w_dtype, space.w_None): + dtype = None + else: + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) + shape, elems_w = find_shape_and_elems(space, w_item_or_iterable, dtype) # they come back in C order - size = len(elems_w) - if w_dtype is None or space.is_w(w_dtype, space.w_None): - w_dtype = None + if dtype is None: 
for w_elem in elems_w: - w_dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, - w_dtype) - if w_dtype is interp_dtype.get_dtype_cache(space).w_float64dtype: + dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, + dtype) + if dtype is interp_dtype.get_dtype_cache(space).w_float64dtype: break - if w_dtype is None: - w_dtype = space.w_None - dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) - ) - arr = W_NDimArray(size, shape[:], dtype=dtype, order=order) + if dtype is None: + dtype = interp_dtype.get_dtype_cache(space).w_float64dtype + arr = W_NDimArray(shape[:], dtype=dtype, order=order) shapelen = len(shape) - arr_iter = ArrayIterator(arr.size) + arr_iter = arr.create_iter() # XXX we might want to have a jitdriver here for i in range(len(elems_w)): w_elem = elems_w[i] - dtype.setitem(arr.storage, arr_iter.offset, + dtype.setitem(arr, arr_iter.offset, dtype.coerce(space, w_elem)) arr_iter = arr_iter.next(shapelen) return arr @@ -1199,22 +1214,22 @@ dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) ) - size, shape = _find_size_and_shape(space, w_size) + shape = _find_shape(space, w_size) if not shape: return scalar_w(space, dtype, space.wrap(0)) - return space.wrap(W_NDimArray(size, shape[:], dtype=dtype)) + return space.wrap(W_NDimArray(shape[:], dtype=dtype)) def ones(space, w_size, w_dtype=None): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) ) - size, shape = _find_size_and_shape(space, w_size) + shape = _find_shape(space, w_size) if not shape: return scalar_w(space, dtype, space.wrap(1)) - arr = W_NDimArray(size, shape[:], dtype=dtype) + arr = W_NDimArray(shape[:], dtype=dtype) one = dtype.box(1) - arr.dtype.fill(arr.storage, one, 0, size) + arr.dtype.fill(arr.storage, one, 0, arr.size) return space.wrap(arr) @unwrap_spec(arr=BaseArray, skipna=bool, 
keepdims=bool) @@ -1262,13 +1277,13 @@ "array dimensions must agree except for axis being concatenated")) elif i == axis: shape[i] += axis_size - res = W_NDimArray(support.product(shape), shape, dtype, 'C') + res = W_NDimArray(shape, dtype, 'C') chunks = [Chunk(0, i, 1, i) for i in shape] axis_start = 0 for arr in args_w: chunks[axis] = Chunk(axis_start, axis_start + arr.shape[axis], 1, arr.shape[axis]) - res.create_slice(chunks).setslice(space, arr) + Chunks(chunks).apply(res).setslice(space, arr) axis_start += arr.shape[axis] return res @@ -1356,6 +1371,7 @@ std = interp2app(BaseArray.descr_std), fill = interp2app(BaseArray.descr_fill), + tostring = interp2app(BaseArray.descr_tostring), copy = interp2app(BaseArray.descr_copy), flatten = interp2app(BaseArray.descr_flatten), @@ -1378,7 +1394,7 @@ self.iter = sig.create_frame(arr).get_final_iter() self.base = arr self.index = 0 - ViewArray.__init__(self, arr.size, [arr.size], arr.dtype, arr.order, + ViewArray.__init__(self, [arr.get_size()], arr.dtype, arr.order, arr) def descr_next(self, space): @@ -1393,7 +1409,7 @@ return self def descr_len(self, space): - return space.wrap(self.size) + return space.wrap(self.get_size()) def descr_index(self, space): return space.wrap(self.index) @@ -1411,28 +1427,26 @@ raise OperationError(space.w_IndexError, space.wrap('unsupported iterator index')) base = self.base - start, stop, step, lngth = space.decode_index4(w_idx, base.size) + start, stop, step, lngth = space.decode_index4(w_idx, base.get_size()) # setslice would have been better, but flat[u:v] for arbitrary # shapes of array a cannot be represented as a[x1:x2, y1:y2] basei = ViewIterator(base.start, base.strides, - base.backstrides,base.shape) + base.backstrides, base.shape) shapelen = len(base.shape) basei = basei.next_skip_x(shapelen, start) if lngth <2: return base.getitem(basei.offset) - ri = ArrayIterator(lngth) - res = W_NDimArray(lngth, [lngth], base.dtype, - base.order) + res = W_NDimArray([lngth], base.dtype, 
base.order) + ri = res.create_iter() while not ri.done(): flat_get_driver.jit_merge_point(shapelen=shapelen, base=base, basei=basei, step=step, res=res, - ri=ri, - ) + ri=ri) w_val = base.getitem(basei.offset) - res.setitem(ri.offset,w_val) + res.setitem(ri.offset, w_val) basei = basei.next_skip_x(shapelen, step) ri = ri.next(shapelen) return res @@ -1443,27 +1457,28 @@ raise OperationError(space.w_IndexError, space.wrap('unsupported iterator index')) base = self.base - start, stop, step, lngth = space.decode_index4(w_idx, base.size) + start, stop, step, lngth = space.decode_index4(w_idx, base.get_size()) arr = convert_to_array(space, w_value) - ai = 0 + ri = arr.create_iter() basei = ViewIterator(base.start, base.strides, - base.backstrides,base.shape) + base.backstrides, base.shape) shapelen = len(base.shape) basei = basei.next_skip_x(shapelen, start) while lngth > 0: flat_set_driver.jit_merge_point(shapelen=shapelen, - basei=basei, - base=base, - step=step, - arr=arr, - ai=ai, - lngth=lngth, - ) - v = arr.getitem(ai).convert_to(base.dtype) + basei=basei, + base=base, + step=step, + arr=arr, + lngth=lngth, + ri=ri) + v = arr.getitem(ri.offset).convert_to(base.dtype) base.setitem(basei.offset, v) # need to repeat input values until all assignments are done - ai = (ai + 1) % arr.size basei = basei.next_skip_x(shapelen, step) + ri = ri.next(shapelen) + # WTF is numpy thinking? 
+ ri.offset %= arr.size lngth -= 1 def create_sig(self): @@ -1471,9 +1486,9 @@ def create_iter(self, transforms=None): return ViewIterator(self.base.start, self.base.strides, - self.base.backstrides, - self.base.shape).apply_transformations(self.base, - transforms) + self.base.backstrides, + self.base.shape).apply_transformations(self.base, + transforms) def descr_base(self, space): return space.wrap(self.base) diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -51,9 +51,11 @@ raise OperationError(space.w_ValueError, space.wrap( "string is smaller than requested size")) - a = W_NDimArray(num_items, [num_items], dtype=dtype) - for i, val in enumerate(items): - a.dtype.setitem(a.storage, i, val) + a = W_NDimArray([num_items], dtype=dtype) + ai = a.create_iter() + for val in items: + a.dtype.setitem(a, ai.offset, val) + ai = ai.next(1) return space.wrap(a) @@ -61,6 +63,7 @@ from pypy.module.micronumpy.interp_numarray import W_NDimArray itemsize = dtype.itemtype.get_element_size() + assert itemsize >= 0 if count == -1: count = length / itemsize if length % itemsize != 0: @@ -71,20 +74,23 @@ raise OperationError(space.w_ValueError, space.wrap( "string is smaller than requested size")) - a = W_NDimArray(count, [count], dtype=dtype) - fromstring_loop(a, count, dtype, itemsize, s) + a = W_NDimArray([count], dtype=dtype) + fromstring_loop(a, dtype, itemsize, s) return space.wrap(a) -fromstring_driver = jit.JitDriver(greens=[], reds=['count', 'i', 'itemsize', - 'dtype', 's', 'a']) +fromstring_driver = jit.JitDriver(greens=[], reds=['i', 'itemsize', + 'dtype', 'ai', 's', 'a']) -def fromstring_loop(a, count, dtype, itemsize, s): +def fromstring_loop(a, dtype, itemsize, s): i = 0 - while i < count: - fromstring_driver.jit_merge_point(a=a, count=count, dtype=dtype, - itemsize=itemsize, s=s, i=i) + ai = a.create_iter() + while not 
ai.done(): + fromstring_driver.jit_merge_point(a=a, dtype=dtype, + itemsize=itemsize, s=s, i=i, + ai=ai) val = dtype.itemtype.runpack_str(s[i*itemsize:i*itemsize + itemsize]) - a.dtype.setitem(a.storage, i, val) + a.dtype.setitem(a, ai.offset, val) + ai = ai.next(1) i += 1 @unwrap_spec(s=str, count=int, sep=str) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -28,7 +28,6 @@ return self.identity def descr_call(self, space, __args__): - from interp_numarray import BaseArray args_w, kwds_w = __args__.unpack() # it occurs to me that we don't support any datatypes that # require casting, change it later when we do @@ -189,7 +188,7 @@ # "mismatched dtypes")) return self.do_axis_reduce(obj, out.find_dtype(), axis, out) else: - result = W_NDimArray(support.product(shape), shape, dtype) + result = W_NDimArray(shape, dtype) return self.do_axis_reduce(obj, dtype, axis, result) if out: if len(out.shape)>0: @@ -515,6 +514,9 @@ ("greater_equal", "ge", 2, {"comparison_func": True}), ("isnan", "isnan", 1, {"bool_result": True}), ("isinf", "isinf", 1, {"bool_result": True}), + ("isneginf", "isneginf", 1, {"bool_result": True}), + ("isposinf", "isposinf", 1, {"bool_result": True}), + ("isfinite", "isfinite", 1, {"bool_result": True}), ('logical_and', 'logical_and', 2, {'comparison_func': True, 'identity': 1}), @@ -532,12 +534,16 @@ ("negative", "neg", 1), ("absolute", "abs", 1), ("sign", "sign", 1, {"promote_bools": True}), + ("signbit", "signbit", 1, {"bool_result": True}), ("reciprocal", "reciprocal", 1), ("fabs", "fabs", 1, {"promote_to_float": True}), + ("fmod", "fmod", 2, {"promote_to_float": True}), ("floor", "floor", 1, {"promote_to_float": True}), ("ceil", "ceil", 1, {"promote_to_float": True}), ("exp", "exp", 1, {"promote_to_float": True}), + ("exp2", "exp2", 1, {"promote_to_float": True}), + ("expm1", "expm1", 1, {"promote_to_float": 
True}), ('sqrt', 'sqrt', 1, {'promote_to_float': True}), @@ -547,6 +553,7 @@ ("arcsin", "arcsin", 1, {"promote_to_float": True}), ("arccos", "arccos", 1, {"promote_to_float": True}), ("arctan", "arctan", 1, {"promote_to_float": True}), + ("arctan2", "arctan2", 2, {"promote_to_float": True}), ("sinh", "sinh", 1, {"promote_to_float": True}), ("cosh", "cosh", 1, {"promote_to_float": True}), ("tanh", "tanh", 1, {"promote_to_float": True}), @@ -561,6 +568,8 @@ ("log2", "log2", 1, {"promote_to_float": True}), ("log10", "log10", 1, {"promote_to_float": True}), ("log1p", "log1p", 1, {"promote_to_float": True}), + ("logaddexp", "logaddexp", 2, {"promote_to_float": True}), + ("logaddexp2", "logaddexp2", 2, {"promote_to_float": True}), ]: self.add_ufunc(space, *ufunc_def) diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -4,6 +4,7 @@ ViewTransform, BroadcastTransform from pypy.tool.pairtype import extendabletype from pypy.module.micronumpy.loop import ComputationDone +from pypy.rlib import jit """ Signature specifies both the numpy expression that has been constructed and the assembler to be compiled. 
This is a very important observation - @@ -142,11 +143,10 @@ from pypy.module.micronumpy.interp_numarray import ConcreteArray concr = arr.get_concrete() assert isinstance(concr, ConcreteArray) - storage = concr.storage if self.iter_no >= len(iterlist): iterlist.append(concr.create_iter(transforms)) if self.array_no >= len(arraylist): - arraylist.append(storage) + arraylist.append(concr) def eval(self, frame, arr): iter = frame.iterators[self.iter_no] @@ -352,6 +352,20 @@ self.left._create_iter(iterlist, arraylist, arr.left, transforms) self.right._create_iter(iterlist, arraylist, arr.right, rtransforms) +class ToStringSignature(Call1): + def __init__(self, dtype, child): + Call1.__init__(self, None, 'tostring', dtype, child) + + @jit.unroll_safe + def eval(self, frame, arr): + from pypy.module.micronumpy.interp_numarray import ToStringArray + + assert isinstance(arr, ToStringArray) + arr.res.setitem(0, self.child.eval(frame, arr.values).convert_to( + self.dtype)) + for i in range(arr.item_size): + arr.s.append(arr.res_casted[i]) + class BroadcastLeft(Call2): def _invent_numbering(self, cache, allnumbers): self.left._invent_numbering(new_cache(), allnumbers) diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -1,6 +1,14 @@ from pypy.rlib import jit from pypy.interpreter.error import OperationError +def enumerate_chunks(chunks): + result = [] + i = -1 + for chunk in chunks: + i += chunk.axis_step + result.append((i, chunk)) + return result + @jit.look_inside_iff(lambda shape, start, strides, backstrides, chunks: jit.isconstant(len(chunks)) ) @@ -10,7 +18,7 @@ rstart = start rshape = [] i = -1 - for i, chunk in enumerate(chunks): + for i, chunk in enumerate_chunks(chunks): if chunk.step != 0: rstrides.append(strides[i] * chunk.step) rbackstrides.append(strides[i] * (chunk.lgt - 1) * chunk.step) @@ -38,22 +46,31 @@ rbackstrides = [0] * (len(res_shape) - 
len(orig_shape)) + rbackstrides return rstrides, rbackstrides -def find_shape_and_elems(space, w_iterable): +def is_single_elem(space, w_elem, is_rec_type): + if (is_rec_type and space.isinstance_w(w_elem, space.w_tuple)): + return True + if space.issequence_w(w_elem): + return False + return True + +def find_shape_and_elems(space, w_iterable, dtype): shape = [space.len_w(w_iterable)] batch = space.listview(w_iterable) + is_rec_type = dtype is not None and dtype.is_record_type() while True: new_batch = [] if not batch: return shape, [] - if not space.issequence_w(batch[0]): - for elem in batch: - if space.issequence_w(elem): + if is_single_elem(space, batch[0], is_rec_type): + for w_elem in batch: + if not is_single_elem(space, w_elem, is_rec_type): raise OperationError(space.w_ValueError, space.wrap( "setting an array element with a sequence")) return shape, batch size = space.len_w(batch[0]) for w_elem in batch: - if not space.issequence_w(w_elem) or space.len_w(w_elem) != size: + if (is_single_elem(space, w_elem, is_rec_type) or + space.len_w(w_elem) != size): raise OperationError(space.w_ValueError, space.wrap( "setting an array element with a sequence")) new_batch += space.listview(w_elem) diff --git a/pypy/module/micronumpy/test/test_base.py b/pypy/module/micronumpy/test/test_base.py --- a/pypy/module/micronumpy/test/test_base.py +++ b/pypy/module/micronumpy/test/test_base.py @@ -4,6 +4,8 @@ from pypy.module.micronumpy.interp_ufuncs import (find_binop_result_dtype, find_unaryop_result_dtype) from pypy.module.micronumpy.interp_boxes import W_Float64Box +from pypy.module.micronumpy.interp_dtype import nonnative_byteorder_prefix,\ + byteorder_prefix from pypy.conftest import option import sys @@ -15,14 +17,16 @@ sys.modules['numpypy'] = numpy sys.modules['_numpypy'] = numpy cls.space = gettestobjspace(usemodules=['micronumpy']) + cls.w_non_native_prefix = cls.space.wrap(nonnative_byteorder_prefix) + cls.w_native_prefix = cls.space.wrap(byteorder_prefix) class 
TestSignature(object): def test_binop_signature(self, space): float64_dtype = get_dtype_cache(space).w_float64dtype bool_dtype = get_dtype_cache(space).w_booldtype - ar = W_NDimArray(10, [10], dtype=float64_dtype) - ar2 = W_NDimArray(10, [10], dtype=float64_dtype) + ar = W_NDimArray([10], dtype=float64_dtype) + ar2 = W_NDimArray([10], dtype=float64_dtype) v1 = ar.descr_add(space, ar) v2 = ar.descr_add(space, Scalar(float64_dtype, W_Float64Box(2.0))) sig1 = v1.find_sig() @@ -40,7 +44,7 @@ v4 = ar.descr_add(space, ar) assert v1.find_sig() is v4.find_sig() - bool_ar = W_NDimArray(10, [10], dtype=bool_dtype) + bool_ar = W_NDimArray([10], dtype=bool_dtype) v5 = ar.descr_add(space, bool_ar) assert v5.find_sig() is not v1.find_sig() assert v5.find_sig() is not v2.find_sig() @@ -57,7 +61,7 @@ def test_slice_signature(self, space): float64_dtype = get_dtype_cache(space).w_float64dtype - ar = W_NDimArray(10, [10], dtype=float64_dtype) + ar = W_NDimArray([10], dtype=float64_dtype) v1 = ar.descr_getitem(space, space.wrap(slice(1, 3, 1))) v2 = ar.descr_getitem(space, space.wrap(slice(4, 6, 1))) assert v1.find_sig() is v2.find_sig() diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -1,5 +1,7 @@ +import py +from pypy.conftest import option from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest - +from pypy.interpreter.gateway import interp2app class AppTestDtypes(BaseNumpyAppTest): def test_dtype(self): @@ -12,7 +14,10 @@ assert dtype(d) is d assert dtype(None) is dtype(float) assert dtype('int8').name == 'int8' + assert dtype(int).fields is None + assert dtype(int).names is None raises(TypeError, dtype, 1042) + raises(KeyError, 'dtype(int)["asdasd"]') def test_dtype_eq(self): from _numpypy import dtype @@ -53,13 +58,13 @@ assert a[i] is True_ def test_copy_array_with_dtype(self): - from _numpypy import array, False_, 
True_, int64 + from _numpypy import array, False_, longlong a = array([0, 1, 2, 3], dtype=long) # int on 64-bit, long in 32-bit - assert isinstance(a[0], int64) + assert isinstance(a[0], longlong) b = a.copy() - assert isinstance(b[0], int64) + assert isinstance(b[0], longlong) a = array([0, 1, 2, 3], dtype=bool) assert a[0] is False_ @@ -81,17 +86,17 @@ assert a[i] is True_ def test_zeros_long(self): - from _numpypy import zeros, int64 + from _numpypy import zeros, longlong a = zeros(10, dtype=long) for i in range(10): - assert isinstance(a[i], int64) + assert isinstance(a[i], longlong) assert a[1] == 0 def test_ones_long(self): - from _numpypy import ones, int64 + from _numpypy import ones, longlong a = ones(10, dtype=long) for i in range(10): - assert isinstance(a[i], int64) + assert isinstance(a[i], longlong) assert a[1] == 1 def test_overflow(self): @@ -181,17 +186,18 @@ assert dtype("float") is dtype(float) -class AppTestTypes(BaseNumpyAppTest): +class AppTestTypes(BaseNumpyAppTest): def test_abstract_types(self): import _numpypy as numpy raises(TypeError, numpy.generic, 0) raises(TypeError, numpy.number, 0) raises(TypeError, numpy.integer, 0) exc = raises(TypeError, numpy.signedinteger, 0) - assert str(exc.value) == "cannot create 'signedinteger' instances" + assert 'cannot create' in str(exc.value) + assert 'signedinteger' in str(exc.value) exc = raises(TypeError, numpy.unsignedinteger, 0) - assert str(exc.value) == "cannot create 'unsignedinteger' instances" - + assert 'cannot create' in str(exc.value) + assert 'unsignedinteger' in str(exc.value) raises(TypeError, numpy.floating, 0) raises(TypeError, numpy.inexact, 0) @@ -404,10 +410,29 @@ assert issubclass(int64, int) assert int_ is int64 + def test_various_types(self): + import _numpypy as numpy + import sys + + assert numpy.int16 is numpy.short + assert numpy.int8 is numpy.byte + assert numpy.bool_ is numpy.bool8 + if sys.maxint == (1 << 63) - 1: + assert numpy.intp is numpy.int64 + else: + assert 
numpy.intp is numpy.int32 + + def test_mro(self): + import _numpypy as numpy + + assert numpy.int16.__mro__ == (numpy.int16, numpy.signedinteger, + numpy.integer, numpy.number, + numpy.generic, object) + assert numpy.bool_.__mro__ == (numpy.bool_, numpy.generic, object) + def test_operators(self): from operator import truediv from _numpypy import float64, int_, True_, False_ - assert 5 / int_(2) == int_(2) assert truediv(int_(3), int_(2)) == float64(1.5) assert truediv(3, int_(2)) == float64(1.5) @@ -427,9 +452,115 @@ assert int_(3) ^ int_(5) == int_(6) assert True_ ^ False_ is True_ assert 5 ^ int_(3) == int_(6) - assert +int_(3) == int_(3) assert ~int_(3) == int_(-4) - raises(TypeError, lambda: float64(3) & 1) + def test_alternate_constructs(self): + from _numpypy import dtype + nnp = self.non_native_prefix + byteorder = self.native_prefix + assert dtype('i8') == dtype(byteorder + 'i8') == dtype('=i8') # XXX should be equal == dtype(long) + assert dtype(nnp + 'i8') != dtype('i8') + assert dtype(nnp + 'i8').byteorder == nnp + assert dtype('=i8').byteorder == '=' + assert dtype(byteorder + 'i8').byteorder == '=' + + def test_alignment(self): + from _numpypy import dtype + assert dtype('i4').alignment == 4 + + def test_typeinfo(self): + from _numpypy import typeinfo, void, number, int64, bool_ + assert typeinfo['Number'] == number + assert typeinfo['LONGLONG'] == ('q', 9, 64, 8, 9223372036854775807L, -9223372036854775808L, int64) + assert typeinfo['VOID'] == ('V', 20, 0, 1, void) + assert typeinfo['BOOL'] == ('?', 0, 8, 1, 1, 0, bool_) + +class AppTestStrUnicodeDtypes(BaseNumpyAppTest): + def test_str_unicode(self): + from _numpypy import str_, unicode_, character, flexible, generic + + assert str_.mro() == [str_, str, basestring, character, flexible, generic, object] + assert unicode_.mro() == [unicode_, unicode, basestring, character, flexible, generic, object] + + def test_str_dtype(self): + from _numpypy import dtype, str_ + + raises(TypeError, "dtype('Sx')") + 
d = dtype('S8') + assert d.itemsize == 8 + assert dtype(str) == dtype('S') + assert d.kind == 'S' + assert d.type is str_ + assert d.name == "string64" + assert d.num == 18 + + def test_unicode_dtype(self): + from _numpypy import dtype, unicode_ + + raises(TypeError, "dtype('Ux')") + d = dtype('U8') + assert d.itemsize == 8 * 4 + assert dtype(unicode) == dtype('U') + assert d.kind == 'U' + assert d.type is unicode_ + assert d.name == "unicode256" + assert d.num == 19 + + def test_string_boxes(self): + from _numpypy import str_ + assert isinstance(str_(3), str_) + + def test_unicode_boxes(self): + from _numpypy import unicode_ + assert isinstance(unicode_(3), unicode) + +class AppTestRecordDtypes(BaseNumpyAppTest): + def test_create(self): + from _numpypy import dtype, void + + raises(ValueError, "dtype([('x', int), ('x', float)])") + d = dtype([("x", "int32"), ("y", "int32"), ("z", "int32"), ("value", float)]) + assert d.fields['x'] == (dtype('int32'), 0) + assert d.fields['value'] == (dtype(float), 12) + assert d['x'] == dtype('int32') + assert d.name == "void160" + assert d.num == 20 + assert d.itemsize == 20 + assert d.kind == 'V' + assert d.type is void + assert d.char == 'V' + assert d.names == ("x", "y", "z", "value") + raises(KeyError, 'd["xyz"]') + raises(KeyError, 'd.fields["xyz"]') + + def test_create_from_dict(self): + skip("not yet") + from _numpypy import dtype + d = dtype({'names': ['a', 'b', 'c'], + }) + +class AppTestNotDirect(BaseNumpyAppTest): + def setup_class(cls): + BaseNumpyAppTest.setup_class.im_func(cls) + def check_non_native(w_obj, w_obj2): + assert w_obj.storage[0] == w_obj2.storage[1] + assert w_obj.storage[1] == w_obj2.storage[0] + if w_obj.storage[0] == '\x00': + assert w_obj2.storage[1] == '\x00' + assert w_obj2.storage[0] == '\x01' + else: + assert w_obj2.storage[1] == '\x01' + assert w_obj2.storage[0] == '\x00' + cls.w_check_non_native = cls.space.wrap(interp2app(check_non_native)) + if option.runappdirect: + py.test.skip("not a 
direct test") + + def test_non_native(self): + from _numpypy import array + a = array([1, 2, 3], dtype=self.non_native_prefix + 'i2') + assert a[0] == 1 + assert (a + a)[1] == 4 + self.check_non_native(a, array([1, 2, 3], 'i2')) + diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -5,15 +5,23 @@ from pypy.interpreter.error import OperationError from pypy.module.micronumpy import signature from pypy.module.micronumpy.appbridge import get_appbridge_cache -from pypy.module.micronumpy.interp_iter import Chunk +from pypy.module.micronumpy.interp_iter import Chunk, Chunks from pypy.module.micronumpy.interp_numarray import W_NDimArray, shape_agreement from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest class MockDtype(object): - def malloc(self, size): - return None + class itemtype(object): + @staticmethod + def malloc(size): + return None + def get_size(self): + return 1 + + +def create_slice(a, chunks): + return Chunks(chunks).apply(a) class TestNumArrayDirect(object): def newslice(self, *args): @@ -29,116 +37,116 @@ return self.space.newtuple(args_w) def test_strides_f(self): - a = W_NDimArray(100, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray([10, 5, 3], MockDtype(), 'F') assert a.strides == [1, 10, 50] assert a.backstrides == [9, 40, 100] def test_strides_c(self): - a = W_NDimArray(100, [10, 5, 3], MockDtype(), 'C') + a = W_NDimArray([10, 5, 3], MockDtype(), 'C') assert a.strides == [15, 3, 1] assert a.backstrides == [135, 12, 2] def test_create_slice_f(self): - a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') - s = a.create_slice([Chunk(3, 0, 0, 1)]) + a = W_NDimArray([10, 5, 3], MockDtype(), 'F') + s = create_slice(a, [Chunk(3, 0, 0, 1)]) assert s.start == 3 assert s.strides == [10, 50] assert s.backstrides == [40, 100] - s = a.create_slice([Chunk(1, 9, 2, 4)]) + s = create_slice(a, 
[Chunk(1, 9, 2, 4)]) assert s.start == 1 assert s.strides == [2, 10, 50] assert s.backstrides == [6, 40, 100] - s = a.create_slice([Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), Chunk(1, 0, 0, 1)]) + s = create_slice(a, [Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), Chunk(1, 0, 0, 1)]) assert s.shape == [2, 1] assert s.strides == [3, 10] assert s.backstrides == [3, 0] - s = a.create_slice([Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) + s = create_slice(a, [Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) assert s.start == 20 assert s.shape == [10, 3] def test_create_slice_c(self): - a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') - s = a.create_slice([Chunk(3, 0, 0, 1)]) + a = W_NDimArray([10, 5, 3], MockDtype(), 'C') + s = create_slice(a, [Chunk(3, 0, 0, 1)]) assert s.start == 45 assert s.strides == [3, 1] assert s.backstrides == [12, 2] - s = a.create_slice([Chunk(1, 9, 2, 4)]) + s = create_slice(a, [Chunk(1, 9, 2, 4)]) assert s.start == 15 assert s.strides == [30, 3, 1] assert s.backstrides == [90, 12, 2] - s = a.create_slice([Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), + s = create_slice(a, [Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), Chunk(1, 0, 0, 1)]) assert s.start == 19 assert s.shape == [2, 1] assert s.strides == [45, 3] assert s.backstrides == [45, 0] - s = a.create_slice([Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) + s = create_slice(a, [Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) assert s.start == 6 assert s.shape == [10, 3] def test_slice_of_slice_f(self): - a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') - s = a.create_slice([Chunk(5, 0, 0, 1)]) + a = W_NDimArray([10, 5, 3], MockDtype(), 'F') + s = create_slice(a, [Chunk(5, 0, 0, 1)]) assert s.start == 5 - s2 = s.create_slice([Chunk(3, 0, 0, 1)]) + s2 = create_slice(s, [Chunk(3, 0, 0, 1)]) assert s2.shape == [3] assert s2.strides == [50] assert s2.parent is a assert s2.backstrides == [100] assert s2.start == 35 - s = a.create_slice([Chunk(1, 5, 3, 2)]) - s2 = s.create_slice([Chunk(0, 2, 1, 2), Chunk(2, 0, 0, 1)]) + s = 
create_slice(a, [Chunk(1, 5, 3, 2)]) + s2 = create_slice(s, [Chunk(0, 2, 1, 2), Chunk(2, 0, 0, 1)]) assert s2.shape == [2, 3] assert s2.strides == [3, 50] assert s2.backstrides == [3, 100] assert s2.start == 1 * 15 + 2 * 3 def test_slice_of_slice_c(self): - a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') - s = a.create_slice([Chunk(5, 0, 0, 1)]) + a = W_NDimArray([10, 5, 3], MockDtype(), order='C') + s = create_slice(a, [Chunk(5, 0, 0, 1)]) assert s.start == 15 * 5 - s2 = s.create_slice([Chunk(3, 0, 0, 1)]) + s2 = create_slice(s, [Chunk(3, 0, 0, 1)]) assert s2.shape == [3] assert s2.strides == [1] assert s2.parent is a assert s2.backstrides == [2] assert s2.start == 5 * 15 + 3 * 3 - s = a.create_slice([Chunk(1, 5, 3, 2)]) - s2 = s.create_slice([Chunk(0, 2, 1, 2), Chunk(2, 0, 0, 1)]) + s = create_slice(a, [Chunk(1, 5, 3, 2)]) + s2 = create_slice(s, [Chunk(0, 2, 1, 2), Chunk(2, 0, 0, 1)]) assert s2.shape == [2, 3] assert s2.strides == [45, 1] assert s2.backstrides == [45, 2] assert s2.start == 1 * 15 + 2 * 3 def test_negative_step_f(self): - a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') - s = a.create_slice([Chunk(9, -1, -2, 5)]) + a = W_NDimArray([10, 5, 3], MockDtype(), 'F') + s = create_slice(a, [Chunk(9, -1, -2, 5)]) assert s.start == 9 assert s.strides == [-2, 10, 50] assert s.backstrides == [-8, 40, 100] def test_negative_step_c(self): - a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') - s = a.create_slice([Chunk(9, -1, -2, 5)]) + a = W_NDimArray([10, 5, 3], MockDtype(), order='C') + s = create_slice(a, [Chunk(9, -1, -2, 5)]) assert s.start == 135 assert s.strides == [-30, 3, 1] assert s.backstrides == [-120, 12, 2] def test_index_of_single_item_f(self): - a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray([10, 5, 3], MockDtype(), 'F') r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) assert r == 1 + 2 * 10 + 2 * 50 - s = a.create_slice([Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) + 
s = create_slice(a, [Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) r = s._index_of_single_item(self.space, self.newtuple(1, 0)) assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 0)) r = s._index_of_single_item(self.space, self.newtuple(1, 1)) assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 1)) def test_index_of_single_item_c(self): - a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') + a = W_NDimArray([10, 5, 3], MockDtype(), 'C') r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) assert r == 1 * 3 * 5 + 2 * 3 + 2 - s = a.create_slice([Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) + s = create_slice(a, [Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) r = s._index_of_single_item(self.space, self.newtuple(1, 0)) assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 0)) r = s._index_of_single_item(self.space, self.newtuple(1, 1)) @@ -374,6 +382,58 @@ assert a[1] == 0. assert a[3] == 0. + def test_newaxis(self): + from _numpypy import array + from numpypy.core.numeric import newaxis + a = array(range(5)) + b = array([range(5)]) + assert (a[newaxis] == b).all() + + def test_newaxis_slice(self): + from _numpypy import array + from numpypy.core.numeric import newaxis + + a = array(range(5)) + b = array(range(1,5)) + c = array([range(1,5)]) + d = array([[x] for x in range(1,5)]) + + assert (a[1:] == b).all() + assert (a[1:,newaxis] == d).all() + assert (a[newaxis,1:] == c).all() + + def test_newaxis_assign(self): + from _numpypy import array + from numpypy.core.numeric import newaxis + + a = array(range(5)) + a[newaxis,1] = [2] + assert a[1] == 2 + + def test_newaxis_virtual(self): + from _numpypy import array + from numpypy.core.numeric import newaxis + + a = array(range(5)) + b = (a + a)[newaxis] + c = array([[0, 2, 4, 6, 8]]) + assert (b == c).all() + + def test_newaxis_then_slice(self): + from _numpypy import array + from numpypy.core.numeric import newaxis + a = array(range(5)) + b = a[newaxis] + assert b.shape == 
(1, 5) + assert (b[0,1:] == a[1:]).all() + + def test_slice_then_newaxis(self): + from _numpypy import array + from numpypy.core.numeric import newaxis + a = array(range(5)) + b = a[2:] + assert (b[newaxis] == [[2, 3, 4]]).all() + def test_scalar(self): from _numpypy import array, dtype a = array(3) @@ -434,6 +494,8 @@ a = zeros((4, 2, 3)) a.shape = (12, 2) (a + a).reshape(2, 12) # assert did not explode + a = array([[[[]]]]) + assert a.reshape((0,)).shape == (0,) def test_slice_reshape(self): from _numpypy import zeros, arange @@ -1091,7 +1153,7 @@ assert array([True, False]).dtype is dtype(bool) assert array([True, 1]).dtype is dtype(int) assert array([1, 2, 3]).dtype is dtype(int) - assert array([1L, 2, 3]).dtype is dtype(long) + #assert array([1L, 2, 3]).dtype is dtype(long) assert array([1.2, True]).dtype is dtype(float) assert array([1.2, 5]).dtype is dtype(float) assert array([]).dtype is dtype(float) @@ -1596,6 +1658,7 @@ a = arange(12).reshape(3,4) b = a.T.flat b[6::2] = [-1, -2] + print a == [[0, 1, -1, 3], [4, 5, 6, -1], [8, 9, -2, 11]] assert (a == [[0, 1, -1, 3], [4, 5, 6, -1], [8, 9, -2, 11]]).all() b[0:2] = [[[100]]] assert(a[0,0] == 100) @@ -1870,6 +1933,12 @@ #5 bytes is larger than 3 bytes raises(ValueError, fromstring, "\x01\x02\x03", count=5, dtype=uint8) + def test_tostring(self): + from _numpypy import array + assert array([1, 2, 3], 'i2').tostring() == '\x01\x00\x02\x00\x03\x00' + assert array([1, 2, 3], 'i2')[::2].tostring() == '\x01\x00\x03\x00' + assert array([1, 2, 3], 'i2')[::2].tostring() == '\x00\x01\x00\x03' class AppTestRanges(BaseNumpyAppTest): def test_arange(self): @@ -1915,3 +1984,57 @@ cache = get_appbridge_cache(cls.space) cache.w_array_repr = cls.old_array_repr cache.w_array_str = cls.old_array_str + +class AppTestRecordDtype(BaseNumpyAppTest): + def test_zeros(self): + from _numpypy import zeros + a = zeros(2, dtype=[('x', int), ('y', float)]) + raises(IndexError, 'a[0]["xyz"]') + assert a[0]['x'] == 0 + assert a[0]['y'] == 0 
+ raises(ValueError, "a[0] = (1, 2, 3)") + a[0]['x'] = 13 + assert a[0]['x'] == 13 + a[1] = (1, 2) + assert a[1]['y'] == 2 + b = zeros(2, dtype=[('x', int), ('y', float)]) + b[1] = a[1] + assert a[1]['y'] == 2 + + def test_views(self): + from _numpypy import array + a = array([(1, 2), (3, 4)], dtype=[('x', int), ('y', float)]) + raises(ValueError, 'array([1])["x"]') + raises(ValueError, 'a["z"]') + assert a['x'][1] == 3 + assert a['y'][1] == 4 + a['x'][0] = 15 + assert a['x'][0] == 15 + b = a['x'] + a['y'] + assert (b == [15+2, 3+4]).all() + assert b.dtype == float + + def test_assign_tuple(self): + from _numpypy import zeros + a = zeros((2, 3), dtype=[('x', int), ('y', float)]) + a[1, 2] = (1, 2) + assert a['x'][1, 2] == 1 + assert a['y'][1, 2] == 2 + + def test_creation_and_repr(self): + from _numpypy import array + a = array([(1, 2), (3, 4)], dtype=[('x', int), ('y', float)]) + assert repr(a[0]) == '(1, 2.0)' + + def test_nested_dtype(self): + from _numpypy import zeros + a = [('x', int), ('y', float)] + b = [('x', int), ('y', a)] + arr = zeros(3, dtype=b) + arr[1]['x'] = 15 + assert arr[1]['x'] == 15 + arr[1]['y']['y'] = 3.5 + assert arr[1]['y']['y'] == 3.5 + assert arr[1]['y']['x'] == 0.0 + assert arr[1]['x'] == 15 + diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -113,14 +113,37 @@ assert (divide(array([-10]), array([2])) == array([-5])).all() + def test_true_divide(self): + from _numpypy import array, true_divide + + a = array([0, 1, 2, 3, 4, 1, -1]) + b = array([4, 4, 4, 4, 4, 0, 0]) + c = true_divide(a, b) + assert (c == [0.0, 0.25, 0.5, 0.75, 1.0, float('inf'), float('-inf')]).all() + + assert math.isnan(true_divide(0, 0)) + def test_fabs(self): from _numpypy import array, fabs - from math import fabs as math_fabs + from math import fabs as math_fabs, isnan a = array([-5.0, -0.0, 1.0]) b = fabs(a) for i 
in range(3): assert b[i] == math_fabs(a[i]) + assert fabs(float('inf')) == float('inf') + assert fabs(float('-inf')) == float('inf') + assert isnan(fabs(float('nan'))) + + def test_fmod(self): + from _numpypy import fmod + import math + + assert fmod(-1e-100, 1e100) == -1e-100 + assert fmod(3, float('inf')) == 3 + assert (fmod([-3, -2, -1, 1, 2, 3], 2) == [-1, 0, -1, 1, 0, 1]).all() + for v in [float('inf'), float('-inf'), float('nan'), float('-nan')]: + assert math.isnan(fmod(v, 2)) def test_minimum(self): from _numpypy import array, minimum @@ -172,6 +195,14 @@ assert a[0] == 1 assert a[1] == 0 + def test_signbit(self): + from _numpypy import signbit, copysign + + assert (signbit([0, 0.0, 1, 1.0, float('inf'), float('nan')]) == + [False, False, False, False, False, False]).all() + assert (signbit([-0, -0.0, -1, -1.0, float('-inf'), -float('nan'), float('-nan')]) == + [False, True, True, True, True, True, True]).all() + def test_reciporocal(self): from _numpypy import array, reciprocal @@ -231,13 +262,46 @@ a = array([-5.0, -0.0, 0.0, 12345678.0, float("inf"), -float('inf'), -12343424.0]) b = exp(a) - for i in range(4): + for i in range(len(a)): try: res = math.exp(a[i]) except OverflowError: res = float('inf') assert b[i] == res + def test_exp2(self): + import math + from _numpypy import array, exp2 + + a = array([-5.0, -0.0, 0.0, 2, 12345678.0, float("inf"), + -float('inf'), -12343424.0]) + b = exp2(a) + for i in range(len(a)): + try: + res = 2 ** a[i] + except OverflowError: + res = float('inf') + assert b[i] == res + + assert exp2(3) == 8 + assert math.isnan(exp2(float("nan"))) + + def test_expm1(self): + import math + from _numpypy import array, expm1 + + a = array([-5.0, -0.0, 0.0, 12345678.0, float("inf"), + -float('inf'), -12343424.0]) + b = expm1(a) + for i in range(4): + try: + res = math.exp(a[i]) - 1 + except OverflowError: + res = float('inf') + assert b[i] == res + + assert expm1(1e-50) == 1e-50 + def test_sin(self): import math from _numpypy import 
array, sin @@ -310,6 +374,21 @@ b = arctan(a) assert math.isnan(b[0]) + def test_arctan2(self): + import math + from _numpypy import array, arctan2 + + # From the numpy documentation + assert ( + arctan2( + [0., 0., 1., -1., float('inf'), float('inf')], + [0., -0., float('inf'), float('inf'), float('inf'), float('-inf')]) == + [0., math.pi, 0., -0., math.pi/4, 3*math.pi/4]).all() + + a = array([float('nan')]) + b = arctan2(a, 0) + assert math.isnan(b[0]) + def test_sinh(self): import math from _numpypy import array, sinh @@ -415,6 +494,19 @@ for i in range(len(a)): assert b[i] == math.degrees(a[i]) + def test_rad2deg(self): + import math + from _numpypy import rad2deg, array + a = array([ + -181, -180, -179, + 181, 180, 179, + 359, 360, 361, + 400, -1, 0, 1, + float('inf'), float('-inf')]) + b = rad2deg(a) + for i in range(len(a)): + assert b[i] == math.degrees(a[i]) + def test_reduce_errors(self): from _numpypy import sin, add @@ -510,6 +602,26 @@ assert (isinf(array([0.2, float('inf'), float('nan')])) == [False, True, False]).all() assert isinf(array([0.2])).dtype.kind == 'b' + def test_isposinf_isneginf(self): + from _numpypy import isneginf, isposinf + assert isposinf(float('inf')) + assert not isposinf(float('-inf')) + assert not isposinf(float('nan')) + assert not isposinf(0) + assert not isposinf(0.0) + assert isneginf(float('-inf')) + assert not isneginf(float('inf')) + assert not isneginf(float('nan')) + assert not isneginf(0) + assert not isneginf(0.0) + + def test_isfinite(self): + from _numpypy import isfinite + assert (isfinite([0, 0.0, 1e50, -1e-50]) == + [True, True, True, True]).all() + assert (isfinite([float('-inf'), float('inf'), float('-nan'), float('nan')]) == + [False, False, False, False]).all() + def test_logical_ops(self): from _numpypy import logical_and, logical_or, logical_xor, logical_not @@ -544,7 +656,7 @@ assert log1p(float('inf')) == float('inf') assert (log1p([0, 1e-50, math.e - 1]) == [0, 1e-50, 1]).all() - def test_power(self): + 
def test_power_float(self): import math from _numpypy import power, array a = array([1., 2., 3.]) @@ -558,9 +670,94 @@ for i in range(len(a)): assert c[i] == a[i] ** b[i] + assert power(2, float('inf')) == float('inf') + assert power(float('inf'), float('inf')) == float('inf') + assert power(12345.0, 12345.0) == float('inf') + assert power(-12345.0, 12345.0) == float('-inf') + assert power(-12345.0, 12346.0) == float('inf') + assert math.isnan(power(-1, 1.1)) + assert math.isnan(power(-1, -1.1)) + assert power(-2.0, -1) == -0.5 + assert power(-2.0, -2) == 0.25 + assert power(12345.0, -12345.0) == 0 + assert power(float('-inf'), 2) == float('inf') + assert power(float('-inf'), 2.5) == float('inf') + assert power(float('-inf'), 3) == float('-inf') + + def test_power_int(self): + import math + from _numpypy import power, array + a = array([1, 2, 3]) + b = power(a, 3) + for i in range(len(a)): + assert b[i] == a[i] ** 3 + + a = array([1, 2, 3]) + b = array([1, 2, 3]) + c = power(a, b) + for i in range(len(a)): + assert c[i] == a[i] ** b[i] + + # assert power(12345, 12345) == -9223372036854775808 + # assert power(-12345, 12345) == -9223372036854775808 + # assert power(-12345, 12346) == -9223372036854775808 + assert power(2, 0) == 1 + assert power(2, -1) == 0 + assert power(2, -2) == 0 + assert power(-2, -1) == 0 + assert power(-2, -2) == 0 + assert power(12345, -12345) == 0 + def test_floordiv(self): from _numpypy import floor_divide, array a = array([1., 2., 3., 4., 5., 6., 6.01]) b = floor_divide(a, 2.5) for i in range(len(a)): assert b[i] == a[i] // 2.5 + + def test_logaddexp(self): + import math + from _numpypy import logaddexp + + # From the numpy documentation + prob1 = math.log(1e-50) + prob2 = math.log(2.5e-50) + prob12 = logaddexp(prob1, prob2) + assert math.fabs(-113.87649168120691 - prob12) < 0.000000000001 + + assert logaddexp(0, 0) == math.log(2) + assert logaddexp(float('-inf'), 0) == 0 + assert logaddexp(12345678, 12345678) == float('inf') + + assert 
math.isnan(logaddexp(float('nan'), 1)) + assert math.isnan(logaddexp(1, float('nan'))) + assert math.isnan(logaddexp(float('nan'), float('inf'))) + assert math.isnan(logaddexp(float('inf'), float('nan'))) + assert logaddexp(float('-inf'), float('-inf')) == float('-inf') + assert logaddexp(float('-inf'), float('inf')) == float('inf') + assert logaddexp(float('inf'), float('-inf')) == float('inf') + assert logaddexp(float('inf'), float('inf')) == float('inf') + + def test_logaddexp2(self): + import math + from _numpypy import logaddexp2 + log2 = math.log(2) + + # From the numpy documentation + prob1 = math.log(1e-50) / log2 + prob2 = math.log(2.5e-50) / log2 + prob12 = logaddexp2(prob1, prob2) + assert math.fabs(-164.28904982231052 - prob12) < 0.000000000001 + + assert logaddexp2(0, 0) == 1 + assert logaddexp2(float('-inf'), 0) == 0 + assert logaddexp2(12345678, 12345678) == float('inf') + + assert math.isnan(logaddexp2(float('nan'), 1)) + assert math.isnan(logaddexp2(1, float('nan'))) + assert math.isnan(logaddexp2(float('nan'), float('inf'))) + assert math.isnan(logaddexp2(float('inf'), float('nan'))) + assert logaddexp2(float('-inf'), float('-inf')) == float('-inf') + assert logaddexp2(float('-inf'), float('inf')) == float('inf') + assert logaddexp2(float('inf'), float('-inf')) == float('inf') + assert logaddexp2(float('inf'), float('inf')) == float('inf') diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1,15 +1,20 @@ import functools import math +import struct from pypy.interpreter.error import OperationError from pypy.module.micronumpy import interp_boxes from pypy.objspace.std.floatobject import float2string from pypy.rlib import rfloat, libffi, clibffi -from pypy.rlib.objectmodel import specialize -from pypy.rlib.rarithmetic import LONG_BIT, widen +from pypy.rlib.objectmodel import specialize, we_are_translated +from pypy.rlib.rarithmetic import widen, 
byteswap from pypy.rpython.lltypesystem import lltype, rffi from pypy.rlib.rstruct.runpack import runpack +from pypy.tool.sourcetools import func_with_new_name +from pypy.rlib import jit +VOID_STORAGE = lltype.Array(lltype.Char, hints={'nolength': True, + 'render_as_void': True}) degToRad = math.pi / 180.0 log2 = math.log(2) @@ -59,9 +64,20 @@ return dispatcher class BaseType(object): + _attrs_ = () + def _unimplemented_ufunc(self, *args): raise NotImplementedError + def malloc(self, size): + # XXX find out why test_zjit explodes with tracking of allocations + return lltype.malloc(VOID_STORAGE, size, + zero=True, flavor="raw", + track_allocation=False, add_memory_pressure=True) + + def __repr__(self): + return self.__class__.__name__ + class Primitive(object): _mixin_ = True @@ -76,7 +92,7 @@ assert isinstance(box, self.BoxType) return box.value - def coerce(self, space, w_item): + def coerce(self, space, dtype, w_item): if isinstance(w_item, self.BoxType): return w_item return self.coerce_subtype(space, space.gettypefor(self.BoxType), w_item) @@ -97,32 +113,41 @@ def default_fromstring(self, space): raise NotImplementedError - def read(self, storage, width, i, offset): - return self.box(libffi.array_getitem(clibffi.cast_type_to_ffitype(self.T), - width, storage, i, offset - )) + def _read(self, storage, width, i, offset): + if we_are_translated(): + return libffi.array_getitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset) + else: + return libffi.array_getitem_T(self.T, width, storage, i, offset) - def read_bool(self, storage, width, i, offset): - return bool(self.for_computation( - libffi.array_getitem(clibffi.cast_type_to_ffitype(self.T), - width, storage, i, offset))) + def read(self, arr, width, i, offset, dtype=None): + return self.box(self._read(arr.storage, width, i, offset)) - def store(self, storage, width, i, offset, box): - value = self.unbox(box) - libffi.array_setitem(clibffi.cast_type_to_ffitype(self.T), - width, storage, i, 
offset, value - ) + def read_bool(self, arr, width, i, offset): + return bool(self.for_computation(self._read(arr.storage, width, i, offset))) + + def _write(self, storage, width, i, offset, value): + if we_are_translated(): + libffi.array_setitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset, value) + else: + libffi.array_setitem_T(self.T, width, storage, i, offset, value) + + + def store(self, arr, width, i, offset, box): + self._write(arr.storage, width, i, offset, self.unbox(box)) def fill(self, storage, width, box, start, stop, offset): value = self.unbox(box) - for i in xrange(start, stop): - libffi.array_setitem(clibffi.cast_type_to_ffitype(self.T), - width, storage, i, offset, value - ) + for i in xrange(start, stop, width): + self._write(storage, 1, i, offset, value) def runpack_str(self, s): return self.box(runpack(self.format_code, s)) + def pack_str(self, box): + return struct.pack(self.format_code, self.unbox(box)) + @simple_binary_op def add(self, v1, v2): return v1 + v2 @@ -155,6 +180,14 @@ def isinf(self, v): return False + @raw_unary_op + def isneginf(self, v): + return False + + @raw_unary_op + def isposinf(self, v): + return False + @raw_binary_op def eq(self, v1, v2): return v1 == v2 @@ -206,8 +239,31 @@ def min(self, v1, v2): return min(v1, v2) +class NonNativePrimitive(Primitive): + _mixin_ = True + + def _read(self, storage, width, i, offset): + if we_are_translated(): + res = libffi.array_getitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset) + else: + res = libffi.array_getitem_T(self.T, width, storage, i, offset) + return byteswap(res) + + def _write(self, storage, width, i, offset, value): + value = byteswap(value) + if we_are_translated(): + libffi.array_setitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset, value) + else: + libffi.array_setitem_T(self.T, width, storage, i, offset, value) + + def pack_str(self, box): + return struct.pack(self.format_code, 
byteswap(self.unbox(box))) class Bool(BaseType, Primitive): + _attrs_ = () + T = lltype.Bool BoxType = interp_boxes.W_BoolBox format_code = "?" @@ -234,8 +290,7 @@ return space.wrap(self.unbox(w_item)) def str_format(self, box): - value = self.unbox(box) - return "True" if value else "False" + return "True" if self.unbox(box) else "False" def for_computation(self, v): return int(v) @@ -259,15 +314,18 @@ def invert(self, v): return ~v +NonNativeBool = Bool + class Integer(Primitive): _mixin_ = True + def _base_coerce(self, space, w_item): + return self.box(space.int_w(space.call_function(space.w_int, w_item))) def _coerce(self, space, w_item): - return self.box(space.int_w(space.call_function(space.w_int, w_item))) + return self._base_coerce(space, w_item) def str_format(self, box): - value = self.unbox(box) - return str(self.for_computation(value)) + return str(self.for_computation(self.unbox(box))) def for_computation(self, v): return widen(v) @@ -293,6 +351,8 @@ @simple_binary_op def pow(self, v1, v2): + if v2 < 0: + return 0 res = 1 while v2 > 0: if v2 & 1: @@ -337,68 +397,153 @@ def invert(self, v): return ~v +class NonNativeInteger(NonNativePrimitive, Integer): + _mixin_ = True + class Int8(BaseType, Integer): + _attrs_ = () + T = rffi.SIGNEDCHAR BoxType = interp_boxes.W_Int8Box format_code = "b" +NonNativeInt8 = Int8 class UInt8(BaseType, Integer): + _attrs_ = () + T = rffi.UCHAR BoxType = interp_boxes.W_UInt8Box format_code = "B" +NonNativeUInt8 = UInt8 class Int16(BaseType, Integer): + _attrs_ = () + + T = rffi.SHORT + BoxType = interp_boxes.W_Int16Box + format_code = "h" + +class NonNativeInt16(BaseType, NonNativeInteger): + _attrs_ = () + T = rffi.SHORT BoxType = interp_boxes.W_Int16Box format_code = "h" class UInt16(BaseType, Integer): + _attrs_ = () + + T = rffi.USHORT + BoxType = interp_boxes.W_UInt16Box + format_code = "H" + +class NonNativeUInt16(BaseType, NonNativeInteger): + _attrs_ = () + T = rffi.USHORT BoxType = interp_boxes.W_UInt16Box 
format_code = "H" class Int32(BaseType, Integer): + _attrs_ = () + + T = rffi.INT + BoxType = interp_boxes.W_Int32Box + format_code = "i" + +class NonNativeInt32(BaseType, NonNativeInteger): + _attrs_ = () + T = rffi.INT BoxType = interp_boxes.W_Int32Box format_code = "i" class UInt32(BaseType, Integer): + _attrs_ = () + + T = rffi.UINT + BoxType = interp_boxes.W_UInt32Box + format_code = "I" + +class NonNativeUInt32(BaseType, NonNativeInteger): + _attrs_ = () + T = rffi.UINT BoxType = interp_boxes.W_UInt32Box format_code = "I" class Long(BaseType, Integer): + _attrs_ = () + + T = rffi.LONG + BoxType = interp_boxes.W_LongBox + format_code = "l" + +class NonNativeLong(BaseType, NonNativeInteger): + _attrs_ = () + T = rffi.LONG BoxType = interp_boxes.W_LongBox format_code = "l" class ULong(BaseType, Integer): + _attrs_ = () + + T = rffi.ULONG + BoxType = interp_boxes.W_ULongBox + format_code = "L" + +class NonNativeULong(BaseType, NonNativeInteger): + _attrs_ = () + T = rffi.ULONG BoxType = interp_boxes.W_ULongBox format_code = "L" class Int64(BaseType, Integer): + _attrs_ = () + T = rffi.LONGLONG BoxType = interp_boxes.W_Int64Box format_code = "q" +class NonNativeInt64(BaseType, NonNativeInteger): + _attrs_ = () + + T = rffi.LONGLONG + BoxType = interp_boxes.W_Int64Box + format_code = "q" + +def _uint64_coerce(self, space, w_item): + try: + return self._base_coerce(space, w_item) + except OperationError, e: + if not e.match(space, space.w_OverflowError): + raise + bigint = space.bigint_w(w_item) + try: + value = bigint.toulonglong() + except OverflowError: + raise OperationError(space.w_OverflowError, space.w_None) + return self.box(value) + class UInt64(BaseType, Integer): + _attrs_ = () + T = rffi.ULONGLONG BoxType = interp_boxes.W_UInt64Box format_code = "Q" - def _coerce(self, space, w_item): - try: - return Integer._coerce(self, space, w_item) - except OperationError, e: - if not e.match(space, space.w_OverflowError): - raise - bigint = space.bigint_w(w_item) - 
try: - value = bigint.toulonglong() - except OverflowError: - raise OperationError(space.w_OverflowError, space.w_None) - return self.box(value) + _coerce = func_with_new_name(_uint64_coerce, '_coerce') + +class NonNativeUInt64(BaseType, NonNativeInteger): + _attrs_ = () + + T = rffi.ULONGLONG + BoxType = interp_boxes.W_UInt64Box + format_code = "Q" + + _coerce = func_with_new_name(_uint64_coerce, '_coerce') class Float(Primitive): _mixin_ = True @@ -407,8 +552,8 @@ return self.box(space.float_w(space.call_function(space.w_float, w_item))) def str_format(self, box): - value = self.unbox(box) - return float2string(self.for_computation(value), "g", rfloat.DTSF_STR_PRECISION) + return float2string(self.for_computation(self.unbox(box)), "g", + rfloat.DTSF_STR_PRECISION) def for_computation(self, v): return float(v) @@ -440,7 +585,15 @@ @simple_binary_op def pow(self, v1, v2): - return math.pow(v1, v2) + try: + return math.pow(v1, v2) + except ValueError: + return rfloat.NAN + except OverflowError: + if math.modf(v2)[0] == 0 and math.modf(v2 / 2)[0] != 0: + # Odd integer powers result in the same sign as the base + return rfloat.copysign(rfloat.INFINITY, v1) + return rfloat.INFINITY @simple_binary_op def copysign(self, v1, v2): @@ -452,10 +605,21 @@ return 0.0 return rfloat.copysign(1.0, v) + @raw_unary_op + def signbit(self, v): + return rfloat.copysign(1.0, v) < 0.0 + @simple_unary_op def fabs(self, v): return math.fabs(v) + @simple_binary_op + def fmod(self, v1, v2): + try: + return math.fmod(v1, v2) + except ValueError: + return rfloat.NAN + @simple_unary_op def reciprocal(self, v): if v == 0.0: @@ -478,6 +642,20 @@ return rfloat.INFINITY @simple_unary_op + def exp2(self, v): + try: + return math.pow(2, v) + except OverflowError: + return rfloat.INFINITY + + @simple_unary_op + def expm1(self, v): + try: + return rfloat.expm1(v) + except OverflowError: + return rfloat.INFINITY + + @simple_unary_op def sin(self, v): return math.sin(v) @@ -505,6 +683,10 @@ def 
arctan(self, v): return math.atan(v) + @simple_binary_op + def arctan2(self, v1, v2): + return math.atan2(v1, v2) + @simple_unary_op def sinh(self, v): return math.sinh(v) @@ -550,6 +732,18 @@ def isinf(self, v): return rfloat.isinf(v) + @raw_unary_op + def isneginf(self, v): + return rfloat.isinf(v) and v < 0 + + @raw_unary_op + def isposinf(self, v): + return rfloat.isinf(v) and v > 0 + + @raw_unary_op + def isfinite(self, v): + return not (rfloat.isinf(v) or rfloat.isnan(v)) + @simple_unary_op def radians(self, v): return v * degToRad @@ -601,13 +795,200 @@ except ValueError: return rfloat.NAN + @simple_binary_op + def logaddexp(self, v1, v2): + try: + v1e = math.exp(v1) + except OverflowError: + v1e = rfloat.INFINITY + try: + v2e = math.exp(v2) + except OverflowError: + v2e = rfloat.INFINITY + + v12e = v1e + v2e + try: + return math.log(v12e) + except ValueError: + if v12e == 0.0: + # CPython raises ValueError here, so we have to check + # the value to find the correct numpy return value + return -rfloat.INFINITY + return rfloat.NAN + + @simple_binary_op + def logaddexp2(self, v1, v2): + try: + v1e = math.pow(2, v1) + except OverflowError: + v1e = rfloat.INFINITY + try: + v2e = math.pow(2, v2) + except OverflowError: + v2e = rfloat.INFINITY + + v12e = v1e + v2e + try: + return math.log(v12e) / log2 + except ValueError: + if v12e == 0.0: + # CPython raises ValueError here, so we have to check + # the value to find the correct numpy return value + return -rfloat.INFINITY + return rfloat.NAN + +class NonNativeFloat(NonNativePrimitive, Float): + _mixin_ = True + + def _read(self, storage, width, i, offset): + if we_are_translated(): + res = libffi.array_getitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset) + else: + res = libffi.array_getitem_T(self.T, width, storage, i, offset) + #return byteswap(res) + return res + + def _write(self, storage, width, i, offset, value): + #value = byteswap(value) XXX + if we_are_translated(): + 
libffi.array_setitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset, value) + else: + libffi.array_setitem_T(self.T, width, storage, i, offset, value) + + def pack_str(self, box): + # XXX byteswap + return struct.pack(self.format_code, self.unbox(box)) + class Float32(BaseType, Float): + _attrs_ = () + T = rffi.FLOAT BoxType = interp_boxes.W_Float32Box format_code = "f" +class NonNativeFloat32(BaseType, NonNativeFloat): + _attrs_ = () + + T = rffi.FLOAT + BoxType = interp_boxes.W_Float32Box + format_code = "f" + class Float64(BaseType, Float): + _attrs_ = () + T = rffi.DOUBLE BoxType = interp_boxes.W_Float64Box format_code = "d" + +class NonNativeFloat64(BaseType, NonNativeFloat): + _attrs_ = () + + T = rffi.DOUBLE + BoxType = interp_boxes.W_Float64Box + format_code = "d" + +class BaseStringType(object): + _mixin_ = True + + def __init__(self, size=0): + self.size = size + + def get_element_size(self): + return self.size * rffi.sizeof(self.T) + +class StringType(BaseType, BaseStringType): + T = lltype.Char + +class VoidType(BaseType, BaseStringType): + T = lltype.Char + +NonNativeVoidType = VoidType +NonNativeStringType = StringType + +class UnicodeType(BaseType, BaseStringType): + T = lltype.UniChar + +NonNativeUnicodeType = UnicodeType + +class RecordType(BaseType): + + T = lltype.Char + + def __init__(self, offsets_and_fields, size): + self.offsets_and_fields = offsets_and_fields + self.size = size + + def get_element_size(self): + return self.size + + def read(self, arr, width, i, offset, dtype=None): + if dtype is None: + dtype = arr.dtype + return interp_boxes.W_VoidBox(arr, i + offset, dtype) + + @jit.unroll_safe + def coerce(self, space, dtype, w_item): + from pypy.module.micronumpy.interp_numarray import W_NDimArray + + if isinstance(w_item, interp_boxes.W_VoidBox): + return w_item + # we treat every sequence as sequence, no special support + # for arrays + if not space.issequence_w(w_item): + raise OperationError(space.w_TypeError, 
space.wrap( + "expected sequence")) + if len(self.offsets_and_fields) != space.int_w(space.len(w_item)): + raise OperationError(space.w_ValueError, space.wrap( + "wrong length")) + items_w = space.fixedview(w_item) + # XXX optimize it out one day, but for now we just allocate an + # array + arr = W_NDimArray([1], dtype) + for i in range(len(items_w)): + subdtype = dtype.fields[dtype.fieldnames[i]][1] + ofs, itemtype = self.offsets_and_fields[i] + w_item = items_w[i] + w_box = itemtype.coerce(space, subdtype, w_item) + itemtype.store(arr, 1, 0, ofs, w_box) + return interp_boxes.W_VoidBox(arr, 0, arr.dtype) + + @jit.unroll_safe + def store(self, arr, _, i, ofs, box): + assert isinstance(box, interp_boxes.W_VoidBox) + for k in range(self.get_element_size()): + arr.storage[k + i] = box.arr.storage[k + box.ofs] + + @jit.unroll_safe + def str_format(self, box): + assert isinstance(box, interp_boxes.W_VoidBox) + pieces = ["("] + first = True + for ofs, tp in self.offsets_and_fields: + if first: + first = False + else: + pieces.append(", ") + pieces.append(tp.str_format(tp.read(box.arr, 1, box.ofs, ofs))) + pieces.append(")") + return "".join(pieces) + +for tp in [Int32, Int64]: + if tp.T == lltype.Signed: + IntP = tp + break +for tp in [UInt32, UInt64]: + if tp.T == lltype.Unsigned: + UIntP = tp + break +del tp + +def _setup(): + # compute alignment + for tp in globals().values(): + if isinstance(tp, type) and hasattr(tp, 'T'): + tp.alignment = clibffi.cast_type_to_ffitype(tp.T).c_alignment +_setup() +del _setup diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -14,10 +14,10 @@ def setup_module(mod): if os.name != 'nt': - mod.space = gettestobjspace(usemodules=['posix', 'fcntl']) + mod.space = gettestobjspace(usemodules=['posix', 'fcntl', 'struct']) else: # On windows, os.popen uses the subprocess module - mod.space = 
gettestobjspace(usemodules=['posix', '_rawffi', 'thread']) + mod.space = gettestobjspace(usemodules=['posix', '_rawffi', 'thread', 'struct']) mod.path = udir.join('posixtestfile.txt') mod.path.write("this is a test") mod.path2 = udir.join('test_posix2-') diff --git a/pypy/module/rctime/test/test_rctime.py b/pypy/module/rctime/test/test_rctime.py --- a/pypy/module/rctime/test/test_rctime.py +++ b/pypy/module/rctime/test/test_rctime.py @@ -3,7 +3,7 @@ class AppTestRCTime: def setup_class(cls): - space = gettestobjspace(usemodules=('rctime',)) + space = gettestobjspace(usemodules=('rctime', 'struct')) cls.space = space def test_attributes(self): diff --git a/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py b/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py --- a/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py +++ b/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py @@ -34,7 +34,7 @@ assert repr(a) == "array([], dtype=float64)" a = zeros(1001) assert repr(a) == "array([ 0., 0., 0., ..., 0., 0., 0.])" - a = array(range(5), long) + a = array(range(5), int) if a.dtype.itemsize == int_size: assert repr(a) == "array([0, 1, 2, 3, 4])" else: @@ -142,3 +142,39 @@ assert str(b) == "[7 8 9]" b = a[2:1, ] assert str(b) == "[]" + + def test_equal(self): + from _numpypy import array + from numpypy import array_equal + + a = [1, 2, 3] + b = [1, 2, 3] + + assert array_equal(a, b) + assert array_equal(a, array(b)) + assert array_equal(array(a), b) + assert array_equal(array(a), array(b)) + + def test_not_equal(self): + from _numpypy import array + from numpypy import array_equal + + a = [1, 2, 3] + b = [1, 2, 4] + + assert not array_equal(a, b) + assert not array_equal(a, array(b)) + assert not array_equal(array(a), b) + assert not array_equal(array(a), array(b)) + + def test_mismatched_shape(self): + from _numpypy import array + from numpypy import array_equal + + a = [1, 2, 3] + b = [[1, 2, 3], [1, 2, 3]] + + assert not array_equal(a, b) + assert not 
array_equal(a, array(b)) + assert not array_equal(array(a), b) + assert not array_equal(array(a), array(b)) diff --git a/pypy/module/test_lib_pypy/test_binascii.py b/pypy/module/test_lib_pypy/test_binascii.py deleted file mode 100644 --- a/pypy/module/test_lib_pypy/test_binascii.py +++ /dev/null @@ -1,8 +0,0 @@ - -""" Some more binascii.py tests -""" - -class AppTestBinAscii: - def test_incorrect_padding(self): - import binascii - raises(binascii.Error, "'x'.decode('base64')") diff --git a/pypy/module/zipimport/test/test_undocumented.py b/pypy/module/zipimport/test/test_undocumented.py --- a/pypy/module/zipimport/test/test_undocumented.py +++ b/pypy/module/zipimport/test/test_undocumented.py @@ -19,7 +19,7 @@ class AppTestZipImport: def setup_class(cls): - space = gettestobjspace(usemodules=['zipimport', 'rctime']) + space = gettestobjspace(usemodules=['zipimport', 'rctime', 'struct']) cls.space = space cls.w_created_paths = space.wrap(created_paths) diff --git a/pypy/module/zipimport/test/test_zipimport.py b/pypy/module/zipimport/test/test_zipimport.py --- a/pypy/module/zipimport/test/test_zipimport.py +++ b/pypy/module/zipimport/test/test_zipimport.py @@ -47,9 +47,9 @@ """).compile() if cls.compression == ZIP_DEFLATED: - space = gettestobjspace(usemodules=['zipimport', 'zlib', 'rctime']) + space = gettestobjspace(usemodules=['zipimport', 'zlib', 'rctime', 'struct']) else: - space = gettestobjspace(usemodules=['zipimport', 'rctime']) + space = gettestobjspace(usemodules=['zipimport', 'rctime', 'struct']) cls.space = space tmpdir = udir.ensure('zipimport_%s' % cls.__name__, dir=1) diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -207,6 +207,11 @@ is_arguments(args) return w_some_obj() + def get_and_call_function(space, w_descr, w_obj, *args_w): + args = argument.Arguments(space, list(args_w)) + w_impl = space.get(w_descr, w_obj) + return space.call_args(w_impl, 
args) + def gettypefor(self, cls): return self.gettypeobject(cls.typedef) diff --git a/pypy/objspace/flow/model.py b/pypy/objspace/flow/model.py --- a/pypy/objspace/flow/model.py +++ b/pypy/objspace/flow/model.py @@ -8,7 +8,7 @@ from pypy.tool.descriptor import roproperty from pypy.tool.sourcetools import PY_IDENTIFIER, nice_repr_for_func from pypy.tool.identity_dict import identity_dict -from pypy.rlib.rarithmetic import is_valid_int +from pypy.rlib.rarithmetic import is_valid_int, r_longlong, r_ulonglong """ @@ -546,6 +546,8 @@ for n in cases[:len(cases)-has_default]: if is_valid_int(n): continue + if isinstance(n, (r_longlong, r_ulonglong)): + continue if isinstance(n, (str, unicode)) and len(n) == 1: continue assert n != 'default', ( diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -9,6 +9,7 @@ from pypy.rlib.rfloat import ( formatd, DTSF_STR_PRECISION, isinf, isnan, copysign) from pypy.rlib import jit +from pypy.rlib.rarithmetic import intmask import math @@ -173,7 +174,7 @@ def hash__Complex(space, w_value): hashreal = _hash_float(space, w_value.realval) hashimg = _hash_float(space, w_value.imagval) - combined = hashreal + 1000003 * hashimg + combined = intmask(hashreal + 1000003 * hashimg) return space.newint(combined) def add__Complex_Complex(space, w_complex1, w_complex2): diff --git a/pypy/objspace/std/iterobject.py b/pypy/objspace/std/iterobject.py --- a/pypy/objspace/std/iterobject.py +++ b/pypy/objspace/std/iterobject.py @@ -22,7 +22,7 @@ index = self.index w_length = space.len(self.w_seq) w_len = space.sub(w_length, space.wrap(index)) - if space.is_true(space.lt(w_len,space.wrap(0))): + if space.is_true(space.lt(w_len, space.wrap(0))): w_len = space.wrap(0) return w_len @@ -30,21 +30,21 @@ """Sequence iterator implementation for general sequences.""" class W_FastListIterObject(W_AbstractSeqIterObject): - """Sequence iterator 
specialized for lists, accessing - directly their RPython-level list of wrapped objects. + """Sequence iterator specialized for lists, accessing directly their + RPython-level list of wrapped objects. """ class W_FastTupleIterObject(W_AbstractSeqIterObject): - """Sequence iterator specialized for tuples, accessing - directly their RPython-level list of wrapped objects. - """ - def __init__(w_self, w_seq, wrappeditems): + """Sequence iterator specialized for tuples, accessing directly + their RPython-level list of wrapped objects. + """ + def __init__(w_self, w_seq, wrappeditems): W_AbstractSeqIterObject.__init__(w_self, w_seq) w_self.tupleitems = wrappeditems class W_ReverseSeqIterObject(W_Object): from pypy.objspace.std.itertype import reverse_iter_typedef as typedef - + def __init__(w_self, space, w_seq, index=-1): w_self.w_seq = w_seq w_self.w_len = space.len(w_seq) @@ -61,15 +61,15 @@ def next__SeqIter(space, w_seqiter): if w_seqiter.w_seq is None: - raise OperationError(space.w_StopIteration, space.w_None) + raise OperationError(space.w_StopIteration, space.w_None) try: w_item = space.getitem(w_seqiter.w_seq, space.wrap(w_seqiter.index)) except OperationError, e: w_seqiter.w_seq = None if not e.match(space, space.w_IndexError): raise - raise OperationError(space.w_StopIteration, space.w_None) - w_seqiter.index += 1 + raise OperationError(space.w_StopIteration, space.w_None) + w_seqiter.index += 1 return w_item # XXX __length_hint__() @@ -89,7 +89,7 @@ except IndexError: w_seqiter.tupleitems = None w_seqiter.w_seq = None - raise OperationError(space.w_StopIteration, space.w_None) + raise OperationError(space.w_StopIteration, space.w_None) w_seqiter.index = index + 1 return w_item @@ -112,7 +112,7 @@ w_item = w_seq.getitem(index) except IndexError: w_seqiter.w_seq = None - raise OperationError(space.w_StopIteration, space.w_None) + raise OperationError(space.w_StopIteration, space.w_None) w_seqiter.index = index + 1 return w_item @@ -126,15 +126,15 @@ def 
next__ReverseSeqIter(space, w_seqiter): if w_seqiter.w_seq is None or w_seqiter.index < 0: - raise OperationError(space.w_StopIteration, space.w_None) + raise OperationError(space.w_StopIteration, space.w_None) try: w_item = space.getitem(w_seqiter.w_seq, space.wrap(w_seqiter.index)) - w_seqiter.index -= 1 + w_seqiter.index -= 1 except OperationError, e: w_seqiter.w_seq = None if not e.match(space, space.w_IndexError): raise - raise OperationError(space.w_StopIteration, space.w_None) + raise OperationError(space.w_StopIteration, space.w_None) return w_item # XXX __length_hint__() diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py --- a/pypy/objspace/std/test/test_obj.py +++ b/pypy/objspace/std/test/test_obj.py @@ -265,4 +265,7 @@ space = objspace.StdObjSpace() w_a = space.wrap("a") space.type = None + # if it crashes, it means that space._type_isinstance didn't go through + # the fast path, and tries to call type() (which is set to None just + # above) space.isinstance_w(w_a, space.w_str) # does not crash diff --git a/pypy/objspace/std/test/test_smallintobject.py b/pypy/objspace/std/test/test_smallintobject.py --- a/pypy/objspace/std/test/test_smallintobject.py +++ b/pypy/objspace/std/test/test_smallintobject.py @@ -64,7 +64,7 @@ f1 = wrapint(self.space, x) f2 = wrapint(self.space, y) result = self.space.unwrap(self.space.add(f1, f2)) - assert result == x+y and type(result) == type(x+y) + assert result == x+y def test_sub(self): for x in [1, 100, sys.maxint // 2 - 50, @@ -74,15 +74,16 @@ f1 = wrapint(self.space, x) f2 = wrapint(self.space, y) result = self.space.unwrap(self.space.sub(f1, f2)) - assert result == x-y and type(result) == type(x-y) - + assert result == x-y + def test_mul(self): for x in [0, 1, 100, sys.maxint // 2 - 50, sys.maxint - 1000]: for y in [0, 1, 100, sys.maxint // 2 - 50, sys.maxint - 1000]: f1 = wrapint(self.space, x) f2 = wrapint(self.space, y) result = self.space.unwrap(self.space.mul(f1, f2)) - assert 
result == x*y and type(result) == type(x*y) + assert result == x*y + def test_div(self): for i in range(10): diff --git a/pypy/objspace/std/test/test_userobject.py b/pypy/objspace/std/test/test_userobject.py --- a/pypy/objspace/std/test/test_userobject.py +++ b/pypy/objspace/std/test/test_userobject.py @@ -10,10 +10,10 @@ from pypy import conftest cls.space = conftest.gettestobjspace(**cls.OPTIONS) cls.w_runappdirect = cls.space.wrap(bool(conftest.option.runappdirect)) - - def w_rand(self): - import random - return random.randrange(0, 5) + def rand(space): + import random + return space.wrap(random.randrange(0, 5)) + cls.w_rand = cls.space.wrap(gateway.interp2app(rand)) def test_emptyclass(self): class empty(object): pass diff --git a/pypy/rlib/bitmanipulation.py b/pypy/rlib/bitmanipulation.py --- a/pypy/rlib/bitmanipulation.py +++ b/pypy/rlib/bitmanipulation.py @@ -1,5 +1,6 @@ from pypy.rlib import unroll + class BitSplitter(dict): def __getitem__(self, lengths): if isinstance(lengths, int): diff --git a/pypy/rlib/clibffi.py b/pypy/rlib/clibffi.py --- a/pypy/rlib/clibffi.py +++ b/pypy/rlib/clibffi.py @@ -233,6 +233,7 @@ (rffi.LONGLONG, _signed_type_for(rffi.LONGLONG)), (lltype.UniChar, _unsigned_type_for(lltype.UniChar)), (lltype.Bool, _unsigned_type_for(lltype.Bool)), + (lltype.Char, _signed_type_for(lltype.Char)), ] __float_type_map = [ diff --git a/pypy/rlib/libffi.py b/pypy/rlib/libffi.py --- a/pypy/rlib/libffi.py +++ b/pypy/rlib/libffi.py @@ -35,6 +35,7 @@ cls.ulong = clibffi.cast_type_to_ffitype(rffi.ULONG) cls.slonglong = clibffi.cast_type_to_ffitype(rffi.LONGLONG) cls.ulonglong = clibffi.cast_type_to_ffitype(rffi.ULONGLONG) + cls.signed = clibffi.cast_type_to_ffitype(rffi.SIGNED) cls.wchar_t = clibffi.cast_type_to_ffitype(lltype.UniChar) del cls._import @@ -79,16 +80,20 @@ types._import() +# this was '_fits_into_long', which is not adequate, because long is +# not necessary the type where we compute with. Actually meant is +# the type 'Signed'. 
+ @specialize.arg(0) -def _fits_into_long(TYPE): +def _fits_into_signed(TYPE): if isinstance(TYPE, lltype.Ptr): - return True # pointers always fits into longs + return True # pointers always fits into Signeds if not isinstance(TYPE, lltype.Primitive): return False if TYPE is lltype.Void or TYPE is rffi.FLOAT or TYPE is rffi.DOUBLE: return False sz = rffi.sizeof(TYPE) - return sz <= rffi.sizeof(rffi.LONG) + return sz <= rffi.sizeof(rffi.SIGNED) # ====================================================================== @@ -115,9 +120,9 @@ def arg(self, val): TYPE = lltype.typeOf(val) _check_type(TYPE) - if _fits_into_long(TYPE): + if _fits_into_signed(TYPE): cls = IntArg - val = rffi.cast(rffi.LONG, val) + val = rffi.cast(rffi.SIGNED, val) elif TYPE is rffi.DOUBLE: cls = FloatArg elif TYPE is rffi.LONGLONG or TYPE is rffi.ULONGLONG: @@ -250,7 +255,7 @@ if is_struct: assert types.is_struct(self.restype) res = self._do_call_raw(self.funcsym, ll_args) - elif _fits_into_long(RESULT): + elif _fits_into_signed(RESULT): assert not types.is_struct(self.restype) res = self._do_call_int(self.funcsym, ll_args) elif RESULT is rffi.DOUBLE: @@ -309,7 +314,7 @@ @jit.oopspec('libffi_call_int(self, funcsym, ll_args)') def _do_call_int(self, funcsym, ll_args): - return self._do_call(funcsym, ll_args, rffi.LONG) + return self._do_call(funcsym, ll_args, rffi.SIGNED) @jit.oopspec('libffi_call_float(self, funcsym, ll_args)') def _do_call_float(self, funcsym, ll_args): @@ -322,7 +327,7 @@ @jit.dont_look_inside def _do_call_raw(self, funcsym, ll_args): # same as _do_call_int, but marked as jit.dont_look_inside - return self._do_call(funcsym, ll_args, rffi.LONG) + return self._do_call(funcsym, ll_args, rffi.SIGNED) @jit.oopspec('libffi_call_longlong(self, funcsym, ll_args)') def _do_call_longlong(self, funcsym, ll_args): @@ -360,7 +365,7 @@ TP = lltype.Ptr(rffi.CArray(RESULT)) buf = rffi.cast(TP, ll_result) if types.is_struct(self.restype): - assert RESULT == rffi.LONG + assert RESULT == 
rffi.SIGNED # for structs, we directly return the buffer and transfer the # ownership res = rffi.cast(RESULT, buf) @@ -424,6 +429,11 @@ return rffi.cast(rffi.CArrayPtr(TYPE), addr)[0] assert False +def array_getitem_T(TYPE, width, addr, index, offset): + addr = rffi.ptradd(addr, index * width) + addr = rffi.ptradd(addr, offset) + return rffi.cast(rffi.CArrayPtr(TYPE), addr)[0] + @specialize.call_location() @jit.oopspec("libffi_array_setitem(ffitype, width, addr, index, offset, value)") def array_setitem(ffitype, width, addr, index, offset, value): @@ -434,3 +444,8 @@ rffi.cast(rffi.CArrayPtr(TYPE), addr)[0] = value return assert False + +def array_setitem_T(TYPE, width, addr, index, offset, value): + addr = rffi.ptradd(addr, index * width) + addr = rffi.ptradd(addr, offset) + rffi.cast(rffi.CArrayPtr(TYPE), addr)[0] = value diff --git a/pypy/rlib/longlong2float.py b/pypy/rlib/longlong2float.py --- a/pypy/rlib/longlong2float.py +++ b/pypy/rlib/longlong2float.py @@ -5,7 +5,12 @@ long long to a float and back to a long long. There are corner cases in which it does not work. 
""" + +from pypy.annotation import model as annmodel +from pypy.rlib.rarithmetic import r_int64 from pypy.rpython.lltypesystem import lltype, rffi +from pypy.rpython.extregistry import ExtRegistryEntry +from pypy.translator.tool.cbuild import ExternalCompilationInfo # -------- implement longlong2float and float2longlong -------- @@ -16,38 +21,33 @@ # these definitions are used only in tests, when not translated def longlong2float_emulator(llval): - d_array = lltype.malloc(DOUBLE_ARRAY_PTR.TO, 1, flavor='raw') - ll_array = rffi.cast(LONGLONG_ARRAY_PTR, d_array) - ll_array[0] = llval - floatval = d_array[0] - lltype.free(d_array, flavor='raw') - return floatval + with lltype.scoped_alloc(DOUBLE_ARRAY_PTR.TO, 1) as d_array: + ll_array = rffi.cast(LONGLONG_ARRAY_PTR, d_array) + ll_array[0] = llval + floatval = d_array[0] + return floatval -def float2longlong_emulator(floatval): - d_array = lltype.malloc(DOUBLE_ARRAY_PTR.TO, 1, flavor='raw') - ll_array = rffi.cast(LONGLONG_ARRAY_PTR, d_array) - d_array[0] = floatval - llval = ll_array[0] - lltype.free(d_array, flavor='raw') - return llval +def float2longlong(floatval): + with lltype.scoped_alloc(DOUBLE_ARRAY_PTR.TO, 1) as d_array: + ll_array = rffi.cast(LONGLONG_ARRAY_PTR, d_array) + d_array[0] = floatval + llval = ll_array[0] + return llval def uint2singlefloat_emulator(ival): - f_array = lltype.malloc(FLOAT_ARRAY_PTR.TO, 1, flavor='raw') - i_array = rffi.cast(UINT_ARRAY_PTR, f_array) - i_array[0] = ival - singlefloatval = f_array[0] - lltype.free(f_array, flavor='raw') - return singlefloatval + with lltype.scoped_alloc(FLOAT_ARRAY_PTR.TO, 1) as f_array: + i_array = rffi.cast(UINT_ARRAY_PTR, f_array) + i_array[0] = ival + singlefloatval = f_array[0] + return singlefloatval def singlefloat2uint_emulator(singlefloatval): - f_array = lltype.malloc(FLOAT_ARRAY_PTR.TO, 1, flavor='raw') - i_array = rffi.cast(UINT_ARRAY_PTR, f_array) - f_array[0] = singlefloatval - ival = i_array[0] - lltype.free(f_array, flavor='raw') - 
return ival + with lltype.scoped_alloc(FLOAT_ARRAY_PTR.TO, 1) as f_array: + i_array = rffi.cast(UINT_ARRAY_PTR, f_array) + f_array[0] = singlefloatval + ival = i_array[0] + return ival -from pypy.translator.tool.cbuild import ExternalCompilationInfo eci = ExternalCompilationInfo(includes=['string.h', 'assert.h'], post_include_bits=[""" static double pypy__longlong2float(long long x) { @@ -56,12 +56,6 @@ memcpy(&dd, &x, 8); return dd; } -static long long pypy__float2longlong(double x) { - long long ll; - assert(sizeof(double) == 8 && sizeof(long long) == 8); - memcpy(&ll, &x, 8); - return ll; -} static float pypy__uint2singlefloat(unsigned int x) { float ff; assert(sizeof(float) == 4 && sizeof(unsigned int) == 4); @@ -82,12 +76,6 @@ _nowrapper=True, elidable_function=True, sandboxsafe=True, oo_primitive="pypy__longlong2float") -float2longlong = rffi.llexternal( - "pypy__float2longlong", [rffi.DOUBLE], rffi.LONGLONG, - _callable=float2longlong_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True, sandboxsafe=True, - oo_primitive="pypy__float2longlong") - uint2singlefloat = rffi.llexternal( "pypy__uint2singlefloat", [rffi.UINT], rffi.FLOAT, _callable=uint2singlefloat_emulator, compilation_info=eci, @@ -99,3 +87,15 @@ _callable=singlefloat2uint_emulator, compilation_info=eci, _nowrapper=True, elidable_function=True, sandboxsafe=True, oo_primitive="pypy__singlefloat2uint") + + +class Float2LongLongEntry(ExtRegistryEntry): + _about_ = float2longlong + + def compute_result_annotation(self, s_float): + assert annmodel.SomeFloat().contains(s_float) + return annmodel.SomeInteger(knowntype=r_int64) + + def specialize_call(self, hop): + [v_float] = hop.inputargs(lltype.Float) + return hop.genop("convert_float_bytes_to_longlong", [v_float], resulttype=hop.r_result) diff --git a/pypy/rlib/rarithmetic.py b/pypy/rlib/rarithmetic.py --- a/pypy/rlib/rarithmetic.py +++ b/pypy/rlib/rarithmetic.py @@ -569,3 +569,37 @@ if not objectmodel.we_are_translated(): assert 
n <= p return llop.int_between(lltype.Bool, n, m, p) + + at objectmodel.specialize.ll() +def byteswap(arg): + """ Convert little->big endian and the opposite + """ + from pypy.rpython.lltypesystem import lltype, rffi + + T = lltype.typeOf(arg) + # XXX we cannot do arithmetics on small ints + if isinstance(arg, base_int): + arg = widen(arg) + if rffi.sizeof(T) == 1: + res = arg + elif rffi.sizeof(T) == 2: + a, b = arg & 0xFF, arg & 0xFF00 + res = (a << 8) | (b >> 8) + elif rffi.sizeof(T) == 4: + FF = r_uint(0xFF) + arg = r_uint(arg) + a, b, c, d = (arg & FF, arg & (FF << 8), arg & (FF << 16), + arg & (FF << 24)) + res = (a << 24) | (b << 8) | (c >> 8) | (d >> 24) + elif rffi.sizeof(T) == 8: + FF = r_ulonglong(0xFF) + arg = r_ulonglong(arg) + a, b, c, d = (arg & FF, arg & (FF << 8), arg & (FF << 16), + arg & (FF << 24)) + e, f, g, h = (arg & (FF << 32), arg & (FF << 40), arg & (FF << 48), + arg & (FF << 56)) + res = ((a << 56) | (b << 40) | (c << 24) | (d << 8) | (e >> 8) | + (f >> 24) | (g >> 40) | (h >> 56)) + else: + assert False # unreachable code + return rffi.cast(T, res) diff --git a/pypy/rlib/rfloat.py b/pypy/rlib/rfloat.py --- a/pypy/rlib/rfloat.py +++ b/pypy/rlib/rfloat.py @@ -1,11 +1,13 @@ """Float constants""" import math + +from pypy.annotation.model import SomeString +from pypy.rlib import objectmodel +from pypy.rpython.extfunc import register_external from pypy.rpython.tool import rffi_platform from pypy.translator.tool.cbuild import ExternalCompilationInfo -from pypy.rlib import objectmodel -from pypy.rpython.extfunc import register_external -from pypy.annotation.model import SomeString + USE_SHORT_FLOAT_REPR = True # XXX make it a translation option? 
@@ -74,7 +76,7 @@ while i < len(s) and s[i] in '0123456789': after_point += s[i] i += 1 - + if i == len(s): return sign, before_point, after_point, exponent @@ -91,7 +93,7 @@ if i == len(s): raise ValueError - + while i < len(s) and s[i] in '0123456789': exponent += s[i] i += 1 diff --git a/pypy/rlib/rmmap.py b/pypy/rlib/rmmap.py --- a/pypy/rlib/rmmap.py +++ b/pypy/rlib/rmmap.py @@ -226,6 +226,7 @@ # XXX should be propagate the real type, allowing # for 2*sys.maxint? high = high_ref[0] + high = rffi.cast(lltype.Signed, high) # low might just happen to have the value INVALID_FILE_SIZE # so we need to check the last error also INVALID_FILE_SIZE = -1 @@ -548,7 +549,7 @@ FILE_BEGIN = 0 high_ref = lltype.malloc(PLONG.TO, 1, flavor='raw') try: - high_ref[0] = newsize_high + high_ref[0] = rffi.cast(LONG, newsize_high) SetFilePointer(self.file_handle, newsize_low, high_ref, FILE_BEGIN) finally: @@ -710,7 +711,9 @@ free = c_munmap_safe elif _MS_WINDOWS: - def mmap(fileno, length, tagname="", access=_ACCESS_DEFAULT, offset=0): + def mmap(fileno, length, flags=0, tagname="", access=_ACCESS_DEFAULT, offset=0): + # XXX flags is or-ed into access by now. + # check size boundaries _check_map_size(length) map_size = length @@ -792,6 +795,7 @@ offset_hi = 0 offset_lo = offset + flProtect |= flags m.map_handle = CreateFileMapping(m.file_handle, NULL, flProtect, size_hi, size_lo, m.tagname) @@ -809,6 +813,11 @@ m.map_handle = INVALID_HANDLE raise winerror + class Hint: + pos = -0x4fff0000 # for reproducible results + hint = Hint() + # XXX this has no effect on windows + def alloc(map_size): """Allocate memory. This is intended to be used by the JIT, so the memory has the executable bit set. diff --git a/pypy/rlib/rstruct/nativefmttable.py b/pypy/rlib/rstruct/nativefmttable.py --- a/pypy/rlib/rstruct/nativefmttable.py +++ b/pypy/rlib/rstruct/nativefmttable.py @@ -3,14 +3,17 @@ The table 'native_fmttable' is also used by pypy.module.array.interp_array. 
""" import struct -from pypy.rlib import jit + +from pypy.rlib import jit, longlong2float +from pypy.rlib.objectmodel import specialize +from pypy.rlib.rarithmetic import r_singlefloat, widen from pypy.rlib.rstruct import standardfmttable as std from pypy.rlib.rstruct.error import StructError +from pypy.rlib.unroll import unrolling_iterable +from pypy.rpython.lltypesystem import lltype, rffi from pypy.rpython.tool import rffi_platform -from pypy.rpython.lltypesystem import lltype, rffi -from pypy.rlib.rarithmetic import r_singlefloat from pypy.translator.tool.cbuild import ExternalCompilationInfo -from pypy.rlib.objectmodel import specialize + native_is_bigendian = struct.pack("=i", 1) == struct.pack(">i", 1) @@ -23,18 +26,24 @@ # ____________________________________________________________ + double_buf = lltype.malloc(rffi.DOUBLEP.TO, 1, flavor='raw', immortal=True) float_buf = lltype.malloc(rffi.FLOATP.TO, 1, flavor='raw', immortal=True) - at jit.dont_look_inside -def double_to_ccharp(doubleval): - double_buf[0] = doubleval - return rffi.cast(rffi.CCHARP, double_buf) +range_8_unroll = unrolling_iterable(list(reversed(range(8)))) +range_4_unroll = unrolling_iterable(list(reversed(range(4)))) def pack_double(fmtiter): doubleval = fmtiter.accept_float_arg() - p = double_to_ccharp(doubleval) - fmtiter.result.append_charpsize(p, rffi.sizeof(rffi.DOUBLE)) + value = longlong2float.float2longlong(doubleval) + if fmtiter.bigendian: + for i in range_8_unroll: + x = (value >> (8*i)) & 0xff + fmtiter.result.append(chr(x)) + else: + for i in range_8_unroll: + fmtiter.result.append(chr(value & 0xff)) + value >>= 8 @specialize.argtype(0) def unpack_double(fmtiter): @@ -45,16 +54,19 @@ doubleval = double_buf[0] fmtiter.appendobj(doubleval) - at jit.dont_look_inside -def float_to_ccharp(floatval): - float_buf[0] = floatval - return rffi.cast(rffi.CCHARP, float_buf) - def pack_float(fmtiter): doubleval = fmtiter.accept_float_arg() floatval = r_singlefloat(doubleval) - p = 
float_to_ccharp(floatval) - fmtiter.result.append_charpsize(p, rffi.sizeof(rffi.FLOAT)) + value = longlong2float.singlefloat2uint(floatval) + value = widen(value) + if fmtiter.bigendian: + for i in range_4_unroll: + x = (value >> (8*i)) & 0xff + fmtiter.result.append(chr(x)) + else: + for i in range_4_unroll: + fmtiter.result.append(chr(value & 0xff)) + value >>= 8 @specialize.argtype(0) def unpack_float(fmtiter): diff --git a/pypy/rlib/rstruct/runpack.py b/pypy/rlib/rstruct/runpack.py --- a/pypy/rlib/rstruct/runpack.py +++ b/pypy/rlib/rstruct/runpack.py @@ -4,11 +4,10 @@ """ import py -from struct import pack, unpack +from struct import unpack from pypy.rlib.rstruct.formatiterator import FormatIterator from pypy.rlib.rstruct.error import StructError from pypy.rlib.rstruct.nativefmttable import native_is_bigendian -from pypy.rpython.extregistry import ExtRegistryEntry class MasterReader(object): def __init__(self, s): diff --git a/pypy/rlib/rstruct/standardfmttable.py b/pypy/rlib/rstruct/standardfmttable.py --- a/pypy/rlib/rstruct/standardfmttable.py +++ b/pypy/rlib/rstruct/standardfmttable.py @@ -6,11 +6,12 @@ # values when packing. import struct + +from pypy.rlib.objectmodel import specialize +from pypy.rlib.rarithmetic import r_uint, r_longlong, r_ulonglong +from pypy.rlib.rstruct import ieee from pypy.rlib.rstruct.error import StructError, StructOverflowError -from pypy.rlib.rstruct import ieee from pypy.rlib.unroll import unrolling_iterable -from pypy.rlib.rarithmetic import r_uint, r_longlong, r_ulonglong -from pypy.rlib.objectmodel import specialize # In the CPython struct module, pack() unconsistently accepts inputs # that are out-of-range or floats instead of ints. 
Should we emulate diff --git a/pypy/rlib/rzipfile.py b/pypy/rlib/rzipfile.py --- a/pypy/rlib/rzipfile.py +++ b/pypy/rlib/rzipfile.py @@ -12,8 +12,7 @@ rzlib = None # XXX hack to get crc32 to work -from pypy.tool.lib_pypy import import_from_lib_pypy -crc_32_tab = import_from_lib_pypy('binascii').crc_32_tab +from pypy.module.binascii.interp_crc32 import crc_32_tab rcrc_32_tab = [r_uint(i) for i in crc_32_tab] diff --git a/pypy/rlib/test/test_libffi.py b/pypy/rlib/test/test_libffi.py --- a/pypy/rlib/test/test_libffi.py +++ b/pypy/rlib/test/test_libffi.py @@ -34,8 +34,8 @@ # .arg() only supports integers and floats chain = ArgChain() x = lltype.malloc(lltype.GcStruct('xxx')) - y = lltype.malloc(lltype.GcArray(rffi.LONG), 3) - z = lltype.malloc(lltype.Array(rffi.LONG), 4, flavor='raw') + y = lltype.malloc(lltype.GcArray(rffi.SIGNED), 3) + z = lltype.malloc(lltype.Array(rffi.SIGNED), 4, flavor='raw') py.test.raises(TypeError, "chain.arg(x)") py.test.raises(TypeError, "chain.arg(y)") py.test.raises(TypeError, "chain.arg(z)") @@ -100,6 +100,7 @@ def setup_class(cls): from pypy.tool.udir import udir from pypy.translator.tool.cbuild import ExternalCompilationInfo + from pypy.translator.tool.cbuild import STANDARD_DEFINES from pypy.translator.platform import platform BaseFfiTest.setup_class() @@ -120,7 +121,7 @@ for match in re.finditer(" ([a-z_]+)\(", meth.__doc__): exports.append(match.group(1)) # - c_file.write(py.code.Source('\n'.join(snippets))) + c_file.write(STANDARD_DEFINES + str(py.code.Source('\n'.join(snippets)))) eci = ExternalCompilationInfo(export_symbols=exports) cls.libfoo_name = str(platform.compile([c_file], eci, 'x', standalone=False)) @@ -157,13 +158,13 @@ def test_very_simple(self): """ - int diff_xy(int x, long y) + int diff_xy(int x, Signed y) { return x - y; } """ libfoo = self.get_libfoo() - func = (libfoo, 'diff_xy', [types.sint, types.slong], types.sint) + func = (libfoo, 'diff_xy', [types.sint, types.signed], types.sint) res = self.call(func, [50, 
8], lltype.Signed) assert res == 42 @@ -206,7 +207,7 @@ """ libfoo = self.get_libfoo() func = (libfoo, 'many_args', [types.uchar, types.sint], types.sint) - res = self.call(func, [chr(20), 22], rffi.LONG) + res = self.call(func, [chr(20), 22], rffi.SIGNED) assert res == 42 def test_char_args(self): @@ -235,9 +236,9 @@ def test_pointer_as_argument(self): """#include - long inc(long* x) + Signed inc(Signed* x) { - long oldval; + Signed oldval; if (x == NULL) return -1; oldval = *x; @@ -246,15 +247,14 @@ } """ libfoo = self.get_libfoo() - func = (libfoo, 'inc', [types.pointer], types.slong) - LONGP = lltype.Ptr(rffi.CArray(rffi.LONG)) - null = lltype.nullptr(LONGP.TO) - res = self.call(func, [null], rffi.LONG) + func = (libfoo, 'inc', [types.pointer], types.signed) + null = lltype.nullptr(rffi.SIGNEDP.TO) + res = self.call(func, [null], rffi.SIGNED) assert res == -1 # - ptr_result = lltype.malloc(LONGP.TO, 1, flavor='raw') + ptr_result = lltype.malloc(rffi.SIGNEDP.TO, 1, flavor='raw') ptr_result[0] = 41 - res = self.call(func, [ptr_result], rffi.LONG) + res = self.call(func, [ptr_result], rffi.SIGNED) if self.__class__ is TestLibffiCall: # the function was called only once assert res == 41 @@ -274,21 +274,20 @@ def test_return_pointer(self): """ struct pair { - long a; - long b; + Signed a; + Signed b; }; struct pair my_static_pair = {10, 20}; - long* get_pointer_to_b() + Signed* get_pointer_to_b() { return &my_static_pair.b; } """ libfoo = self.get_libfoo() func = (libfoo, 'get_pointer_to_b', [], types.pointer) - LONGP = lltype.Ptr(rffi.CArray(rffi.LONG)) - res = self.call(func, [], LONGP) + res = self.call(func, [], rffi.SIGNEDP) assert res[0] == 20 def test_void_result(self): @@ -301,12 +300,12 @@ set_dummy = (libfoo, 'set_dummy', [types.sint], types.void) get_dummy = (libfoo, 'get_dummy', [], types.sint) # - initval = self.call(get_dummy, [], rffi.LONG) + initval = self.call(get_dummy, [], rffi.SIGNED) # res = self.call(set_dummy, [initval+1], lltype.Void) assert 
res is None # - res = self.call(get_dummy, [], rffi.LONG) + res = self.call(get_dummy, [], rffi.SIGNED) assert res == initval+1 def test_single_float_args(self): @@ -386,32 +385,32 @@ else: assert False, 'Did not raise' - my_raises("self.call(func, [38], rffi.LONG)") # one less - my_raises("self.call(func, [38, 12.3, 42], rffi.LONG)") # one more + my_raises("self.call(func, [38], rffi.SIGNED)") # one less + my_raises("self.call(func, [38, 12.3, 42], rffi.SIGNED)") # one more def test_byval_argument(self): """ struct Point { - long x; - long y; + Signed x; + Signed y; }; - long sum_point(struct Point p) { + Signed sum_point(struct Point p) { return p.x + p.y; } """ libfoo = CDLL(self.libfoo_name) - ffi_point_struct = make_struct_ffitype_e(0, 0, [types.slong, types.slong]) + ffi_point_struct = make_struct_ffitype_e(0, 0, [types.signed, types.signed]) ffi_point = ffi_point_struct.ffistruct - sum_point = (libfoo, 'sum_point', [ffi_point], types.slong) + sum_point = (libfoo, 'sum_point', [ffi_point], types.signed) # - ARRAY = rffi.CArray(rffi.LONG) + ARRAY = rffi.CArray(rffi.SIGNED) buf = lltype.malloc(ARRAY, 2, flavor='raw') buf[0] = 30 buf[1] = 12 adr = rffi.cast(rffi.VOIDP, buf) - res = self.call(sum_point, [('arg_raw', adr)], rffi.LONG, + res = self.call(sum_point, [('arg_raw', adr)], rffi.SIGNED, jitif=["byval"]) assert res == 42 # check that we still have the ownership on the buffer @@ -422,7 +421,7 @@ def test_byval_result(self): """ - struct Point make_point(long x, long y) { + struct Point make_point(Signed x, Signed y) { struct Point p; p.x = x; p.y = y; @@ -430,13 +429,13 @@ } """ libfoo = CDLL(self.libfoo_name) - ffi_point_struct = make_struct_ffitype_e(0, 0, [types.slong, types.slong]) + ffi_point_struct = make_struct_ffitype_e(0, 0, [types.signed, types.signed]) ffi_point = ffi_point_struct.ffistruct libfoo = CDLL(self.libfoo_name) - make_point = (libfoo, 'make_point', [types.slong, types.slong], ffi_point) + make_point = (libfoo, 'make_point', 
[types.signed, types.signed], ffi_point) # - PTR = lltype.Ptr(rffi.CArray(rffi.LONG)) + PTR = lltype.Ptr(rffi.CArray(rffi.SIGNED)) p = self.call(make_point, [12, 34], PTR, is_struct=True, jitif=["byval"]) assert p[0] == 12 diff --git a/pypy/rlib/test/test_rarithmetic.py b/pypy/rlib/test/test_rarithmetic.py --- a/pypy/rlib/test/test_rarithmetic.py +++ b/pypy/rlib/test/test_rarithmetic.py @@ -383,3 +383,9 @@ assert not int_between(1, 2, 2) assert not int_between(1, 1, 1) +def test_byteswap(): + from pypy.rpython.lltypesystem import rffi + + assert byteswap(rffi.cast(rffi.USHORT, 0x0102)) == 0x0201 + assert byteswap(rffi.cast(rffi.INT, 0x01020304)) == 0x04030201 + assert byteswap(rffi.cast(rffi.ULONGLONG, 0x0102030405060708L)) == 0x0807060504030201L diff --git a/pypy/rpython/llinterp.py b/pypy/rpython/llinterp.py --- a/pypy/rpython/llinterp.py +++ b/pypy/rpython/llinterp.py @@ -770,6 +770,10 @@ checkadr(adr) return llmemory.cast_adr_to_int(adr, mode) + def op_convert_float_bytes_to_longlong(self, f): + from pypy.rlib import longlong2float + return longlong2float.float2longlong(f) + def op_weakref_create(self, v_obj): def objgetter(): # special support for gcwrapper.py return self.getval(v_obj) diff --git a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py --- a/pypy/rpython/lltypesystem/lloperation.py +++ b/pypy/rpython/lltypesystem/lloperation.py @@ -349,6 +349,7 @@ 'cast_float_to_ulonglong':LLOp(canfold=True), 'truncate_longlong_to_int':LLOp(canfold=True), 'force_cast': LLOp(sideeffects=False), # only for rffi.cast() + 'convert_float_bytes_to_longlong': LLOp(canfold=True), # __________ pointer operations __________ diff --git a/pypy/rpython/lltypesystem/rffi.py b/pypy/rpython/lltypesystem/rffi.py --- a/pypy/rpython/lltypesystem/rffi.py +++ b/pypy/rpython/lltypesystem/rffi.py @@ -18,6 +18,7 @@ from pypy.rlib.rstring import StringBuilder, UnicodeBuilder, assert_str0 from pypy.rlib import jit from pypy.rpython.lltypesystem import 
llmemory +from pypy.rlib.rarithmetic import maxint, LONG_BIT import os, sys class CConstant(Symbolic): @@ -649,8 +650,9 @@ # float * FLOATP = lltype.Ptr(lltype.Array(FLOAT, hints={'nolength': True})) -# SIGNED * -SIGNEDP = lltype.Ptr(lltype.Array(lltype.Signed, hints={'nolength': True})) +# Signed, Signed * +SIGNED = lltype.Signed +SIGNEDP = lltype.Ptr(lltype.Array(SIGNED, hints={'nolength': True})) # various type mapping @@ -900,7 +902,7 @@ size = llmemory.sizeof(tp) # a symbolic result in this case return size if isinstance(tp, lltype.Ptr) or tp is llmemory.Address: - tp = ULONG # XXX! + tp = lltype.Signed if tp is lltype.Char or tp is lltype.Bool: return 1 if tp is lltype.UniChar: @@ -911,7 +913,7 @@ return 4 assert isinstance(tp, lltype.Number) if tp is lltype.Signed: - return ULONG._type.BITS/8 + return LONG_BIT/8 return tp._type.BITS/8 sizeof._annspecialcase_ = 'specialize:memo' @@ -931,11 +933,11 @@ offsetof._annspecialcase_ = 'specialize:memo' # check that we have a sane configuration -assert sys.maxint == (1 << (8 * sizeof(lltype.Signed) - 1)) - 1, ( +assert maxint == (1 << (8 * sizeof(lltype.Signed) - 1)) - 1, ( "Mixed configuration of the word size of the machine:\n\t" "the underlying Python was compiled with maxint=%d,\n\t" "but the C compiler says that 'long' is %d bytes" % ( - sys.maxint, sizeof(lltype.Signed))) + maxint, sizeof(lltype.Signed))) # ********************** some helpers ******************* diff --git a/pypy/rpython/lltypesystem/test/test_rffi.py b/pypy/rpython/lltypesystem/test/test_rffi.py --- a/pypy/rpython/lltypesystem/test/test_rffi.py +++ b/pypy/rpython/lltypesystem/test/test_rffi.py @@ -180,7 +180,7 @@ struct.c_three = cast(INT, 5) result = z(struct) lltype.free(struct, flavor='raw') - return cast(LONG, result) + return cast(SIGNED, result) fn = self.compile(f, [], backendopt=False) assert fn() == 8 @@ -377,7 +377,7 @@ h_source = py.code.Source(""" #ifndef _CALLBACK_H #define _CALLBACK_H - extern long eating_callback(long arg, 
long(*call)(long)); + extern Signed eating_callback(Signed arg, Signed(*call)(Signed)); #endif /* _CALLBACK_H */ """) @@ -385,9 +385,9 @@ h_include.write(h_source) c_source = py.code.Source(""" - long eating_callback(long arg, long(*call)(long)) + Signed eating_callback(Signed arg, Signed(*call)(Signed)) { - long res = call(arg); + Signed res = call(arg); if (res == -1) return -1; return res; @@ -399,8 +399,8 @@ separate_module_sources=[c_source], export_symbols=['eating_callback']) - args = [LONG, CCallback([LONG], LONG)] - eating_callback = llexternal('eating_callback', args, LONG, + args = [SIGNED, CCallback([SIGNED], SIGNED)] + eating_callback = llexternal('eating_callback', args, SIGNED, compilation_info=eci) return eating_callback @@ -554,13 +554,13 @@ p = make(X, c_one=cast(INT, 3)) res = p.c_one lltype.free(p, flavor='raw') - return cast(LONG, res) + return cast(SIGNED, res) assert f() == 3 assert interpret(f, []) == 3 def test_structcopy(self): - X2 = lltype.Struct('X2', ('x', LONG)) - X1 = lltype.Struct('X1', ('a', LONG), ('x2', X2), ('p', lltype.Ptr(X2))) + X2 = lltype.Struct('X2', ('x', SIGNED)) + X1 = lltype.Struct('X1', ('a', SIGNED), ('x2', X2), ('p', lltype.Ptr(X2))) def f(): p2 = make(X2, x=123) p1 = make(X1, a=5, p=p2) @@ -620,7 +620,7 @@ eci = ExternalCompilationInfo(includes=['string.h']) strlen = llexternal('strlen', [CCHARP], SIZE_T, compilation_info=eci) def f(): - return cast(LONG, strlen("Xxx")) + return cast(SIGNED, strlen("Xxx")) assert interpret(f, [], backendopt=True) == 3 def test_stringpolicy3(self): @@ -643,7 +643,7 @@ ll_str = str2charp("Xxx") res2 = strlen(ll_str) lltype.free(ll_str, flavor='raw') - return cast(LONG, res1*10 + res2) + return cast(SIGNED, res1*10 + res2) assert interpret(f, [], backendopt=True) == 43 diff --git a/pypy/rpython/module/ll_os.py b/pypy/rpython/module/ll_os.py --- a/pypy/rpython/module/ll_os.py +++ b/pypy/rpython/module/ll_os.py @@ -237,7 +237,7 @@ def extdef_for_os_function_returning_int(self, name, 
**kwds): c_func = self.llexternal(name, [], rffi.INT, **kwds) def c_func_llimpl(): - res = rffi.cast(rffi.LONG, c_func()) + res = rffi.cast(rffi.SIGNED, c_func()) if res == -1: raise OSError(rposix.get_errno(), "%s failed" % name) return res @@ -249,7 +249,7 @@ def extdef_for_os_function_accepting_int(self, name, **kwds): c_func = self.llexternal(name, [rffi.INT], rffi.INT, **kwds) def c_func_llimpl(arg): - res = rffi.cast(rffi.LONG, c_func(arg)) + res = rffi.cast(rffi.SIGNED, c_func(arg)) if res == -1: raise OSError(rposix.get_errno(), "%s failed" % name) @@ -261,7 +261,7 @@ def extdef_for_os_function_accepting_2int(self, name, **kwds): c_func = self.llexternal(name, [rffi.INT, rffi.INT], rffi.INT, **kwds) def c_func_llimpl(arg, arg2): - res = rffi.cast(rffi.LONG, c_func(arg, arg2)) + res = rffi.cast(rffi.SIGNED, c_func(arg, arg2)) if res == -1: raise OSError(rposix.get_errno(), "%s failed" % name) @@ -273,7 +273,7 @@ def extdef_for_os_function_accepting_0int(self, name, **kwds): c_func = self.llexternal(name, [], rffi.INT, **kwds) def c_func_llimpl(): - res = rffi.cast(rffi.LONG, c_func()) + res = rffi.cast(rffi.SIGNED, c_func()) if res == -1: raise OSError(rposix.get_errno(), "%s failed" % name) @@ -285,7 +285,7 @@ def extdef_for_os_function_int_to_int(self, name, **kwds): c_func = self.llexternal(name, [rffi.INT], rffi.INT, **kwds) def c_func_llimpl(arg): - res = rffi.cast(rffi.LONG, c_func(arg)) + res = rffi.cast(rffi.SIGNED, c_func(arg)) if res == -1: raise OSError(rposix.get_errno(), "%s failed" % name) return res @@ -438,9 +438,13 @@ UTIMBUFP = lltype.Ptr(self.UTIMBUF) os_utime = self.llexternal('utime', [rffi.CCHARP, UTIMBUFP], rffi.INT) + if not _WIN32: + includes = ['sys/time.h'] + else: + includes = ['time.h'] class CConfig: _compilation_info_ = ExternalCompilationInfo( - includes=['sys/time.h'] + includes=includes ) HAVE_UTIMES = platform.Has('utimes') config = platform.configure(CConfig) @@ -450,9 +454,14 @@ if config['HAVE_UTIMES']: class CConfig: - 
_compilation_info_ = ExternalCompilationInfo( - includes = ['sys/time.h'] - ) + if not _WIN32: + _compilation_info_ = ExternalCompilationInfo( + includes = includes + ) + else: + _compilation_info_ = ExternalCompilationInfo( + includes = ['time.h'] + ) TIMEVAL = platform.Struct('struct timeval', [('tv_sec', rffi.LONG), ('tv_usec', rffi.LONG)]) config = platform.configure(CConfig) @@ -557,10 +566,10 @@ # The fields of a FILETIME structure are the hi and lo parts # of a 64-bit value expressed in 100 nanosecond units # (of course). - result = (pkernel.c_dwHighDateTime*429.4967296 + - pkernel.c_dwLowDateTime*1E-7, - puser.c_dwHighDateTime*429.4967296 + - puser.c_dwLowDateTime*1E-7, + result = (rffi.cast(lltype.Signed, pkernel.c_dwHighDateTime) * 429.4967296 + + rffi.cast(lltype.Signed, pkernel.c_dwLowDateTime) * 1E-7, + rffi.cast(lltype.Signed, puser.c_dwHighDateTime) * 429.4967296 + + rffi.cast(lltype.Signed, puser.c_dwLowDateTime) * 1E-7, 0, 0, 0) lltype.free(puser, flavor='raw') lltype.free(pkernel, flavor='raw') @@ -755,7 +764,7 @@ if self.GETPGRP_HAVE_ARG: c_func = self.llexternal(name, [rffi.INT], rffi.INT) def c_func_llimpl(): - res = rffi.cast(rffi.LONG, c_func(0)) + res = rffi.cast(rffi.SIGNED, c_func(0)) if res == -1: raise OSError(rposix.get_errno(), "%s failed" % name) return res @@ -773,7 +782,7 @@ if self.SETPGRP_HAVE_ARG: c_func = self.llexternal(name, [rffi.INT, rffi.INT], rffi.INT) def c_func_llimpl(): - res = rffi.cast(rffi.LONG, c_func(0, 0)) + res = rffi.cast(rffi.SIGNED, c_func(0, 0)) if res == -1: raise OSError(rposix.get_errno(), "%s failed" % name) @@ -818,7 +827,7 @@ [traits.CCHARP, rffi.INT, rffi.MODE_T], rffi.INT) def os_open_llimpl(path, flags, mode): - result = rffi.cast(rffi.LONG, os_open(path, flags, mode)) + result = rffi.cast(lltype.Signed, os_open(path, flags, mode)) if result == -1: raise OSError(rposix.get_errno(), "os_open failed") return result @@ -1009,7 +1018,7 @@ os_fsync = self.llexternal('_commit', [rffi.INT], rffi.INT) def 
fsync_llimpl(fd): - res = rffi.cast(rffi.LONG, os_fsync(rffi.cast(rffi.INT, fd))) + res = rffi.cast(rffi.SIGNED, os_fsync(rffi.cast(rffi.INT, fd))) if res < 0: raise OSError(rposix.get_errno(), "fsync failed") return extdef([int], s_None, @@ -1021,7 +1030,7 @@ os_fdatasync = self.llexternal('fdatasync', [rffi.INT], rffi.INT) def fdatasync_llimpl(fd): - res = rffi.cast(rffi.LONG, os_fdatasync(rffi.cast(rffi.INT, fd))) + res = rffi.cast(rffi.SIGNED, os_fdatasync(rffi.cast(rffi.INT, fd))) if res < 0: raise OSError(rposix.get_errno(), "fdatasync failed") return extdef([int], s_None, @@ -1033,7 +1042,7 @@ os_fchdir = self.llexternal('fchdir', [rffi.INT], rffi.INT) def fchdir_llimpl(fd): - res = rffi.cast(rffi.LONG, os_fchdir(rffi.cast(rffi.INT, fd))) + res = rffi.cast(rffi.SIGNED, os_fchdir(rffi.cast(rffi.INT, fd))) if res < 0: raise OSError(rposix.get_errno(), "fchdir failed") return extdef([int], s_None, @@ -1312,7 +1321,9 @@ result = os__cwait(status_p, pid, options) # shift the status left a byte so this is more # like the POSIX waitpid - status_p[0] <<= 8 + tmp = rffi.cast(rffi.SIGNED, status_p[0]) + tmp <<= 8 + status_p[0] = rffi.cast(rffi.INT, tmp) return result else: # Posix @@ -1343,7 +1354,7 @@ os_isatty = self.llexternal(underscore_on_windows+'isatty', [rffi.INT], rffi.INT) def isatty_llimpl(fd): - res = rffi.cast(rffi.LONG, os_isatty(rffi.cast(rffi.INT, fd))) + res = rffi.cast(lltype.Signed, os_isatty(rffi.cast(rffi.INT, fd))) return res != 0 return extdef([int], bool, llimpl=isatty_llimpl, diff --git a/pypy/rpython/module/ll_time.py b/pypy/rpython/module/ll_time.py --- a/pypy/rpython/module/ll_time.py +++ b/pypy/rpython/module/ll_time.py @@ -9,7 +9,7 @@ from pypy.rpython.lltypesystem import lltype from pypy.rpython.extfunc import BaseLazyRegistering, registering, extdef from pypy.rlib import rposix -from pypy.rlib.rarithmetic import intmask +from pypy.rlib.rarithmetic import intmask, maxint32 from pypy.translator.tool.cbuild import ExternalCompilationInfo 
if sys.platform == 'win32': @@ -177,7 +177,7 @@ @registering(time.sleep) def register_time_sleep(self): if sys.platform == 'win32': - MAX = sys.maxint + MAX = maxint32 Sleep = self.llexternal('Sleep', [rffi.ULONG], lltype.Void) def time_sleep_llimpl(secs): millisecs = secs * 1000.0 diff --git a/pypy/rpython/ootypesystem/ootype.py b/pypy/rpython/ootypesystem/ootype.py --- a/pypy/rpython/ootypesystem/ootype.py +++ b/pypy/rpython/ootypesystem/ootype.py @@ -1295,6 +1295,8 @@ for meth in self.overloadings: ARGS = meth._TYPE.ARGS if ARGS in signatures: + # XXX Conflict on 'Signed' vs 'SignedLongLong' on win64. + # XXX note that this partially works if this error is ignored. raise TypeError, 'Bad overloading' signatures.add(ARGS) diff --git a/pypy/rpython/test/test_rptr.py b/pypy/rpython/test/test_rptr.py --- a/pypy/rpython/test/test_rptr.py +++ b/pypy/rpython/test/test_rptr.py @@ -5,6 +5,7 @@ from pypy.rpython.lltypesystem import llmemory from pypy.rpython.rtyper import RPythonTyper from pypy.annotation import model as annmodel +from pypy.rlib.rarithmetic import is_valid_int # ____________________________________________________________ @@ -188,7 +189,7 @@ return llmemory.cast_adr_to_int(a, "forced") res = interpret(fn, [2]) - assert type(res) is int + assert is_valid_int(res) assert res == cast_ptr_to_int(p) # res = interpret(fn, [4]) @@ -196,7 +197,7 @@ assert llmemory.cast_int_to_adr(res) == llmemory.cast_ptr_to_adr(p) # res = interpret(fn, [6]) - assert type(res) is int + assert is_valid_int(res) from pypy.rpython.lltypesystem import rffi assert res == rffi.cast(Signed, p) diff --git a/pypy/rpython/tool/rffi_platform.py b/pypy/rpython/tool/rffi_platform.py --- a/pypy/rpython/tool/rffi_platform.py +++ b/pypy/rpython/tool/rffi_platform.py @@ -710,9 +710,13 @@ PYPY_EXTERNAL_DIR = py.path.local(pypydir).join('..', '..') # XXX make this configurable if sys.platform == 'win32': - libdir = py.path.local('c:/buildslave/support') # on the bigboard buildbot - if 
libdir.check(): - PYPY_EXTERNAL_DIR = libdir + for libdir in [ + py.path.local('c:/buildslave/support'), # on the bigboard buildbot + py.path.local('d:/myslave'), # on the snakepit buildbot + ]: + if libdir.check(): + PYPY_EXTERNAL_DIR = libdir + break def configure_external_library(name, eci, configurations, symbol=None, _cache={}): @@ -790,9 +794,15 @@ if platform is None: from pypy.translator.platform import platform if sys.platform == 'win32': - library_dir = 'Release' - libraries = ['gc'] - includes=['gc.h'] + import platform as host_platform # just to ask for the arch. Confusion-alert! + if host_platform.architecture()[0] == '32bit': + library_dir = 'Release' + libraries = ['gc'] + includes=['gc.h'] + else: + library_dir = '' + libraries = ['gc64_dll'] + includes = ['gc.h'] else: library_dir = '' libraries = ['gc', 'dl'] diff --git a/pypy/tool/test/test_lib_pypy.py b/pypy/tool/test/test_lib_pypy.py --- a/pypy/tool/test/test_lib_pypy.py +++ b/pypy/tool/test/test_lib_pypy.py @@ -11,7 +11,7 @@ assert lib_pypy.LIB_PYTHON_MODIFIED.check(dir=1) def test_import_from_lib_pypy(): - binascii = lib_pypy.import_from_lib_pypy('binascii') - assert type(binascii) is type(lib_pypy) - assert binascii.__name__ == 'lib_pypy.binascii' - assert hasattr(binascii, 'crc_32_tab') + _functools = lib_pypy.import_from_lib_pypy('_functools') + assert type(_functools) is type(lib_pypy) + assert _functools.__name__ == 'lib_pypy._functools' + assert hasattr(_functools, 'partial') diff --git a/pypy/translator/c/src/float.h b/pypy/translator/c/src/float.h --- a/pypy/translator/c/src/float.h +++ b/pypy/translator/c/src/float.h @@ -27,7 +27,7 @@ #define OP_FLOAT_SUB(x,y,r) r = x - y #define OP_FLOAT_MUL(x,y,r) r = x * y #define OP_FLOAT_TRUEDIV(x,y,r) r = x / y -#define OP_FLOAT_POW(x,y,r) r = pow(x, y) +#define OP_FLOAT_POW(x,y,r) r = pow(x, y) /*** conversions ***/ @@ -42,5 +42,6 @@ #ifdef HAVE_LONG_LONG #define OP_CAST_FLOAT_TO_LONGLONG(x,r) r = (long long)(x) #define 
OP_CAST_FLOAT_TO_ULONGLONG(x,r) r = (unsigned long long)(x) +#define OP_CONVERT_FLOAT_BYTES_TO_LONGLONG(x,r) memcpy(&r, &x, sizeof(double)) #endif diff --git a/pypy/translator/cli/sdk.py b/pypy/translator/cli/sdk.py --- a/pypy/translator/cli/sdk.py +++ b/pypy/translator/cli/sdk.py @@ -103,6 +103,11 @@ mono_bin = find_mono_on_windows() if mono_bin is not None: SDK.ILASM = os.path.join(mono_bin, 'ilasm2.bat') + # XXX the failing tests are boring, and the SDK is usually installed + # on windows. I do not care right now, because the Linux buildbots + # don't test this at all... + if platform.architecture()[0] == '64bit': + py.test.skip('mono on 64bit is not well enough supported') else: SDK = MonoSDK return SDK diff --git a/pypy/translator/driver.py b/pypy/translator/driver.py --- a/pypy/translator/driver.py +++ b/pypy/translator/driver.py @@ -585,22 +585,6 @@ # task_compile_c = taskdef(task_compile_c, ['source_c'], "Compiling c source") - def backend_run(self, backend): - c_entryp = self.c_entryp - standalone = self.standalone - if standalone: - os.system(c_entryp) - else: - runner = self.extra.get('run', lambda f: f()) - runner(c_entryp) - - def task_run_c(self): - self.backend_run('c') - # - task_run_c = taskdef(task_run_c, ['compile_c'], - "Running compiled c source", - idemp=True) - def task_llinterpret_lltype(self): from pypy.rpython.llinterp import LLInterpreter py.log.setconsumer("llinterp operation", None) @@ -710,11 +694,6 @@ shutil.copy(main_exe, '.') self.log.info("Copied to %s" % os.path.join(os.getcwd(), dllname)) - def task_run_cli(self): - pass - task_run_cli = taskdef(task_run_cli, ['compile_cli'], - 'XXX') - def task_source_jvm(self): from pypy.translator.jvm.genjvm import GenJvm from pypy.translator.jvm.node import EntryPoint diff --git a/pypy/translator/goal/translate.py b/pypy/translator/goal/translate.py --- a/pypy/translator/goal/translate.py +++ b/pypy/translator/goal/translate.py @@ -31,7 +31,6 @@ ("backendopt", "do backend optimizations", 
"--backendopt", ""), ("source", "create source", "-s --source", ""), ("compile", "compile", "-c --compile", " (default goal)"), - ("run", "run the resulting binary", "--run", ""), ("llinterpret", "interpret the rtyped flow graphs", "--llinterpret", ""), ] def goal_options(): @@ -78,7 +77,7 @@ defaultfactory=list), # xxx default goals ['annotate', 'rtype', 'backendopt', 'source', 'compile'] ArbitraryOption("skipped_goals", "XXX", - defaultfactory=lambda: ['run']), + defaultfactory=list), OptionDescription("goal_options", "Goals that should be reached during translation", goal_options()), diff --git a/pypy/translator/jvm/opcodes.py b/pypy/translator/jvm/opcodes.py --- a/pypy/translator/jvm/opcodes.py +++ b/pypy/translator/jvm/opcodes.py @@ -241,4 +241,6 @@ 'cast_ulonglong_to_float': jvm.PYPYULONGTODOUBLE, 'cast_primitive': [PushAllArgs, CastPrimitive, StoreResult], 'force_cast': [PushAllArgs, CastPrimitive, StoreResult], + + 'convert_float_bytes_to_longlong': jvm.PYPYDOUBLEBYTESTOLONG, }) diff --git a/pypy/translator/jvm/typesystem.py b/pypy/translator/jvm/typesystem.py --- a/pypy/translator/jvm/typesystem.py +++ b/pypy/translator/jvm/typesystem.py @@ -941,6 +941,7 @@ PYPYDOUBLETOULONG = Method.s(jPyPy, 'double_to_ulong', (jDouble,), jLong) PYPYULONGTODOUBLE = Method.s(jPyPy, 'ulong_to_double', (jLong,), jDouble) PYPYLONGBITWISENEGATE = Method.v(jPyPy, 'long_bitwise_negate', (jLong,), jLong) +PYPYDOUBLEBYTESTOLONG = Method.v(jPyPy, 'pypy__float2longlong', (jDouble,), jLong) PYPYSTRTOINT = Method.v(jPyPy, 'str_to_int', (jString,), jInt) PYPYSTRTOUINT = Method.v(jPyPy, 'str_to_uint', (jString,), jInt) PYPYSTRTOLONG = Method.v(jPyPy, 'str_to_long', (jString,), jLong) diff --git a/pytest.py b/pytest.py --- a/pytest.py +++ b/pytest.py @@ -4,6 +4,20 @@ """ __all__ = ['main'] +# XXX hack for win64: +# This patch must stay here until the END OF STAGE 1 +# When all tests work, this branch will be merged +# and the branch stage 2 is started, where we remove this patch. 
+import sys +if hasattr(sys, "maxsize"): + if sys.maxint != sys.maxsize: + sys.maxint = sys.maxsize + import warnings + warnings.warn("""\n +---> This win64 port is now in stage 1: sys.maxint was modified. +---> When pypy/__init__.py becomes empty again, we have reached stage 2. +""") + from _pytest.core import main, UsageError, _preloadplugins from _pytest import core as cmdline from _pytest import __version__ From noreply at buildbot.pypy.org Thu Mar 22 23:22:23 2012 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 22 Mar 2012 23:22:23 +0100 (CET) Subject: [pypy-commit] pypy numpypy-out: fix merge Message-ID: <20120322222223.A6D6282438@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: numpypy-out Changeset: r53933:593cf5edb34e Date: 2012-03-22 23:35 +0200 http://bitbucket.org/pypy/pypy/changeset/593cf5edb34e/ Log: fix merge diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -744,12 +744,12 @@ broadcast_dims = len(self.res.shape) - len(self.shape) chunks = [Chunk(0,0,0,0)] * broadcast_dims + \ [Chunk(0, i, 1, i) for i in self.shape] - return ra.left.create_slice(chunks) + return Chunks(chunks).apply(self.res) return ra.left def force_if_needed(self): if self.forced_result is None: - self.forced_result = self.compute() + self.forced_result = self.compute().get_concrete() self._del_sources() def get_concrete(self): @@ -979,7 +979,7 @@ def setitem(self, item, value): self.invalidated() - self.dtype.setitem(self, item, value) + self.dtype.setitem(self, item, value.convert_to(self.dtype)) def calc_strides(self, shape): dtype = self.find_dtype() diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -28,6 +28,7 @@ return self.identity def descr_call(self, space, __args__): + from 
interp_numarray import BaseArray args_w, kwds_w = __args__.unpack() # it occurs to me that we don't support any datatypes that # require casting, change it later when we do From noreply at buildbot.pypy.org Fri Mar 23 07:47:26 2012 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 Mar 2012 07:47:26 +0100 (CET) Subject: [pypy-commit] pypy default: Bah. The code is correct and the test wrong, for a double reason. :-( Message-ID: <20120323064726.3D61282112@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r53934:4a21a85f7d3d Date: 2012-03-22 18:47 +0100 http://bitbucket.org/pypy/pypy/changeset/4a21a85f7d3d/ Log: Bah. The code is correct and the test wrong, for a double reason. :-( diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py --- a/pypy/jit/backend/x86/rx86.py +++ b/pypy/jit/backend/x86/rx86.py @@ -601,7 +601,9 @@ CVTSS2SD_xb = xmminsn('\xF3', rex_nw, '\x0F\x5A', register(1, 8), stack_bp(2)) - # These work on machine sized registers. + # These work on machine sized registers, so MOVD is actually MOVQ + # when running on 64 bits. 
Note a bug in the Intel documentation: + # http://lists.gnu.org/archive/html/bug-binutils/2007-07/msg00095.html MOVD_rx = xmminsn('\x66', rex_w, '\x0F\x7E', register(2, 8), register(1), '\xC0') MOVD_xr = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), register(2), '\xC0') MOVD_xb = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), stack_bp(2)) diff --git a/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py b/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py --- a/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py +++ b/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py @@ -182,6 +182,12 @@ filename = str(testdir.join(FILENAME % methname)) g = open(inputname, 'w') g.write('\x09.string "%s"\n' % BEGIN_TAG) + # + if instrname == 'MOVD' and self.WORD == 8: + instrname = 'MOVQ' + if argmodes == 'xb': + py.test.skip('"as" uses an undocumented alternate encoding??') + # for args in args_lists: suffix = "" ## all = instr.as_all_suffixes @@ -229,9 +235,6 @@ # movq $xxx, %rax => movl $xxx, %eax suffix = 'l' ops[1] = reduce_to_32bit(ops[1]) - if instrname.lower() == 'movd': - ops[0] = reduce_to_32bit(ops[0]) - ops[1] = reduce_to_32bit(ops[1]) # op = '\t%s%s %s%s' % (instrname.lower(), suffix, ', '.join(ops), following) From noreply at buildbot.pypy.org Fri Mar 23 07:47:28 2012 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 Mar 2012 07:47:28 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20120323064728.6449D82112@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r53935:3f6079ca9f34 Date: 2012-03-23 07:46 +0100 http://bitbucket.org/pypy/pypy/changeset/3f6079ca9f34/ Log: merge heads diff --git a/pypy/module/_continuation/test/test_zpickle.py b/pypy/module/_continuation/test/test_zpickle.py --- a/pypy/module/_continuation/test/test_zpickle.py +++ b/pypy/module/_continuation/test/test_zpickle.py @@ -108,6 +108,7 @@ def setup_class(cls): cls.space = gettestobjspace(usemodules=('_continuation', 
'struct'), CALL_METHOD=True) + cls.space.config.translation.continuation = True cls.space.appexec([], """(): global continulet, A, __name__ diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -1,3 +1,4 @@ +from pypy.conftest import gettestobjspace from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase import py @@ -5,6 +6,7 @@ class AppTestArrayModule(AppTestCpythonExtensionBase): enable_leak_checking = False + extra_modules = ['array'] def test_basic(self): module = self.import_module(name='array') diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -165,8 +165,11 @@ return leaking class AppTestCpythonExtensionBase(LeakCheckingTest): + extra_modules = [] + def setup_class(cls): - cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array']) + cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi'] + + cls.extra_modules) cls.space.getbuiltinmodule("cpyext") from pypy.module.imp.importing import importhook importhook(cls.space, "os") # warm up reference counts diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -987,6 +987,10 @@ os.environ['LANG'] = oldlang class AppTestImportHooks(object): + + def setup_class(cls): + cls.space = gettestobjspace(usemodules=('struct',)) + def test_meta_path(self): tried_imports = [] class Importer(object): diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1125,7 +1125,8 @@ @unwrap_spec(subok=bool, copy=bool, ownmaskna=bool) def 
array(space, w_item_or_iterable, w_dtype=None, w_order=None, - subok=True, copy=True, w_maskna=None, ownmaskna=False): + subok=True, copy=True, w_maskna=None, ownmaskna=False, + w_ndmin=None): # find scalar if w_maskna is None: w_maskna = space.w_None @@ -1170,8 +1171,13 @@ break if dtype is None: dtype = interp_dtype.get_dtype_cache(space).w_float64dtype + shapelen = len(shape) + if w_ndmin is not None and not space.is_w(w_ndmin, space.w_None): + ndmin = space.int_w(w_ndmin) + if ndmin > shapelen: + shape = [1] * (ndmin - shapelen) + shape + shapelen = ndmin arr = W_NDimArray(shape[:], dtype=dtype, order=order) - shapelen = len(shape) arr_iter = arr.create_iter() # XXX we might want to have a jitdriver here for i in range(len(elems_w)): diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -211,6 +211,18 @@ assert a.shape == (3,) assert a.dtype is dtype(int) + def test_ndmin(self): + from _numpypy import array + + arr = array([[[1]]], ndmin=1) + assert arr.shape == (1, 1, 1) + + def test_noop_ndmin(self): + from _numpypy import array + + arr = array([1], ndmin=3) + assert arr.shape == (1, 1, 1) + def test_type(self): from _numpypy import array ar = array(range(5)) diff --git a/pypy/objspace/std/ropeobject.py b/pypy/objspace/std/ropeobject.py --- a/pypy/objspace/std/ropeobject.py +++ b/pypy/objspace/std/ropeobject.py @@ -41,11 +41,6 @@ return w_self return W_RopeObject(w_self._node) - def unicode_w(w_self, space): - # XXX should this use the default encoding? 
- from pypy.objspace.std.unicodetype import plain_str2unicode - return plain_str2unicode(space, w_self._node.flatten_string()) - W_RopeObject.EMPTY = W_RopeObject(rope.LiteralStringNode.EMPTY) W_RopeObject.PREBUILT = [W_RopeObject(rope.LiteralStringNode.PREBUILT[i]) for i in range(256)] diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -37,6 +37,20 @@ return None return space.wrap(compute_unique_id(space.str_w(self))) + def unicode_w(w_self, space): + # Use the default encoding. + from pypy.objspace.std.unicodetype import unicode_from_string, \ + decode_object + w_defaultencoding = space.call_function(space.sys.get( + 'getdefaultencoding')) + from pypy.objspace.std.unicodetype import _get_encoding_and_errors, \ + unicode_from_string, decode_object + encoding, errors = _get_encoding_and_errors(space, w_defaultencoding, + space.w_None) + if encoding is None and errors is None: + return space.unicode_w(unicode_from_string(space, w_self)) + return space.unicode_w(decode_object(space, w_self, encoding, errors)) + class W_StringObject(W_AbstractStringObject): from pypy.objspace.std.stringtype import str_typedef as typedef @@ -55,20 +69,6 @@ def str_w(w_self, space): return w_self._value - def unicode_w(w_self, space): - # Use the default encoding. 
- from pypy.objspace.std.unicodetype import unicode_from_string, \ - decode_object - w_defaultencoding = space.call_function(space.sys.get( - 'getdefaultencoding')) - from pypy.objspace.std.unicodetype import _get_encoding_and_errors, \ - unicode_from_string, decode_object - encoding, errors = _get_encoding_and_errors(space, w_defaultencoding, - space.w_None) - if encoding is None and errors is None: - return space.unicode_w(unicode_from_string(space, w_self)) - return space.unicode_w(decode_object(space, w_self, encoding, errors)) - registerimplementation(W_StringObject) W_StringObject.EMPTY = W_StringObject('') diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -484,7 +484,7 @@ 'shl', 'shr', 'sal', 'sar', 'rol', 'ror', 'mul', 'imul', 'div', 'idiv', 'bswap', 'bt', 'rdtsc', 'punpck', 'pshufd', 'pcmp', 'pand', 'psllw', 'pslld', 'psllq', - 'paddq', 'pinsr', 'pmul', 'psrl', + 'paddq', 'pinsr', 'pmul', 'psrl', 'vmul', # sign-extending moves should not produce GC pointers 'cbtw', 'cwtl', 'cwtd', 'cltd', 'cltq', 'cqto', # zero-extending moves should not produce GC pointers diff --git a/pypy/translator/test/test_driver.py b/pypy/translator/test/test_driver.py --- a/pypy/translator/test/test_driver.py +++ b/pypy/translator/test/test_driver.py @@ -6,7 +6,7 @@ def test_ctr(): td = TranslationDriver() expected = ['annotate', 'backendopt', 'llinterpret', 'rtype', 'source', - 'compile', 'run', 'pyjitpl'] + 'compile', 'pyjitpl'] assert set(td.exposed) == set(expected) assert td.backend_select_goals(['compile_c']) == ['compile_c'] @@ -33,7 +33,6 @@ 'rtype_ootype', 'rtype_lltype', 'source_cli', 'source_c', 'compile_cli', 'compile_c', - 'run_c', 'run_cli', 'compile_jvm', 'source_jvm', 'run_jvm', 'pyjitpl_lltype', 'pyjitpl_ootype'] @@ -50,6 +49,6 @@ 'backendopt_lltype'] expected = ['annotate', 'backendopt', 'llinterpret', 'rtype', 'source_c', - 'compile_c', 
'run_c', 'pyjitpl'] + 'compile_c', 'pyjitpl'] assert set(td.exposed) == set(expected) From noreply at buildbot.pypy.org Fri Mar 23 09:45:50 2012 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 23 Mar 2012 09:45:50 +0100 (CET) Subject: [pypy-commit] pypy default: fix few tests on 32 bit Message-ID: <20120323084550.D076F8445A@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r53936:ce6657240c20 Date: 2012-03-23 10:45 +0200 http://bitbucket.org/pypy/pypy/changeset/ce6657240c20/ Log: fix few tests on 32 bit diff --git a/pypy/objspace/flow/model.py b/pypy/objspace/flow/model.py --- a/pypy/objspace/flow/model.py +++ b/pypy/objspace/flow/model.py @@ -7,8 +7,7 @@ from pypy.tool.uid import uid, Hashable from pypy.tool.descriptor import roproperty from pypy.tool.sourcetools import PY_IDENTIFIER, nice_repr_for_func -from pypy.tool.identity_dict import identity_dict -from pypy.rlib.rarithmetic import is_valid_int, r_longlong, r_ulonglong +from pypy.rlib.rarithmetic import is_valid_int, r_longlong, r_ulonglong, r_uint """ @@ -546,7 +545,7 @@ for n in cases[:len(cases)-has_default]: if is_valid_int(n): continue - if isinstance(n, (r_longlong, r_ulonglong)): + if isinstance(n, (r_longlong, r_ulonglong, r_uint)): continue if isinstance(n, (str, unicode)) and len(n) == 1: continue From noreply at buildbot.pypy.org Fri Mar 23 10:12:21 2012 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 23 Mar 2012 10:12:21 +0100 (CET) Subject: [pypy-commit] pypy default: 32bit fixes and cleanups Message-ID: <20120323091221.A63F982112@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r53937:80e81082ac4d Date: 2012-03-23 11:11 +0200 http://bitbucket.org/pypy/pypy/changeset/80e81082ac4d/ Log: 32bit fixes and cleanups diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -359,6 +359,7 @@ name="int64", 
char="q", w_box_type=space.gettypefor(interp_boxes.W_Int64Box), + alternate_constructors=[space.w_long], ) self.w_uint64dtype = W_Dtype( types.UInt64(), @@ -386,23 +387,6 @@ alternate_constructors=[space.w_float], aliases=["float"], ) - self.w_longlongdtype = W_Dtype( - types.Int64(), - num=9, - kind=SIGNEDLTR, - name='int64', - char='q', - w_box_type = space.gettypefor(interp_boxes.W_LongLongBox), - alternate_constructors=[space.w_long], - ) - self.w_ulonglongdtype = W_Dtype( - types.UInt64(), - num=10, - kind=UNSIGNEDLTR, - name='uint64', - char='Q', - w_box_type = space.gettypefor(interp_boxes.W_ULongLongBox), - ) self.w_stringdtype = W_Dtype( types.StringType(1), num=18, @@ -435,14 +419,14 @@ self.w_booldtype, self.w_int8dtype, self.w_uint8dtype, self.w_int16dtype, self.w_uint16dtype, self.w_int32dtype, self.w_uint32dtype, self.w_longdtype, self.w_ulongdtype, - self.w_longlongdtype, self.w_ulonglongdtype, + self.w_int64dtype, self.w_uint64dtype, self.w_float32dtype, self.w_float64dtype, self.w_stringdtype, self.w_unicodedtype, self.w_voiddtype, ] - self.dtypes_by_num_bytes = sorted( + self.float_dtypes_by_num_bytes = sorted( (dtype.itemtype.get_element_size(), dtype) - for dtype in self.builtin_dtypes + for dtype in [self.w_float32dtype, self.w_float64dtype] ) self.dtypes_by_name = {} for dtype in self.builtin_dtypes: @@ -473,7 +457,7 @@ 'LONG': self.w_longdtype, 'UNICODE': self.w_unicodedtype, #'OBJECT', - 'ULONGLONG': self.w_ulonglongdtype, + 'ULONGLONG': self.w_uint64dtype, 'STRING': self.w_stringdtype, #'CDOUBLE', #'DATETIME', diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -314,7 +314,7 @@ return dt if dt.num >= 5: return interp_dtype.get_dtype_cache(space).w_float64dtype - for bytes, dtype in interp_dtype.get_dtype_cache(space).dtypes_by_num_bytes: + for bytes, dtype in 
interp_dtype.get_dtype_cache(space).float_dtypes_by_num_bytes: if (dtype.kind == interp_dtype.FLOATINGLTR and dtype.itemtype.get_element_size() > dt.itemtype.get_element_size()): return dtype diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -333,15 +333,11 @@ assert numpy.dtype(numpy.int64).type is numpy.int64 assert numpy.int64(3) == 3 - if sys.maxint >= 2 ** 63 - 1: - assert numpy.int64(9223372036854775807) == 9223372036854775807 - assert numpy.int64('9223372036854775807') == 9223372036854775807 - else: - raises(OverflowError, numpy.int64, 9223372036854775807) - raises(OverflowError, numpy.int64, '9223372036854775807') + assert numpy.int64(9223372036854775807) == 9223372036854775807 + assert numpy.int64(9223372036854775807) == 9223372036854775807 raises(OverflowError, numpy.int64, 9223372036854775808) - raises(OverflowError, numpy.int64, '9223372036854775808') + raises(OverflowError, numpy.int64, 9223372036854775808L) def test_uint64(self): import sys diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -500,6 +500,19 @@ BoxType = interp_boxes.W_ULongBox format_code = "L" +def _int64_coerce(self, space, w_item): + try: + return self._base_coerce(space, w_item) + except OperationError, e: + if not e.match(space, space.w_OverflowError): + raise + bigint = space.bigint_w(w_item) + try: + value = bigint.tolonglong() + except OverflowError: + raise OperationError(space.w_OverflowError, space.w_None) + return self.box(value) + class Int64(BaseType, Integer): _attrs_ = () @@ -507,6 +520,8 @@ BoxType = interp_boxes.W_Int64Box format_code = "q" + _coerce = func_with_new_name(_int64_coerce, '_coerce') + class NonNativeInt64(BaseType, NonNativeInteger): _attrs_ = () @@ -514,6 +529,8 @@ BoxType = interp_boxes.W_Int64Box 
format_code = "q" + _coerce = func_with_new_name(_int64_coerce, '_coerce') + def _uint64_coerce(self, space, w_item): try: return self._base_coerce(space, w_item) From noreply at buildbot.pypy.org Fri Mar 23 10:25:07 2012 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 23 Mar 2012 10:25:07 +0100 (CET) Subject: [pypy-commit] pypy default: cleanup the 32bit situation Message-ID: <20120323092507.6096382112@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r53938:107df78c3226 Date: 2012-03-23 11:24 +0200 http://bitbucket.org/pypy/pypy/changeset/107df78c3226/ Log: cleanup the 32bit situation diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -429,7 +429,9 @@ for dtype in [self.w_float32dtype, self.w_float64dtype] ) self.dtypes_by_name = {} - for dtype in self.builtin_dtypes: + # we reverse, so the stuff with lower numbers override stuff with + # higher numbers + for dtype in reversed(self.builtin_dtypes): self.dtypes_by_name[dtype.name] = dtype can_name = dtype.kind + str(dtype.itemtype.get_element_size()) self.dtypes_by_name[can_name] = dtype diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -302,6 +302,7 @@ else: raises(OverflowError, numpy.int32, 2147483648) raises(OverflowError, numpy.int32, '2147483648') + assert numpy.dtype('int32') is numpy.dtype(numpy.int32) def test_uint32(self): import sys From noreply at buildbot.pypy.org Fri Mar 23 11:49:30 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 23 Mar 2012 11:49:30 +0100 (CET) Subject: [pypy-commit] pypy default: bah :-( Message-ID: <20120323104930.02CDC8445A@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r53939:86cce9710b22 Date: 2012-03-23 11:48 +0100 
http://bitbucket.org/pypy/pypy/changeset/86cce9710b22/ Log: bah :-( cpyext rely on some kind of global state which I could not sort out. As a consequence, if you try to instantiate two different objspaces in the same process, the second one explodes. The only way to make it working is to make sure that *all* gettestobjspace calls have they very same config: this way, there is a cache which reuses the already-built objspace, so we don't build a second one and things work. diff --git a/pypy/module/cpyext/test/conftest.py b/pypy/module/cpyext/test/conftest.py --- a/pypy/module/cpyext/test/conftest.py +++ b/pypy/module/cpyext/test/conftest.py @@ -10,7 +10,7 @@ return False def pytest_funcarg__space(request): - return gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi']) + return gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array']) def pytest_funcarg__api(request): return request.cls.api diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -19,7 +19,8 @@ class BaseApiTest(LeakCheckingTest): def setup_class(cls): - cls.space = space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi']) + cls.space = space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', + 'array']) # warm up reference counts: # - the posix module allocates a HCRYPTPROV on Windows diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -6,7 +6,6 @@ class AppTestArrayModule(AppTestCpythonExtensionBase): enable_leak_checking = False - extra_modules = ['array'] def test_basic(self): module = self.import_module(name='array') diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ 
b/pypy/module/cpyext/test/test_cpyext.py @@ -35,7 +35,7 @@ class AppTestApi: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi']) + cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array']) from pypy.rlib.libffi import get_libc_name cls.w_libc = cls.space.wrap(get_libc_name()) @@ -165,11 +165,9 @@ return leaking class AppTestCpythonExtensionBase(LeakCheckingTest): - extra_modules = [] def setup_class(cls): - cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi'] + - cls.extra_modules) + cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array']) cls.space.getbuiltinmodule("cpyext") from pypy.module.imp.importing import importhook importhook(cls.space, "os") # warm up reference counts From noreply at buildbot.pypy.org Fri Mar 23 15:39:07 2012 From: noreply at buildbot.pypy.org (hager) Date: Fri, 23 Mar 2012 15:39:07 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: factor out common code Message-ID: <20120323143907.448DE82112@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r53940:86b9bb6c612b Date: 2012-03-23 07:37 -0700 http://bitbucket.org/pypy/pypy/changeset/86b9bb6c612b/ Log: factor out common code diff --git a/pypy/jit/backend/ppc/arch.py b/pypy/jit/backend/ppc/arch.py --- a/pypy/jit/backend/ppc/arch.py +++ b/pypy/jit/backend/ppc/arch.py @@ -30,3 +30,9 @@ SIZE_LOAD_IMM_PATCH_SP = 6 FORCE_INDEX_OFS = len(MANAGED_REGS) * WORD + +# offset to LR in BACKCHAIN +if IS_PPC_32: + LR_BC_OFFSET = WORD +else: + LR_BC_OFFSET = 2 * WORD diff --git a/pypy/jit/backend/ppc/codebuilder.py b/pypy/jit/backend/ppc/codebuilder.py --- a/pypy/jit/backend/ppc/codebuilder.py +++ b/pypy/jit/backend/ppc/codebuilder.py @@ -3,7 +3,8 @@ from pypy.jit.backend.ppc.locations import RegisterLocation from pypy.jit.backend.ppc.ppc_field import ppc_fields from pypy.jit.backend.ppc.assembler import Assembler -from pypy.jit.backend.ppc.arch import (IS_PPC_32, 
WORD, IS_PPC_64) +from pypy.jit.backend.ppc.arch import (IS_PPC_32, WORD, IS_PPC_64, + LR_BC_OFFSET) import pypy.jit.backend.ppc.register as r from pypy.jit.backend.llsupport.asmmemmgr import BlockBuilderMixin from pypy.rpython.lltypesystem import lltype, rffi @@ -1066,6 +1067,24 @@ if IS_PPC_64: self.load(r.TOC.value, r.SP.value, 5 * WORD) + def make_function_prologue(self, frame_size): + """ Build a new stackframe of size frame_size + and store the LR in the previous frame. + """ + with scratch_reg(self): + self.store_update(r.SP.value, r.SP.value, -frame_size) + self.mflr(r.SCRATCH.value) + self.store(r.SCRATCH.value, r.SP.value, frame_size + LR_BC_OFFSET) + + def restore_LR_from_caller_frame(self, frame_size): + """ Restore the LR from the calling frame. + frame_size is the size of the current frame. + """ + with scratch_reg(self): + lr_offset = frame_size + LR_BC_OFFSET + self.load(r.SCRATCH.value, r.SP.value, lr_offset) + self.mtlr(r.SCRATCH.value) + def load(self, target_reg, base_reg, offset): if IS_PPC_32: self.lwz(target_reg, base_reg, offset) @@ -1090,6 +1109,12 @@ else: self.stdx(from_reg, base_reg, offset_reg) + def store_update(self, target_reg, from_reg, offset): + if IS_PPC_32: + self.stwu(target_reg, from_reg, offset) + else: + self.stdu(target_reg, from_reg, offset) + def srli_op(self, target_reg, from_reg, numbits): if IS_PPC_32: self.srwi(target_reg, from_reg, numbits) diff --git a/pypy/jit/backend/ppc/ppc_assembler.py b/pypy/jit/backend/ppc/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppc_assembler.py @@ -121,16 +121,8 @@ # The code generated here allocates a new stackframe # and is the first machine code to be executed. 
def _make_frame(self, frame_depth): - if IS_PPC_32: - # save it in previous frame (Backchain) - self.mc.stwu(r.SP.value, r.SP.value, -frame_depth) - self.mc.mflr(r.SCRATCH.value) # move old link register - # save old link register in previous frame - self.mc.stw(r.SCRATCH.value, r.SP.value, frame_depth + WORD) - else: - self.mc.stdu(r.SP.value, r.SP.value, -frame_depth) - self.mc.mflr(r.SCRATCH.value) - self.mc.std(r.SCRATCH.value, r.SP.value, frame_depth + 2 * WORD) + self.mc.make_function_prologue(frame_depth) + # save SPP at the bottom of the stack frame self.mc.store(r.SPP.value, r.SP.value, WORD) @@ -353,7 +345,7 @@ ofs = WORD else: ofs = WORD * 2 - + with scratch_reg(mc): mc.load(r.SCRATCH.value, r.SP.value, frame_size + ofs) mc.mtlr(r.SCRATCH.value) @@ -421,15 +413,7 @@ mc.write32(0) # build frame - with scratch_reg(mc): - if IS_PPC_32: - mc.stwu(r.SP.value, r.SP.value, -frame_size) - mc.mflr(r.SCRATCH.value) - mc.stw(r.SCRATCH.value, r.SP.value, frame_size + WORD) - else: - mc.stdu(r.SP.value, r.SP.value, -frame_size) - mc.mflr(r.SCRATCH.value) - mc.std(r.SCRATCH.value, r.SP.value, frame_size + 2 * WORD) + mc.make_function_prologue(frame_size) # save parameter registers for i, reg in enumerate(r.PARAM_REGS): @@ -456,14 +440,7 @@ mc.load(reg.value, r.SP.value, (i + BACKCHAIN_SIZE) * WORD) # restore LR - with scratch_reg(mc): - lr_offset = frame_size + WORD - if IS_PPC_64: - lr_offset += WORD - - mc.load(r.SCRATCH.value, r.SP.value, - lr_offset) - mc.mtlr(r.SCRATCH.value) + mc.restore_LR_from_caller_frame(frame_size) # reset SP mc.addi(r.SP.value, r.SP.value, frame_size) @@ -629,27 +606,13 @@ # | | # ============================== <- SP - if IS_PPC_32: - self.mc.stwu(r.SP.value, r.SP.value, -frame_size) - self.mc.mflr(r.SCRATCH.value) - self.mc.stw(r.SCRATCH.value, r.SP.value, frame_size + WORD) - else: - self.mc.stdu(r.SP.value, r.SP.value, -frame_size) - self.mc.mflr(r.SCRATCH.value) - self.mc.std(r.SCRATCH.value, r.SP.value, frame_size + 2 * WORD) + 
self.mc.make_function_prologue(frame_size) # make check self.mc.call(self.stack_check_slowpath) # restore LR - with scratch_reg(self.mc): - lr_offset = frame_size + WORD - if IS_PPC_64: - lr_offset += WORD - - self.mc.load(r.SCRATCH.value, r.SP.value, - lr_offset) - self.mc.mtlr(r.SCRATCH.value) + self.mc.restore_LR_from_caller_frame(frame_size) # remove minimal frame self.mc.addi(r.SP.value, r.SP.value, frame_size) From noreply at buildbot.pypy.org Fri Mar 23 15:48:18 2012 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 23 Mar 2012 15:48:18 +0100 (CET) Subject: [pypy-commit] pypy default: Start writing a doc Message-ID: <20120323144818.74B0E82112@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r53941:97c883fd0fd7 Date: 2012-03-23 15:30 +0200 http://bitbucket.org/pypy/pypy/changeset/97c883fd0fd7/ Log: Start writing a doc diff --git a/pypy/doc/you-want-to-help.rst b/pypy/doc/you-want-to-help.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/you-want-to-help.rst @@ -0,0 +1,68 @@ + +You want to help with PyPy, now what? +===================================== + +PyPy is a very large project that has a reputation of being hard to dive into. +Some of this fame is warranted, some of it is purely accidental. There are three +important lessons that everyone willing to contribute should learn: + +* PyPy has layers. There are many pieces of architecture that are very well + separated from each other. More about this below, but often the manifestation + of this is that things are at a different layer than you would expect them + to be. For example if you are looking for the JIT implementation, you will + not find it in the implementation of the Python programming language. + +* Because of the above, we are very serious about Test Driven Development. + It's not only what we believe in, but also that PyPy's architecture is + working very well with TDD in mind and not so well without it. 
Often + the development means progressing in an unrelated corner, one unittest + at a time and then flipping a giant switch. It's worth repeating - PyPy + approach is great if you do TDD, not so great otherwise. + +* PyPy uses an entirely different set of tools - most of them included + in the PyPy repository. There is no Makefile, nor autoconf. More below + +Architecture +============ + +PyPy has layers. The 100 mile view: + +* `RPython`_ is a language in which we write interpreter in PyPy. Not the entire + PyPy project is written in RPython, only parts that are compiled in + the translation process. The interesting point is that RPython has no parser, + it's compiled from the live python objects, which make it possible to do + all kinds of metaprogramming during import time. In short, Python is a meta + programming language for RPython. + + RPython standard library is to be found in ``rlib`` subdirectory. + +.. _`RPython`: coding-guide.html#RPython + +* Translation toolchain - this is the part that takes care about translating + RPython to flow graphs and then to C. There is more in `architecture`_ + document written about it. + + It mostly lives in ``rpython``, ``annotator`` and ``objspace/flow``. + +.. 
_`architecture`: architecture.html + +* Python Interpreter + + xxx + +* Python modules + + xxx + +* JIT + + xxx + +* Garbage Collectors + + xxx + +Toolset +======= + +xxx From noreply at buildbot.pypy.org Fri Mar 23 16:31:32 2012 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 23 Mar 2012 16:31:32 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: start a blog post Message-ID: <20120323153132.47C8182112@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r4161:ebde55f615de Date: 2012-03-23 16:49 +0200 http://bitbucket.org/pypy/extradoc/changeset/ebde55f615de/ Log: start a blog post diff --git a/blog/draft/pycon-wrapup.rst b/blog/draft/pycon-wrapup.rst new file mode 100644 --- /dev/null +++ b/blog/draft/pycon-wrapup.rst @@ -0,0 +1,27 @@ +PyCon 2012 +========== + +So, PyCon happened. This was the biggest PyCon ever and probably the biggest +gathering of Python hackers ever. + +From the PyPy perspective, a lot at PyCon was about PyPy. Listing things: + +* David Beazley did an excellent keynote on trying to dive head-first into + PyPy and at least partly failing. He however did not fail to explain + bits and pieces about PyPy's architecture. `Video`_ is available. + +* We gave tons of talks, including the `tutorial`_ and `why pypy by example`_. + +* We had a giant influx of new commiters, easily doubling the amount of pull + requests ever created for PyPy. The main topics for newcomers were numpy and + py3k, disproving what David said about PyPy being too hard to dive into ;) + +* Guido argued in his keynote that Python is not too slow. In the meantime, + we're trying to `prove him correct`_ :-) + +* XXX stuff stuff + +.. _`Video`: xxx +.. _`tutorial`: xxx +.. _`why pypy by example`: xxx +.. 
_`prove him correct`: http://mrjoes.github.com/2011/12/15/sockjs-bench.html From noreply at buildbot.pypy.org Fri Mar 23 16:31:33 2012 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 23 Mar 2012 16:31:33 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: merge Message-ID: <20120323153133.75CCF82112@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r4162:4a8442f9e157 Date: 2012-03-23 17:31 +0200 http://bitbucket.org/pypy/extradoc/changeset/4a8442f9e157/ Log: merge diff --git a/blog/draft/pycon-wrapup.rst b/blog/draft/pycon-wrapup.rst new file mode 100644 --- /dev/null +++ b/blog/draft/pycon-wrapup.rst @@ -0,0 +1,27 @@ +PyCon 2012 +========== + +So, PyCon happened. This was the biggest PyCon ever and probably the biggest +gathering of Python hackers ever. + +From the PyPy perspective, a lot at PyCon was about PyPy. Listing things: + +* David Beazley did an excellent keynote on trying to dive head-first into + PyPy and at least partly failing. He however did not fail to explain + bits and pieces about PyPy's architecture. `Video`_ is available. + +* We gave tons of talks, including the `tutorial`_ and `why pypy by example`_. + +* We had a giant influx of new commiters, easily doubling the amount of pull + requests ever created for PyPy. The main topics for newcomers were numpy and + py3k, disproving what David said about PyPy being too hard to dive into ;) + +* Guido argued in his keynote that Python is not too slow. In the meantime, + we're trying to `prove him correct`_ :-) + +* XXX stuff stuff + +.. _`Video`: xxx +.. _`tutorial`: xxx +.. _`why pypy by example`: xxx +.. 
_`prove him correct`: http://mrjoes.github.com/2011/12/15/sockjs-bench.html From noreply at buildbot.pypy.org Fri Mar 23 16:35:39 2012 From: noreply at buildbot.pypy.org (ctismer) Date: Fri, 23 Mar 2012 16:35:39 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: Merge Message-ID: <20120323153539.98CD38445A@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r53942:0d31ed29ac76 Date: 2012-03-23 16:34 +0100 http://bitbucket.org/pypy/pypy/changeset/0d31ed29ac76/ Log: Merge diff --git a/lib-python/modified-2.7/site.py b/lib-python/modified-2.7/site.py --- a/lib-python/modified-2.7/site.py +++ b/lib-python/modified-2.7/site.py @@ -550,9 +550,18 @@ "'import usercustomize' failed; use -v for traceback" +def import_builtin_stuff(): + """PyPy specific: pre-import a few built-in modules, because + some programs actually rely on them to be in sys.modules :-(""" + import exceptions + if 'zipimport' in sys.builtin_module_names: + import zipimport + + def main(): global ENABLE_USER_SITE + import_builtin_stuff() abs__file__() known_paths = removeduppaths() if (os.name == "posix" and sys.path and diff --git a/lib_pypy/_locale.py b/lib_pypy/_locale.py deleted file mode 100644 --- a/lib_pypy/_locale.py +++ /dev/null @@ -1,337 +0,0 @@ -# ctypes implementation of _locale module by Victor Stinner, 2008-03-27 - -# ------------------------------------------------------------ -# Note that we also have our own interp-level implementation -# ------------------------------------------------------------ - -""" -Support for POSIX locales. 
-""" - -from ctypes import (Structure, POINTER, create_string_buffer, - c_ubyte, c_int, c_char_p, c_wchar_p, c_size_t) -from ctypes_support import standard_c_lib as libc -from ctypes_support import get_errno - -# load the platform-specific cache made by running locale.ctc.py -from ctypes_config_cache._locale_cache import * - -try: from __pypy__ import builtinify -except ImportError: builtinify = lambda f: f - - -# Ubuntu Gusty i386 structure -class lconv(Structure): - _fields_ = ( - # Numeric (non-monetary) information. - ("decimal_point", c_char_p), # Decimal point character. - ("thousands_sep", c_char_p), # Thousands separator. - - # Each element is the number of digits in each group; - # elements with higher indices are farther left. - # An element with value CHAR_MAX means that no further grouping is done. - # An element with value 0 means that the previous element is used - # for all groups farther left. */ - ("grouping", c_char_p), - - # Monetary information. - - # First three chars are a currency symbol from ISO 4217. - # Fourth char is the separator. Fifth char is '\0'. - ("int_curr_symbol", c_char_p), - ("currency_symbol", c_char_p), # Local currency symbol. - ("mon_decimal_point", c_char_p), # Decimal point character. - ("mon_thousands_sep", c_char_p), # Thousands separator. - ("mon_grouping", c_char_p), # Like `grouping' element (above). - ("positive_sign", c_char_p), # Sign for positive values. - ("negative_sign", c_char_p), # Sign for negative values. - ("int_frac_digits", c_ubyte), # Int'l fractional digits. - ("frac_digits", c_ubyte), # Local fractional digits. - # 1 if currency_symbol precedes a positive value, 0 if succeeds. - ("p_cs_precedes", c_ubyte), - # 1 iff a space separates currency_symbol from a positive value. - ("p_sep_by_space", c_ubyte), - # 1 if currency_symbol precedes a negative value, 0 if succeeds. - ("n_cs_precedes", c_ubyte), - # 1 iff a space separates currency_symbol from a negative value. 
- ("n_sep_by_space", c_ubyte), - - # Positive and negative sign positions: - # 0 Parentheses surround the quantity and currency_symbol. - # 1 The sign string precedes the quantity and currency_symbol. - # 2 The sign string follows the quantity and currency_symbol. - # 3 The sign string immediately precedes the currency_symbol. - # 4 The sign string immediately follows the currency_symbol. - ("p_sign_posn", c_ubyte), - ("n_sign_posn", c_ubyte), - # 1 if int_curr_symbol precedes a positive value, 0 if succeeds. - ("int_p_cs_precedes", c_ubyte), - # 1 iff a space separates int_curr_symbol from a positive value. - ("int_p_sep_by_space", c_ubyte), - # 1 if int_curr_symbol precedes a negative value, 0 if succeeds. - ("int_n_cs_precedes", c_ubyte), - # 1 iff a space separates int_curr_symbol from a negative value. - ("int_n_sep_by_space", c_ubyte), - # Positive and negative sign positions: - # 0 Parentheses surround the quantity and int_curr_symbol. - # 1 The sign string precedes the quantity and int_curr_symbol. - # 2 The sign string follows the quantity and int_curr_symbol. - # 3 The sign string immediately precedes the int_curr_symbol. - # 4 The sign string immediately follows the int_curr_symbol. 
- ("int_p_sign_posn", c_ubyte), - ("int_n_sign_posn", c_ubyte), - ) - -_setlocale = libc.setlocale -_setlocale.argtypes = (c_int, c_char_p) -_setlocale.restype = c_char_p - -_localeconv = libc.localeconv -_localeconv.argtypes = None -_localeconv.restype = POINTER(lconv) - -_strcoll = libc.strcoll -_strcoll.argtypes = (c_char_p, c_char_p) -_strcoll.restype = c_int - -_wcscoll = libc.wcscoll -_wcscoll.argtypes = (c_wchar_p, c_wchar_p) -_wcscoll.restype = c_int - -_strxfrm = libc.strxfrm -_strxfrm.argtypes = (c_char_p, c_char_p, c_size_t) -_strxfrm.restype = c_size_t - -HAS_LIBINTL = hasattr(libc, 'gettext') -if HAS_LIBINTL: - _gettext = libc.gettext - _gettext.argtypes = (c_char_p,) - _gettext.restype = c_char_p - - _dgettext = libc.dgettext - _dgettext.argtypes = (c_char_p, c_char_p) - _dgettext.restype = c_char_p - - _dcgettext = libc.dcgettext - _dcgettext.argtypes = (c_char_p, c_char_p, c_int) - _dcgettext.restype = c_char_p - - _textdomain = libc.textdomain - _textdomain.argtypes = (c_char_p,) - _textdomain.restype = c_char_p - - _bindtextdomain = libc.bindtextdomain - _bindtextdomain.argtypes = (c_char_p, c_char_p) - _bindtextdomain.restype = c_char_p - - HAS_BIND_TEXTDOMAIN_CODESET = hasattr(libc, 'bindtextdomain_codeset') - if HAS_BIND_TEXTDOMAIN_CODESET: - _bind_textdomain_codeset = libc.bindtextdomain_codeset - _bind_textdomain_codeset.argtypes = (c_char_p, c_char_p) - _bind_textdomain_codeset.restype = c_char_p - -class Error(Exception): - pass - -def fixup_ulcase(): - import string - #import strop - - # create uppercase map string - ul = [] - for c in xrange(256): - c = chr(c) - if c.isupper(): - ul.append(c) - ul = ''.join(ul) - string.uppercase = ul - #strop.uppercase = ul - - # create lowercase string - ul = [] - for c in xrange(256): - c = chr(c) - if c.islower(): - ul.append(c) - ul = ''.join(ul) - string.lowercase = ul - #strop.lowercase = ul - - # create letters string - ul = [] - for c in xrange(256): - c = chr(c) - if c.isalpha(): - ul.append(c) 
- ul = ''.join(ul) - string.letters = ul - - at builtinify -def setlocale(category, locale=None): - "(integer,string=None) -> string. Activates/queries locale processing." - if locale: - # set locale - result = _setlocale(category, locale) - if not result: - raise Error("unsupported locale setting") - - # record changes to LC_CTYPE - if category in (LC_CTYPE, LC_ALL): - fixup_ulcase() - else: - # get locale - result = _setlocale(category, None) - if not result: - raise Error("locale query failed") - return result - -def _copy_grouping(text): - groups = [ ord(group) for group in text ] - if groups: - groups.append(0) - return groups - - at builtinify -def localeconv(): - "() -> dict. Returns numeric and monetary locale-specific parameters." - - # if LC_NUMERIC is different in the C library, use saved value - lp = _localeconv() - l = lp.contents - - # hopefully, the localeconv result survives the C library calls - # involved herein - - # Numeric information - result = { - "decimal_point": l.decimal_point, - "thousands_sep": l.thousands_sep, - "grouping": _copy_grouping(l.grouping), - "int_curr_symbol": l.int_curr_symbol, - "currency_symbol": l.currency_symbol, - "mon_decimal_point": l.mon_decimal_point, - "mon_thousands_sep": l.mon_thousands_sep, - "mon_grouping": _copy_grouping(l.mon_grouping), - "positive_sign": l.positive_sign, - "negative_sign": l.negative_sign, - "int_frac_digits": l.int_frac_digits, - "frac_digits": l.frac_digits, - "p_cs_precedes": l.p_cs_precedes, - "p_sep_by_space": l.p_sep_by_space, - "n_cs_precedes": l.n_cs_precedes, - "n_sep_by_space": l.n_sep_by_space, - "p_sign_posn": l.p_sign_posn, - "n_sign_posn": l.n_sign_posn, - } - return result - - at builtinify -def strcoll(s1, s2): - "string,string -> int. Compares two strings according to the locale." - - # If both arguments are byte strings, use strcoll. - if isinstance(s1, str) and isinstance(s2, str): - return _strcoll(s1, s2) - - # If neither argument is unicode, it's an error. 
- if not isinstance(s1, unicode) and not isinstance(s2, unicode): - raise ValueError("strcoll arguments must be strings") - - # Convert the non-unicode argument to unicode. - s1 = unicode(s1) - s2 = unicode(s2) - - # Collate the strings. - return _wcscoll(s1, s2) - - at builtinify -def strxfrm(s): - "string -> string. Returns a string that behaves for cmp locale-aware." - - # assume no change in size, first - n1 = len(s) + 1 - buf = create_string_buffer(n1) - n2 = _strxfrm(buf, s, n1) + 1 - if n2 > n1: - # more space needed - buf = create_string_buffer(n2) - _strxfrm(buf, s, n2) - return buf.value - - at builtinify -def getdefaultlocale(): - # TODO: Port code from CPython for Windows and Mac OS - raise NotImplementedError() - -if HAS_LANGINFO: - _nl_langinfo = libc.nl_langinfo - _nl_langinfo.argtypes = (nl_item,) - _nl_langinfo.restype = c_char_p - - def nl_langinfo(key): - """nl_langinfo(key) -> string - Return the value for the locale information associated with key.""" - # Check whether this is a supported constant. GNU libc sometimes - # returns numeric values in the char* return value, which would - # crash PyString_FromString. 
- result = _nl_langinfo(key) - if result is not None: - return result - raise ValueError("unsupported langinfo constant") - -if HAS_LIBINTL: - @builtinify - def gettext(msg): - """gettext(msg) -> string - Return translation of msg.""" - return _gettext(msg) - - @builtinify - def dgettext(domain, msg): - """dgettext(domain, msg) -> string - Return translation of msg in domain.""" - return _dgettext(domain, msg) - - @builtinify - def dcgettext(domain, msg, category): - """dcgettext(domain, msg, category) -> string - Return translation of msg in domain and category.""" - return _dcgettext(domain, msg, category) - - @builtinify - def textdomain(domain): - """textdomain(domain) -> string - Set the C library's textdomain to domain, returning the new domain.""" - return _textdomain(domain) - - @builtinify - def bindtextdomain(domain, dir): - """bindtextdomain(domain, dir) -> string - Bind the C library's domain to dir.""" - dirname = _bindtextdomain(domain, dir) - if not dirname: - errno = get_errno() - raise OSError(errno) - return dirname - - if HAS_BIND_TEXTDOMAIN_CODESET: - @builtinify - def bind_textdomain_codeset(domain, codeset): - """bind_textdomain_codeset(domain, codeset) -> string - Bind the C library's domain to codeset.""" - codeset = _bind_textdomain_codeset(domain, codeset) - if codeset: - return codeset - return None - -__all__ = ( - 'Error', - 'setlocale', 'localeconv', 'strxfrm', 'strcoll', -) + ALL_CONSTANTS -if HAS_LIBINTL: - __all__ += ('gettext', 'dgettext', 'dcgettext', 'textdomain', - 'bindtextdomain') - if HAS_BIND_TEXTDOMAIN_CODESET: - __all__ += ('bind_textdomain_codeset',) -if HAS_LANGINFO: - __all__ += ('nl_langinfo',) diff --git a/lib_pypy/array.py b/lib_pypy/array.py deleted file mode 100644 --- a/lib_pypy/array.py +++ /dev/null @@ -1,531 +0,0 @@ -"""This module defines an object type which can efficiently represent -an array of basic values: characters, integers, floating point -numbers. 
Arrays are sequence types and behave very much like lists, -except that the type of objects stored in them is constrained. The -type is specified at object creation time by using a type code, which -is a single character. The following type codes are defined: - - Type code C Type Minimum size in bytes - 'c' character 1 - 'b' signed integer 1 - 'B' unsigned integer 1 - 'u' Unicode character 2 - 'h' signed integer 2 - 'H' unsigned integer 2 - 'i' signed integer 2 - 'I' unsigned integer 2 - 'l' signed integer 4 - 'L' unsigned integer 4 - 'f' floating point 4 - 'd' floating point 8 - -The constructor is: - -array(typecode [, initializer]) -- create a new array -""" - -from struct import calcsize, pack, pack_into, unpack_from -import operator - -# the buffer-like object to use internally: trying from -# various places in order... -try: - import _rawffi # a reasonable implementation based - _RAWARRAY = _rawffi.Array('c') # on raw_malloc, and providing a - def bytebuffer(size): # real address - return _RAWARRAY(size, autofree=True) - def getbufaddress(buf): - return buf.buffer -except ImportError: - try: - from __pypy__ import bytebuffer # a reasonable implementation - def getbufaddress(buf): # compatible with oo backends, - return 0 # but no address - except ImportError: - # not running on PyPy. Fall back to ctypes... - import ctypes - bytebuffer = ctypes.create_string_buffer - def getbufaddress(buf): - voidp = ctypes.cast(ctypes.pointer(buf), ctypes.c_void_p) - return voidp.value - -# ____________________________________________________________ - -TYPECODES = "cbBuhHiIlLfd" - -class array(object): - """array(typecode [, initializer]) -> array - - Return a new array whose items are restricted by typecode, and - initialized from the optional initializer value, which must be a list, - string. or iterable over elements of the appropriate type. - - Arrays represent basic values and behave very much like lists, except - the type of objects stored in them is constrained. 
- - Methods: - - append() -- append a new item to the end of the array - buffer_info() -- return information giving the current memory info - byteswap() -- byteswap all the items of the array - count() -- return number of occurences of an object - extend() -- extend array by appending multiple elements from an iterable - fromfile() -- read items from a file object - fromlist() -- append items from the list - fromstring() -- append items from the string - index() -- return index of first occurence of an object - insert() -- insert a new item into the array at a provided position - pop() -- remove and return item (default last) - read() -- DEPRECATED, use fromfile() - remove() -- remove first occurence of an object - reverse() -- reverse the order of the items in the array - tofile() -- write all items to a file object - tolist() -- return the array converted to an ordinary list - tostring() -- return the array converted to a string - write() -- DEPRECATED, use tofile() - - Attributes: - - typecode -- the typecode character used to create the array - itemsize -- the length in bytes of one array item - """ - __slots__ = ["typecode", "itemsize", "_data", "_descriptor", "__weakref__"] - - def __new__(cls, typecode, initializer=[], **extrakwds): - self = object.__new__(cls) - if cls is array and extrakwds: - raise TypeError("array() does not take keyword arguments") - if not isinstance(typecode, str) or len(typecode) != 1: - raise TypeError( - "array() argument 1 must be char, not %s" % type(typecode)) - if typecode not in TYPECODES: - raise ValueError( - "bad typecode (must be one of %s)" % ', '.join(TYPECODES)) - self._data = bytebuffer(0) - self.typecode = typecode - self.itemsize = calcsize(typecode) - if isinstance(initializer, list): - self.fromlist(initializer) - elif isinstance(initializer, str): - self.fromstring(initializer) - elif isinstance(initializer, unicode) and self.typecode == "u": - self.fromunicode(initializer) - else: - self.extend(initializer) - 
return self - - def _clear(self): - self._data = bytebuffer(0) - - ##### array-specific operations - - def fromfile(self, f, n): - """Read n objects from the file object f and append them to the end of - the array. Also called as read.""" - if not isinstance(f, file): - raise TypeError("arg1 must be open file") - size = self.itemsize * n - item = f.read(size) - if len(item) < size: - raise EOFError("not enough items in file") - self.fromstring(item) - - def fromlist(self, l): - """Append items to array from list.""" - if not isinstance(l, list): - raise TypeError("arg must be list") - self._fromiterable(l) - - def fromstring(self, s): - """Appends items from the string, interpreting it as an array of machine - values, as if it had been read from a file using the fromfile() - method.""" - if isinstance(s, unicode): - s = str(s) - self._frombuffer(s) - - def _frombuffer(self, s): - length = len(s) - if length % self.itemsize != 0: - raise ValueError("string length not a multiple of item size") - boundary = len(self._data) - newdata = bytebuffer(boundary + length) - newdata[:boundary] = self._data - newdata[boundary:] = s - self._data = newdata - - def fromunicode(self, ustr): - """Extends this array with data from the unicode string ustr. The array - must be a type 'u' array; otherwise a ValueError is raised. Use - array.fromstring(ustr.encode(...)) to append Unicode data to an array of - some other type.""" - if not self.typecode == "u": - raise ValueError( - "fromunicode() may only be called on type 'u' arrays") - # XXX the following probable bug is not emulated: - # CPython accepts a non-unicode string or a buffer, and then - # behaves just like fromstring(), except that it strangely truncates - # string arguments at multiples of the unicode byte size. - # Let's only accept unicode arguments for now. 
- if not isinstance(ustr, unicode): - raise TypeError("fromunicode() argument should probably be " - "a unicode string") - # _frombuffer() does the currect thing using - # the buffer behavior of unicode objects - self._frombuffer(buffer(ustr)) - - def tofile(self, f): - """Write all items (as machine values) to the file object f. Also - called as write.""" - if not isinstance(f, file): - raise TypeError("arg must be open file") - f.write(self.tostring()) - - def tolist(self): - """Convert array to an ordinary list with the same items.""" - count = len(self._data) // self.itemsize - return list(unpack_from('%d%s' % (count, self.typecode), self._data)) - - def tostring(self): - return self._data[:] - - def __buffer__(self): - return buffer(self._data) - - def tounicode(self): - """Convert the array to a unicode string. The array must be a type 'u' - array; otherwise a ValueError is raised. Use array.tostring().decode() - to obtain a unicode string from an array of some other type.""" - if self.typecode != "u": - raise ValueError("tounicode() may only be called on type 'u' arrays") - # XXX performance is not too good - return u"".join(self.tolist()) - - def byteswap(self): - """Byteswap all items of the array. If the items in the array are not - 1, 2, 4, or 8 bytes in size, RuntimeError is raised.""" - if self.itemsize not in [1, 2, 4, 8]: - raise RuntimeError("byteswap not supported for this array") - # XXX slowish - itemsize = self.itemsize - bytes = self._data - for start in range(0, len(bytes), itemsize): - stop = start + itemsize - bytes[start:stop] = bytes[start:stop][::-1] - - def buffer_info(self): - """Return a tuple (address, length) giving the current memory address - and the length in items of the buffer used to hold array's contents. The - length should be multiplied by the itemsize attribute to calculate the - buffer length in bytes. 
On PyPy the address might be meaningless - (returned as 0), depending on the available modules.""" - return (getbufaddress(self._data), len(self)) - - read = fromfile - - write = tofile - - ##### general object protocol - - def __repr__(self): - if len(self._data) == 0: - return "array('%s')" % self.typecode - elif self.typecode == "c": - return "array('%s', %s)" % (self.typecode, repr(self.tostring())) - elif self.typecode == "u": - return "array('%s', %s)" % (self.typecode, repr(self.tounicode())) - else: - return "array('%s', %s)" % (self.typecode, repr(self.tolist())) - - def __copy__(self): - a = array(self.typecode) - a._data = bytebuffer(len(self._data)) - a._data[:] = self._data - return a - - def __eq__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) == buffer(other._data) - else: - return self.tolist() == other.tolist() - - def __ne__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) != buffer(other._data) - else: - return self.tolist() != other.tolist() - - def __lt__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) < buffer(other._data) - else: - return self.tolist() < other.tolist() - - def __gt__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) > buffer(other._data) - else: - return self.tolist() > other.tolist() - - def __le__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) <= buffer(other._data) - else: - return self.tolist() <= other.tolist() - - def __ge__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) >= buffer(other._data) - else: - return self.tolist() >= other.tolist() - - 
def __reduce__(self): - dict = getattr(self, '__dict__', None) - data = self.tostring() - if data: - initargs = (self.typecode, data) - else: - initargs = (self.typecode,) - return (type(self), initargs, dict) - - ##### list methods - - def append(self, x): - """Append new value x to the end of the array.""" - self._frombuffer(pack(self.typecode, x)) - - def count(self, x): - """Return number of occurences of x in the array.""" - return operator.countOf(self, x) - - def extend(self, iterable): - """Append items to the end of the array.""" - if isinstance(iterable, array) \ - and not self.typecode == iterable.typecode: - raise TypeError("can only extend with array of same kind") - self._fromiterable(iterable) - - def index(self, x): - """Return index of first occurence of x in the array.""" - return operator.indexOf(self, x) - - def insert(self, i, x): - """Insert a new item x into the array before position i.""" - seqlength = len(self) - if i < 0: - i += seqlength - if i < 0: - i = 0 - elif i > seqlength: - i = seqlength - boundary = i * self.itemsize - data = pack(self.typecode, x) - newdata = bytebuffer(len(self._data) + len(data)) - newdata[:boundary] = self._data[:boundary] - newdata[boundary:boundary+self.itemsize] = data - newdata[boundary+self.itemsize:] = self._data[boundary:] - self._data = newdata - - def pop(self, i=-1): - """Return the i-th element and delete it from the array. 
i defaults to - -1.""" - seqlength = len(self) - if i < 0: - i += seqlength - if not (0 <= i < seqlength): - raise IndexError(i) - boundary = i * self.itemsize - result = unpack_from(self.typecode, self._data, boundary)[0] - newdata = bytebuffer(len(self._data) - self.itemsize) - newdata[:boundary] = self._data[:boundary] - newdata[boundary:] = self._data[boundary+self.itemsize:] - self._data = newdata - return result - - def remove(self, x): - """Remove the first occurence of x in the array.""" - self.pop(self.index(x)) - - def reverse(self): - """Reverse the order of the items in the array.""" - lst = self.tolist() - lst.reverse() - self._clear() - self.fromlist(lst) - - ##### list protocol - - def __len__(self): - return len(self._data) // self.itemsize - - def __add__(self, other): - if not isinstance(other, array): - raise TypeError("can only append array to array") - if self.typecode != other.typecode: - raise TypeError("bad argument type for built-in operation") - return array(self.typecode, buffer(self._data) + buffer(other._data)) - - def __mul__(self, repeat): - return array(self.typecode, buffer(self._data) * repeat) - - __rmul__ = __mul__ - - def __getitem__(self, i): - seqlength = len(self) - if isinstance(i, slice): - start, stop, step = i.indices(seqlength) - if step != 1: - sublist = self.tolist()[i] # fall-back - return array(self.typecode, sublist) - if start < 0: - start = 0 - if stop < start: - stop = start - assert stop <= seqlength - return array(self.typecode, self._data[start * self.itemsize : - stop * self.itemsize]) - else: - if i < 0: - i += seqlength - if self.typecode == 'c': # speed trick - return self._data[i] - if not (0 <= i < seqlength): - raise IndexError(i) - boundary = i * self.itemsize - return unpack_from(self.typecode, self._data, boundary)[0] - - def __getslice__(self, i, j): - return self.__getitem__(slice(i, j)) - - def __setitem__(self, i, x): - if isinstance(i, slice): - if (not isinstance(x, array) - or self.typecode != 
x.typecode): - raise TypeError("can only assign array of same kind" - " to array slice") - seqlength = len(self) - start, stop, step = i.indices(seqlength) - if step != 1: - sublist = self.tolist() # fall-back - sublist[i] = x.tolist() - self._clear() - self.fromlist(sublist) - return - if start < 0: - start = 0 - if stop < start: - stop = start - assert stop <= seqlength - boundary1 = start * self.itemsize - boundary2 = stop * self.itemsize - boundary2new = boundary1 + len(x._data) - if boundary2 == boundary2new: - self._data[boundary1:boundary2] = x._data - else: - newdata = bytebuffer(len(self._data) + boundary2new-boundary2) - newdata[:boundary1] = self._data[:boundary1] - newdata[boundary1:boundary2new] = x._data - newdata[boundary2new:] = self._data[boundary2:] - self._data = newdata - else: - seqlength = len(self) - if i < 0: - i += seqlength - if self.typecode == 'c': # speed trick - self._data[i] = x - return - if not (0 <= i < seqlength): - raise IndexError(i) - boundary = i * self.itemsize - pack_into(self.typecode, self._data, boundary, x) - - def __setslice__(self, i, j, x): - self.__setitem__(slice(i, j), x) - - def __delitem__(self, i): - if isinstance(i, slice): - seqlength = len(self) - start, stop, step = i.indices(seqlength) - if start < 0: - start = 0 - if stop < start: - stop = start - assert stop <= seqlength - if step != 1: - sublist = self.tolist() # fall-back - del sublist[i] - self._clear() - self.fromlist(sublist) - return - dellength = stop - start - boundary1 = start * self.itemsize - boundary2 = stop * self.itemsize - newdata = bytebuffer(len(self._data) - (boundary2-boundary1)) - newdata[:boundary1] = self._data[:boundary1] - newdata[boundary1:] = self._data[boundary2:] - self._data = newdata - else: - seqlength = len(self) - if i < 0: - i += seqlength - if not (0 <= i < seqlength): - raise IndexError(i) - boundary = i * self.itemsize - newdata = bytebuffer(len(self._data) - self.itemsize) - newdata[:boundary] = self._data[:boundary] 
- newdata[boundary:] = self._data[boundary+self.itemsize:] - self._data = newdata - - def __delslice__(self, i, j): - self.__delitem__(slice(i, j)) - - def __contains__(self, item): - for x in self: - if x == item: - return True - return False - - def __iadd__(self, other): - if not isinstance(other, array): - raise TypeError("can only extend array with array") - self.extend(other) - return self - - def __imul__(self, repeat): - newdata = buffer(self._data) * repeat - self._data = bytebuffer(len(newdata)) - self._data[:] = newdata - return self - - def __iter__(self): - p = 0 - typecode = self.typecode - itemsize = self.itemsize - while p < len(self._data): - yield unpack_from(typecode, self._data, p)[0] - p += itemsize - - ##### internal methods - - def _fromiterable(self, iterable): - iterable = tuple(iterable) - n = len(iterable) - boundary = len(self._data) - newdata = bytebuffer(boundary + n * self.itemsize) - newdata[:boundary] = self._data - pack_into('%d%s' % (n, self.typecode), newdata, boundary, *iterable) - self._data = newdata - -ArrayType = array diff --git a/lib_pypy/binascii.py b/lib_pypy/binascii.py deleted file mode 100644 --- a/lib_pypy/binascii.py +++ /dev/null @@ -1,720 +0,0 @@ -"""A pure Python implementation of binascii. - -Rather slow and buggy in corner cases. -PyPy provides an RPython version too. 
-""" - -class Error(Exception): - pass - -class Done(Exception): - pass - -class Incomplete(Exception): - pass - -def a2b_uu(s): - if not s: - return '' - - length = (ord(s[0]) - 0x20) % 64 - - def quadruplets_gen(s): - while s: - try: - yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3]) - except IndexError: - s += ' ' - yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3]) - return - s = s[4:] - - try: - result = [''.join( - [chr((A - 0x20) << 2 | (((B - 0x20) >> 4) & 0x3)), - chr(((B - 0x20) & 0xf) << 4 | (((C - 0x20) >> 2) & 0xf)), - chr(((C - 0x20) & 0x3) << 6 | ((D - 0x20) & 0x3f)) - ]) for A, B, C, D in quadruplets_gen(s[1:].rstrip())] - except ValueError: - raise Error('Illegal char') - result = ''.join(result) - trailingdata = result[length:] - if trailingdata.strip('\x00'): - raise Error('Trailing garbage') - result = result[:length] - if len(result) < length: - result += ((length - len(result)) * '\x00') - return result - - -def b2a_uu(s): - length = len(s) - if length > 45: - raise Error('At most 45 bytes at once') - - def triples_gen(s): - while s: - try: - yield ord(s[0]), ord(s[1]), ord(s[2]) - except IndexError: - s += '\0\0' - yield ord(s[0]), ord(s[1]), ord(s[2]) - return - s = s[3:] - - result = [''.join( - [chr(0x20 + (( A >> 2 ) & 0x3F)), - chr(0x20 + (((A << 4) | ((B >> 4) & 0xF)) & 0x3F)), - chr(0x20 + (((B << 2) | ((C >> 6) & 0x3)) & 0x3F)), - chr(0x20 + (( C ) & 0x3F))]) - for A, B, C in triples_gen(s)] - return chr(ord(' ') + (length & 077)) + ''.join(result) + '\n' - - -table_a2b_base64 = { - 'A': 0, - 'B': 1, - 'C': 2, - 'D': 3, - 'E': 4, - 'F': 5, - 'G': 6, - 'H': 7, - 'I': 8, - 'J': 9, - 'K': 10, - 'L': 11, - 'M': 12, - 'N': 13, - 'O': 14, - 'P': 15, - 'Q': 16, - 'R': 17, - 'S': 18, - 'T': 19, - 'U': 20, - 'V': 21, - 'W': 22, - 'X': 23, - 'Y': 24, - 'Z': 25, - 'a': 26, - 'b': 27, - 'c': 28, - 'd': 29, - 'e': 30, - 'f': 31, - 'g': 32, - 'h': 33, - 'i': 34, - 'j': 35, - 'k': 36, - 'l': 37, - 'm': 38, - 'n': 39, - 'o': 40, - 'p': 41, - 'q': 42, - 
'r': 43, - 's': 44, - 't': 45, - 'u': 46, - 'v': 47, - 'w': 48, - 'x': 49, - 'y': 50, - 'z': 51, - '0': 52, - '1': 53, - '2': 54, - '3': 55, - '4': 56, - '5': 57, - '6': 58, - '7': 59, - '8': 60, - '9': 61, - '+': 62, - '/': 63, - '=': 0, -} - - -def a2b_base64(s): - if not isinstance(s, (str, unicode)): - raise TypeError("expected string or unicode, got %r" % (s,)) - s = s.rstrip() - # clean out all invalid characters, this also strips the final '=' padding - # check for correct padding - - def next_valid_char(s, pos): - for i in range(pos + 1, len(s)): - c = s[i] - if c < '\x7f': - try: - table_a2b_base64[c] - return c - except KeyError: - pass - return None - - quad_pos = 0 - leftbits = 0 - leftchar = 0 - res = [] - for i, c in enumerate(s): - if c > '\x7f' or c == '\n' or c == '\r' or c == ' ': - continue - if c == '=': - if quad_pos < 2 or (quad_pos == 2 and next_valid_char(s, i) != '='): - continue - else: - leftbits = 0 - break - try: - next_c = table_a2b_base64[c] - except KeyError: - continue - quad_pos = (quad_pos + 1) & 0x03 - leftchar = (leftchar << 6) | next_c - leftbits += 6 - if leftbits >= 8: - leftbits -= 8 - res.append((leftchar >> leftbits & 0xff)) - leftchar &= ((1 << leftbits) - 1) - if leftbits != 0: - raise Error('Incorrect padding') - - return ''.join([chr(i) for i in res]) - -table_b2a_base64 = \ -"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" - -def b2a_base64(s): - length = len(s) - final_length = length % 3 - - def triples_gen(s): - while s: - try: - yield ord(s[0]), ord(s[1]), ord(s[2]) - except IndexError: - s += '\0\0' - yield ord(s[0]), ord(s[1]), ord(s[2]) - return - s = s[3:] - - - a = triples_gen(s[ :length - final_length]) - - result = [''.join( - [table_b2a_base64[( A >> 2 ) & 0x3F], - table_b2a_base64[((A << 4) | ((B >> 4) & 0xF)) & 0x3F], - table_b2a_base64[((B << 2) | ((C >> 6) & 0x3)) & 0x3F], - table_b2a_base64[( C ) & 0x3F]]) - for A, B, C in a] - - final = s[length - final_length:] - if final_length == 
0: - snippet = '' - elif final_length == 1: - a = ord(final[0]) - snippet = table_b2a_base64[(a >> 2 ) & 0x3F] + \ - table_b2a_base64[(a << 4 ) & 0x3F] + '==' - else: - a = ord(final[0]) - b = ord(final[1]) - snippet = table_b2a_base64[(a >> 2) & 0x3F] + \ - table_b2a_base64[((a << 4) | (b >> 4) & 0xF) & 0x3F] + \ - table_b2a_base64[(b << 2) & 0x3F] + '=' - return ''.join(result) + snippet + '\n' - -def a2b_qp(s, header=False): - inp = 0 - odata = [] - while inp < len(s): - if s[inp] == '=': - inp += 1 - if inp >= len(s): - break - # Soft line breaks - if (s[inp] == '\n') or (s[inp] == '\r'): - if s[inp] != '\n': - while inp < len(s) and s[inp] != '\n': - inp += 1 - if inp < len(s): - inp += 1 - elif s[inp] == '=': - # broken case from broken python qp - odata.append('=') - inp += 1 - elif s[inp] in hex_numbers and s[inp + 1] in hex_numbers: - ch = chr(int(s[inp:inp+2], 16)) - inp += 2 - odata.append(ch) - else: - odata.append('=') - elif header and s[inp] == '_': - odata.append(' ') - inp += 1 - else: - odata.append(s[inp]) - inp += 1 - return ''.join(odata) - -def b2a_qp(data, quotetabs=False, istext=True, header=False): - """quotetabs=True means that tab and space characters are always - quoted. - istext=False means that \r and \n are treated as regular characters - header=True encodes space characters with '_' and requires - real '_' characters to be quoted. - """ - MAXLINESIZE = 76 - - # See if this string is using CRLF line ends - lf = data.find('\n') - crlf = lf > 0 and data[lf-1] == '\r' - - inp = 0 - linelen = 0 - odata = [] - while inp < len(data): - c = data[inp] - if (c > '~' or - c == '=' or - (header and c == '_') or - (c == '.' 
and linelen == 0 and (inp+1 == len(data) or - data[inp+1] == '\n' or - data[inp+1] == '\r')) or - (not istext and (c == '\r' or c == '\n')) or - ((c == '\t' or c == ' ') and (inp + 1 == len(data))) or - (c <= ' ' and c != '\r' and c != '\n' and - (quotetabs or (not quotetabs and (c != '\t' and c != ' '))))): - linelen += 3 - if linelen >= MAXLINESIZE: - odata.append('=') - if crlf: odata.append('\r') - odata.append('\n') - linelen = 3 - odata.append('=' + two_hex_digits(ord(c))) - inp += 1 - else: - if (istext and - (c == '\n' or (inp+1 < len(data) and c == '\r' and - data[inp+1] == '\n'))): - linelen = 0 - # Protect against whitespace on end of line - if (len(odata) > 0 and - (odata[-1] == ' ' or odata[-1] == '\t')): - ch = ord(odata[-1]) - odata[-1] = '=' - odata.append(two_hex_digits(ch)) - - if crlf: odata.append('\r') - odata.append('\n') - if c == '\r': - inp += 2 - else: - inp += 1 - else: - if (inp + 1 < len(data) and - data[inp+1] != '\n' and - (linelen + 1) >= MAXLINESIZE): - odata.append('=') - if crlf: odata.append('\r') - odata.append('\n') - linelen = 0 - - linelen += 1 - if header and c == ' ': - c = '_' - odata.append(c) - inp += 1 - return ''.join(odata) - -hex_numbers = '0123456789ABCDEF' -def hex(n): - if n == 0: - return '0' - - if n < 0: - n = -n - sign = '-' - else: - sign = '' - arr = [] - - def hex_gen(n): - """ Yield a nibble at a time. 
""" - while n: - yield n % 0x10 - n = n / 0x10 - - for nibble in hex_gen(n): - arr = [hex_numbers[nibble]] + arr - return sign + ''.join(arr) - -def two_hex_digits(n): - return hex_numbers[n / 0x10] + hex_numbers[n % 0x10] - - -def strhex_to_int(s): - i = 0 - for c in s: - i = i * 0x10 + hex_numbers.index(c) - return i - -hqx_encoding = '!"#$%&\'()*+,-012345689 at ABCDEFGHIJKLMNPQRSTUVXYZ[`abcdefhijklmpqr' - -DONE = 0x7f -SKIP = 0x7e -FAIL = 0x7d - -table_a2b_hqx = [ - #^@ ^A ^B ^C ^D ^E ^F ^G - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - #\b \t \n ^K ^L \r ^N ^O - FAIL, FAIL, SKIP, FAIL, FAIL, SKIP, FAIL, FAIL, - #^P ^Q ^R ^S ^T ^U ^V ^W - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - #^X ^Y ^Z ^[ ^\ ^] ^^ ^_ - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - # ! " # $ % & ' - FAIL, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, - #( ) * + , - . / - 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, FAIL, FAIL, - #0 1 2 3 4 5 6 7 - 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, FAIL, - #8 9 : ; < = > ? - 0x14, 0x15, DONE, FAIL, FAIL, FAIL, FAIL, FAIL, - #@ A B C D E F G - 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, - #H I J K L M N O - 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, FAIL, - #P Q R S T U V W - 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, FAIL, - #X Y Z [ \ ] ^ _ - 0x2C, 0x2D, 0x2E, 0x2F, FAIL, FAIL, FAIL, FAIL, - #` a b c d e f g - 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, FAIL, - #h i j k l m n o - 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, FAIL, FAIL, - #p q r s t u v w - 0x3D, 0x3E, 0x3F, FAIL, FAIL, FAIL, FAIL, FAIL, - #x y z { | } ~ ^? 
- FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, -] - -def a2b_hqx(s): - result = [] - - def quadruples_gen(s): - t = [] - for c in s: - res = table_a2b_hqx[ord(c)] - if res == SKIP: - continue - elif res == FAIL: - raise Error('Illegal character') - elif res == DONE: - yield t - raise Done - else: - t.append(res) - if len(t) == 4: - yield t - t = [] - yield t - - done = 0 - try: - for snippet in quadruples_gen(s): - length = len(snippet) - if length == 4: - result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4))) - result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2))) - result.append(chr(((snippet[2] & 0x03) << 6) | (snippet[3]))) - elif length == 3: - result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4))) - result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2))) - elif length == 2: - result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4))) - except Done: - done = 1 - except Error: - raise - return (''.join(result), done) - -def b2a_hqx(s): - result =[] - - def triples_gen(s): - while s: - try: - yield ord(s[0]), ord(s[1]), ord(s[2]) - except IndexError: - yield tuple([ord(c) for c in s]) - s = s[3:] - - for snippet in 
triples_gen(s): - length = len(snippet) - if length == 3: - result.append( - hqx_encoding[(snippet[0] & 0xfc) >> 2]) - result.append(hqx_encoding[ - ((snippet[0] & 0x03) << 4) | ((snippet[1] & 0xf0) >> 4)]) - result.append(hqx_encoding[ - (snippet[1] & 0x0f) << 2 | ((snippet[2] & 0xc0) >> 6)]) - result.append(hqx_encoding[snippet[2] & 0x3f]) - elif length == 2: - result.append( - hqx_encoding[(snippet[0] & 0xfc) >> 2]) - result.append(hqx_encoding[ - ((snippet[0] & 0x03) << 4) | ((snippet[1] & 0xf0) >> 4)]) - result.append(hqx_encoding[ - (snippet[1] & 0x0f) << 2]) - elif length == 1: - result.append( - hqx_encoding[(snippet[0] & 0xfc) >> 2]) - result.append(hqx_encoding[ - ((snippet[0] & 0x03) << 4)]) - return ''.join(result) - -crctab_hqx = [ - 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, - 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, - 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, - 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, - 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, - 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, - 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, - 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, - 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, - 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, - 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12, - 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, - 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, - 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, - 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, - 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, - 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, - 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, - 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 
0xe37f, 0xf35e, - 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, - 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, - 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, - 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, - 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, - 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, - 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, - 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, - 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, - 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, - 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, - 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, - 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0, -] - -def crc_hqx(s, crc): - for c in s: - crc = ((crc << 8) & 0xff00) ^ crctab_hqx[((crc >> 8) & 0xff) ^ ord(c)] - - return crc - -def rlecode_hqx(s): - """ - Run length encoding for binhex4. - The CPython implementation does not do run length encoding - of \x90 characters. This implementation does. - """ - if not s: - return '' - result = [] - prev = s[0] - count = 1 - # Add a dummy character to get the loop to go one extra round. - # The dummy must be different from the last character of s. - # In the same step we remove the first character, which has - # already been stored in prev. - if s[-1] == '!': - s = s[1:] + '?' - else: - s = s[1:] + '!' 
- - for c in s: - if c == prev and count < 255: - count += 1 - else: - if count == 1: - if prev != '\x90': - result.append(prev) - else: - result.extend(['\x90', '\x00']) - elif count < 4: - if prev != '\x90': - result.extend([prev] * count) - else: - result.extend(['\x90', '\x00'] * count) - else: - if prev != '\x90': - result.extend([prev, '\x90', chr(count)]) - else: - result.extend(['\x90', '\x00', '\x90', chr(count)]) - count = 1 - prev = c - - return ''.join(result) - -def rledecode_hqx(s): - s = s.split('\x90') - result = [s[0]] - prev = s[0] - for snippet in s[1:]: - count = ord(snippet[0]) - if count > 0: - result.append(prev[-1] * (count-1)) - prev = snippet - else: - result. append('\x90') - prev = '\x90' - result.append(snippet[1:]) - - return ''.join(result) - -crc_32_tab = [ - 0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L, - 0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L, - 0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L, - 0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL, - 0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L, - 0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L, - 0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L, - 0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL, - 0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L, - 0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL, - 0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L, - 0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L, - 0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L, - 0x01db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x06b6b51fL, - 0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0x0f00f934L, 0x9609a88eL, - 0xe10e9818L, 0x7f6a0dbbL, 0x086d3d2dL, 0x91646c97L, 0xe6635c01L, - 0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL, - 0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 
0x12b7e950L, - 0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L, - 0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L, - 0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL, - 0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L, - 0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L, - 0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL, - 0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L, - 0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L, - 0x03b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x04db2615L, - 0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0x0d6d6a3eL, 0x7a6a5aa8L, - 0xe40ecf0bL, 0x9309ff9dL, 0x0a00ae27L, 0x7d079eb1L, 0xf00f9344L, - 0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL, - 0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL, - 0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L, - 0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L, - 0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL, - 0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL, - 0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L, - 0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL, - 0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L, - 0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL, - 0x026d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x05005713L, - 0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0x0cb61b38L, 0x92d28e9bL, - 0xe5d5be0dL, 0x7cdcefb7L, 0x0bdbdf21L, 0x86d3d2d4L, 0xf1d4e242L, - 0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L, - 0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL, - 0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L, - 0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L, - 0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L, - 0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 
0x47b2cf7fL, 0x30b5ffe9L, - 0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L, - 0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L, - 0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL, - 0x2d02ef8dL -] - -def crc32(s, crc=0): - result = 0 - crc = ~long(crc) & 0xffffffffL - for c in s: - crc = crc_32_tab[(crc ^ long(ord(c))) & 0xffL] ^ (crc >> 8) - #/* Note: (crc >> 8) MUST zero fill on left - - result = crc ^ 0xffffffffL - - if result > 2**31: - result = ((result + 2**31) % 2**32) - 2**31 - - return result - -def b2a_hex(s): - result = [] - for char in s: - c = (ord(char) >> 4) & 0xf - if c > 9: - c = c + ord('a') - 10 - else: - c = c + ord('0') - result.append(chr(c)) - c = ord(char) & 0xf - if c > 9: - c = c + ord('a') - 10 - else: - c = c + ord('0') - result.append(chr(c)) - return ''.join(result) - -hexlify = b2a_hex - -table_hex = [ - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,-1,-1, -1,-1,-1,-1, - -1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1 -] - - -def a2b_hex(t): - result = [] - - def pairs_gen(s): - while s: - try: - yield table_hex[ord(s[0])], table_hex[ord(s[1])] - except IndexError: - if len(s): - raise TypeError('Odd-length string') - return - s = s[2:] - - for a, b in pairs_gen(t): - if a < 0 or b < 0: - raise TypeError('Non-hexadecimal digit found') - result.append(chr((a << 4) + b)) - return ''.join(result) - - -unhexlify = a2b_hex diff --git a/lib_pypy/numpypy/core/numeric.py b/lib_pypy/numpypy/core/numeric.py --- a/lib_pypy/numpypy/core/numeric.py +++ b/lib_pypy/numpypy/core/numeric.py @@ -306,6 +306,125 @@ else: return multiarray.set_string_function(f, repr) +def array_equal(a1, a2): + """ + 
True if two arrays have the same shape and elements, False otherwise. + + Parameters + ---------- + a1, a2 : array_like + Input arrays. + + Returns + ------- + b : bool + Returns True if the arrays are equal. + + See Also + -------- + allclose: Returns True if two arrays are element-wise equal within a + tolerance. + array_equiv: Returns True if input arrays are shape consistent and all + elements equal. + + Examples + -------- + >>> np.array_equal([1, 2], [1, 2]) + True + >>> np.array_equal(np.array([1, 2]), np.array([1, 2])) + True + >>> np.array_equal([1, 2], [1, 2, 3]) + False + >>> np.array_equal([1, 2], [1, 4]) + False + + """ + try: + a1, a2 = asarray(a1), asarray(a2) + except: + return False + if a1.shape != a2.shape: + return False + return bool((a1 == a2).all()) + +def asarray(a, dtype=None, order=None, maskna=None, ownmaskna=False): + """ + Convert the input to an array. + + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to an array. This + includes lists, lists of tuples, tuples, tuples of tuples, tuples + of lists and ndarrays. + dtype : data-type, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F'}, optional + Whether to use row-major ('C') or column-major ('F' for FORTRAN) + memory representation. Defaults to 'C'. + maskna : bool or None, optional + If this is set to True, it forces the array to have an NA mask. + If this is set to False, it forces the array to not have an NA + mask. + ownmaskna : bool, optional + If this is set to True, forces the array to have a mask which + it owns. + + Returns + ------- + out : ndarray + Array interpretation of `a`. No copy is performed if the input + is already an ndarray. If `a` is a subclass of ndarray, a base + class ndarray is returned. + + See Also + -------- + asanyarray : Similar function which passes through subclasses. + ascontiguousarray : Convert input to a contiguous array. 
+ asfarray : Convert input to a floating point ndarray. + asfortranarray : Convert input to an ndarray with column-major + memory order. + asarray_chkfinite : Similar function which checks input for NaNs and Infs. + fromiter : Create an array from an iterator. + fromfunction : Construct an array by executing a function on grid + positions. + + Examples + -------- + Convert a list into an array: + + >>> a = [1, 2] + >>> np.asarray(a) + array([1, 2]) + + Existing arrays are not copied: + + >>> a = np.array([1, 2]) + >>> np.asarray(a) is a + True + + If `dtype` is set, array is copied only if dtype does not match: + + >>> a = np.array([1, 2], dtype=np.float32) + >>> np.asarray(a, dtype=np.float32) is a + True + >>> np.asarray(a, dtype=np.float64) is a + False + + Contrary to `asanyarray`, ndarray subclasses are not passed through: + + >>> issubclass(np.matrix, np.ndarray) + True + >>> a = np.matrix([[1, 2]]) + >>> np.asarray(a) is a + False + >>> np.asanyarray(a) is a + True + + """ + return array(a, dtype, copy=False, order=order, + maskna=maskna, ownmaskna=ownmaskna) + set_string_function(array_str, 0) set_string_function(array_repr, 1) diff --git a/lib_pypy/pypy_test/test_binascii.py b/lib_pypy/pypy_test/test_binascii.py deleted file mode 100644 --- a/lib_pypy/pypy_test/test_binascii.py +++ /dev/null @@ -1,168 +0,0 @@ -from __future__ import absolute_import -import py -from lib_pypy import binascii - -# Create binary test data -data = "The quick brown fox jumps over the lazy dog.\r\n" -# Be slow so we don't depend on other modules -data += "".join(map(chr, xrange(256))) -data += "\r\nHello world.\n" - -def test_exceptions(): - # Check module exceptions - assert issubclass(binascii.Error, Exception) - assert issubclass(binascii.Incomplete, Exception) - -def test_functions(): - # Check presence of all functions - funcs = [] - for suffix in "base64", "hqx", "uu", "hex": - prefixes = ["a2b_", "b2a_"] - if suffix == "hqx": - prefixes.extend(["crc_", "rlecode_", 
"rledecode_"]) - for prefix in prefixes: - name = prefix + suffix - assert callable(getattr(binascii, name)) - py.test.raises(TypeError, getattr(binascii, name)) - for name in ("hexlify", "unhexlify"): - assert callable(getattr(binascii, name)) - py.test.raises(TypeError, getattr(binascii, name)) - -def test_base64valid(): - # Test base64 with valid data - MAX_BASE64 = 57 - lines = [] - for i in range(0, len(data), MAX_BASE64): - b = data[i:i+MAX_BASE64] - a = binascii.b2a_base64(b) - lines.append(a) - res = "" - for line in lines: - b = binascii.a2b_base64(line) - res = res + b - assert res == data - -def test_base64invalid(): - # Test base64 with random invalid characters sprinkled throughout - # (This requires a new version of binascii.) - MAX_BASE64 = 57 - lines = [] - for i in range(0, len(data), MAX_BASE64): - b = data[i:i+MAX_BASE64] - a = binascii.b2a_base64(b) - lines.append(a) - - fillers = "" - valid = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+/" - for i in xrange(256): - c = chr(i) - if c not in valid: - fillers += c - def addnoise(line): - noise = fillers - ratio = len(line) // len(noise) - res = "" - while line and noise: - if len(line) // len(noise) > ratio: - c, line = line[0], line[1:] - else: - c, noise = noise[0], noise[1:] - res += c - return res + noise + line - res = "" - for line in map(addnoise, lines): - b = binascii.a2b_base64(line) - res += b - assert res == data - - # Test base64 with just invalid characters, which should return - # empty strings. TBD: shouldn't it raise an exception instead ? 
- assert binascii.a2b_base64(fillers) == '' - -def test_uu(): - MAX_UU = 45 - lines = [] - for i in range(0, len(data), MAX_UU): - b = data[i:i+MAX_UU] - a = binascii.b2a_uu(b) - lines.append(a) - res = "" - for line in lines: - b = binascii.a2b_uu(line) - res += b - assert res == data - - assert binascii.a2b_uu("\x7f") == "\x00"*31 - assert binascii.a2b_uu("\x80") == "\x00"*32 - assert binascii.a2b_uu("\xff") == "\x00"*31 - py.test.raises(binascii.Error, binascii.a2b_uu, "\xff\x00") - py.test.raises(binascii.Error, binascii.a2b_uu, "!!!!") - - py.test.raises(binascii.Error, binascii.b2a_uu, 46*"!") - -def test_crc32(): - crc = binascii.crc32("Test the CRC-32 of") - crc = binascii.crc32(" this string.", crc) - assert crc == 1571220330 - - crc = binascii.crc32('frotz\n', 0) - assert crc == -372923920 - - py.test.raises(TypeError, binascii.crc32) - -def test_hex(): - # test hexlification - s = '{s\005\000\000\000worldi\002\000\000\000s\005\000\000\000helloi\001\000\000\0000' - t = binascii.b2a_hex(s) - u = binascii.a2b_hex(t) - assert s == u - py.test.raises(TypeError, binascii.a2b_hex, t[:-1]) - py.test.raises(TypeError, binascii.a2b_hex, t[:-1] + 'q') - - # Verify the treatment of Unicode strings - assert binascii.hexlify(unicode('a', 'ascii')) == '61' - -def test_qp(): - # A test for SF bug 534347 (segfaults without the proper fix) - try: - binascii.a2b_qp("", **{1:1}) - except TypeError: - pass - else: - fail("binascii.a2b_qp(**{1:1}) didn't raise TypeError") - assert binascii.a2b_qp("= ") == "= " - assert binascii.a2b_qp("==") == "=" - assert binascii.a2b_qp("=AX") == "=AX" - py.test.raises(TypeError, binascii.b2a_qp, foo="bar") - assert binascii.a2b_qp("=00\r\n=00") == "\x00\r\n\x00" - assert binascii.b2a_qp("\xff\r\n\xff\n\xff") == "=FF\r\n=FF\r\n=FF" - target = "0"*75+"=\r\n=FF\r\n=FF\r\n=FF" - assert binascii.b2a_qp("0"*75+"\xff\r\n\xff\r\n\xff") == target - -def test_empty_string(): - # A test for SF bug #1022953. Make sure SystemError is not raised. 
- for n in ['b2a_qp', 'a2b_hex', 'b2a_base64', 'a2b_uu', 'a2b_qp', - 'b2a_hex', 'unhexlify', 'hexlify', 'crc32', 'b2a_hqx', - 'a2b_hqx', 'a2b_base64', 'rlecode_hqx', 'b2a_uu', - 'rledecode_hqx']: - f = getattr(binascii, n) - f('') - binascii.crc_hqx('', 0) - -def test_qp_bug_case(): - assert binascii.b2a_qp('y'*77, False, False) == 'y'*75 + '=\nyy' - assert binascii.b2a_qp(' '*77, False, False) == ' '*75 + '=\n =20' - assert binascii.b2a_qp('y'*76, False, False) == 'y'*76 - assert binascii.b2a_qp(' '*76, False, False) == ' '*75 + '=\n=20' - -def test_wrong_padding(): - s = 'CSixpLDtKSC/7Liuvsax4iC6uLmwMcijIKHaILzSwd/H0SC8+LCjwLsgv7W/+Mj3IQ' - py.test.raises(binascii.Error, binascii.a2b_base64, s) - -def test_crap_after_padding(): - s = 'xxx=axxxx' - assert binascii.a2b_base64(s) == '\xc7\x1c' - -def test_wrong_args(): - # this should grow as a way longer list - py.test.raises(TypeError, binascii.a2b_base64, 42) diff --git a/lib_pypy/pypy_test/test_locale.py b/lib_pypy/pypy_test/test_locale.py deleted file mode 100644 --- a/lib_pypy/pypy_test/test_locale.py +++ /dev/null @@ -1,79 +0,0 @@ -from __future__ import absolute_import -import py -import sys - -from lib_pypy.ctypes_config_cache import rebuild -rebuild.rebuild_one('locale.ctc.py') - -from lib_pypy import _locale - - -def setup_module(mod): - if sys.platform == 'darwin': - py.test.skip("Locale support on MacOSX is minimal and cannot be tested") - -class TestLocale: - def setup_class(cls): - cls.oldlocale = _locale.setlocale(_locale.LC_NUMERIC) - if sys.platform.startswith("win"): - cls.tloc = "en" - elif sys.platform.startswith("freebsd"): - cls.tloc = "en_US.US-ASCII" - else: - cls.tloc = "en_US.UTF8" - try: - _locale.setlocale(_locale.LC_NUMERIC, cls.tloc) - except _locale.Error: - py.test.skip("test locale %s not supported" % cls.tloc) - - def teardown_class(cls): - _locale.setlocale(_locale.LC_NUMERIC, cls.oldlocale) - - def test_format(self): - py.test.skip("XXX fix or kill me") - - def 
testformat(formatstr, value, grouping = 0, output=None): - if output: - print "%s %% %s =? %s ..." %\ - (repr(formatstr), repr(value), repr(output)), - else: - print "%s %% %s works? ..." % (repr(formatstr), repr(value)), - result = locale.format(formatstr, value, grouping = grouping) - assert result == output - - testformat("%f", 1024, grouping=1, output='1,024.000000') - testformat("%f", 102, grouping=1, output='102.000000') - testformat("%f", -42, grouping=1, output='-42.000000') - testformat("%+f", -42, grouping=1, output='-42.000000') - testformat("%20.f", -42, grouping=1, output=' -42') - testformat("%+10.f", -4200, grouping=1, output=' -4,200') - testformat("%-10.f", 4200, grouping=1, output='4,200 ') - - def test_getpreferredencoding(self): - py.test.skip("XXX fix or kill me") - # Invoke getpreferredencoding to make sure it does not cause exceptions - _locale.getpreferredencoding() - - # Test BSD Rune locale's bug for isctype functions. - def test_bsd_bug(self): - def teststrop(s, method, output): - print "%s.%s() =? %s ..." 
% (repr(s), method, repr(output)), - result = getattr(s, method)() - assert result == output - - oldlocale = _locale.setlocale(_locale.LC_CTYPE) - _locale.setlocale(_locale.LC_CTYPE, self.tloc) - try: - teststrop('\x20', 'isspace', True) - teststrop('\xa0', 'isspace', False) - teststrop('\xa1', 'isspace', False) - teststrop('\xc0', 'isalpha', False) - teststrop('\xc0', 'isalnum', False) - teststrop('\xc0', 'isupper', False) - teststrop('\xc0', 'islower', False) - teststrop('\xec\xa0\xbc', 'split', ['\xec\xa0\xbc']) - teststrop('\xed\x95\xa0', 'strip', '\xed\x95\xa0') - teststrop('\xcc\x85', 'lower', '\xcc\x85') - teststrop('\xed\x95\xa0', 'upper', '\xed\x95\xa0') - finally: - _locale.setlocale(_locale.LC_CTYPE, oldlocale) diff --git a/lib_pypy/pypy_test/test_site_extra.py b/lib_pypy/pypy_test/test_site_extra.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pypy_test/test_site_extra.py @@ -0,0 +1,13 @@ +import sys, os + + +def test_preimported_modules(): + lst = ['__builtin__', '_codecs', '_warnings', 'codecs', 'encodings', + 'exceptions', 'signal', 'sys', 'zipimport'] + g = os.popen("'%s' -c 'import sys; print sorted(sys.modules)'" % + (sys.executable,)) + real_data = g.read() + g.close() + for name in lst: + quoted_name = repr(name) + assert quoted_name in real_data diff --git a/lib_pypy/pypy_test/test_struct_extra.py b/lib_pypy/pypy_test/test_struct_extra.py deleted file mode 100644 --- a/lib_pypy/pypy_test/test_struct_extra.py +++ /dev/null @@ -1,25 +0,0 @@ -from __future__ import absolute_import -from lib_pypy import struct - -def test_simple(): - morezeros = '\x00' * (struct.calcsize('l')-4) - assert struct.pack(': big-endian, std. 
size & alignment - !: same as > - -The remaining chars indicate types of args and must match exactly; -these can be preceded by a decimal repeat count: - x: pad byte (no data); - c:char; - b:signed byte; - B:unsigned byte; - h:short; - H:unsigned short; - i:int; - I:unsigned int; - l:long; - L:unsigned long; - f:float; - d:double. -Special cases (preceding decimal count indicates length): - s:string (array of char); p: pascal string (with count byte). -Special case (only available in native format): - P:an integer type that is wide enough to hold a pointer. -Special case (not in native mode unless 'long long' in platform C): - q:long long; - Q:unsigned long long -Whitespace between formats is ignored. - -The variable struct.error is an exception raised on errors.""" - -import math, sys - -# TODO: XXX Find a way to get information on native sizes and alignments -class StructError(Exception): - pass -error = StructError -def unpack_int(data,index,size,le): - bytes = [ord(b) for b in data[index:index+size]] - if le == 'little': - bytes.reverse() - number = 0L - for b in bytes: - number = number << 8 | b - return int(number) - -def unpack_signed_int(data,index,size,le): - number = unpack_int(data,index,size,le) - max = 2**(size*8) - if number > 2**(size*8 - 1) - 1: - number = int(-1*(max - number)) - return number - -INFINITY = 1e200 * 1e200 -NAN = INFINITY / INFINITY - -def unpack_char(data,index,size,le): - return data[index:index+size] - -def pack_int(number,size,le): - x=number - res=[] - for i in range(size): - res.append(chr(x&0xff)) - x >>= 8 - if le == 'big': - res.reverse() - return ''.join(res) - -def pack_signed_int(number,size,le): - if not isinstance(number, (int,long)): - raise StructError,"argument for i,I,l,L,q,Q,h,H must be integer" - if number > 2**(8*size-1)-1 or number < -1*2**(8*size-1): - raise OverflowError,"Number:%i too large to convert" % number - return pack_int(number,size,le) - -def pack_unsigned_int(number,size,le): - if not 
isinstance(number, (int,long)): - raise StructError,"argument for i,I,l,L,q,Q,h,H must be integer" - if number < 0: - raise TypeError,"can't convert negative long to unsigned" - if number > 2**(8*size)-1: - raise OverflowError,"Number:%i too large to convert" % number - return pack_int(number,size,le) - -def pack_char(char,size,le): - return str(char) - -def isinf(x): - return x != 0.0 and x / 2 == x -def isnan(v): - return v != v*1.0 or (v == 1.0 and v == 2.0) - -def pack_float(x, size, le): - unsigned = float_pack(x, size) - result = [] - for i in range(8): - result.append(chr((unsigned >> (i * 8)) & 0xFF)) - if le == "big": - result.reverse() - return ''.join(result) - -def unpack_float(data, index, size, le): - binary = [data[i] for i in range(index, index + 8)] - if le == "big": - binary.reverse() - unsigned = 0 - for i in range(8): - unsigned |= ord(binary[i]) << (i * 8) - return float_unpack(unsigned, size, le) - -def round_to_nearest(x): - """Python 3 style round: round a float x to the nearest int, but - unlike the builtin Python 2.x round function: - - - return an int, not a float - - do round-half-to-even, not round-half-away-from-zero. - - We assume that x is finite and nonnegative; except wrong results - if you use this for negative x. 
- - """ - int_part = int(x) - frac_part = x - int_part - if frac_part > 0.5 or frac_part == 0.5 and int_part & 1 == 1: - int_part += 1 - return int_part - -def float_unpack(Q, size, le): - """Convert a 32-bit or 64-bit integer created - by float_pack into a Python float.""" - - if size == 8: - MIN_EXP = -1021 # = sys.float_info.min_exp - MAX_EXP = 1024 # = sys.float_info.max_exp - MANT_DIG = 53 # = sys.float_info.mant_dig - BITS = 64 - elif size == 4: - MIN_EXP = -125 # C's FLT_MIN_EXP - MAX_EXP = 128 # FLT_MAX_EXP - MANT_DIG = 24 # FLT_MANT_DIG - BITS = 32 - else: - raise ValueError("invalid size value") - - if Q >> BITS: - raise ValueError("input out of range") - - # extract pieces - sign = Q >> BITS - 1 - exp = (Q & ((1 << BITS - 1) - (1 << MANT_DIG - 1))) >> MANT_DIG - 1 - mant = Q & ((1 << MANT_DIG - 1) - 1) - - if exp == MAX_EXP - MIN_EXP + 2: - # nan or infinity - result = float('nan') if mant else float('inf') - elif exp == 0: - # subnormal or zero - result = math.ldexp(float(mant), MIN_EXP - MANT_DIG) - else: - # normal - mant += 1 << MANT_DIG - 1 - result = math.ldexp(float(mant), exp + MIN_EXP - MANT_DIG - 1) - return -result if sign else result - - -def float_pack(x, size): - """Convert a Python float x into a 64-bit unsigned integer - with the same byte representation.""" - - if size == 8: - MIN_EXP = -1021 # = sys.float_info.min_exp - MAX_EXP = 1024 # = sys.float_info.max_exp - MANT_DIG = 53 # = sys.float_info.mant_dig - BITS = 64 - elif size == 4: - MIN_EXP = -125 # C's FLT_MIN_EXP - MAX_EXP = 128 # FLT_MAX_EXP - MANT_DIG = 24 # FLT_MANT_DIG - BITS = 32 - else: - raise ValueError("invalid size value") - - sign = math.copysign(1.0, x) < 0.0 - if math.isinf(x): - mant = 0 - exp = MAX_EXP - MIN_EXP + 2 - elif math.isnan(x): - mant = 1 << (MANT_DIG-2) # other values possible - exp = MAX_EXP - MIN_EXP + 2 - elif x == 0.0: - mant = 0 - exp = 0 - else: - m, e = math.frexp(abs(x)) # abs(x) == m * 2**e - exp = e - (MIN_EXP - 1) - if exp > 0: - # Normal case. 
- mant = round_to_nearest(m * (1 << MANT_DIG)) - mant -= 1 << MANT_DIG - 1 - else: - # Subnormal case. - if exp + MANT_DIG - 1 >= 0: - mant = round_to_nearest(m * (1 << exp + MANT_DIG - 1)) - else: - mant = 0 - exp = 0 - - # Special case: rounding produced a MANT_DIG-bit mantissa. - assert 0 <= mant <= 1 << MANT_DIG - 1 - if mant == 1 << MANT_DIG - 1: - mant = 0 - exp += 1 - - # Raise on overflow (in some circumstances, may want to return - # infinity instead). - if exp >= MAX_EXP - MIN_EXP + 2: - raise OverflowError("float too large to pack in this format") - - # check constraints - assert 0 <= mant < 1 << MANT_DIG - 1 - assert 0 <= exp <= MAX_EXP - MIN_EXP + 2 - assert 0 <= sign <= 1 - return ((sign << BITS - 1) | (exp << MANT_DIG - 1)) | mant - - -big_endian_format = { - 'x':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None}, - 'b':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'B':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'c':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_char, 'unpack' : unpack_char}, - 's':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None}, - 'p':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None}, - 'h':{ 'size' : 2, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'H':{ 'size' : 2, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'i':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'I':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'l':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'L':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'q':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'Q':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'f':{ 
'size' : 4, 'alignment' : 0, 'pack' : pack_float, 'unpack' : unpack_float}, - 'd':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_float, 'unpack' : unpack_float}, - } -default = big_endian_format -formatmode={ '<' : (default, 'little'), - '>' : (default, 'big'), - '!' : (default, 'big'), - '=' : (default, sys.byteorder), - '@' : (default, sys.byteorder) - } - -def getmode(fmt): - try: - formatdef,endianness = formatmode[fmt[0]] - index = 1 - except KeyError: - formatdef,endianness = formatmode['@'] - index = 0 - return formatdef,endianness,index -def getNum(fmt,i): - num=None - cur = fmt[i] - while ('0'<= cur ) and ( cur <= '9'): - if num == None: - num = int(cur) - else: - num = 10*num + int(cur) - i += 1 - cur = fmt[i] - return num,i - -def calcsize(fmt): - """calcsize(fmt) -> int - Return size of C struct described by format string fmt. - See struct.__doc__ for more on format strings.""" - - formatdef,endianness,i = getmode(fmt) - num = 0 - result = 0 - while i string - Return string containing values v1, v2, ... packed according to fmt. - See struct.__doc__ for more on format strings.""" - formatdef,endianness,i = getmode(fmt) - args = list(args) - n_args = len(args) - result = [] - while i 0: - result += [chr(len(args[0])) + args[0][:num-1] + '\0'*padding] - else: - if num<255: - result += [chr(num-1) + args[0][:num-1]] - else: - result += [chr(255) + args[0][:num-1]] - args.pop(0) - else: - raise StructError,"arg for string format not a string" - - else: - if len(args) < num: - raise StructError,"insufficient arguments to pack" - for var in args[:num]: - result += [format['pack'](var,format['size'],endianness)] - args=args[num:] - num = None - i += 1 - if len(args) != 0: - raise StructError,"too many arguments for pack format" - return ''.join(result) - -def unpack(fmt,data): - """unpack(fmt, string) -> (v1, v2, ...) - Unpack the string, containing packed C structure data, according - to fmt. Requires len(string)==calcsize(fmt). 
- See struct.__doc__ for more on format strings.""" - formatdef,endianness,i = getmode(fmt) - j = 0 - num = 0 - result = [] - length= calcsize(fmt) - if length != len (data): - raise StructError,"unpack str size does not match format" - while i= num: - n = num-1 - result.append(data[j+1:j+n+1]) - j += num - else: - for n in range(num): - result += [format['unpack'](data,j,format['size'],endianness)] - j += format['size'] - - return tuple(result) - -def pack_into(fmt, buf, offset, *args): - data = pack(fmt, *args) - buffer(buf)[offset:offset+len(data)] = data - -def unpack_from(fmt, buf, offset=0): - size = calcsize(fmt) - data = buffer(buf)[offset:offset+size] - if len(data) != size: - raise error("unpack_from requires a buffer of at least %d bytes" - % (size,)) - return unpack(fmt, data) diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -103,21 +103,13 @@ * A concurrent garbage collector (a lot of work) -Remove the GIL --------------- +STM, a.k.a. "remove the GIL" +---------------------------- -This is a major task that requires lots of thinking. However, few subprojects -can be potentially specified, unless a better plan can be thought out: +Removing the GIL --- or more precisely, a GIL-less thread-less solution --- +is `now work in progress.`__ Contributions welcome. -* A thread-aware garbage collector - -* Better RPython primitives for dealing with concurrency - -* JIT passes to remove locks on objects - -* (maybe) implement locking in Python interpreter - -* alternatively, look at Software Transactional Memory +.. 
__: http://pypy.org/tmdonate.html Introduce new benchmarks ------------------------ diff --git a/pypy/doc/sandbox.rst b/pypy/doc/sandbox.rst --- a/pypy/doc/sandbox.rst +++ b/pypy/doc/sandbox.rst @@ -82,7 +82,10 @@ In pypy/translator/goal:: - ./translate.py --sandbox targetpypystandalone.py + ./translate.py -O2 --sandbox targetpypystandalone.py + +If you don't have a regular PyPy installed, you should, because it's +faster to translate, but you can also run ``python translate.py`` instead. To run it, use the tools in the pypy/translator/sandbox directory:: diff --git a/pypy/doc/you-want-to-help.rst b/pypy/doc/you-want-to-help.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/you-want-to-help.rst @@ -0,0 +1,68 @@ + +You want to help with PyPy, now what? +===================================== + +PyPy is a very large project that has a reputation of being hard to dive into. +Some of this fame is warranted, some of it is purely accidental. There are three +important lessons that everyone willing to contribute should learn: + +* PyPy has layers. There are many pieces of architecture that are very well + separated from each other. More about this below, but often the manifestation + of this is that things are at a different layer than you would expect them + to be. For example if you are looking for the JIT implementation, you will + not find it in the implementation of the Python programming language. + +* Because of the above, we are very serious about Test Driven Development. + It's not only what we believe in, but also that PyPy's architecture is + working very well with TDD in mind and not so well without it. Often + the development means progressing in an unrelated corner, one unittest + at a time and then flipping a giant switch. It's worth repeating - PyPy + approach is great if you do TDD, not so great otherwise. + +* PyPy uses an entirely different set of tools - most of them included + in the PyPy repository. There is no Makefile, nor autoconf. 
More below + +Architecture +============ + +PyPy has layers. The 100 mile view: + +* `RPython`_ is a language in which we write interpreter in PyPy. Not the entire + PyPy project is written in RPython, only parts that are compiled in + the translation process. The interesting point is that RPython has no parser, + it's compiled from the live python objects, which make it possible to do + all kinds of metaprogramming during import time. In short, Python is a meta + programming language for RPython. + + RPython standard library is to be found in ``rlib`` subdirectory. + +.. _`RPython`: coding-guide.html#RPython + +* Translation toolchain - this is the part that takes care about translating + RPython to flow graphs and then to C. There is more in `architecture`_ + document written about it. + + It mostly lives in ``rpython``, ``annotator`` and ``objspace/flow``. + +.. _`architecture`: architecture.html + +* Python Interpreter + + xxx + +* Python modules + + xxx + +* JIT + + xxx + +* Garbage Collectors + + xxx + +Toolset +======= + +xxx diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py --- a/pypy/interpreter/test/test_objspace.py +++ b/pypy/interpreter/test/test_objspace.py @@ -312,8 +312,8 @@ mods = space.get_builtinmodule_to_install() assert '__pypy__' in mods # real builtin - assert 'array' not in mods # in lib_pypy - assert 'faked+array' not in mods # in lib_pypy + assert '_functools' not in mods # in lib_pypy + assert 'faked+_functools' not in mods # in lib_pypy assert 'this_doesnt_exist' not in mods # not in lib_pypy assert 'faked+this_doesnt_exist' in mods # not in lib_pypy, but in # ALL_BUILTIN_MODULES diff --git a/pypy/interpreter/test/test_zzpickle_and_slow.py b/pypy/interpreter/test/test_zzpickle_and_slow.py --- a/pypy/interpreter/test/test_zzpickle_and_slow.py +++ b/pypy/interpreter/test/test_zzpickle_and_slow.py @@ -75,6 +75,7 @@ class AppTestInterpObjectPickling: pytestmark = 
py.test.mark.skipif("config.option.runappdirect") def setup_class(cls): + cls.space = gettestobjspace(usemodules=['struct']) _attach_helpers(cls.space) def teardown_class(cls): diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py --- a/pypy/jit/backend/x86/rx86.py +++ b/pypy/jit/backend/x86/rx86.py @@ -601,7 +601,9 @@ CVTSS2SD_xb = xmminsn('\xF3', rex_nw, '\x0F\x5A', register(1, 8), stack_bp(2)) - # These work on machine sized registers. + # These work on machine sized registers, so MOVD is actually MOVQ + # when running on 64 bits. Note a bug in the Intel documentation: + # http://lists.gnu.org/archive/html/bug-binutils/2007-07/msg00095.html MOVD_rx = xmminsn('\x66', rex_w, '\x0F\x7E', register(2, 8), register(1), '\xC0') MOVD_xr = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), register(2), '\xC0') MOVD_xb = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), stack_bp(2)) diff --git a/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py b/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py --- a/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py +++ b/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py @@ -182,6 +182,12 @@ filename = str(testdir.join(FILENAME % methname)) g = open(inputname, 'w') g.write('\x09.string "%s"\n' % BEGIN_TAG) + # + if instrname == 'MOVD' and self.WORD == 8: + instrname = 'MOVQ' + if argmodes == 'xb': + py.test.skip('"as" uses an undocumented alternate encoding??') + # for args in args_lists: suffix = "" ## all = instr.as_all_suffixes @@ -229,9 +235,6 @@ # movq $xxx, %rax => movl $xxx, %eax suffix = 'l' ops[1] = reduce_to_32bit(ops[1]) - if instrname.lower() == 'movd': - ops[0] = reduce_to_32bit(ops[0]) - ops[1] = reduce_to_32bit(ops[1]) # op = '\t%s%s %s%s' % (instrname.lower(), suffix, ', '.join(ops), following) diff --git a/pypy/jit/codewriter/test/test_flatten.py b/pypy/jit/codewriter/test/test_flatten.py --- a/pypy/jit/codewriter/test/test_flatten.py +++ 
b/pypy/jit/codewriter/test/test_flatten.py @@ -972,10 +972,16 @@ from pypy.rlib.longlong2float import float2longlong def f(x): return float2longlong(x) + if longlong.is_64_bit: + result_var = "%i0" + return_op = "int_return" + else: + result_var = "%f1" + return_op = "float_return" self.encoding_test(f, [25.0], """ - convert_float_bytes_to_longlong %f0 -> %i0 - int_return %i0 - """) + convert_float_bytes_to_longlong %%f0 -> %(result_var)s + %(return_op)s %(result_var)s + """ % {"result_var": result_var, "return_op": return_op}) def check_force_cast(FROM, TO, operations, value): diff --git a/pypy/module/__builtin__/interp_memoryview.py b/pypy/module/__builtin__/interp_memoryview.py --- a/pypy/module/__builtin__/interp_memoryview.py +++ b/pypy/module/__builtin__/interp_memoryview.py @@ -69,6 +69,10 @@ return W_MemoryView(buf) def descr_buffer(self, space): + """Note that memoryview() objects in PyPy support buffer(), whereas + not in CPython; but CPython supports passing memoryview() to most + built-in functions that accept buffers, with the notable exception + of the buffer() built-in.""" return space.wrap(self.buf) def descr_tobytes(self, space): diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -1,9 +1,10 @@ import py - +from pypy.conftest import gettestobjspace class AppTestAST: def setup_class(cls): + cls.space = gettestobjspace(usemodules=['struct']) cls.w_ast = cls.space.appexec([], """(): import _ast return _ast""") diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -4,7 +4,7 @@ class AppTestCodecs: def setup_class(cls): - space = gettestobjspace(usemodules=('unicodedata',)) + space = gettestobjspace(usemodules=('unicodedata', 'struct')) cls.space = space def test_register_noncallable(self): diff --git 
a/pypy/module/_continuation/test/test_zpickle.py b/pypy/module/_continuation/test/test_zpickle.py --- a/pypy/module/_continuation/test/test_zpickle.py +++ b/pypy/module/_continuation/test/test_zpickle.py @@ -106,8 +106,9 @@ version = 0 def setup_class(cls): - cls.space = gettestobjspace(usemodules=('_continuation',), + cls.space = gettestobjspace(usemodules=('_continuation', 'struct'), CALL_METHOD=True) + cls.space.config.translation.continuation = True cls.space.appexec([], """(): global continulet, A, __name__ diff --git a/pypy/module/_hashlib/test/test_hashlib.py b/pypy/module/_hashlib/test/test_hashlib.py --- a/pypy/module/_hashlib/test/test_hashlib.py +++ b/pypy/module/_hashlib/test/test_hashlib.py @@ -3,7 +3,7 @@ class AppTestHashlib: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['_hashlib']) + cls.space = gettestobjspace(usemodules=['_hashlib', 'array', 'struct']) def test_simple(self): import _hashlib diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py --- a/pypy/module/_io/test/test_io.py +++ b/pypy/module/_io/test/test_io.py @@ -158,7 +158,7 @@ class AppTestOpen: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['_io', '_locale']) + cls.space = gettestobjspace(usemodules=['_io', '_locale', 'array', 'struct']) tmpfile = udir.join('tmpfile').ensure() cls.w_tmpfile = cls.space.wrap(str(tmpfile)) diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -92,7 +92,8 @@ class AppTestSocketConnection(BaseConnectionTest): def setup_class(cls): - space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal')) + space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal', + 'struct', 'array')) cls.space = space cls.w_connections = space.newlist([]) diff --git 
a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -6,7 +6,7 @@ from pypy.rpython.lltypesystem import lltype, rffi def setup_module(mod): - mod.space = gettestobjspace(usemodules=['_socket', 'array']) + mod.space = gettestobjspace(usemodules=['_socket', 'array', 'struct']) global socket import socket mod.w_socket = space.appexec([], "(): import _socket as m; return m") @@ -372,10 +372,9 @@ def test_socket_connect(self): import _socket, os s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM, 0) - # XXX temporarily we use python.org to test, will have more robust tests - # in the absence of a network connection later when more parts of the - # socket API are implemented. Currently skip the test if there is no - # connection. + # it would be nice to have a test which works even if there is no + # network connection. However, this one is "good enough" for now. Skip + # it if there is no connection. 
try: s.connect(("www.python.org", 80)) except _socket.gaierror, ex: diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -90,7 +90,7 @@ class AppTestConnectedSSL: def setup_class(cls): - space = gettestobjspace(usemodules=('_ssl', '_socket')) + space = gettestobjspace(usemodules=('_ssl', '_socket', 'struct')) cls.space = space def setup_method(self, method): @@ -179,7 +179,7 @@ # to exercise the poll() calls def setup_class(cls): - space = gettestobjspace(usemodules=('_ssl', '_socket')) + space = gettestobjspace(usemodules=('_ssl', '_socket', 'struct')) cls.space = space cls.space.appexec([], """(): import socket; socket.setdefaulttimeout(1) diff --git a/pypy/module/cpyext/test/conftest.py b/pypy/module/cpyext/test/conftest.py --- a/pypy/module/cpyext/test/conftest.py +++ b/pypy/module/cpyext/test/conftest.py @@ -10,7 +10,7 @@ return False def pytest_funcarg__space(request): - return gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi']) + return gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array']) def pytest_funcarg__api(request): return request.cls.api diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -19,7 +19,8 @@ class BaseApiTest(LeakCheckingTest): def setup_class(cls): - cls.space = space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi']) + cls.space = space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', + 'array']) # warm up reference counts: # - the posix module allocates a HCRYPTPROV on Windows diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -1,3 +1,4 @@ +from pypy.conftest import gettestobjspace from 
pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase import py diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -35,7 +35,7 @@ class AppTestApi: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi']) + cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array']) from pypy.rlib.libffi import get_libc_name cls.w_libc = cls.space.wrap(get_libc_name()) @@ -165,8 +165,9 @@ return leaking class AppTestCpythonExtensionBase(LeakCheckingTest): + def setup_class(cls): - cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi']) + cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array']) cls.space.getbuiltinmodule("cpyext") from pypy.module.imp.importing import importhook importhook(cls.space, "os") # warm up reference counts diff --git a/pypy/module/cpyext/test/test_import.py b/pypy/module/cpyext/test/test_import.py --- a/pypy/module/cpyext/test/test_import.py +++ b/pypy/module/cpyext/test/test_import.py @@ -19,7 +19,7 @@ space.wrap('__name__'))) == 'foobar' def test_getmoduledict(self, space, api): - testmod = "binascii" + testmod = "_functools" w_pre_dict = api.PyImport_GetModuleDict() assert not space.is_true(space.contains(w_pre_dict, space.wrap(testmod))) diff --git a/pypy/module/fcntl/test/test_fcntl.py b/pypy/module/fcntl/test/test_fcntl.py --- a/pypy/module/fcntl/test/test_fcntl.py +++ b/pypy/module/fcntl/test/test_fcntl.py @@ -13,7 +13,7 @@ class AppTestFcntl: def setup_class(cls): - space = gettestobjspace(usemodules=('fcntl', 'array')) + space = gettestobjspace(usemodules=('fcntl', 'array', 'struct')) cls.space = space tmpprefix = str(udir.ensure('test_fcntl', dir=1).join('tmp_')) cls.w_tmp = space.wrap(tmpprefix) diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- 
a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -987,6 +987,10 @@ os.environ['LANG'] = oldlang class AppTestImportHooks(object): + + def setup_class(cls): + cls.space = gettestobjspace(usemodules=('struct',)) + def test_meta_path(self): tried_imports = [] class Importer(object): diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py --- a/pypy/module/itertools/test/test_itertools.py +++ b/pypy/module/itertools/test/test_itertools.py @@ -891,7 +891,7 @@ class AppTestItertools27: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['itertools']) + cls.space = gettestobjspace(usemodules=['itertools', 'struct']) if cls.space.is_true(cls.space.appexec([], """(): import sys; return sys.version_info < (2, 7) """)): diff --git a/pypy/module/marshal/test/make_test_marshal.py b/pypy/module/marshal/test/make_test_marshal.py deleted file mode 100644 --- a/pypy/module/marshal/test/make_test_marshal.py +++ /dev/null @@ -1,78 +0,0 @@ - -TESTCASES = """\ - None - False - True - StopIteration - Ellipsis - 42 - -17 - sys.maxint - -1.25 - -1.25 #2 - 2+5j - 2+5j #2 - 42L - -1234567890123456789012345678901234567890L - hello # not interned - "hello" - () - (1, 2) - [] - [3, 4] - {} - {5: 6, 7: 8} - func.func_code - scopefunc.func_code - u'hello' - set() - set([1, 2]) - frozenset() - frozenset([3, 4]) -""".strip().split('\n') - -def readable(s): - for c, repl in ( - ("'", '_quote_'), ('"', '_Quote_'), (':', '_colon_'), ('.', '_dot_'), - ('[', '_list_'), (']', '_tsil_'), ('{', '_dict_'), ('}', '_tcid_'), - ('-', '_minus_'), ('+', '_plus_'), - (',', '_comma_'), ('(', '_brace_'), (')', '_ecarb_') ): - s = s.replace(c, repl) - lis = list(s) - for i, c in enumerate(lis): - if c.isalnum() or c == '_': - continue - lis[i] = '_' - return ''.join(lis) - -print """class AppTestMarshal: -""" -for line in TESTCASES: - line = line.strip() - name = readable(line) - version = '' - extra = '' - if 
line.endswith('#2'): - version = ', 2' - extra = '; assert len(s) in (9, 17)' - src = '''\ - def test_%(name)s(self): - import sys - hello = "he" - hello += "llo" - def func(x): - return lambda y: x+y - scopefunc = func(42) - import marshal, StringIO - case = %(line)s - print "case: %%-30s func=%(name)s" %% (case, ) - s = marshal.dumps(case%(version)s)%(extra)s - x = marshal.loads(s) - assert x == case - f = StringIO.StringIO() - marshal.dump(case, f) - f.seek(0) - x = marshal.load(f) - assert x == case -''' % {'name': name, 'line': line, 'version' : version, 'extra': extra} - print src diff --git a/pypy/module/math/test/test_math.py b/pypy/module/math/test/test_math.py --- a/pypy/module/math/test/test_math.py +++ b/pypy/module/math/test/test_math.py @@ -6,7 +6,7 @@ class AppTestMath: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['math']) + cls.space = gettestobjspace(usemodules=['math', 'struct']) cls.w_cases = cls.space.wrap(test_direct.MathTests.TESTCASES) cls.w_consistent_host = cls.space.wrap(test_direct.consistent_host) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -359,6 +359,7 @@ name="int64", char="q", w_box_type=space.gettypefor(interp_boxes.W_Int64Box), + alternate_constructors=[space.w_long], ) self.w_uint64dtype = W_Dtype( types.UInt64(), @@ -386,23 +387,6 @@ alternate_constructors=[space.w_float], aliases=["float"], ) - self.w_longlongdtype = W_Dtype( - types.Int64(), - num=9, - kind=SIGNEDLTR, - name='int64', - char='q', - w_box_type = space.gettypefor(interp_boxes.W_LongLongBox), - alternate_constructors=[space.w_long], - ) - self.w_ulonglongdtype = W_Dtype( - types.UInt64(), - num=10, - kind=UNSIGNEDLTR, - name='uint64', - char='Q', - w_box_type = space.gettypefor(interp_boxes.W_ULongLongBox), - ) self.w_stringdtype = W_Dtype( types.StringType(1), num=18, @@ -435,17 +419,19 @@ 
self.w_booldtype, self.w_int8dtype, self.w_uint8dtype, self.w_int16dtype, self.w_uint16dtype, self.w_int32dtype, self.w_uint32dtype, self.w_longdtype, self.w_ulongdtype, - self.w_longlongdtype, self.w_ulonglongdtype, + self.w_int64dtype, self.w_uint64dtype, self.w_float32dtype, self.w_float64dtype, self.w_stringdtype, self.w_unicodedtype, self.w_voiddtype, ] - self.dtypes_by_num_bytes = sorted( + self.float_dtypes_by_num_bytes = sorted( (dtype.itemtype.get_element_size(), dtype) - for dtype in self.builtin_dtypes + for dtype in [self.w_float32dtype, self.w_float64dtype] ) self.dtypes_by_name = {} - for dtype in self.builtin_dtypes: + # we reverse, so the stuff with lower numbers override stuff with + # higher numbers + for dtype in reversed(self.builtin_dtypes): self.dtypes_by_name[dtype.name] = dtype can_name = dtype.kind + str(dtype.itemtype.get_element_size()) self.dtypes_by_name[can_name] = dtype @@ -473,7 +459,7 @@ 'LONG': self.w_longdtype, 'UNICODE': self.w_unicodedtype, #'OBJECT', - 'ULONGLONG': self.w_ulonglongdtype, + 'ULONGLONG': self.w_uint64dtype, 'STRING': self.w_stringdtype, #'CDOUBLE', #'DATETIME', diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1125,7 +1125,8 @@ @unwrap_spec(subok=bool, copy=bool, ownmaskna=bool) def array(space, w_item_or_iterable, w_dtype=None, w_order=None, - subok=True, copy=True, w_maskna=None, ownmaskna=False): + subok=True, copy=True, w_maskna=None, ownmaskna=False, + w_ndmin=None): # find scalar if w_maskna is None: w_maskna = space.w_None @@ -1170,8 +1171,13 @@ break if dtype is None: dtype = interp_dtype.get_dtype_cache(space).w_float64dtype + shapelen = len(shape) + if w_ndmin is not None and not space.is_w(w_ndmin, space.w_None): + ndmin = space.int_w(w_ndmin) + if ndmin > shapelen: + shape = [1] * (ndmin - shapelen) + shape + shapelen = ndmin arr = W_NDimArray(shape[:], 
dtype=dtype, order=order) - shapelen = len(shape) arr_iter = arr.create_iter() # XXX we might want to have a jitdriver here for i in range(len(elems_w)): diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -314,7 +314,7 @@ return dt if dt.num >= 5: return interp_dtype.get_dtype_cache(space).w_float64dtype - for bytes, dtype in interp_dtype.get_dtype_cache(space).dtypes_by_num_bytes: + for bytes, dtype in interp_dtype.get_dtype_cache(space).float_dtypes_by_num_bytes: if (dtype.kind == interp_dtype.FLOATINGLTR and dtype.itemtype.get_element_size() > dt.itemtype.get_element_size()): return dtype diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -302,6 +302,7 @@ else: raises(OverflowError, numpy.int32, 2147483648) raises(OverflowError, numpy.int32, '2147483648') + assert numpy.dtype('int32') is numpy.dtype(numpy.int32) def test_uint32(self): import sys @@ -333,15 +334,11 @@ assert numpy.dtype(numpy.int64).type is numpy.int64 assert numpy.int64(3) == 3 - if sys.maxint >= 2 ** 63 - 1: - assert numpy.int64(9223372036854775807) == 9223372036854775807 - assert numpy.int64('9223372036854775807') == 9223372036854775807 - else: - raises(OverflowError, numpy.int64, 9223372036854775807) - raises(OverflowError, numpy.int64, '9223372036854775807') + assert numpy.int64(9223372036854775807) == 9223372036854775807 + assert numpy.int64(9223372036854775807) == 9223372036854775807 raises(OverflowError, numpy.int64, 9223372036854775808) - raises(OverflowError, numpy.int64, '9223372036854775808') + raises(OverflowError, numpy.int64, 9223372036854775808L) def test_uint64(self): import sys diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- 
a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -211,6 +211,18 @@ assert a.shape == (3,) assert a.dtype is dtype(int) + def test_ndmin(self): + from _numpypy import array + + arr = array([[[1]]], ndmin=1) + assert arr.shape == (1, 1, 1) + + def test_noop_ndmin(self): + from _numpypy import array + + arr = array([1], ndmin=3) + assert arr.shape == (1, 1, 1) + def test_type(self): from _numpypy import array ar = array(range(5)) diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -197,7 +197,6 @@ def test_signbit(self): from _numpypy import signbit, copysign - import struct assert (signbit([0, 0.0, 1, 1.0, float('inf'), float('nan')]) == [False, False, False, False, False, False]).all() diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -500,6 +500,19 @@ BoxType = interp_boxes.W_ULongBox format_code = "L" +def _int64_coerce(self, space, w_item): + try: + return self._base_coerce(space, w_item) + except OperationError, e: + if not e.match(space, space.w_OverflowError): + raise + bigint = space.bigint_w(w_item) + try: + value = bigint.tolonglong() + except OverflowError: + raise OperationError(space.w_OverflowError, space.w_None) + return self.box(value) + class Int64(BaseType, Integer): _attrs_ = () @@ -507,6 +520,8 @@ BoxType = interp_boxes.W_Int64Box format_code = "q" + _coerce = func_with_new_name(_int64_coerce, '_coerce') + class NonNativeInt64(BaseType, NonNativeInteger): _attrs_ = () @@ -514,6 +529,8 @@ BoxType = interp_boxes.W_Int64Box format_code = "q" + _coerce = func_with_new_name(_int64_coerce, '_coerce') + def _uint64_coerce(self, space, w_item): try: return self._base_coerce(space, w_item) diff --git a/pypy/module/posix/test/test_posix2.py 
b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -14,10 +14,10 @@ def setup_module(mod): if os.name != 'nt': - mod.space = gettestobjspace(usemodules=['posix', 'fcntl']) + mod.space = gettestobjspace(usemodules=['posix', 'fcntl', 'struct']) else: # On windows, os.popen uses the subprocess module - mod.space = gettestobjspace(usemodules=['posix', '_rawffi', 'thread']) + mod.space = gettestobjspace(usemodules=['posix', '_rawffi', 'thread', 'struct']) mod.path = udir.join('posixtestfile.txt') mod.path.write("this is a test") mod.path2 = udir.join('test_posix2-') diff --git a/pypy/module/rctime/test/test_rctime.py b/pypy/module/rctime/test/test_rctime.py --- a/pypy/module/rctime/test/test_rctime.py +++ b/pypy/module/rctime/test/test_rctime.py @@ -3,7 +3,7 @@ class AppTestRCTime: def setup_class(cls): - space = gettestobjspace(usemodules=('rctime',)) + space = gettestobjspace(usemodules=('rctime', 'struct')) cls.space = space def test_attributes(self): diff --git a/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py b/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py --- a/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py +++ b/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py @@ -142,3 +142,39 @@ assert str(b) == "[7 8 9]" b = a[2:1, ] assert str(b) == "[]" + + def test_equal(self): + from _numpypy import array + from numpypy import array_equal + + a = [1, 2, 3] + b = [1, 2, 3] + + assert array_equal(a, b) + assert array_equal(a, array(b)) + assert array_equal(array(a), b) + assert array_equal(array(a), array(b)) + + def test_not_equal(self): + from _numpypy import array + from numpypy import array_equal + + a = [1, 2, 3] + b = [1, 2, 4] + + assert not array_equal(a, b) + assert not array_equal(a, array(b)) + assert not array_equal(array(a), b) + assert not array_equal(array(a), array(b)) + + def test_mismatched_shape(self): + from _numpypy import array + from numpypy 
import array_equal + + a = [1, 2, 3] + b = [[1, 2, 3], [1, 2, 3]] + + assert not array_equal(a, b) + assert not array_equal(a, array(b)) + assert not array_equal(array(a), b) + assert not array_equal(array(a), array(b)) diff --git a/pypy/module/test_lib_pypy/test_binascii.py b/pypy/module/test_lib_pypy/test_binascii.py deleted file mode 100644 --- a/pypy/module/test_lib_pypy/test_binascii.py +++ /dev/null @@ -1,8 +0,0 @@ - -""" Some more binascii.py tests -""" - -class AppTestBinAscii: - def test_incorrect_padding(self): - import binascii - raises(binascii.Error, "'x'.decode('base64')") diff --git a/pypy/module/zipimport/test/test_undocumented.py b/pypy/module/zipimport/test/test_undocumented.py --- a/pypy/module/zipimport/test/test_undocumented.py +++ b/pypy/module/zipimport/test/test_undocumented.py @@ -19,7 +19,7 @@ class AppTestZipImport: def setup_class(cls): - space = gettestobjspace(usemodules=['zipimport', 'rctime']) + space = gettestobjspace(usemodules=['zipimport', 'rctime', 'struct']) cls.space = space cls.w_created_paths = space.wrap(created_paths) diff --git a/pypy/module/zipimport/test/test_zipimport.py b/pypy/module/zipimport/test/test_zipimport.py --- a/pypy/module/zipimport/test/test_zipimport.py +++ b/pypy/module/zipimport/test/test_zipimport.py @@ -47,9 +47,9 @@ """).compile() if cls.compression == ZIP_DEFLATED: - space = gettestobjspace(usemodules=['zipimport', 'zlib', 'rctime']) + space = gettestobjspace(usemodules=['zipimport', 'zlib', 'rctime', 'struct']) else: - space = gettestobjspace(usemodules=['zipimport', 'rctime']) + space = gettestobjspace(usemodules=['zipimport', 'rctime', 'struct']) cls.space = space tmpdir = udir.ensure('zipimport_%s' % cls.__name__, dir=1) diff --git a/pypy/objspace/flow/model.py b/pypy/objspace/flow/model.py --- a/pypy/objspace/flow/model.py +++ b/pypy/objspace/flow/model.py @@ -7,8 +7,7 @@ from pypy.tool.uid import uid, Hashable from pypy.tool.descriptor import roproperty from pypy.tool.sourcetools import 
PY_IDENTIFIER, nice_repr_for_func -from pypy.tool.identity_dict import identity_dict -from pypy.rlib.rarithmetic import is_valid_int, r_longlong, r_ulonglong +from pypy.rlib.rarithmetic import is_valid_int, r_longlong, r_ulonglong, r_uint """ @@ -546,7 +545,7 @@ for n in cases[:len(cases)-has_default]: if is_valid_int(n): continue - if isinstance(n, (r_longlong, r_ulonglong)): + if isinstance(n, (r_longlong, r_ulonglong, r_uint)): continue if isinstance(n, (str, unicode)) and len(n) == 1: continue diff --git a/pypy/objspace/std/ropeobject.py b/pypy/objspace/std/ropeobject.py --- a/pypy/objspace/std/ropeobject.py +++ b/pypy/objspace/std/ropeobject.py @@ -41,11 +41,6 @@ return w_self return W_RopeObject(w_self._node) - def unicode_w(w_self, space): - # XXX should this use the default encoding? - from pypy.objspace.std.unicodetype import plain_str2unicode - return plain_str2unicode(space, w_self._node.flatten_string()) - W_RopeObject.EMPTY = W_RopeObject(rope.LiteralStringNode.EMPTY) W_RopeObject.PREBUILT = [W_RopeObject(rope.LiteralStringNode.PREBUILT[i]) for i in range(256)] diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -37,6 +37,20 @@ return None return space.wrap(compute_unique_id(space.str_w(self))) + def unicode_w(w_self, space): + # Use the default encoding. 
+ from pypy.objspace.std.unicodetype import unicode_from_string, \ + decode_object + w_defaultencoding = space.call_function(space.sys.get( + 'getdefaultencoding')) + from pypy.objspace.std.unicodetype import _get_encoding_and_errors, \ + unicode_from_string, decode_object + encoding, errors = _get_encoding_and_errors(space, w_defaultencoding, + space.w_None) + if encoding is None and errors is None: + return space.unicode_w(unicode_from_string(space, w_self)) + return space.unicode_w(decode_object(space, w_self, encoding, errors)) + class W_StringObject(W_AbstractStringObject): from pypy.objspace.std.stringtype import str_typedef as typedef @@ -55,20 +69,6 @@ def str_w(w_self, space): return w_self._value - def unicode_w(w_self, space): - # Use the default encoding. - from pypy.objspace.std.unicodetype import unicode_from_string, \ - decode_object - w_defaultencoding = space.call_function(space.sys.get( - 'getdefaultencoding')) - from pypy.objspace.std.unicodetype import _get_encoding_and_errors, \ - unicode_from_string, decode_object - encoding, errors = _get_encoding_and_errors(space, w_defaultencoding, - space.w_None) - if encoding is None and errors is None: - return space.unicode_w(unicode_from_string(space, w_self)) - return space.unicode_w(decode_object(space, w_self, encoding, errors)) - registerimplementation(W_StringObject) W_StringObject.EMPTY = W_StringObject('') diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py --- a/pypy/objspace/std/test/test_obj.py +++ b/pypy/objspace/std/test/test_obj.py @@ -265,4 +265,7 @@ space = objspace.StdObjSpace() w_a = space.wrap("a") space.type = None + # if it crashes, it means that space._type_isinstance didn't go through + # the fast path, and tries to call type() (which is set to None just + # above) space.isinstance_w(w_a, space.w_str) # does not crash diff --git a/pypy/rlib/rzipfile.py b/pypy/rlib/rzipfile.py --- a/pypy/rlib/rzipfile.py +++ b/pypy/rlib/rzipfile.py @@ -12,8 +12,7 @@ 
rzlib = None # XXX hack to get crc32 to work -from pypy.tool.lib_pypy import import_from_lib_pypy -crc_32_tab = import_from_lib_pypy('binascii').crc_32_tab +from pypy.module.binascii.interp_crc32 import crc_32_tab rcrc_32_tab = [r_uint(i) for i in crc_32_tab] diff --git a/pypy/rpython/llinterp.py b/pypy/rpython/llinterp.py --- a/pypy/rpython/llinterp.py +++ b/pypy/rpython/llinterp.py @@ -770,6 +770,10 @@ checkadr(adr) return llmemory.cast_adr_to_int(adr, mode) + def op_convert_float_bytes_to_longlong(self, f): + from pypy.rlib import longlong2float + return longlong2float.float2longlong(f) + def op_weakref_create(self, v_obj): def objgetter(): # special support for gcwrapper.py return self.getval(v_obj) diff --git a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py --- a/pypy/rpython/lltypesystem/lloperation.py +++ b/pypy/rpython/lltypesystem/lloperation.py @@ -349,6 +349,7 @@ 'cast_float_to_ulonglong':LLOp(canfold=True), 'truncate_longlong_to_int':LLOp(canfold=True), 'force_cast': LLOp(sideeffects=False), # only for rffi.cast() + 'convert_float_bytes_to_longlong': LLOp(canfold=True), # __________ pointer operations __________ diff --git a/pypy/tool/test/test_lib_pypy.py b/pypy/tool/test/test_lib_pypy.py --- a/pypy/tool/test/test_lib_pypy.py +++ b/pypy/tool/test/test_lib_pypy.py @@ -11,7 +11,7 @@ assert lib_pypy.LIB_PYTHON_MODIFIED.check(dir=1) def test_import_from_lib_pypy(): - binascii = lib_pypy.import_from_lib_pypy('binascii') - assert type(binascii) is type(lib_pypy) - assert binascii.__name__ == 'lib_pypy.binascii' - assert hasattr(binascii, 'crc_32_tab') + _functools = lib_pypy.import_from_lib_pypy('_functools') + assert type(_functools) is type(lib_pypy) + assert _functools.__name__ == 'lib_pypy._functools' + assert hasattr(_functools, 'partial') diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ 
-484,7 +484,7 @@ 'shl', 'shr', 'sal', 'sar', 'rol', 'ror', 'mul', 'imul', 'div', 'idiv', 'bswap', 'bt', 'rdtsc', 'punpck', 'pshufd', 'pcmp', 'pand', 'psllw', 'pslld', 'psllq', - 'paddq', 'pinsr', 'pmul', 'psrl', + 'paddq', 'pinsr', 'pmul', 'psrl', 'vmul', # sign-extending moves should not produce GC pointers 'cbtw', 'cwtl', 'cwtd', 'cltd', 'cltq', 'cqto', # zero-extending moves should not produce GC pointers diff --git a/pypy/translator/driver.py b/pypy/translator/driver.py --- a/pypy/translator/driver.py +++ b/pypy/translator/driver.py @@ -585,22 +585,6 @@ # task_compile_c = taskdef(task_compile_c, ['source_c'], "Compiling c source") - def backend_run(self, backend): - c_entryp = self.c_entryp - standalone = self.standalone - if standalone: - os.system(c_entryp) - else: - runner = self.extra.get('run', lambda f: f()) - runner(c_entryp) - - def task_run_c(self): - self.backend_run('c') - # - task_run_c = taskdef(task_run_c, ['compile_c'], - "Running compiled c source", - idemp=True) - def task_llinterpret_lltype(self): from pypy.rpython.llinterp import LLInterpreter py.log.setconsumer("llinterp operation", None) @@ -710,11 +694,6 @@ shutil.copy(main_exe, '.') self.log.info("Copied to %s" % os.path.join(os.getcwd(), dllname)) - def task_run_cli(self): - pass - task_run_cli = taskdef(task_run_cli, ['compile_cli'], - 'XXX') - def task_source_jvm(self): from pypy.translator.jvm.genjvm import GenJvm from pypy.translator.jvm.node import EntryPoint diff --git a/pypy/translator/goal/translate.py b/pypy/translator/goal/translate.py --- a/pypy/translator/goal/translate.py +++ b/pypy/translator/goal/translate.py @@ -31,7 +31,6 @@ ("backendopt", "do backend optimizations", "--backendopt", ""), ("source", "create source", "-s --source", ""), ("compile", "compile", "-c --compile", " (default goal)"), - ("run", "run the resulting binary", "--run", ""), ("llinterpret", "interpret the rtyped flow graphs", "--llinterpret", ""), ] def goal_options(): @@ -78,7 +77,7 @@ 
defaultfactory=list), # xxx default goals ['annotate', 'rtype', 'backendopt', 'source', 'compile'] ArbitraryOption("skipped_goals", "XXX", - defaultfactory=lambda: ['run']), + defaultfactory=list), OptionDescription("goal_options", "Goals that should be reached during translation", goal_options()), diff --git a/pypy/translator/jvm/opcodes.py b/pypy/translator/jvm/opcodes.py --- a/pypy/translator/jvm/opcodes.py +++ b/pypy/translator/jvm/opcodes.py @@ -241,4 +241,6 @@ 'cast_ulonglong_to_float': jvm.PYPYULONGTODOUBLE, 'cast_primitive': [PushAllArgs, CastPrimitive, StoreResult], 'force_cast': [PushAllArgs, CastPrimitive, StoreResult], + + 'convert_float_bytes_to_longlong': jvm.PYPYDOUBLEBYTESTOLONG, }) diff --git a/pypy/translator/jvm/typesystem.py b/pypy/translator/jvm/typesystem.py --- a/pypy/translator/jvm/typesystem.py +++ b/pypy/translator/jvm/typesystem.py @@ -941,6 +941,7 @@ PYPYDOUBLETOULONG = Method.s(jPyPy, 'double_to_ulong', (jDouble,), jLong) PYPYULONGTODOUBLE = Method.s(jPyPy, 'ulong_to_double', (jLong,), jDouble) PYPYLONGBITWISENEGATE = Method.v(jPyPy, 'long_bitwise_negate', (jLong,), jLong) +PYPYDOUBLEBYTESTOLONG = Method.v(jPyPy, 'pypy__float2longlong', (jDouble,), jLong) PYPYSTRTOINT = Method.v(jPyPy, 'str_to_int', (jString,), jInt) PYPYSTRTOUINT = Method.v(jPyPy, 'str_to_uint', (jString,), jInt) PYPYSTRTOLONG = Method.v(jPyPy, 'str_to_long', (jString,), jLong) diff --git a/pypy/translator/test/test_driver.py b/pypy/translator/test/test_driver.py --- a/pypy/translator/test/test_driver.py +++ b/pypy/translator/test/test_driver.py @@ -6,7 +6,7 @@ def test_ctr(): td = TranslationDriver() expected = ['annotate', 'backendopt', 'llinterpret', 'rtype', 'source', - 'compile', 'run', 'pyjitpl'] + 'compile', 'pyjitpl'] assert set(td.exposed) == set(expected) assert td.backend_select_goals(['compile_c']) == ['compile_c'] @@ -33,7 +33,6 @@ 'rtype_ootype', 'rtype_lltype', 'source_cli', 'source_c', 'compile_cli', 'compile_c', - 'run_c', 'run_cli', 'compile_jvm', 
'source_jvm', 'run_jvm', 'pyjitpl_lltype', 'pyjitpl_ootype'] @@ -50,6 +49,6 @@ 'backendopt_lltype'] expected = ['annotate', 'backendopt', 'llinterpret', 'rtype', 'source_c', - 'compile_c', 'run_c', 'pyjitpl'] + 'compile_c', 'pyjitpl'] assert set(td.exposed) == set(expected) From noreply at buildbot.pypy.org Fri Mar 23 16:36:59 2012 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 Mar 2012 16:36:59 +0100 (CET) Subject: [pypy-commit] pypy default: Small fixes Message-ID: <20120323153659.7136782112@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r53943:ed7d6543550b Date: 2012-03-23 16:36 +0100 http://bitbucket.org/pypy/pypy/changeset/ed7d6543550b/ Log: Small fixes diff --git a/pypy/doc/you-want-to-help.rst b/pypy/doc/you-want-to-help.rst --- a/pypy/doc/you-want-to-help.rst +++ b/pypy/doc/you-want-to-help.rst @@ -16,7 +16,9 @@ It's not only what we believe in, but also that PyPy's architecture is working very well with TDD in mind and not so well without it. Often the development means progressing in an unrelated corner, one unittest - at a time and then flipping a giant switch. It's worth repeating - PyPy + at a time; and then flipping a giant switch, bringing it all together. + (It generally works out of the box. If it doesn't, then we didn't + write enough unit tests.) It's worth repeating - PyPy approach is great if you do TDD, not so great otherwise. * PyPy uses an entirely different set of tools - most of them included @@ -25,21 +27,21 @@ Architecture ============ -PyPy has layers. The 100 mile view: +PyPy has layers. The 100 miles view: -* `RPython`_ is a language in which we write interpreter in PyPy. Not the entire - PyPy project is written in RPython, only parts that are compiled in +* `RPython`_ is the language in which we write interpreters. Not the entire + PyPy project is written in RPython, only the parts that are compiled in the translation process. 
The interesting point is that RPython has no parser, it's compiled from the live python objects, which make it possible to do all kinds of metaprogramming during import time. In short, Python is a meta programming language for RPython. - RPython standard library is to be found in ``rlib`` subdirectory. + The RPython standard library is to be found in the ``rlib`` subdirectory. .. _`RPython`: coding-guide.html#RPython -* Translation toolchain - this is the part that takes care about translating - RPython to flow graphs and then to C. There is more in `architecture`_ +* The translation toolchain - this is the part that takes care about translating + RPython to flow graphs and then to C. There is more in the `architecture`_ document written about it. It mostly lives in ``rpython``, ``annotator`` and ``objspace/flow``. From noreply at buildbot.pypy.org Fri Mar 23 16:47:05 2012 From: noreply at buildbot.pypy.org (bivab) Date: Fri, 23 Mar 2012 16:47:05 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: merge default Message-ID: <20120323154705.7673F82112@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: arm-backend-2 Changeset: r53944:7f1c4b6b1ad1 Date: 2012-03-23 16:46 +0100 http://bitbucket.org/pypy/pypy/changeset/7f1c4b6b1ad1/ Log: merge default diff --git a/lib-python/modified-2.7/site.py b/lib-python/modified-2.7/site.py --- a/lib-python/modified-2.7/site.py +++ b/lib-python/modified-2.7/site.py @@ -550,9 +550,18 @@ "'import usercustomize' failed; use -v for traceback" +def import_builtin_stuff(): + """PyPy specific: pre-import a few built-in modules, because + some programs actually rely on them to be in sys.modules :-(""" + import exceptions + if 'zipimport' in sys.builtin_module_names: + import zipimport + + def main(): global ENABLE_USER_SITE + import_builtin_stuff() abs__file__() known_paths = removeduppaths() if (os.name == "posix" and sys.path and diff --git a/lib_pypy/_locale.py b/lib_pypy/_locale.py deleted file mode 100644 --- 
a/lib_pypy/_locale.py +++ /dev/null @@ -1,337 +0,0 @@ -# ctypes implementation of _locale module by Victor Stinner, 2008-03-27 - -# ------------------------------------------------------------ -# Note that we also have our own interp-level implementation -# ------------------------------------------------------------ - -""" -Support for POSIX locales. -""" - -from ctypes import (Structure, POINTER, create_string_buffer, - c_ubyte, c_int, c_char_p, c_wchar_p, c_size_t) -from ctypes_support import standard_c_lib as libc -from ctypes_support import get_errno - -# load the platform-specific cache made by running locale.ctc.py -from ctypes_config_cache._locale_cache import * - -try: from __pypy__ import builtinify -except ImportError: builtinify = lambda f: f - - -# Ubuntu Gusty i386 structure -class lconv(Structure): - _fields_ = ( - # Numeric (non-monetary) information. - ("decimal_point", c_char_p), # Decimal point character. - ("thousands_sep", c_char_p), # Thousands separator. - - # Each element is the number of digits in each group; - # elements with higher indices are farther left. - # An element with value CHAR_MAX means that no further grouping is done. - # An element with value 0 means that the previous element is used - # for all groups farther left. */ - ("grouping", c_char_p), - - # Monetary information. - - # First three chars are a currency symbol from ISO 4217. - # Fourth char is the separator. Fifth char is '\0'. - ("int_curr_symbol", c_char_p), - ("currency_symbol", c_char_p), # Local currency symbol. - ("mon_decimal_point", c_char_p), # Decimal point character. - ("mon_thousands_sep", c_char_p), # Thousands separator. - ("mon_grouping", c_char_p), # Like `grouping' element (above). - ("positive_sign", c_char_p), # Sign for positive values. - ("negative_sign", c_char_p), # Sign for negative values. - ("int_frac_digits", c_ubyte), # Int'l fractional digits. - ("frac_digits", c_ubyte), # Local fractional digits. 
- # 1 if currency_symbol precedes a positive value, 0 if succeeds. - ("p_cs_precedes", c_ubyte), - # 1 iff a space separates currency_symbol from a positive value. - ("p_sep_by_space", c_ubyte), - # 1 if currency_symbol precedes a negative value, 0 if succeeds. - ("n_cs_precedes", c_ubyte), - # 1 iff a space separates currency_symbol from a negative value. - ("n_sep_by_space", c_ubyte), - - # Positive and negative sign positions: - # 0 Parentheses surround the quantity and currency_symbol. - # 1 The sign string precedes the quantity and currency_symbol. - # 2 The sign string follows the quantity and currency_symbol. - # 3 The sign string immediately precedes the currency_symbol. - # 4 The sign string immediately follows the currency_symbol. - ("p_sign_posn", c_ubyte), - ("n_sign_posn", c_ubyte), - # 1 if int_curr_symbol precedes a positive value, 0 if succeeds. - ("int_p_cs_precedes", c_ubyte), - # 1 iff a space separates int_curr_symbol from a positive value. - ("int_p_sep_by_space", c_ubyte), - # 1 if int_curr_symbol precedes a negative value, 0 if succeeds. - ("int_n_cs_precedes", c_ubyte), - # 1 iff a space separates int_curr_symbol from a negative value. - ("int_n_sep_by_space", c_ubyte), - # Positive and negative sign positions: - # 0 Parentheses surround the quantity and int_curr_symbol. - # 1 The sign string precedes the quantity and int_curr_symbol. - # 2 The sign string follows the quantity and int_curr_symbol. - # 3 The sign string immediately precedes the int_curr_symbol. - # 4 The sign string immediately follows the int_curr_symbol. 
- ("int_p_sign_posn", c_ubyte), - ("int_n_sign_posn", c_ubyte), - ) - -_setlocale = libc.setlocale -_setlocale.argtypes = (c_int, c_char_p) -_setlocale.restype = c_char_p - -_localeconv = libc.localeconv -_localeconv.argtypes = None -_localeconv.restype = POINTER(lconv) - -_strcoll = libc.strcoll -_strcoll.argtypes = (c_char_p, c_char_p) -_strcoll.restype = c_int - -_wcscoll = libc.wcscoll -_wcscoll.argtypes = (c_wchar_p, c_wchar_p) -_wcscoll.restype = c_int - -_strxfrm = libc.strxfrm -_strxfrm.argtypes = (c_char_p, c_char_p, c_size_t) -_strxfrm.restype = c_size_t - -HAS_LIBINTL = hasattr(libc, 'gettext') -if HAS_LIBINTL: - _gettext = libc.gettext - _gettext.argtypes = (c_char_p,) - _gettext.restype = c_char_p - - _dgettext = libc.dgettext - _dgettext.argtypes = (c_char_p, c_char_p) - _dgettext.restype = c_char_p - - _dcgettext = libc.dcgettext - _dcgettext.argtypes = (c_char_p, c_char_p, c_int) - _dcgettext.restype = c_char_p - - _textdomain = libc.textdomain - _textdomain.argtypes = (c_char_p,) - _textdomain.restype = c_char_p - - _bindtextdomain = libc.bindtextdomain - _bindtextdomain.argtypes = (c_char_p, c_char_p) - _bindtextdomain.restype = c_char_p - - HAS_BIND_TEXTDOMAIN_CODESET = hasattr(libc, 'bindtextdomain_codeset') - if HAS_BIND_TEXTDOMAIN_CODESET: - _bind_textdomain_codeset = libc.bindtextdomain_codeset - _bind_textdomain_codeset.argtypes = (c_char_p, c_char_p) - _bind_textdomain_codeset.restype = c_char_p - -class Error(Exception): - pass - -def fixup_ulcase(): - import string - #import strop - - # create uppercase map string - ul = [] - for c in xrange(256): - c = chr(c) - if c.isupper(): - ul.append(c) - ul = ''.join(ul) - string.uppercase = ul - #strop.uppercase = ul - - # create lowercase string - ul = [] - for c in xrange(256): - c = chr(c) - if c.islower(): - ul.append(c) - ul = ''.join(ul) - string.lowercase = ul - #strop.lowercase = ul - - # create letters string - ul = [] - for c in xrange(256): - c = chr(c) - if c.isalpha(): - ul.append(c) 
- ul = ''.join(ul) - string.letters = ul - - at builtinify -def setlocale(category, locale=None): - "(integer,string=None) -> string. Activates/queries locale processing." - if locale: - # set locale - result = _setlocale(category, locale) - if not result: - raise Error("unsupported locale setting") - - # record changes to LC_CTYPE - if category in (LC_CTYPE, LC_ALL): - fixup_ulcase() - else: - # get locale - result = _setlocale(category, None) - if not result: - raise Error("locale query failed") - return result - -def _copy_grouping(text): - groups = [ ord(group) for group in text ] - if groups: - groups.append(0) - return groups - - at builtinify -def localeconv(): - "() -> dict. Returns numeric and monetary locale-specific parameters." - - # if LC_NUMERIC is different in the C library, use saved value - lp = _localeconv() - l = lp.contents - - # hopefully, the localeconv result survives the C library calls - # involved herein - - # Numeric information - result = { - "decimal_point": l.decimal_point, - "thousands_sep": l.thousands_sep, - "grouping": _copy_grouping(l.grouping), - "int_curr_symbol": l.int_curr_symbol, - "currency_symbol": l.currency_symbol, - "mon_decimal_point": l.mon_decimal_point, - "mon_thousands_sep": l.mon_thousands_sep, - "mon_grouping": _copy_grouping(l.mon_grouping), - "positive_sign": l.positive_sign, - "negative_sign": l.negative_sign, - "int_frac_digits": l.int_frac_digits, - "frac_digits": l.frac_digits, - "p_cs_precedes": l.p_cs_precedes, - "p_sep_by_space": l.p_sep_by_space, - "n_cs_precedes": l.n_cs_precedes, - "n_sep_by_space": l.n_sep_by_space, - "p_sign_posn": l.p_sign_posn, - "n_sign_posn": l.n_sign_posn, - } - return result - - at builtinify -def strcoll(s1, s2): - "string,string -> int. Compares two strings according to the locale." - - # If both arguments are byte strings, use strcoll. - if isinstance(s1, str) and isinstance(s2, str): - return _strcoll(s1, s2) - - # If neither argument is unicode, it's an error. 
- if not isinstance(s1, unicode) and not isinstance(s2, unicode): - raise ValueError("strcoll arguments must be strings") - - # Convert the non-unicode argument to unicode. - s1 = unicode(s1) - s2 = unicode(s2) - - # Collate the strings. - return _wcscoll(s1, s2) - - at builtinify -def strxfrm(s): - "string -> string. Returns a string that behaves for cmp locale-aware." - - # assume no change in size, first - n1 = len(s) + 1 - buf = create_string_buffer(n1) - n2 = _strxfrm(buf, s, n1) + 1 - if n2 > n1: - # more space needed - buf = create_string_buffer(n2) - _strxfrm(buf, s, n2) - return buf.value - - at builtinify -def getdefaultlocale(): - # TODO: Port code from CPython for Windows and Mac OS - raise NotImplementedError() - -if HAS_LANGINFO: - _nl_langinfo = libc.nl_langinfo - _nl_langinfo.argtypes = (nl_item,) - _nl_langinfo.restype = c_char_p - - def nl_langinfo(key): - """nl_langinfo(key) -> string - Return the value for the locale information associated with key.""" - # Check whether this is a supported constant. GNU libc sometimes - # returns numeric values in the char* return value, which would - # crash PyString_FromString. 
- result = _nl_langinfo(key) - if result is not None: - return result - raise ValueError("unsupported langinfo constant") - -if HAS_LIBINTL: - @builtinify - def gettext(msg): - """gettext(msg) -> string - Return translation of msg.""" - return _gettext(msg) - - @builtinify - def dgettext(domain, msg): - """dgettext(domain, msg) -> string - Return translation of msg in domain.""" - return _dgettext(domain, msg) - - @builtinify - def dcgettext(domain, msg, category): - """dcgettext(domain, msg, category) -> string - Return translation of msg in domain and category.""" - return _dcgettext(domain, msg, category) - - @builtinify - def textdomain(domain): - """textdomain(domain) -> string - Set the C library's textdomain to domain, returning the new domain.""" - return _textdomain(domain) - - @builtinify - def bindtextdomain(domain, dir): - """bindtextdomain(domain, dir) -> string - Bind the C library's domain to dir.""" - dirname = _bindtextdomain(domain, dir) - if not dirname: - errno = get_errno() - raise OSError(errno) - return dirname - - if HAS_BIND_TEXTDOMAIN_CODESET: - @builtinify - def bind_textdomain_codeset(domain, codeset): - """bind_textdomain_codeset(domain, codeset) -> string - Bind the C library's domain to codeset.""" - codeset = _bind_textdomain_codeset(domain, codeset) - if codeset: - return codeset - return None - -__all__ = ( - 'Error', - 'setlocale', 'localeconv', 'strxfrm', 'strcoll', -) + ALL_CONSTANTS -if HAS_LIBINTL: - __all__ += ('gettext', 'dgettext', 'dcgettext', 'textdomain', - 'bindtextdomain') - if HAS_BIND_TEXTDOMAIN_CODESET: - __all__ += ('bind_textdomain_codeset',) -if HAS_LANGINFO: - __all__ += ('nl_langinfo',) diff --git a/lib_pypy/array.py b/lib_pypy/array.py deleted file mode 100644 --- a/lib_pypy/array.py +++ /dev/null @@ -1,531 +0,0 @@ -"""This module defines an object type which can efficiently represent -an array of basic values: characters, integers, floating point -numbers. 
Arrays are sequence types and behave very much like lists, -except that the type of objects stored in them is constrained. The -type is specified at object creation time by using a type code, which -is a single character. The following type codes are defined: - - Type code C Type Minimum size in bytes - 'c' character 1 - 'b' signed integer 1 - 'B' unsigned integer 1 - 'u' Unicode character 2 - 'h' signed integer 2 - 'H' unsigned integer 2 - 'i' signed integer 2 - 'I' unsigned integer 2 - 'l' signed integer 4 - 'L' unsigned integer 4 - 'f' floating point 4 - 'd' floating point 8 - -The constructor is: - -array(typecode [, initializer]) -- create a new array -""" - -from struct import calcsize, pack, pack_into, unpack_from -import operator - -# the buffer-like object to use internally: trying from -# various places in order... -try: - import _rawffi # a reasonable implementation based - _RAWARRAY = _rawffi.Array('c') # on raw_malloc, and providing a - def bytebuffer(size): # real address - return _RAWARRAY(size, autofree=True) - def getbufaddress(buf): - return buf.buffer -except ImportError: - try: - from __pypy__ import bytebuffer # a reasonable implementation - def getbufaddress(buf): # compatible with oo backends, - return 0 # but no address - except ImportError: - # not running on PyPy. Fall back to ctypes... - import ctypes - bytebuffer = ctypes.create_string_buffer - def getbufaddress(buf): - voidp = ctypes.cast(ctypes.pointer(buf), ctypes.c_void_p) - return voidp.value - -# ____________________________________________________________ - -TYPECODES = "cbBuhHiIlLfd" - -class array(object): - """array(typecode [, initializer]) -> array - - Return a new array whose items are restricted by typecode, and - initialized from the optional initializer value, which must be a list, - string. or iterable over elements of the appropriate type. - - Arrays represent basic values and behave very much like lists, except - the type of objects stored in them is constrained. 
- - Methods: - - append() -- append a new item to the end of the array - buffer_info() -- return information giving the current memory info - byteswap() -- byteswap all the items of the array - count() -- return number of occurences of an object - extend() -- extend array by appending multiple elements from an iterable - fromfile() -- read items from a file object - fromlist() -- append items from the list - fromstring() -- append items from the string - index() -- return index of first occurence of an object - insert() -- insert a new item into the array at a provided position - pop() -- remove and return item (default last) - read() -- DEPRECATED, use fromfile() - remove() -- remove first occurence of an object - reverse() -- reverse the order of the items in the array - tofile() -- write all items to a file object - tolist() -- return the array converted to an ordinary list - tostring() -- return the array converted to a string - write() -- DEPRECATED, use tofile() - - Attributes: - - typecode -- the typecode character used to create the array - itemsize -- the length in bytes of one array item - """ - __slots__ = ["typecode", "itemsize", "_data", "_descriptor", "__weakref__"] - - def __new__(cls, typecode, initializer=[], **extrakwds): - self = object.__new__(cls) - if cls is array and extrakwds: - raise TypeError("array() does not take keyword arguments") - if not isinstance(typecode, str) or len(typecode) != 1: - raise TypeError( - "array() argument 1 must be char, not %s" % type(typecode)) - if typecode not in TYPECODES: - raise ValueError( - "bad typecode (must be one of %s)" % ', '.join(TYPECODES)) - self._data = bytebuffer(0) - self.typecode = typecode - self.itemsize = calcsize(typecode) - if isinstance(initializer, list): - self.fromlist(initializer) - elif isinstance(initializer, str): - self.fromstring(initializer) - elif isinstance(initializer, unicode) and self.typecode == "u": - self.fromunicode(initializer) - else: - self.extend(initializer) - 
return self - - def _clear(self): - self._data = bytebuffer(0) - - ##### array-specific operations - - def fromfile(self, f, n): - """Read n objects from the file object f and append them to the end of - the array. Also called as read.""" - if not isinstance(f, file): - raise TypeError("arg1 must be open file") - size = self.itemsize * n - item = f.read(size) - if len(item) < size: - raise EOFError("not enough items in file") - self.fromstring(item) - - def fromlist(self, l): - """Append items to array from list.""" - if not isinstance(l, list): - raise TypeError("arg must be list") - self._fromiterable(l) - - def fromstring(self, s): - """Appends items from the string, interpreting it as an array of machine - values, as if it had been read from a file using the fromfile() - method.""" - if isinstance(s, unicode): - s = str(s) - self._frombuffer(s) - - def _frombuffer(self, s): - length = len(s) - if length % self.itemsize != 0: - raise ValueError("string length not a multiple of item size") - boundary = len(self._data) - newdata = bytebuffer(boundary + length) - newdata[:boundary] = self._data - newdata[boundary:] = s - self._data = newdata - - def fromunicode(self, ustr): - """Extends this array with data from the unicode string ustr. The array - must be a type 'u' array; otherwise a ValueError is raised. Use - array.fromstring(ustr.encode(...)) to append Unicode data to an array of - some other type.""" - if not self.typecode == "u": - raise ValueError( - "fromunicode() may only be called on type 'u' arrays") - # XXX the following probable bug is not emulated: - # CPython accepts a non-unicode string or a buffer, and then - # behaves just like fromstring(), except that it strangely truncates - # string arguments at multiples of the unicode byte size. - # Let's only accept unicode arguments for now. 
- if not isinstance(ustr, unicode): - raise TypeError("fromunicode() argument should probably be " - "a unicode string") - # _frombuffer() does the currect thing using - # the buffer behavior of unicode objects - self._frombuffer(buffer(ustr)) - - def tofile(self, f): - """Write all items (as machine values) to the file object f. Also - called as write.""" - if not isinstance(f, file): - raise TypeError("arg must be open file") - f.write(self.tostring()) - - def tolist(self): - """Convert array to an ordinary list with the same items.""" - count = len(self._data) // self.itemsize - return list(unpack_from('%d%s' % (count, self.typecode), self._data)) - - def tostring(self): - return self._data[:] - - def __buffer__(self): - return buffer(self._data) - - def tounicode(self): - """Convert the array to a unicode string. The array must be a type 'u' - array; otherwise a ValueError is raised. Use array.tostring().decode() - to obtain a unicode string from an array of some other type.""" - if self.typecode != "u": - raise ValueError("tounicode() may only be called on type 'u' arrays") - # XXX performance is not too good - return u"".join(self.tolist()) - - def byteswap(self): - """Byteswap all items of the array. If the items in the array are not - 1, 2, 4, or 8 bytes in size, RuntimeError is raised.""" - if self.itemsize not in [1, 2, 4, 8]: - raise RuntimeError("byteswap not supported for this array") - # XXX slowish - itemsize = self.itemsize - bytes = self._data - for start in range(0, len(bytes), itemsize): - stop = start + itemsize - bytes[start:stop] = bytes[start:stop][::-1] - - def buffer_info(self): - """Return a tuple (address, length) giving the current memory address - and the length in items of the buffer used to hold array's contents. The - length should be multiplied by the itemsize attribute to calculate the - buffer length in bytes. 
On PyPy the address might be meaningless - (returned as 0), depending on the available modules.""" - return (getbufaddress(self._data), len(self)) - - read = fromfile - - write = tofile - - ##### general object protocol - - def __repr__(self): - if len(self._data) == 0: - return "array('%s')" % self.typecode - elif self.typecode == "c": - return "array('%s', %s)" % (self.typecode, repr(self.tostring())) - elif self.typecode == "u": - return "array('%s', %s)" % (self.typecode, repr(self.tounicode())) - else: - return "array('%s', %s)" % (self.typecode, repr(self.tolist())) - - def __copy__(self): - a = array(self.typecode) - a._data = bytebuffer(len(self._data)) - a._data[:] = self._data - return a - - def __eq__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) == buffer(other._data) - else: - return self.tolist() == other.tolist() - - def __ne__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) != buffer(other._data) - else: - return self.tolist() != other.tolist() - - def __lt__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) < buffer(other._data) - else: - return self.tolist() < other.tolist() - - def __gt__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) > buffer(other._data) - else: - return self.tolist() > other.tolist() - - def __le__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) <= buffer(other._data) - else: - return self.tolist() <= other.tolist() - - def __ge__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) >= buffer(other._data) - else: - return self.tolist() >= other.tolist() - - 
def __reduce__(self): - dict = getattr(self, '__dict__', None) - data = self.tostring() - if data: - initargs = (self.typecode, data) - else: - initargs = (self.typecode,) - return (type(self), initargs, dict) - - ##### list methods - - def append(self, x): - """Append new value x to the end of the array.""" - self._frombuffer(pack(self.typecode, x)) - - def count(self, x): - """Return number of occurences of x in the array.""" - return operator.countOf(self, x) - - def extend(self, iterable): - """Append items to the end of the array.""" - if isinstance(iterable, array) \ - and not self.typecode == iterable.typecode: - raise TypeError("can only extend with array of same kind") - self._fromiterable(iterable) - - def index(self, x): - """Return index of first occurence of x in the array.""" - return operator.indexOf(self, x) - - def insert(self, i, x): - """Insert a new item x into the array before position i.""" - seqlength = len(self) - if i < 0: - i += seqlength - if i < 0: - i = 0 - elif i > seqlength: - i = seqlength - boundary = i * self.itemsize - data = pack(self.typecode, x) - newdata = bytebuffer(len(self._data) + len(data)) - newdata[:boundary] = self._data[:boundary] - newdata[boundary:boundary+self.itemsize] = data - newdata[boundary+self.itemsize:] = self._data[boundary:] - self._data = newdata - - def pop(self, i=-1): - """Return the i-th element and delete it from the array. 
i defaults to - -1.""" - seqlength = len(self) - if i < 0: - i += seqlength - if not (0 <= i < seqlength): - raise IndexError(i) - boundary = i * self.itemsize - result = unpack_from(self.typecode, self._data, boundary)[0] - newdata = bytebuffer(len(self._data) - self.itemsize) - newdata[:boundary] = self._data[:boundary] - newdata[boundary:] = self._data[boundary+self.itemsize:] - self._data = newdata - return result - - def remove(self, x): - """Remove the first occurence of x in the array.""" - self.pop(self.index(x)) - - def reverse(self): - """Reverse the order of the items in the array.""" - lst = self.tolist() - lst.reverse() - self._clear() - self.fromlist(lst) - - ##### list protocol - - def __len__(self): - return len(self._data) // self.itemsize - - def __add__(self, other): - if not isinstance(other, array): - raise TypeError("can only append array to array") - if self.typecode != other.typecode: - raise TypeError("bad argument type for built-in operation") - return array(self.typecode, buffer(self._data) + buffer(other._data)) - - def __mul__(self, repeat): - return array(self.typecode, buffer(self._data) * repeat) - - __rmul__ = __mul__ - - def __getitem__(self, i): - seqlength = len(self) - if isinstance(i, slice): - start, stop, step = i.indices(seqlength) - if step != 1: - sublist = self.tolist()[i] # fall-back - return array(self.typecode, sublist) - if start < 0: - start = 0 - if stop < start: - stop = start - assert stop <= seqlength - return array(self.typecode, self._data[start * self.itemsize : - stop * self.itemsize]) - else: - if i < 0: - i += seqlength - if self.typecode == 'c': # speed trick - return self._data[i] - if not (0 <= i < seqlength): - raise IndexError(i) - boundary = i * self.itemsize - return unpack_from(self.typecode, self._data, boundary)[0] - - def __getslice__(self, i, j): - return self.__getitem__(slice(i, j)) - - def __setitem__(self, i, x): - if isinstance(i, slice): - if (not isinstance(x, array) - or self.typecode != 
x.typecode): - raise TypeError("can only assign array of same kind" - " to array slice") - seqlength = len(self) - start, stop, step = i.indices(seqlength) - if step != 1: - sublist = self.tolist() # fall-back - sublist[i] = x.tolist() - self._clear() - self.fromlist(sublist) - return - if start < 0: - start = 0 - if stop < start: - stop = start - assert stop <= seqlength - boundary1 = start * self.itemsize - boundary2 = stop * self.itemsize - boundary2new = boundary1 + len(x._data) - if boundary2 == boundary2new: - self._data[boundary1:boundary2] = x._data - else: - newdata = bytebuffer(len(self._data) + boundary2new-boundary2) - newdata[:boundary1] = self._data[:boundary1] - newdata[boundary1:boundary2new] = x._data - newdata[boundary2new:] = self._data[boundary2:] - self._data = newdata - else: - seqlength = len(self) - if i < 0: - i += seqlength - if self.typecode == 'c': # speed trick - self._data[i] = x - return - if not (0 <= i < seqlength): - raise IndexError(i) - boundary = i * self.itemsize - pack_into(self.typecode, self._data, boundary, x) - - def __setslice__(self, i, j, x): - self.__setitem__(slice(i, j), x) - - def __delitem__(self, i): - if isinstance(i, slice): - seqlength = len(self) - start, stop, step = i.indices(seqlength) - if start < 0: - start = 0 - if stop < start: - stop = start - assert stop <= seqlength - if step != 1: - sublist = self.tolist() # fall-back - del sublist[i] - self._clear() - self.fromlist(sublist) - return - dellength = stop - start - boundary1 = start * self.itemsize - boundary2 = stop * self.itemsize - newdata = bytebuffer(len(self._data) - (boundary2-boundary1)) - newdata[:boundary1] = self._data[:boundary1] - newdata[boundary1:] = self._data[boundary2:] - self._data = newdata - else: - seqlength = len(self) - if i < 0: - i += seqlength - if not (0 <= i < seqlength): - raise IndexError(i) - boundary = i * self.itemsize - newdata = bytebuffer(len(self._data) - self.itemsize) - newdata[:boundary] = self._data[:boundary] 
- newdata[boundary:] = self._data[boundary+self.itemsize:] - self._data = newdata - - def __delslice__(self, i, j): - self.__delitem__(slice(i, j)) - - def __contains__(self, item): - for x in self: - if x == item: - return True - return False - - def __iadd__(self, other): - if not isinstance(other, array): - raise TypeError("can only extend array with array") - self.extend(other) - return self - - def __imul__(self, repeat): - newdata = buffer(self._data) * repeat - self._data = bytebuffer(len(newdata)) - self._data[:] = newdata - return self - - def __iter__(self): - p = 0 - typecode = self.typecode - itemsize = self.itemsize - while p < len(self._data): - yield unpack_from(typecode, self._data, p)[0] - p += itemsize - - ##### internal methods - - def _fromiterable(self, iterable): - iterable = tuple(iterable) - n = len(iterable) - boundary = len(self._data) - newdata = bytebuffer(boundary + n * self.itemsize) - newdata[:boundary] = self._data - pack_into('%d%s' % (n, self.typecode), newdata, boundary, *iterable) - self._data = newdata - -ArrayType = array diff --git a/lib_pypy/binascii.py b/lib_pypy/binascii.py deleted file mode 100644 --- a/lib_pypy/binascii.py +++ /dev/null @@ -1,720 +0,0 @@ -"""A pure Python implementation of binascii. - -Rather slow and buggy in corner cases. -PyPy provides an RPython version too. 
-""" - -class Error(Exception): - pass - -class Done(Exception): - pass - -class Incomplete(Exception): - pass - -def a2b_uu(s): - if not s: - return '' - - length = (ord(s[0]) - 0x20) % 64 - - def quadruplets_gen(s): - while s: - try: - yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3]) - except IndexError: - s += ' ' - yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3]) - return - s = s[4:] - - try: - result = [''.join( - [chr((A - 0x20) << 2 | (((B - 0x20) >> 4) & 0x3)), - chr(((B - 0x20) & 0xf) << 4 | (((C - 0x20) >> 2) & 0xf)), - chr(((C - 0x20) & 0x3) << 6 | ((D - 0x20) & 0x3f)) - ]) for A, B, C, D in quadruplets_gen(s[1:].rstrip())] - except ValueError: - raise Error('Illegal char') - result = ''.join(result) - trailingdata = result[length:] - if trailingdata.strip('\x00'): - raise Error('Trailing garbage') - result = result[:length] - if len(result) < length: - result += ((length - len(result)) * '\x00') - return result - - -def b2a_uu(s): - length = len(s) - if length > 45: - raise Error('At most 45 bytes at once') - - def triples_gen(s): - while s: - try: - yield ord(s[0]), ord(s[1]), ord(s[2]) - except IndexError: - s += '\0\0' - yield ord(s[0]), ord(s[1]), ord(s[2]) - return - s = s[3:] - - result = [''.join( - [chr(0x20 + (( A >> 2 ) & 0x3F)), - chr(0x20 + (((A << 4) | ((B >> 4) & 0xF)) & 0x3F)), - chr(0x20 + (((B << 2) | ((C >> 6) & 0x3)) & 0x3F)), - chr(0x20 + (( C ) & 0x3F))]) - for A, B, C in triples_gen(s)] - return chr(ord(' ') + (length & 077)) + ''.join(result) + '\n' - - -table_a2b_base64 = { - 'A': 0, - 'B': 1, - 'C': 2, - 'D': 3, - 'E': 4, - 'F': 5, - 'G': 6, - 'H': 7, - 'I': 8, - 'J': 9, - 'K': 10, - 'L': 11, - 'M': 12, - 'N': 13, - 'O': 14, - 'P': 15, - 'Q': 16, - 'R': 17, - 'S': 18, - 'T': 19, - 'U': 20, - 'V': 21, - 'W': 22, - 'X': 23, - 'Y': 24, - 'Z': 25, - 'a': 26, - 'b': 27, - 'c': 28, - 'd': 29, - 'e': 30, - 'f': 31, - 'g': 32, - 'h': 33, - 'i': 34, - 'j': 35, - 'k': 36, - 'l': 37, - 'm': 38, - 'n': 39, - 'o': 40, - 'p': 41, - 'q': 42, - 
'r': 43, - 's': 44, - 't': 45, - 'u': 46, - 'v': 47, - 'w': 48, - 'x': 49, - 'y': 50, - 'z': 51, - '0': 52, - '1': 53, - '2': 54, - '3': 55, - '4': 56, - '5': 57, - '6': 58, - '7': 59, - '8': 60, - '9': 61, - '+': 62, - '/': 63, - '=': 0, -} - - -def a2b_base64(s): - if not isinstance(s, (str, unicode)): - raise TypeError("expected string or unicode, got %r" % (s,)) - s = s.rstrip() - # clean out all invalid characters, this also strips the final '=' padding - # check for correct padding - - def next_valid_char(s, pos): - for i in range(pos + 1, len(s)): - c = s[i] - if c < '\x7f': - try: - table_a2b_base64[c] - return c - except KeyError: - pass - return None - - quad_pos = 0 - leftbits = 0 - leftchar = 0 - res = [] - for i, c in enumerate(s): - if c > '\x7f' or c == '\n' or c == '\r' or c == ' ': - continue - if c == '=': - if quad_pos < 2 or (quad_pos == 2 and next_valid_char(s, i) != '='): - continue - else: - leftbits = 0 - break - try: - next_c = table_a2b_base64[c] - except KeyError: - continue - quad_pos = (quad_pos + 1) & 0x03 - leftchar = (leftchar << 6) | next_c - leftbits += 6 - if leftbits >= 8: - leftbits -= 8 - res.append((leftchar >> leftbits & 0xff)) - leftchar &= ((1 << leftbits) - 1) - if leftbits != 0: - raise Error('Incorrect padding') - - return ''.join([chr(i) for i in res]) - -table_b2a_base64 = \ -"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" - -def b2a_base64(s): - length = len(s) - final_length = length % 3 - - def triples_gen(s): - while s: - try: - yield ord(s[0]), ord(s[1]), ord(s[2]) - except IndexError: - s += '\0\0' - yield ord(s[0]), ord(s[1]), ord(s[2]) - return - s = s[3:] - - - a = triples_gen(s[ :length - final_length]) - - result = [''.join( - [table_b2a_base64[( A >> 2 ) & 0x3F], - table_b2a_base64[((A << 4) | ((B >> 4) & 0xF)) & 0x3F], - table_b2a_base64[((B << 2) | ((C >> 6) & 0x3)) & 0x3F], - table_b2a_base64[( C ) & 0x3F]]) - for A, B, C in a] - - final = s[length - final_length:] - if final_length == 
0: - snippet = '' - elif final_length == 1: - a = ord(final[0]) - snippet = table_b2a_base64[(a >> 2 ) & 0x3F] + \ - table_b2a_base64[(a << 4 ) & 0x3F] + '==' - else: - a = ord(final[0]) - b = ord(final[1]) - snippet = table_b2a_base64[(a >> 2) & 0x3F] + \ - table_b2a_base64[((a << 4) | (b >> 4) & 0xF) & 0x3F] + \ - table_b2a_base64[(b << 2) & 0x3F] + '=' - return ''.join(result) + snippet + '\n' - -def a2b_qp(s, header=False): - inp = 0 - odata = [] - while inp < len(s): - if s[inp] == '=': - inp += 1 - if inp >= len(s): - break - # Soft line breaks - if (s[inp] == '\n') or (s[inp] == '\r'): - if s[inp] != '\n': - while inp < len(s) and s[inp] != '\n': - inp += 1 - if inp < len(s): - inp += 1 - elif s[inp] == '=': - # broken case from broken python qp - odata.append('=') - inp += 1 - elif s[inp] in hex_numbers and s[inp + 1] in hex_numbers: - ch = chr(int(s[inp:inp+2], 16)) - inp += 2 - odata.append(ch) - else: - odata.append('=') - elif header and s[inp] == '_': - odata.append(' ') - inp += 1 - else: - odata.append(s[inp]) - inp += 1 - return ''.join(odata) - -def b2a_qp(data, quotetabs=False, istext=True, header=False): - """quotetabs=True means that tab and space characters are always - quoted. - istext=False means that \r and \n are treated as regular characters - header=True encodes space characters with '_' and requires - real '_' characters to be quoted. - """ - MAXLINESIZE = 76 - - # See if this string is using CRLF line ends - lf = data.find('\n') - crlf = lf > 0 and data[lf-1] == '\r' - - inp = 0 - linelen = 0 - odata = [] - while inp < len(data): - c = data[inp] - if (c > '~' or - c == '=' or - (header and c == '_') or - (c == '.' 
and linelen == 0 and (inp+1 == len(data) or - data[inp+1] == '\n' or - data[inp+1] == '\r')) or - (not istext and (c == '\r' or c == '\n')) or - ((c == '\t' or c == ' ') and (inp + 1 == len(data))) or - (c <= ' ' and c != '\r' and c != '\n' and - (quotetabs or (not quotetabs and (c != '\t' and c != ' '))))): - linelen += 3 - if linelen >= MAXLINESIZE: - odata.append('=') - if crlf: odata.append('\r') - odata.append('\n') - linelen = 3 - odata.append('=' + two_hex_digits(ord(c))) - inp += 1 - else: - if (istext and - (c == '\n' or (inp+1 < len(data) and c == '\r' and - data[inp+1] == '\n'))): - linelen = 0 - # Protect against whitespace on end of line - if (len(odata) > 0 and - (odata[-1] == ' ' or odata[-1] == '\t')): - ch = ord(odata[-1]) - odata[-1] = '=' - odata.append(two_hex_digits(ch)) - - if crlf: odata.append('\r') - odata.append('\n') - if c == '\r': - inp += 2 - else: - inp += 1 - else: - if (inp + 1 < len(data) and - data[inp+1] != '\n' and - (linelen + 1) >= MAXLINESIZE): - odata.append('=') - if crlf: odata.append('\r') - odata.append('\n') - linelen = 0 - - linelen += 1 - if header and c == ' ': - c = '_' - odata.append(c) - inp += 1 - return ''.join(odata) - -hex_numbers = '0123456789ABCDEF' -def hex(n): - if n == 0: - return '0' - - if n < 0: - n = -n - sign = '-' - else: - sign = '' - arr = [] - - def hex_gen(n): - """ Yield a nibble at a time. 
""" - while n: - yield n % 0x10 - n = n / 0x10 - - for nibble in hex_gen(n): - arr = [hex_numbers[nibble]] + arr - return sign + ''.join(arr) - -def two_hex_digits(n): - return hex_numbers[n / 0x10] + hex_numbers[n % 0x10] - - -def strhex_to_int(s): - i = 0 - for c in s: - i = i * 0x10 + hex_numbers.index(c) - return i - -hqx_encoding = '!"#$%&\'()*+,-012345689 at ABCDEFGHIJKLMNPQRSTUVXYZ[`abcdefhijklmpqr' - -DONE = 0x7f -SKIP = 0x7e -FAIL = 0x7d - -table_a2b_hqx = [ - #^@ ^A ^B ^C ^D ^E ^F ^G - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - #\b \t \n ^K ^L \r ^N ^O - FAIL, FAIL, SKIP, FAIL, FAIL, SKIP, FAIL, FAIL, - #^P ^Q ^R ^S ^T ^U ^V ^W - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - #^X ^Y ^Z ^[ ^\ ^] ^^ ^_ - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - # ! " # $ % & ' - FAIL, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, - #( ) * + , - . / - 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, FAIL, FAIL, - #0 1 2 3 4 5 6 7 - 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, FAIL, - #8 9 : ; < = > ? - 0x14, 0x15, DONE, FAIL, FAIL, FAIL, FAIL, FAIL, - #@ A B C D E F G - 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, - #H I J K L M N O - 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, FAIL, - #P Q R S T U V W - 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, FAIL, - #X Y Z [ \ ] ^ _ - 0x2C, 0x2D, 0x2E, 0x2F, FAIL, FAIL, FAIL, FAIL, - #` a b c d e f g - 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, FAIL, - #h i j k l m n o - 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, FAIL, FAIL, - #p q r s t u v w - 0x3D, 0x3E, 0x3F, FAIL, FAIL, FAIL, FAIL, FAIL, - #x y z { | } ~ ^? 
- FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, -] - -def a2b_hqx(s): - result = [] - - def quadruples_gen(s): - t = [] - for c in s: - res = table_a2b_hqx[ord(c)] - if res == SKIP: - continue - elif res == FAIL: - raise Error('Illegal character') - elif res == DONE: - yield t - raise Done - else: - t.append(res) - if len(t) == 4: - yield t - t = [] - yield t - - done = 0 - try: - for snippet in quadruples_gen(s): - length = len(snippet) - if length == 4: - result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4))) - result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2))) - result.append(chr(((snippet[2] & 0x03) << 6) | (snippet[3]))) - elif length == 3: - result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4))) - result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2))) - elif length == 2: - result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4))) - except Done: - done = 1 - except Error: - raise - return (''.join(result), done) - -def b2a_hqx(s): - result =[] - - def triples_gen(s): - while s: - try: - yield ord(s[0]), ord(s[1]), ord(s[2]) - except IndexError: - yield tuple([ord(c) for c in s]) - s = s[3:] - - for snippet in 
triples_gen(s): - length = len(snippet) - if length == 3: - result.append( - hqx_encoding[(snippet[0] & 0xfc) >> 2]) - result.append(hqx_encoding[ - ((snippet[0] & 0x03) << 4) | ((snippet[1] & 0xf0) >> 4)]) - result.append(hqx_encoding[ - (snippet[1] & 0x0f) << 2 | ((snippet[2] & 0xc0) >> 6)]) - result.append(hqx_encoding[snippet[2] & 0x3f]) - elif length == 2: - result.append( - hqx_encoding[(snippet[0] & 0xfc) >> 2]) - result.append(hqx_encoding[ - ((snippet[0] & 0x03) << 4) | ((snippet[1] & 0xf0) >> 4)]) - result.append(hqx_encoding[ - (snippet[1] & 0x0f) << 2]) - elif length == 1: - result.append( - hqx_encoding[(snippet[0] & 0xfc) >> 2]) - result.append(hqx_encoding[ - ((snippet[0] & 0x03) << 4)]) - return ''.join(result) - -crctab_hqx = [ - 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, - 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, - 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, - 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, - 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, - 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, - 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, - 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, - 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, - 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, - 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12, - 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, - 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, - 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, - 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, - 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, - 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, - 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, - 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 
0xe37f, 0xf35e, - 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, - 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, - 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, - 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, - 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, - 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, - 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, - 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, - 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, - 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, - 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, - 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, - 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0, -] - -def crc_hqx(s, crc): - for c in s: - crc = ((crc << 8) & 0xff00) ^ crctab_hqx[((crc >> 8) & 0xff) ^ ord(c)] - - return crc - -def rlecode_hqx(s): - """ - Run length encoding for binhex4. - The CPython implementation does not do run length encoding - of \x90 characters. This implementation does. - """ - if not s: - return '' - result = [] - prev = s[0] - count = 1 - # Add a dummy character to get the loop to go one extra round. - # The dummy must be different from the last character of s. - # In the same step we remove the first character, which has - # already been stored in prev. - if s[-1] == '!': - s = s[1:] + '?' - else: - s = s[1:] + '!' 
- - for c in s: - if c == prev and count < 255: - count += 1 - else: - if count == 1: - if prev != '\x90': - result.append(prev) - else: - result.extend(['\x90', '\x00']) - elif count < 4: - if prev != '\x90': - result.extend([prev] * count) - else: - result.extend(['\x90', '\x00'] * count) - else: - if prev != '\x90': - result.extend([prev, '\x90', chr(count)]) - else: - result.extend(['\x90', '\x00', '\x90', chr(count)]) - count = 1 - prev = c - - return ''.join(result) - -def rledecode_hqx(s): - s = s.split('\x90') - result = [s[0]] - prev = s[0] - for snippet in s[1:]: - count = ord(snippet[0]) - if count > 0: - result.append(prev[-1] * (count-1)) - prev = snippet - else: - result. append('\x90') - prev = '\x90' - result.append(snippet[1:]) - - return ''.join(result) - -crc_32_tab = [ - 0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L, - 0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L, - 0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L, - 0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL, - 0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L, - 0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L, - 0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L, - 0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL, - 0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L, - 0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL, - 0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L, - 0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L, - 0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L, - 0x01db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x06b6b51fL, - 0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0x0f00f934L, 0x9609a88eL, - 0xe10e9818L, 0x7f6a0dbbL, 0x086d3d2dL, 0x91646c97L, 0xe6635c01L, - 0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL, - 0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 
0x12b7e950L, - 0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L, - 0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L, - 0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL, - 0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L, - 0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L, - 0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL, - 0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L, - 0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L, - 0x03b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x04db2615L, - 0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0x0d6d6a3eL, 0x7a6a5aa8L, - 0xe40ecf0bL, 0x9309ff9dL, 0x0a00ae27L, 0x7d079eb1L, 0xf00f9344L, - 0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL, - 0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL, - 0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L, - 0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L, - 0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL, - 0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL, - 0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L, - 0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL, - 0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L, - 0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL, - 0x026d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x05005713L, - 0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0x0cb61b38L, 0x92d28e9bL, - 0xe5d5be0dL, 0x7cdcefb7L, 0x0bdbdf21L, 0x86d3d2d4L, 0xf1d4e242L, - 0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L, - 0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL, - 0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L, - 0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L, - 0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L, - 0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 
0x47b2cf7fL, 0x30b5ffe9L, - 0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L, - 0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L, - 0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL, - 0x2d02ef8dL -] - -def crc32(s, crc=0): - result = 0 - crc = ~long(crc) & 0xffffffffL - for c in s: - crc = crc_32_tab[(crc ^ long(ord(c))) & 0xffL] ^ (crc >> 8) - #/* Note: (crc >> 8) MUST zero fill on left - - result = crc ^ 0xffffffffL - - if result > 2**31: - result = ((result + 2**31) % 2**32) - 2**31 - - return result - -def b2a_hex(s): - result = [] - for char in s: - c = (ord(char) >> 4) & 0xf - if c > 9: - c = c + ord('a') - 10 - else: - c = c + ord('0') - result.append(chr(c)) - c = ord(char) & 0xf - if c > 9: - c = c + ord('a') - 10 - else: - c = c + ord('0') - result.append(chr(c)) - return ''.join(result) - -hexlify = b2a_hex - -table_hex = [ - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,-1,-1, -1,-1,-1,-1, - -1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1 -] - - -def a2b_hex(t): - result = [] - - def pairs_gen(s): - while s: - try: - yield table_hex[ord(s[0])], table_hex[ord(s[1])] - except IndexError: - if len(s): - raise TypeError('Odd-length string') - return - s = s[2:] - - for a, b in pairs_gen(t): - if a < 0 or b < 0: - raise TypeError('Non-hexadecimal digit found') - result.append(chr((a << 4) + b)) - return ''.join(result) - - -unhexlify = a2b_hex diff --git a/lib_pypy/numpypy/core/numeric.py b/lib_pypy/numpypy/core/numeric.py --- a/lib_pypy/numpypy/core/numeric.py +++ b/lib_pypy/numpypy/core/numeric.py @@ -306,6 +306,125 @@ else: return multiarray.set_string_function(f, repr) +def array_equal(a1, a2): + """ + 
True if two arrays have the same shape and elements, False otherwise. + + Parameters + ---------- + a1, a2 : array_like + Input arrays. + + Returns + ------- + b : bool + Returns True if the arrays are equal. + + See Also + -------- + allclose: Returns True if two arrays are element-wise equal within a + tolerance. + array_equiv: Returns True if input arrays are shape consistent and all + elements equal. + + Examples + -------- + >>> np.array_equal([1, 2], [1, 2]) + True + >>> np.array_equal(np.array([1, 2]), np.array([1, 2])) + True + >>> np.array_equal([1, 2], [1, 2, 3]) + False + >>> np.array_equal([1, 2], [1, 4]) + False + + """ + try: + a1, a2 = asarray(a1), asarray(a2) + except: + return False + if a1.shape != a2.shape: + return False + return bool((a1 == a2).all()) + +def asarray(a, dtype=None, order=None, maskna=None, ownmaskna=False): + """ + Convert the input to an array. + + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to an array. This + includes lists, lists of tuples, tuples, tuples of tuples, tuples + of lists and ndarrays. + dtype : data-type, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F'}, optional + Whether to use row-major ('C') or column-major ('F' for FORTRAN) + memory representation. Defaults to 'C'. + maskna : bool or None, optional + If this is set to True, it forces the array to have an NA mask. + If this is set to False, it forces the array to not have an NA + mask. + ownmaskna : bool, optional + If this is set to True, forces the array to have a mask which + it owns. + + Returns + ------- + out : ndarray + Array interpretation of `a`. No copy is performed if the input + is already an ndarray. If `a` is a subclass of ndarray, a base + class ndarray is returned. + + See Also + -------- + asanyarray : Similar function which passes through subclasses. + ascontiguousarray : Convert input to a contiguous array. 
+ asfarray : Convert input to a floating point ndarray. + asfortranarray : Convert input to an ndarray with column-major + memory order. + asarray_chkfinite : Similar function which checks input for NaNs and Infs. + fromiter : Create an array from an iterator. + fromfunction : Construct an array by executing a function on grid + positions. + + Examples + -------- + Convert a list into an array: + + >>> a = [1, 2] + >>> np.asarray(a) + array([1, 2]) + + Existing arrays are not copied: + + >>> a = np.array([1, 2]) + >>> np.asarray(a) is a + True + + If `dtype` is set, array is copied only if dtype does not match: + + >>> a = np.array([1, 2], dtype=np.float32) + >>> np.asarray(a, dtype=np.float32) is a + True + >>> np.asarray(a, dtype=np.float64) is a + False + + Contrary to `asanyarray`, ndarray subclasses are not passed through: + + >>> issubclass(np.matrix, np.ndarray) + True + >>> a = np.matrix([[1, 2]]) + >>> np.asarray(a) is a + False + >>> np.asanyarray(a) is a + True + + """ + return array(a, dtype, copy=False, order=order, + maskna=maskna, ownmaskna=ownmaskna) + set_string_function(array_str, 0) set_string_function(array_repr, 1) diff --git a/lib_pypy/pypy_test/test_binascii.py b/lib_pypy/pypy_test/test_binascii.py deleted file mode 100644 --- a/lib_pypy/pypy_test/test_binascii.py +++ /dev/null @@ -1,168 +0,0 @@ -from __future__ import absolute_import -import py -from lib_pypy import binascii - -# Create binary test data -data = "The quick brown fox jumps over the lazy dog.\r\n" -# Be slow so we don't depend on other modules -data += "".join(map(chr, xrange(256))) -data += "\r\nHello world.\n" - -def test_exceptions(): - # Check module exceptions - assert issubclass(binascii.Error, Exception) - assert issubclass(binascii.Incomplete, Exception) - -def test_functions(): - # Check presence of all functions - funcs = [] - for suffix in "base64", "hqx", "uu", "hex": - prefixes = ["a2b_", "b2a_"] - if suffix == "hqx": - prefixes.extend(["crc_", "rlecode_", 
"rledecode_"]) - for prefix in prefixes: - name = prefix + suffix - assert callable(getattr(binascii, name)) - py.test.raises(TypeError, getattr(binascii, name)) - for name in ("hexlify", "unhexlify"): - assert callable(getattr(binascii, name)) - py.test.raises(TypeError, getattr(binascii, name)) - -def test_base64valid(): - # Test base64 with valid data - MAX_BASE64 = 57 - lines = [] - for i in range(0, len(data), MAX_BASE64): - b = data[i:i+MAX_BASE64] - a = binascii.b2a_base64(b) - lines.append(a) - res = "" - for line in lines: - b = binascii.a2b_base64(line) - res = res + b - assert res == data - -def test_base64invalid(): - # Test base64 with random invalid characters sprinkled throughout - # (This requires a new version of binascii.) - MAX_BASE64 = 57 - lines = [] - for i in range(0, len(data), MAX_BASE64): - b = data[i:i+MAX_BASE64] - a = binascii.b2a_base64(b) - lines.append(a) - - fillers = "" - valid = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+/" - for i in xrange(256): - c = chr(i) - if c not in valid: - fillers += c - def addnoise(line): - noise = fillers - ratio = len(line) // len(noise) - res = "" - while line and noise: - if len(line) // len(noise) > ratio: - c, line = line[0], line[1:] - else: - c, noise = noise[0], noise[1:] - res += c - return res + noise + line - res = "" - for line in map(addnoise, lines): - b = binascii.a2b_base64(line) - res += b - assert res == data - - # Test base64 with just invalid characters, which should return - # empty strings. TBD: shouldn't it raise an exception instead ? 
- assert binascii.a2b_base64(fillers) == '' - -def test_uu(): - MAX_UU = 45 - lines = [] - for i in range(0, len(data), MAX_UU): - b = data[i:i+MAX_UU] - a = binascii.b2a_uu(b) - lines.append(a) - res = "" - for line in lines: - b = binascii.a2b_uu(line) - res += b - assert res == data - - assert binascii.a2b_uu("\x7f") == "\x00"*31 - assert binascii.a2b_uu("\x80") == "\x00"*32 - assert binascii.a2b_uu("\xff") == "\x00"*31 - py.test.raises(binascii.Error, binascii.a2b_uu, "\xff\x00") - py.test.raises(binascii.Error, binascii.a2b_uu, "!!!!") - - py.test.raises(binascii.Error, binascii.b2a_uu, 46*"!") - -def test_crc32(): - crc = binascii.crc32("Test the CRC-32 of") - crc = binascii.crc32(" this string.", crc) - assert crc == 1571220330 - - crc = binascii.crc32('frotz\n', 0) - assert crc == -372923920 - - py.test.raises(TypeError, binascii.crc32) - -def test_hex(): - # test hexlification - s = '{s\005\000\000\000worldi\002\000\000\000s\005\000\000\000helloi\001\000\000\0000' - t = binascii.b2a_hex(s) - u = binascii.a2b_hex(t) - assert s == u - py.test.raises(TypeError, binascii.a2b_hex, t[:-1]) - py.test.raises(TypeError, binascii.a2b_hex, t[:-1] + 'q') - - # Verify the treatment of Unicode strings - assert binascii.hexlify(unicode('a', 'ascii')) == '61' - -def test_qp(): - # A test for SF bug 534347 (segfaults without the proper fix) - try: - binascii.a2b_qp("", **{1:1}) - except TypeError: - pass - else: - fail("binascii.a2b_qp(**{1:1}) didn't raise TypeError") - assert binascii.a2b_qp("= ") == "= " - assert binascii.a2b_qp("==") == "=" - assert binascii.a2b_qp("=AX") == "=AX" - py.test.raises(TypeError, binascii.b2a_qp, foo="bar") - assert binascii.a2b_qp("=00\r\n=00") == "\x00\r\n\x00" - assert binascii.b2a_qp("\xff\r\n\xff\n\xff") == "=FF\r\n=FF\r\n=FF" - target = "0"*75+"=\r\n=FF\r\n=FF\r\n=FF" - assert binascii.b2a_qp("0"*75+"\xff\r\n\xff\r\n\xff") == target - -def test_empty_string(): - # A test for SF bug #1022953. Make sure SystemError is not raised. 
- for n in ['b2a_qp', 'a2b_hex', 'b2a_base64', 'a2b_uu', 'a2b_qp', - 'b2a_hex', 'unhexlify', 'hexlify', 'crc32', 'b2a_hqx', - 'a2b_hqx', 'a2b_base64', 'rlecode_hqx', 'b2a_uu', - 'rledecode_hqx']: - f = getattr(binascii, n) - f('') - binascii.crc_hqx('', 0) - -def test_qp_bug_case(): - assert binascii.b2a_qp('y'*77, False, False) == 'y'*75 + '=\nyy' - assert binascii.b2a_qp(' '*77, False, False) == ' '*75 + '=\n =20' - assert binascii.b2a_qp('y'*76, False, False) == 'y'*76 - assert binascii.b2a_qp(' '*76, False, False) == ' '*75 + '=\n=20' - -def test_wrong_padding(): - s = 'CSixpLDtKSC/7Liuvsax4iC6uLmwMcijIKHaILzSwd/H0SC8+LCjwLsgv7W/+Mj3IQ' - py.test.raises(binascii.Error, binascii.a2b_base64, s) - -def test_crap_after_padding(): - s = 'xxx=axxxx' - assert binascii.a2b_base64(s) == '\xc7\x1c' - -def test_wrong_args(): - # this should grow as a way longer list - py.test.raises(TypeError, binascii.a2b_base64, 42) diff --git a/lib_pypy/pypy_test/test_locale.py b/lib_pypy/pypy_test/test_locale.py deleted file mode 100644 --- a/lib_pypy/pypy_test/test_locale.py +++ /dev/null @@ -1,79 +0,0 @@ -from __future__ import absolute_import -import py -import sys - -from lib_pypy.ctypes_config_cache import rebuild -rebuild.rebuild_one('locale.ctc.py') - -from lib_pypy import _locale - - -def setup_module(mod): - if sys.platform == 'darwin': - py.test.skip("Locale support on MacOSX is minimal and cannot be tested") - -class TestLocale: - def setup_class(cls): - cls.oldlocale = _locale.setlocale(_locale.LC_NUMERIC) - if sys.platform.startswith("win"): - cls.tloc = "en" - elif sys.platform.startswith("freebsd"): - cls.tloc = "en_US.US-ASCII" - else: - cls.tloc = "en_US.UTF8" - try: - _locale.setlocale(_locale.LC_NUMERIC, cls.tloc) - except _locale.Error: - py.test.skip("test locale %s not supported" % cls.tloc) - - def teardown_class(cls): - _locale.setlocale(_locale.LC_NUMERIC, cls.oldlocale) - - def test_format(self): - py.test.skip("XXX fix or kill me") - - def 
testformat(formatstr, value, grouping = 0, output=None): - if output: - print "%s %% %s =? %s ..." %\ - (repr(formatstr), repr(value), repr(output)), - else: - print "%s %% %s works? ..." % (repr(formatstr), repr(value)), - result = locale.format(formatstr, value, grouping = grouping) - assert result == output - - testformat("%f", 1024, grouping=1, output='1,024.000000') - testformat("%f", 102, grouping=1, output='102.000000') - testformat("%f", -42, grouping=1, output='-42.000000') - testformat("%+f", -42, grouping=1, output='-42.000000') - testformat("%20.f", -42, grouping=1, output=' -42') - testformat("%+10.f", -4200, grouping=1, output=' -4,200') - testformat("%-10.f", 4200, grouping=1, output='4,200 ') - - def test_getpreferredencoding(self): - py.test.skip("XXX fix or kill me") - # Invoke getpreferredencoding to make sure it does not cause exceptions - _locale.getpreferredencoding() - - # Test BSD Rune locale's bug for isctype functions. - def test_bsd_bug(self): - def teststrop(s, method, output): - print "%s.%s() =? %s ..." 
% (repr(s), method, repr(output)), - result = getattr(s, method)() - assert result == output - - oldlocale = _locale.setlocale(_locale.LC_CTYPE) - _locale.setlocale(_locale.LC_CTYPE, self.tloc) - try: - teststrop('\x20', 'isspace', True) - teststrop('\xa0', 'isspace', False) - teststrop('\xa1', 'isspace', False) - teststrop('\xc0', 'isalpha', False) - teststrop('\xc0', 'isalnum', False) - teststrop('\xc0', 'isupper', False) - teststrop('\xc0', 'islower', False) - teststrop('\xec\xa0\xbc', 'split', ['\xec\xa0\xbc']) - teststrop('\xed\x95\xa0', 'strip', '\xed\x95\xa0') - teststrop('\xcc\x85', 'lower', '\xcc\x85') - teststrop('\xed\x95\xa0', 'upper', '\xed\x95\xa0') - finally: - _locale.setlocale(_locale.LC_CTYPE, oldlocale) diff --git a/lib_pypy/pypy_test/test_site_extra.py b/lib_pypy/pypy_test/test_site_extra.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pypy_test/test_site_extra.py @@ -0,0 +1,13 @@ +import sys, os + + +def test_preimported_modules(): + lst = ['__builtin__', '_codecs', '_warnings', 'codecs', 'encodings', + 'exceptions', 'signal', 'sys', 'zipimport'] + g = os.popen("'%s' -c 'import sys; print sorted(sys.modules)'" % + (sys.executable,)) + real_data = g.read() + g.close() + for name in lst: + quoted_name = repr(name) + assert quoted_name in real_data diff --git a/lib_pypy/pypy_test/test_struct_extra.py b/lib_pypy/pypy_test/test_struct_extra.py deleted file mode 100644 --- a/lib_pypy/pypy_test/test_struct_extra.py +++ /dev/null @@ -1,25 +0,0 @@ -from __future__ import absolute_import -from lib_pypy import struct - -def test_simple(): - morezeros = '\x00' * (struct.calcsize('l')-4) - assert struct.pack(': big-endian, std. 
size & alignment - !: same as > - -The remaining chars indicate types of args and must match exactly; -these can be preceded by a decimal repeat count: - x: pad byte (no data); - c:char; - b:signed byte; - B:unsigned byte; - h:short; - H:unsigned short; - i:int; - I:unsigned int; - l:long; - L:unsigned long; - f:float; - d:double. -Special cases (preceding decimal count indicates length): - s:string (array of char); p: pascal string (with count byte). -Special case (only available in native format): - P:an integer type that is wide enough to hold a pointer. -Special case (not in native mode unless 'long long' in platform C): - q:long long; - Q:unsigned long long -Whitespace between formats is ignored. - -The variable struct.error is an exception raised on errors.""" - -import math, sys - -# TODO: XXX Find a way to get information on native sizes and alignments -class StructError(Exception): - pass -error = StructError -def unpack_int(data,index,size,le): - bytes = [ord(b) for b in data[index:index+size]] - if le == 'little': - bytes.reverse() - number = 0L - for b in bytes: - number = number << 8 | b - return int(number) - -def unpack_signed_int(data,index,size,le): - number = unpack_int(data,index,size,le) - max = 2**(size*8) - if number > 2**(size*8 - 1) - 1: - number = int(-1*(max - number)) - return number - -INFINITY = 1e200 * 1e200 -NAN = INFINITY / INFINITY - -def unpack_char(data,index,size,le): - return data[index:index+size] - -def pack_int(number,size,le): - x=number - res=[] - for i in range(size): - res.append(chr(x&0xff)) - x >>= 8 - if le == 'big': - res.reverse() - return ''.join(res) - -def pack_signed_int(number,size,le): - if not isinstance(number, (int,long)): - raise StructError,"argument for i,I,l,L,q,Q,h,H must be integer" - if number > 2**(8*size-1)-1 or number < -1*2**(8*size-1): - raise OverflowError,"Number:%i too large to convert" % number - return pack_int(number,size,le) - -def pack_unsigned_int(number,size,le): - if not 
isinstance(number, (int,long)): - raise StructError,"argument for i,I,l,L,q,Q,h,H must be integer" - if number < 0: - raise TypeError,"can't convert negative long to unsigned" - if number > 2**(8*size)-1: - raise OverflowError,"Number:%i too large to convert" % number - return pack_int(number,size,le) - -def pack_char(char,size,le): - return str(char) - -def isinf(x): - return x != 0.0 and x / 2 == x -def isnan(v): - return v != v*1.0 or (v == 1.0 and v == 2.0) - -def pack_float(x, size, le): - unsigned = float_pack(x, size) - result = [] - for i in range(8): - result.append(chr((unsigned >> (i * 8)) & 0xFF)) - if le == "big": - result.reverse() - return ''.join(result) - -def unpack_float(data, index, size, le): - binary = [data[i] for i in range(index, index + 8)] - if le == "big": - binary.reverse() - unsigned = 0 - for i in range(8): - unsigned |= ord(binary[i]) << (i * 8) - return float_unpack(unsigned, size, le) - -def round_to_nearest(x): - """Python 3 style round: round a float x to the nearest int, but - unlike the builtin Python 2.x round function: - - - return an int, not a float - - do round-half-to-even, not round-half-away-from-zero. - - We assume that x is finite and nonnegative; except wrong results - if you use this for negative x. 
- - """ - int_part = int(x) - frac_part = x - int_part - if frac_part > 0.5 or frac_part == 0.5 and int_part & 1 == 1: - int_part += 1 - return int_part - -def float_unpack(Q, size, le): - """Convert a 32-bit or 64-bit integer created - by float_pack into a Python float.""" - - if size == 8: - MIN_EXP = -1021 # = sys.float_info.min_exp - MAX_EXP = 1024 # = sys.float_info.max_exp - MANT_DIG = 53 # = sys.float_info.mant_dig - BITS = 64 - elif size == 4: - MIN_EXP = -125 # C's FLT_MIN_EXP - MAX_EXP = 128 # FLT_MAX_EXP - MANT_DIG = 24 # FLT_MANT_DIG - BITS = 32 - else: - raise ValueError("invalid size value") - - if Q >> BITS: - raise ValueError("input out of range") - - # extract pieces - sign = Q >> BITS - 1 - exp = (Q & ((1 << BITS - 1) - (1 << MANT_DIG - 1))) >> MANT_DIG - 1 - mant = Q & ((1 << MANT_DIG - 1) - 1) - - if exp == MAX_EXP - MIN_EXP + 2: - # nan or infinity - result = float('nan') if mant else float('inf') - elif exp == 0: - # subnormal or zero - result = math.ldexp(float(mant), MIN_EXP - MANT_DIG) - else: - # normal - mant += 1 << MANT_DIG - 1 - result = math.ldexp(float(mant), exp + MIN_EXP - MANT_DIG - 1) - return -result if sign else result - - -def float_pack(x, size): - """Convert a Python float x into a 64-bit unsigned integer - with the same byte representation.""" - - if size == 8: - MIN_EXP = -1021 # = sys.float_info.min_exp - MAX_EXP = 1024 # = sys.float_info.max_exp - MANT_DIG = 53 # = sys.float_info.mant_dig - BITS = 64 - elif size == 4: - MIN_EXP = -125 # C's FLT_MIN_EXP - MAX_EXP = 128 # FLT_MAX_EXP - MANT_DIG = 24 # FLT_MANT_DIG - BITS = 32 - else: - raise ValueError("invalid size value") - - sign = math.copysign(1.0, x) < 0.0 - if math.isinf(x): - mant = 0 - exp = MAX_EXP - MIN_EXP + 2 - elif math.isnan(x): - mant = 1 << (MANT_DIG-2) # other values possible - exp = MAX_EXP - MIN_EXP + 2 - elif x == 0.0: - mant = 0 - exp = 0 - else: - m, e = math.frexp(abs(x)) # abs(x) == m * 2**e - exp = e - (MIN_EXP - 1) - if exp > 0: - # Normal case. 
- mant = round_to_nearest(m * (1 << MANT_DIG)) - mant -= 1 << MANT_DIG - 1 - else: - # Subnormal case. - if exp + MANT_DIG - 1 >= 0: - mant = round_to_nearest(m * (1 << exp + MANT_DIG - 1)) - else: - mant = 0 - exp = 0 - - # Special case: rounding produced a MANT_DIG-bit mantissa. - assert 0 <= mant <= 1 << MANT_DIG - 1 - if mant == 1 << MANT_DIG - 1: - mant = 0 - exp += 1 - - # Raise on overflow (in some circumstances, may want to return - # infinity instead). - if exp >= MAX_EXP - MIN_EXP + 2: - raise OverflowError("float too large to pack in this format") - - # check constraints - assert 0 <= mant < 1 << MANT_DIG - 1 - assert 0 <= exp <= MAX_EXP - MIN_EXP + 2 - assert 0 <= sign <= 1 - return ((sign << BITS - 1) | (exp << MANT_DIG - 1)) | mant - - -big_endian_format = { - 'x':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None}, - 'b':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'B':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'c':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_char, 'unpack' : unpack_char}, - 's':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None}, - 'p':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None}, - 'h':{ 'size' : 2, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'H':{ 'size' : 2, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'i':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'I':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'l':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'L':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'q':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'Q':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'f':{ 
'size' : 4, 'alignment' : 0, 'pack' : pack_float, 'unpack' : unpack_float}, - 'd':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_float, 'unpack' : unpack_float}, - } -default = big_endian_format -formatmode={ '<' : (default, 'little'), - '>' : (default, 'big'), - '!' : (default, 'big'), - '=' : (default, sys.byteorder), - '@' : (default, sys.byteorder) - } - -def getmode(fmt): - try: - formatdef,endianness = formatmode[fmt[0]] - index = 1 - except KeyError: - formatdef,endianness = formatmode['@'] - index = 0 - return formatdef,endianness,index -def getNum(fmt,i): - num=None - cur = fmt[i] - while ('0'<= cur ) and ( cur <= '9'): - if num == None: - num = int(cur) - else: - num = 10*num + int(cur) - i += 1 - cur = fmt[i] - return num,i - -def calcsize(fmt): - """calcsize(fmt) -> int - Return size of C struct described by format string fmt. - See struct.__doc__ for more on format strings.""" - - formatdef,endianness,i = getmode(fmt) - num = 0 - result = 0 - while i string - Return string containing values v1, v2, ... packed according to fmt. - See struct.__doc__ for more on format strings.""" - formatdef,endianness,i = getmode(fmt) - args = list(args) - n_args = len(args) - result = [] - while i 0: - result += [chr(len(args[0])) + args[0][:num-1] + '\0'*padding] - else: - if num<255: - result += [chr(num-1) + args[0][:num-1]] - else: - result += [chr(255) + args[0][:num-1]] - args.pop(0) - else: - raise StructError,"arg for string format not a string" - - else: - if len(args) < num: - raise StructError,"insufficient arguments to pack" - for var in args[:num]: - result += [format['pack'](var,format['size'],endianness)] - args=args[num:] - num = None - i += 1 - if len(args) != 0: - raise StructError,"too many arguments for pack format" - return ''.join(result) - -def unpack(fmt,data): - """unpack(fmt, string) -> (v1, v2, ...) - Unpack the string, containing packed C structure data, according - to fmt. Requires len(string)==calcsize(fmt). 
- See struct.__doc__ for more on format strings.""" - formatdef,endianness,i = getmode(fmt) - j = 0 - num = 0 - result = [] - length= calcsize(fmt) - if length != len (data): - raise StructError,"unpack str size does not match format" - while i= num: - n = num-1 - result.append(data[j+1:j+n+1]) - j += num - else: - for n in range(num): - result += [format['unpack'](data,j,format['size'],endianness)] - j += format['size'] - - return tuple(result) - -def pack_into(fmt, buf, offset, *args): - data = pack(fmt, *args) - buffer(buf)[offset:offset+len(data)] = data - -def unpack_from(fmt, buf, offset=0): - size = calcsize(fmt) - data = buffer(buf)[offset:offset+size] - if len(data) != size: - raise error("unpack_from requires a buffer of at least %d bytes" - % (size,)) - return unpack(fmt, data) diff --git a/pypy/__init__.py b/pypy/__init__.py --- a/pypy/__init__.py +++ b/pypy/__init__.py @@ -1,1 +1,16 @@ # Empty + +# XXX Should be empty again, soon. +# XXX hack for win64: +# This patch must stay here until the END OF STAGE 1 +# When all tests work, this branch will be merged +# and the branch stage 2 is started, where we remove this patch. +import sys +if hasattr(sys, "maxsize"): + if sys.maxint != sys.maxsize: + sys.maxint = sys.maxsize + import warnings + warnings.warn("""\n +---> This win64 port is now in stage 1: sys.maxint was modified. +---> When pypy/__init__.py becomes empty again, we have reached stage 2. +""") diff --git a/pypy/annotation/classdef.py b/pypy/annotation/classdef.py --- a/pypy/annotation/classdef.py +++ b/pypy/annotation/classdef.py @@ -148,7 +148,6 @@ "the attribute here; the list of read locations is:\n" + '\n'.join([str(loc[0]) for loc in self.read_locations])) - class ClassDef(object): "Wraps a user class." 
diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -103,21 +103,13 @@ * A concurrent garbage collector (a lot of work) -Remove the GIL --------------- +STM, a.k.a. "remove the GIL" +---------------------------- -This is a major task that requires lots of thinking. However, few subprojects -can be potentially specified, unless a better plan can be thought out: +Removing the GIL --- or more precisely, a GIL-less thread-less solution --- +is `now work in progress.`__ Contributions welcome. -* A thread-aware garbage collector - -* Better RPython primitives for dealing with concurrency - -* JIT passes to remove locks on objects - -* (maybe) implement locking in Python interpreter - -* alternatively, look at Software Transactional Memory +.. __: http://pypy.org/tmdonate.html Introduce new benchmarks ------------------------ diff --git a/pypy/doc/sandbox.rst b/pypy/doc/sandbox.rst --- a/pypy/doc/sandbox.rst +++ b/pypy/doc/sandbox.rst @@ -82,7 +82,10 @@ In pypy/translator/goal:: - ./translate.py --sandbox targetpypystandalone.py + ./translate.py -O2 --sandbox targetpypystandalone.py + +If you don't have a regular PyPy installed, you should, because it's +faster to translate, but you can also run ``python translate.py`` instead. 
To run it, use the tools in the pypy/translator/sandbox directory:: diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1336,7 +1336,7 @@ if not self.is_true(self.isinstance(w_obj, self.w_str)): raise OperationError(self.w_TypeError, self.wrap('argument must be a string')) - return self.str_w(w_obj) + return self.str_w(w_obj) def unicode_w(self, w_obj): return w_obj.unicode_w(self) diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py --- a/pypy/interpreter/test/test_objspace.py +++ b/pypy/interpreter/test/test_objspace.py @@ -312,8 +312,8 @@ mods = space.get_builtinmodule_to_install() assert '__pypy__' in mods # real builtin - assert 'array' not in mods # in lib_pypy - assert 'faked+array' not in mods # in lib_pypy + assert '_functools' not in mods # in lib_pypy + assert 'faked+_functools' not in mods # in lib_pypy assert 'this_doesnt_exist' not in mods # not in lib_pypy assert 'faked+this_doesnt_exist' in mods # not in lib_pypy, but in # ALL_BUILTIN_MODULES diff --git a/pypy/interpreter/test/test_zzpickle_and_slow.py b/pypy/interpreter/test/test_zzpickle_and_slow.py --- a/pypy/interpreter/test/test_zzpickle_and_slow.py +++ b/pypy/interpreter/test/test_zzpickle_and_slow.py @@ -75,6 +75,7 @@ class AppTestInterpObjectPickling: pytestmark = py.test.mark.skipif("config.option.runappdirect") def setup_class(cls): + cls.space = gettestobjspace(usemodules=['struct']) _attach_helpers(cls.space) def teardown_class(cls): diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -5,7 +5,7 @@ BoxInt, Box, BoxPtr, JitCellToken, TargetToken, ConstInt, ConstPtr, - BoxObj, Const, + BoxObj, ConstObj, BoxFloat, ConstFloat) from pypy.jit.metainterp.resoperation import ResOperation, rop from 
pypy.jit.metainterp.typesystem import deref @@ -16,9 +16,11 @@ from pypy.rpython.annlowlevel import llhelper from pypy.rpython.llinterp import LLException from pypy.jit.codewriter import heaptracker, longlong +from pypy.rlib import longlong2float from pypy.rlib.rarithmetic import intmask, is_valid_int from pypy.jit.backend.detect_cpu import autodetect_main_model_and_size + def boxfloat(x): return BoxFloat(longlong.getfloatstorage(x)) @@ -1655,13 +1657,28 @@ def test_read_timestamp(self): if not self.cpu.supports_longlong: py.test.skip("longlong test") + # so we stretch the time a little bit. + # On my virtual Parallels machine in a 2GHz Core i7 Mac Mini, + # the test starts working at delay == 21670 and stops at 20600000. + # We take the geometric mean value. + from math import log, exp + delay_min = 21670 + delay_max = 20600000 + delay = int(exp((log(delay_min)+log(delay_max))/2)) + def wait_a_bit(): + for i in xrange(delay): pass + else: + def wait_a_bit(): + pass if longlong.is_64_bit: got1 = self.execute_operation(rop.READ_TIMESTAMP, [], 'int') + wait_a_bit() got2 = self.execute_operation(rop.READ_TIMESTAMP, [], 'int') res1 = got1.getint() res2 = got2.getint() else: got1 = self.execute_operation(rop.READ_TIMESTAMP, [], 'float') + wait_a_bit() got2 = self.execute_operation(rop.READ_TIMESTAMP, [], 'float') res1 = got1.getlonglong() res2 = got2.getlonglong() @@ -1758,6 +1775,12 @@ [BoxPtr(x)], 'int').value assert res == -19 + def test_convert_float_bytes(self): + t = 'int' if longlong.is_64_bit else 'float' + res = self.execute_operation(rop.CONVERT_FLOAT_BYTES_TO_LONGLONG, + [boxfloat(2.5)], t).value + assert res == longlong2float.float2longlong(2.5) + def test_ooops_non_gc(self): x = lltype.malloc(lltype.Struct('x'), flavor='raw') v = heaptracker.adr2int(llmemory.cast_ptr_to_adr(x)) @@ -3326,6 +3349,7 @@ lines = [line for line in mc if line.count('\t') >= 2] checkops(lines, self.bridge_loop_instructions) + def test_compile_bridge_with_target(self): # This test 
creates a loopy piece of code in a bridge, and builds another # unrelated loop that ends in a jump directly to this loopy bit of code. diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py --- a/pypy/jit/backend/test/test_random.py +++ b/pypy/jit/backend/test/test_random.py @@ -449,6 +449,7 @@ OPERATIONS.append(CastFloatToIntOperation(rop.CAST_FLOAT_TO_INT)) OPERATIONS.append(CastIntToFloatOperation(rop.CAST_INT_TO_FLOAT)) +OPERATIONS.append(CastFloatToIntOperation(rop.CONVERT_FLOAT_BYTES_TO_LONGLONG)) OperationBuilder.OPERATIONS = OPERATIONS @@ -502,11 +503,11 @@ else: assert 0, "unknown backend %r" % pytest.config.option.backend -# ____________________________________________________________ +# ____________________________________________________________ class RandomLoop(object): dont_generate_more = False - + def __init__(self, cpu, builder_factory, r, startvars=None): self.cpu = cpu if startvars is None: diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -606,7 +606,7 @@ else: assert token struct.number = compute_unique_id(token) - self.loop_run_counters.append(struct) + self.loop_run_counters.append(struct) return struct def _find_failure_recovery_bytecode(self, faildescr): @@ -665,7 +665,7 @@ ResOperation(rop.SETFIELD_RAW, [c_adr, box2], None, descr=self.debug_counter_descr)] operations.extend(ops) - + @specialize.argtype(1) def _inject_debugging_code(self, looptoken, operations, tp, number): if self._debug: @@ -836,8 +836,8 @@ self.mc.MOVSD_sx(0, loc.value) elif WORD == 4 and isinstance(loc, StackLoc) and loc.get_width() == 8: # XXX evil trick - self.mc.PUSH_b(get_ebp_ofs(loc.position)) - self.mc.PUSH_b(get_ebp_ofs(loc.position + 1)) + self.mc.PUSH_b(loc.value + 4) + self.mc.PUSH_b(loc.value) else: self.mc.PUSH(loc) @@ -847,8 +847,8 @@ self.mc.ADD_ri(esp.value, 8) # = size of doubles elif WORD == 4 and 
isinstance(loc, StackLoc) and loc.get_width() == 8: # XXX evil trick - self.mc.POP_b(get_ebp_ofs(loc.position + 1)) - self.mc.POP_b(get_ebp_ofs(loc.position)) + self.mc.POP_b(loc.value) + self.mc.POP_b(loc.value + 4) else: self.mc.POP(loc) @@ -1242,6 +1242,15 @@ self.mc.MOVD_xr(resloc.value, loc0.value) self.mc.CVTSS2SD_xx(resloc.value, resloc.value) + def genop_convert_float_bytes_to_longlong(self, op, arglocs, resloc): + loc0, = arglocs + if longlong.is_64_bit: + assert isinstance(resloc, RegLoc) + assert isinstance(loc0, RegLoc) + self.mc.MOVD(resloc, loc0) + else: + self.mov(loc0, resloc) + def genop_guard_int_is_true(self, op, guard_op, guard_token, arglocs, resloc): guard_opnum = guard_op.getopnum() self.mc.CMP(arglocs[0], imm0) @@ -1954,8 +1963,6 @@ mc.PUSH_r(ebx.value) elif IS_X86_64: mc.MOV_rr(edi.value, ebx.value) - # XXX: Correct to only align the stack on 64-bit? - mc.AND_ri(esp.value, -16) else: raise AssertionError("Shouldn't happen") @@ -2117,9 +2124,12 @@ # First, we need to save away the registers listed in # 'save_registers' that are not callee-save. XXX We assume that # the XMM registers won't be modified. We store them in - # [ESP+4], [ESP+8], etc., leaving enough room in [ESP] for the - # single argument to closestack_addr below. - p = WORD + # [ESP+4], [ESP+8], etc.; on x86-32 we leave enough room in [ESP] + # for the single argument to closestack_addr below. + if IS_X86_32: + p = WORD + elif IS_X86_64: + p = 0 for reg in self._regalloc.rm.save_around_call_regs: if reg in save_registers: self.mc.MOV_sr(p, reg.value) @@ -2174,7 +2184,10 @@ # self._emit_call(-1, imm(self.releasegil_addr), args) # Finally, restore the registers saved above. 
- p = WORD + if IS_X86_32: + p = WORD + elif IS_X86_64: + p = 0 for reg in self._regalloc.rm.save_around_call_regs: if reg in save_registers: self.mc.MOV_rs(reg.value, p) diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -708,6 +708,18 @@ consider_cast_singlefloat_to_float = consider_cast_int_to_float + def consider_convert_float_bytes_to_longlong(self, op): + if longlong.is_64_bit: + loc0 = self.xrm.make_sure_var_in_reg(op.getarg(0)) + loc1 = self.rm.force_allocate_reg(op.result) + self.Perform(op, [loc0], loc1) + self.xrm.possibly_free_var(op.getarg(0)) + else: + loc0 = self.xrm.loc(op.getarg(0)) + loc1 = self.xrm.force_allocate_reg(op.result) + self.Perform(op, [loc0], loc1) + self.xrm.possibly_free_var(op.getarg(0)) + def _consider_llong_binop_xx(self, op): # must force both arguments into xmm registers, because we don't # know if they will be suitably aligned. Exception: if the second diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py --- a/pypy/jit/backend/x86/rx86.py +++ b/pypy/jit/backend/x86/rx86.py @@ -601,9 +601,12 @@ CVTSS2SD_xb = xmminsn('\xF3', rex_nw, '\x0F\x5A', register(1, 8), stack_bp(2)) - MOVD_rx = xmminsn('\x66', rex_nw, '\x0F\x7E', register(2, 8), register(1), '\xC0') - MOVD_xr = xmminsn('\x66', rex_nw, '\x0F\x6E', register(1, 8), register(2), '\xC0') - MOVD_xb = xmminsn('\x66', rex_nw, '\x0F\x6E', register(1, 8), stack_bp(2)) + # These work on machine sized registers, so MOVD is actually MOVQ + # when running on 64 bits. 
Note a bug in the Intel documentation: + # http://lists.gnu.org/archive/html/bug-binutils/2007-07/msg00095.html + MOVD_rx = xmminsn('\x66', rex_w, '\x0F\x7E', register(2, 8), register(1), '\xC0') + MOVD_xr = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), register(2), '\xC0') + MOVD_xb = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), stack_bp(2)) PSRAD_xi = xmminsn('\x66', rex_nw, '\x0F\x72', register(1), '\xE0', immediate(2, 'b')) diff --git a/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py b/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py --- a/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py +++ b/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py @@ -182,6 +182,12 @@ filename = str(testdir.join(FILENAME % methname)) g = open(inputname, 'w') g.write('\x09.string "%s"\n' % BEGIN_TAG) + # + if instrname == 'MOVD' and self.WORD == 8: + instrname = 'MOVQ' + if argmodes == 'xb': + py.test.skip('"as" uses an undocumented alternate encoding??') + # for args in args_lists: suffix = "" ## all = instr.as_all_suffixes @@ -229,9 +235,6 @@ # movq $xxx, %rax => movl $xxx, %eax suffix = 'l' ops[1] = reduce_to_32bit(ops[1]) - if instrname.lower() == 'movd': - ops[0] = reduce_to_32bit(ops[0]) - ops[1] = reduce_to_32bit(ops[1]) # op = '\t%s%s %s%s' % (instrname.lower(), suffix, ', '.join(ops), following) diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -291,6 +291,11 @@ op1 = SpaceOperation('-live-', [], None) return [op, op1] + def _noop_rewrite(self, op): + return op + + rewrite_op_convert_float_bytes_to_longlong = _noop_rewrite + # ---------- # Various kinds of calls diff --git a/pypy/jit/codewriter/test/test_flatten.py b/pypy/jit/codewriter/test/test_flatten.py --- a/pypy/jit/codewriter/test/test_flatten.py +++ b/pypy/jit/codewriter/test/test_flatten.py @@ -968,6 +968,21 @@ int_return %i2 """, transform=True) + def 
test_convert_float_bytes_to_int(self): + from pypy.rlib.longlong2float import float2longlong + def f(x): + return float2longlong(x) + if longlong.is_64_bit: + result_var = "%i0" + return_op = "int_return" + else: + result_var = "%f1" + return_op = "float_return" + self.encoding_test(f, [25.0], """ + convert_float_bytes_to_longlong %%f0 -> %(result_var)s + %(return_op)s %(result_var)s + """ % {"result_var": result_var, "return_op": return_op}) + def check_force_cast(FROM, TO, operations, value): """Check that the test is correctly written...""" diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -1,15 +1,16 @@ +from pypy.jit.codewriter import heaptracker, longlong +from pypy.jit.codewriter.jitcode import JitCode, SwitchDictDescr +from pypy.jit.metainterp.compile import ResumeAtPositionDescr +from pypy.jit.metainterp.jitexc import JitException, get_llexception, reraise +from pypy.rlib import longlong2float +from pypy.rlib.debug import debug_start, debug_stop, ll_assert, make_sure_not_resized +from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.rarithmetic import intmask, LONG_BIT, r_uint, ovfcheck +from pypy.rlib.rtimer import read_timestamp from pypy.rlib.unroll import unrolling_iterable -from pypy.rlib.rtimer import read_timestamp -from pypy.rlib.rarithmetic import intmask, LONG_BIT, r_uint, ovfcheck -from pypy.rlib.objectmodel import we_are_translated -from pypy.rlib.debug import debug_start, debug_stop, ll_assert -from pypy.rlib.debug import make_sure_not_resized from pypy.rpython.lltypesystem import lltype, llmemory, rclass from pypy.rpython.lltypesystem.lloperation import llop -from pypy.jit.codewriter.jitcode import JitCode, SwitchDictDescr -from pypy.jit.codewriter import heaptracker, longlong -from pypy.jit.metainterp.jitexc import JitException, get_llexception, reraise -from pypy.jit.metainterp.compile import ResumeAtPositionDescr + 
def arguments(*argtypes, **kwds): resulttype = kwds.pop('returns', None) @@ -20,6 +21,9 @@ return function return decorate +LONGLONG_TYPECODE = 'i' if longlong.is_64_bit else 'f' + + class LeaveFrame(JitException): pass @@ -663,6 +667,11 @@ a = float(a) return longlong.getfloatstorage(a) + @arguments("f", returns=LONGLONG_TYPECODE) + def bhimpl_convert_float_bytes_to_longlong(a): + a = longlong.getrealfloat(a) + return longlong2float.float2longlong(a) + # ---------- # control flow operations @@ -1309,7 +1318,7 @@ def bhimpl_copyunicodecontent(cpu, src, dst, srcstart, dststart, length): cpu.bh_copyunicodecontent(src, dst, srcstart, dststart, length) - @arguments(returns=(longlong.is_64_bit and "i" or "f")) + @arguments(returns=LONGLONG_TYPECODE) def bhimpl_ll_read_timestamp(): return read_timestamp() diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -223,6 +223,7 @@ 'cast_float_to_singlefloat', 'cast_singlefloat_to_float', 'float_neg', 'float_abs', 'cast_ptr_to_int', 'cast_int_to_ptr', + 'convert_float_bytes_to_longlong', ]: exec py.code.Source(''' @arguments("box") diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -419,6 +419,7 @@ 'CAST_INT_TO_FLOAT/1', # need some messy code in the backend 'CAST_FLOAT_TO_SINGLEFLOAT/1', 'CAST_SINGLEFLOAT_TO_FLOAT/1', + 'CONVERT_FLOAT_BYTES_TO_LONGLONG/1', # 'INT_LT/2b', 'INT_LE/2b', diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -3,6 +3,7 @@ import py from pypy import conftest +from pypy.jit.codewriter import longlong from pypy.jit.codewriter.policy import JitPolicy, StopAtXPolicy from pypy.jit.metainterp import pyjitpl, history from pypy.jit.metainterp.optimizeopt import 
ALL_OPTS_DICT @@ -14,6 +15,7 @@ loop_invariant, elidable, promote, jit_debug, assert_green, AssertGreenFailed, unroll_safe, current_trace_length, look_inside_iff, isconstant, isvirtual, promote_string, set_param, record_known_class) +from pypy.rlib.longlong2float import float2longlong from pypy.rlib.rarithmetic import ovfcheck, is_valid_int from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rpython.ootypesystem import ootype @@ -292,7 +294,7 @@ assert res == f(6, sys.maxint, 32, 48) res = self.meta_interp(f, [sys.maxint, 6, 32, 48]) assert res == f(sys.maxint, 6, 32, 48) - + def test_loop_invariant_intbox(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) @@ -953,7 +955,7 @@ self.meta_interp(f, [20], repeat=7) # the loop and the entry path as a single trace self.check_jitcell_token_count(1) - + # we get: # ENTER - compile the new loop and the entry bridge # ENTER - compile the leaving path @@ -1470,7 +1472,7 @@ assert res == f(299) self.check_resops(guard_class=0, guard_nonnull=4, guard_nonnull_class=4, guard_isnull=2) - + def test_merge_guardnonnull_guardvalue(self): from pypy.rlib.objectmodel import instantiate @@ -1499,7 +1501,7 @@ assert res == f(299) self.check_resops(guard_value=4, guard_class=0, guard_nonnull=4, guard_nonnull_class=0, guard_isnull=2) - + def test_merge_guardnonnull_guardvalue_2(self): from pypy.rlib.objectmodel import instantiate @@ -1528,7 +1530,7 @@ assert res == f(299) self.check_resops(guard_value=4, guard_class=0, guard_nonnull=4, guard_nonnull_class=0, guard_isnull=2) - + def test_merge_guardnonnull_guardclass_guardvalue(self): from pypy.rlib.objectmodel import instantiate @@ -2636,7 +2638,7 @@ return sa assert self.meta_interp(f, [20]) == f(20) self.check_resops(int_lt=6, int_le=2, int_ge=4, int_gt=3) - + def test_intbounds_not_generalized2(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'node']) @@ -2677,7 +2679,7 @@ assert self.meta_interp(f, [20, 3]) == f(20, 3) 
self.check_jitcell_token_count(1) self.check_target_token_count(5) - + def test_max_retrace_guards(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'a']) @@ -2815,7 +2817,7 @@ for cell in get_stats().get_all_jitcell_tokens(): # Initialal trace with two labels and 5 retraces assert len(cell.target_tokens) <= 7 - + def test_nested_retrace(self): myjitdriver = JitDriver(greens = ['pc'], reds = ['n', 'a', 'i', 'j', 'sa']) @@ -3793,6 +3795,16 @@ res = self.interp_operations(g, [1]) assert res == 3 + def test_float2longlong(self): + def f(n): + return float2longlong(n) + + for x in [2.5, float("nan"), -2.5, float("inf")]: + # There are tests elsewhere to verify the correctness of this. + expected = float2longlong(x) + res = self.interp_operations(f, [x]) + assert longlong.getfloatstorage(res) == expected + class TestLLtype(BaseLLtypeTests, LLJitMixin): def test_tagged(self): diff --git a/pypy/module/__builtin__/interp_memoryview.py b/pypy/module/__builtin__/interp_memoryview.py --- a/pypy/module/__builtin__/interp_memoryview.py +++ b/pypy/module/__builtin__/interp_memoryview.py @@ -69,6 +69,10 @@ return W_MemoryView(buf) def descr_buffer(self, space): + """Note that memoryview() objects in PyPy support buffer(), whereas + not in CPython; but CPython supports passing memoryview() to most + built-in functions that accept buffers, with the notable exception + of the buffer() built-in.""" return space.wrap(self.buf) def descr_tobytes(self, space): diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -1,9 +1,10 @@ import py - +from pypy.conftest import gettestobjspace class AppTestAST: def setup_class(cls): + cls.space = gettestobjspace(usemodules=['struct']) cls.w_ast = cls.space.appexec([], """(): import _ast return _ast""") diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- 
a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -4,7 +4,7 @@ class AppTestCodecs: def setup_class(cls): - space = gettestobjspace(usemodules=('unicodedata',)) + space = gettestobjspace(usemodules=('unicodedata', 'struct')) cls.space = space def test_register_noncallable(self): diff --git a/pypy/module/_continuation/test/test_zpickle.py b/pypy/module/_continuation/test/test_zpickle.py --- a/pypy/module/_continuation/test/test_zpickle.py +++ b/pypy/module/_continuation/test/test_zpickle.py @@ -106,8 +106,9 @@ version = 0 def setup_class(cls): - cls.space = gettestobjspace(usemodules=('_continuation',), + cls.space = gettestobjspace(usemodules=('_continuation', 'struct'), CALL_METHOD=True) + cls.space.config.translation.continuation = True cls.space.appexec([], """(): global continulet, A, __name__ diff --git a/pypy/module/_hashlib/test/test_hashlib.py b/pypy/module/_hashlib/test/test_hashlib.py --- a/pypy/module/_hashlib/test/test_hashlib.py +++ b/pypy/module/_hashlib/test/test_hashlib.py @@ -3,7 +3,7 @@ class AppTestHashlib: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['_hashlib']) + cls.space = gettestobjspace(usemodules=['_hashlib', 'array', 'struct']) def test_simple(self): import _hashlib diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py --- a/pypy/module/_io/test/test_io.py +++ b/pypy/module/_io/test/test_io.py @@ -158,7 +158,7 @@ class AppTestOpen: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['_io', '_locale']) + cls.space = gettestobjspace(usemodules=['_io', '_locale', 'array', 'struct']) tmpfile = udir.join('tmpfile').ensure() cls.w_tmpfile = cls.space.wrap(str(tmpfile)) diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -92,7 +92,8 @@ class 
AppTestSocketConnection(BaseConnectionTest): def setup_class(cls): - space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal')) + space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal', + 'struct', 'array')) cls.space = space cls.w_connections = space.newlist([]) diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -6,7 +6,7 @@ from pypy.rpython.lltypesystem import lltype, rffi def setup_module(mod): - mod.space = gettestobjspace(usemodules=['_socket', 'array']) + mod.space = gettestobjspace(usemodules=['_socket', 'array', 'struct']) global socket import socket mod.w_socket = space.appexec([], "(): import _socket as m; return m") @@ -372,10 +372,9 @@ def test_socket_connect(self): import _socket, os s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM, 0) - # XXX temporarily we use python.org to test, will have more robust tests - # in the absence of a network connection later when more parts of the - # socket API are implemented. Currently skip the test if there is no - # connection. + # it would be nice to have a test which works even if there is no + # network connection. However, this one is "good enough" for now. Skip + # it if there is no connection. 
try: s.connect(("www.python.org", 80)) except _socket.gaierror, ex: diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -1,7 +1,6 @@ from pypy.conftest import gettestobjspace import os import py -from pypy.rlib.rarithmetic import is_valid_int class AppTestSSL: @@ -31,7 +30,6 @@ assert isinstance(_ssl.SSL_ERROR_EOF, int) assert isinstance(_ssl.SSL_ERROR_INVALID_ERROR_CODE, int) - assert is_valid_int(_ssl.OPENSSL_VERSION_NUMBER) assert isinstance(_ssl.OPENSSL_VERSION_INFO, tuple) assert len(_ssl.OPENSSL_VERSION_INFO) == 5 assert isinstance(_ssl.OPENSSL_VERSION, str) @@ -92,7 +90,7 @@ class AppTestConnectedSSL: def setup_class(cls): - space = gettestobjspace(usemodules=('_ssl', '_socket')) + space = gettestobjspace(usemodules=('_ssl', '_socket', 'struct')) cls.space = space def setup_method(self, method): @@ -181,7 +179,7 @@ # to exercise the poll() calls def setup_class(cls): - space = gettestobjspace(usemodules=('_ssl', '_socket')) + space = gettestobjspace(usemodules=('_ssl', '_socket', 'struct')) cls.space = space cls.space.appexec([], """(): import socket; socket.setdefaulttimeout(1) diff --git a/pypy/module/cpyext/test/conftest.py b/pypy/module/cpyext/test/conftest.py --- a/pypy/module/cpyext/test/conftest.py +++ b/pypy/module/cpyext/test/conftest.py @@ -10,7 +10,7 @@ return False def pytest_funcarg__space(request): - return gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi']) + return gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array']) def pytest_funcarg__api(request): return request.cls.api diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -19,7 +19,8 @@ class BaseApiTest(LeakCheckingTest): def setup_class(cls): - cls.space = space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi']) + 
cls.space = space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', + 'array']) # warm up reference counts: # - the posix module allocates a HCRYPTPROV on Windows diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -1,3 +1,4 @@ +from pypy.conftest import gettestobjspace from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase import py diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -35,7 +35,7 @@ class AppTestApi: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi']) + cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array']) from pypy.rlib.libffi import get_libc_name cls.w_libc = cls.space.wrap(get_libc_name()) @@ -165,8 +165,9 @@ return leaking class AppTestCpythonExtensionBase(LeakCheckingTest): + def setup_class(cls): - cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi']) + cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array']) cls.space.getbuiltinmodule("cpyext") from pypy.module.imp.importing import importhook importhook(cls.space, "os") # warm up reference counts diff --git a/pypy/module/cpyext/test/test_import.py b/pypy/module/cpyext/test/test_import.py --- a/pypy/module/cpyext/test/test_import.py +++ b/pypy/module/cpyext/test/test_import.py @@ -19,7 +19,7 @@ space.wrap('__name__'))) == 'foobar' def test_getmoduledict(self, space, api): - testmod = "binascii" + testmod = "_functools" w_pre_dict = api.PyImport_GetModuleDict() assert not space.is_true(space.contains(w_pre_dict, space.wrap(testmod))) diff --git a/pypy/module/fcntl/test/test_fcntl.py b/pypy/module/fcntl/test/test_fcntl.py --- a/pypy/module/fcntl/test/test_fcntl.py +++ 
b/pypy/module/fcntl/test/test_fcntl.py @@ -13,7 +13,7 @@ class AppTestFcntl: def setup_class(cls): - space = gettestobjspace(usemodules=('fcntl', 'array')) + space = gettestobjspace(usemodules=('fcntl', 'array', 'struct')) cls.space = space tmpprefix = str(udir.ensure('test_fcntl', dir=1).join('tmp_')) cls.w_tmp = space.wrap(tmpprefix) diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -987,6 +987,10 @@ os.environ['LANG'] = oldlang class AppTestImportHooks(object): + + def setup_class(cls): + cls.space = gettestobjspace(usemodules=('struct',)) + def test_meta_path(self): tried_imports = [] class Importer(object): diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py --- a/pypy/module/itertools/test/test_itertools.py +++ b/pypy/module/itertools/test/test_itertools.py @@ -891,7 +891,7 @@ class AppTestItertools27: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['itertools']) + cls.space = gettestobjspace(usemodules=['itertools', 'struct']) if cls.space.is_true(cls.space.appexec([], """(): import sys; return sys.version_info < (2, 7) """)): diff --git a/pypy/module/marshal/test/make_test_marshal.py b/pypy/module/marshal/test/make_test_marshal.py deleted file mode 100644 --- a/pypy/module/marshal/test/make_test_marshal.py +++ /dev/null @@ -1,78 +0,0 @@ - -TESTCASES = """\ - None - False - True - StopIteration - Ellipsis - 42 - -17 - sys.maxint - -1.25 - -1.25 #2 - 2+5j - 2+5j #2 - 42L - -1234567890123456789012345678901234567890L - hello # not interned - "hello" - () - (1, 2) - [] - [3, 4] - {} - {5: 6, 7: 8} - func.func_code - scopefunc.func_code - u'hello' - set() - set([1, 2]) - frozenset() - frozenset([3, 4]) -""".strip().split('\n') - -def readable(s): - for c, repl in ( - ("'", '_quote_'), ('"', '_Quote_'), (':', '_colon_'), ('.', '_dot_'), - ('[', '_list_'), (']', '_tsil_'), 
('{', '_dict_'), ('}', '_tcid_'), - ('-', '_minus_'), ('+', '_plus_'), - (',', '_comma_'), ('(', '_brace_'), (')', '_ecarb_') ): - s = s.replace(c, repl) - lis = list(s) - for i, c in enumerate(lis): - if c.isalnum() or c == '_': - continue - lis[i] = '_' - return ''.join(lis) - -print """class AppTestMarshal: -""" -for line in TESTCASES: - line = line.strip() - name = readable(line) - version = '' - extra = '' - if line.endswith('#2'): - version = ', 2' - extra = '; assert len(s) in (9, 17)' - src = '''\ - def test_%(name)s(self): - import sys - hello = "he" - hello += "llo" - def func(x): - return lambda y: x+y - scopefunc = func(42) - import marshal, StringIO - case = %(line)s - print "case: %%-30s func=%(name)s" %% (case, ) - s = marshal.dumps(case%(version)s)%(extra)s - x = marshal.loads(s) - assert x == case - f = StringIO.StringIO() - marshal.dump(case, f) - f.seek(0) - x = marshal.load(f) - assert x == case -''' % {'name': name, 'line': line, 'version' : version, 'extra': extra} - print src diff --git a/pypy/module/math/test/test_math.py b/pypy/module/math/test/test_math.py --- a/pypy/module/math/test/test_math.py +++ b/pypy/module/math/test/test_math.py @@ -6,7 +6,7 @@ class AppTestMath: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['math']) + cls.space = gettestobjspace(usemodules=['math', 'struct']) cls.w_cases = cls.space.wrap(test_direct.MathTests.TESTCASES) cls.w_consistent_host = cls.space.wrap(test_direct.consistent_host) diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -37,26 +37,44 @@ 'True_': 'types.Bool.True', 'False_': 'types.Bool.False', + 'typeinfo': 'interp_dtype.get_dtype_cache(space).w_typeinfo', + 'generic': 'interp_boxes.W_GenericBox', 'number': 'interp_boxes.W_NumberBox', 'integer': 'interp_boxes.W_IntegerBox', 'signedinteger': 'interp_boxes.W_SignedIntegerBox', 'unsignedinteger': 
'interp_boxes.W_UnsignedIntegerBox', 'bool_': 'interp_boxes.W_BoolBox', + 'bool8': 'interp_boxes.W_BoolBox', 'int8': 'interp_boxes.W_Int8Box', + 'byte': 'interp_boxes.W_Int8Box', 'uint8': 'interp_boxes.W_UInt8Box', + 'ubyte': 'interp_boxes.W_UInt8Box', 'int16': 'interp_boxes.W_Int16Box', + 'short': 'interp_boxes.W_Int16Box', 'uint16': 'interp_boxes.W_UInt16Box', + 'ushort': 'interp_boxes.W_UInt16Box', 'int32': 'interp_boxes.W_Int32Box', + 'intc': 'interp_boxes.W_Int32Box', 'uint32': 'interp_boxes.W_UInt32Box', + 'uintc': 'interp_boxes.W_UInt32Box', 'int64': 'interp_boxes.W_Int64Box', 'uint64': 'interp_boxes.W_UInt64Box', + 'longlong': 'interp_boxes.W_LongLongBox', + 'ulonglong': 'interp_boxes.W_ULongLongBox', 'int_': 'interp_boxes.W_LongBox', 'inexact': 'interp_boxes.W_InexactBox', 'floating': 'interp_boxes.W_FloatingBox', 'float_': 'interp_boxes.W_Float64Box', 'float32': 'interp_boxes.W_Float32Box', 'float64': 'interp_boxes.W_Float64Box', + 'intp': 'types.IntP.BoxType', + 'uintp': 'types.UIntP.BoxType', + 'flexible': 'interp_boxes.W_FlexibleBox', + 'character': 'interp_boxes.W_CharacterBox', + 'str_': 'interp_boxes.W_StringBox', + 'unicode_': 'interp_boxes.W_UnicodeBox', + 'void': 'interp_boxes.W_VoidBox', } # ufuncs diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -33,7 +33,7 @@ pass SINGLE_ARG_FUNCTIONS = ["sum", "prod", "max", "min", "all", "any", - "unegative", "flat"] + "unegative", "flat", "tostring"] TWO_ARG_FUNCTIONS = ["dot", 'take'] class FakeSpace(object): @@ -51,6 +51,8 @@ w_long = "long" w_tuple = 'tuple' w_slice = "slice" + w_str = "str" + w_unicode = "unicode" def __init__(self): """NOT_RPYTHON""" @@ -91,8 +93,12 @@ return BoolObject(obj) elif isinstance(obj, int): return IntObject(obj) + elif isinstance(obj, long): + return LongObject(obj) elif isinstance(obj, W_Root): return obj + elif isinstance(obj, str): + return 
StringObject(obj) raise NotImplementedError def newlist(self, items): @@ -120,6 +126,11 @@ return int(w_obj.floatval) raise NotImplementedError + def str_w(self, w_obj): + if isinstance(w_obj, StringObject): + return w_obj.v + raise NotImplementedError + def int(self, w_obj): if isinstance(w_obj, IntObject): return w_obj @@ -151,7 +162,13 @@ return instantiate(klass) def newtuple(self, list_w): - raise ValueError + return ListObject(list_w) + + def newdict(self): + return {} + + def setitem(self, dict, item, value): + dict[item] = value def len_w(self, w_obj): if isinstance(w_obj, ListObject): @@ -178,6 +195,11 @@ def __init__(self, intval): self.intval = intval +class LongObject(W_Root): + tp = FakeSpace.w_long + def __init__(self, intval): + self.intval = intval + class ListObject(W_Root): tp = FakeSpace.w_list def __init__(self, items): @@ -190,6 +212,11 @@ self.stop = stop self.step = step +class StringObject(W_Root): + tp = FakeSpace.w_str + def __init__(self, v): + self.v = v + class InterpreterState(object): def __init__(self, code): self.code = code @@ -407,6 +434,9 @@ w_res = neg.call(interp.space, [arr]) elif self.name == "flat": w_res = arr.descr_get_flatiter(interp.space) + elif self.name == "tostring": + arr.descr_tostring(interp.space) + w_res = None else: assert False # unreachable code elif self.name in TWO_ARG_FUNCTIONS: diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -1,24 +1,25 @@ from pypy.interpreter.baseobjspace import Wrappable -from pypy.interpreter.error import operationerrfmt +from pypy.interpreter.error import operationerrfmt, OperationError from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef from pypy.objspace.std.floattype import float_typedef +from pypy.objspace.std.stringtype import str_typedef +from pypy.objspace.std.unicodetype import 
unicode_typedef, unicode_from_object from pypy.objspace.std.inttype import int_typedef from pypy.rlib.rarithmetic import LONG_BIT from pypy.tool.sourcetools import func_with_new_name - MIXIN_32 = (int_typedef,) if LONG_BIT == 32 else () MIXIN_64 = (int_typedef,) if LONG_BIT == 64 else () def new_dtype_getter(name): - def get_dtype(space): + def _get_dtype(space): from pypy.module.micronumpy.interp_dtype import get_dtype_cache return getattr(get_dtype_cache(space), "w_%sdtype" % name) def new(space, w_subtype, w_value): - dtype = get_dtype(space) + dtype = _get_dtype(space) return dtype.itemtype.coerce_subtype(space, w_subtype, w_value) - return func_with_new_name(new, name + "_box_new"), staticmethod(get_dtype) + return func_with_new_name(new, name + "_box_new"), staticmethod(_get_dtype) class PrimitiveBox(object): _mixin_ = True @@ -37,6 +38,9 @@ w_subtype.getname(space, '?') ) + def get_dtype(self, space): + return self._get_dtype(space) + def descr_str(self, space): return space.wrap(self.get_dtype(space).itemtype.str_format(self)) @@ -44,12 +48,12 @@ return space.format(self.item(space), w_spec) def descr_int(self, space): - box = self.convert_to(W_LongBox.get_dtype(space)) + box = self.convert_to(W_LongBox._get_dtype(space)) assert isinstance(box, W_LongBox) return space.wrap(box.value) def descr_float(self, space): - box = self.convert_to(W_Float64Box.get_dtype(space)) + box = self.convert_to(W_Float64Box._get_dtype(space)) assert isinstance(box, W_Float64Box) return space.wrap(box.value) @@ -130,7 +134,7 @@ class W_BoolBox(W_GenericBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("bool") + descr__new__, _get_dtype = new_dtype_getter("bool") class W_NumberBox(W_GenericBox): _attrs_ = () @@ -146,34 +150,40 @@ pass class W_Int8Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("int8") + descr__new__, _get_dtype = new_dtype_getter("int8") class W_UInt8Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, 
get_dtype = new_dtype_getter("uint8") + descr__new__, _get_dtype = new_dtype_getter("uint8") class W_Int16Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("int16") + descr__new__, _get_dtype = new_dtype_getter("int16") class W_UInt16Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("uint16") + descr__new__, _get_dtype = new_dtype_getter("uint16") class W_Int32Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("int32") + descr__new__, _get_dtype = new_dtype_getter("int32") class W_UInt32Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("uint32") + descr__new__, _get_dtype = new_dtype_getter("uint32") class W_LongBox(W_SignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("long") + descr__new__, _get_dtype = new_dtype_getter("long") class W_ULongBox(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("ulong") + descr__new__, _get_dtype = new_dtype_getter("ulong") class W_Int64Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("int64") + descr__new__, _get_dtype = new_dtype_getter("int64") + +class W_LongLongBox(W_SignedIntegerBox, PrimitiveBox): + descr__new__, _get_dtype = new_dtype_getter('longlong') class W_UInt64Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("uint64") + descr__new__, _get_dtype = new_dtype_getter("uint64") + +class W_ULongLongBox(W_SignedIntegerBox, PrimitiveBox): + descr__new__, _get_dtype = new_dtype_getter('ulonglong') class W_InexactBox(W_NumberBox): _attrs_ = () @@ -182,16 +192,71 @@ _attrs_ = () class W_Float32Box(W_FloatingBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("float32") + descr__new__, _get_dtype = new_dtype_getter("float32") class W_Float64Box(W_FloatingBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("float64") + descr__new__, 
_get_dtype = new_dtype_getter("float64") +class W_FlexibleBox(W_GenericBox): + def __init__(self, arr, ofs, dtype): + self.arr = arr # we have to keep array alive + self.ofs = ofs + self.dtype = dtype + + def get_dtype(self, space): + return self.arr.dtype + @unwrap_spec(self=W_GenericBox) def descr_index(space, self): return space.index(self.item(space)) +class W_VoidBox(W_FlexibleBox): + @unwrap_spec(item=str) + def descr_getitem(self, space, item): + try: + ofs, dtype = self.dtype.fields[item] + except KeyError: + raise OperationError(space.w_IndexError, + space.wrap("Field %s does not exist" % item)) + return dtype.itemtype.read(self.arr, 1, self.ofs, ofs, dtype) + + @unwrap_spec(item=str) + def descr_setitem(self, space, item, w_value): + try: + ofs, dtype = self.dtype.fields[item] + except KeyError: + raise OperationError(space.w_IndexError, + space.wrap("Field %s does not exist" % item)) + dtype.itemtype.store(self.arr, 1, self.ofs, ofs, + dtype.coerce(space, w_value)) + +class W_CharacterBox(W_FlexibleBox): + pass + +class W_StringBox(W_CharacterBox): + def descr__new__string_box(space, w_subtype, w_arg): + from pypy.module.micronumpy.interp_numarray import W_NDimArray + from pypy.module.micronumpy.interp_dtype import new_string_dtype + + arg = space.str_w(space.str(w_arg)) + arr = W_NDimArray([1], new_string_dtype(space, len(arg))) + for i in range(len(arg)): + arr.storage[i] = arg[i] + return W_StringBox(arr, 0, arr.dtype) + + +class W_UnicodeBox(W_CharacterBox): + def descr__new__unicode_box(space, w_subtype, w_arg): + from pypy.module.micronumpy.interp_numarray import W_NDimArray + from pypy.module.micronumpy.interp_dtype import new_unicode_dtype + + arg = space.unicode_w(unicode_from_object(space, w_arg)) + arr = W_NDimArray([1], new_unicode_dtype(space, len(arg))) + # XXX not this way, we need store + #for i in range(len(arg)): + # arr.storage[i] = arg[i] + return W_UnicodeBox(arr, 0, arr.dtype) W_GenericBox.typedef = TypeDef("generic", __module__ = 
"numpypy", @@ -348,3 +413,28 @@ __new__ = interp2app(W_Float64Box.descr__new__.im_func), ) + +W_FlexibleBox.typedef = TypeDef("flexible", W_GenericBox.typedef, + __module__ = "numpypy", +) + +W_VoidBox.typedef = TypeDef("void", W_FlexibleBox.typedef, + __module__ = "numpypy", + __getitem__ = interp2app(W_VoidBox.descr_getitem), + __setitem__ = interp2app(W_VoidBox.descr_setitem), +) + +W_CharacterBox.typedef = TypeDef("character", W_FlexibleBox.typedef, + __module__ = "numpypy", +) + +W_StringBox.typedef = TypeDef("string_", (str_typedef, W_CharacterBox.typedef), + __module__ = "numpypy", + __new__ = interp2app(W_StringBox.descr__new__string_box.im_func), +) + +W_UnicodeBox.typedef = TypeDef("unicode_", (unicode_typedef, W_CharacterBox.typedef), + __module__ = "numpypy", + __new__ = interp2app(W_UnicodeBox.descr__new__unicode_box.im_func), +) + diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -1,26 +1,29 @@ + +import sys from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.error import OperationError -from pypy.interpreter.gateway import interp2app +from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import (TypeDef, GetSetProperty, interp_attrproperty, interp_attrproperty_w) -from pypy.module.micronumpy import types, signature, interp_boxes +from pypy.module.micronumpy import types, interp_boxes from pypy.rlib.objectmodel import specialize -from pypy.rlib.rarithmetic import LONG_BIT -from pypy.rpython.lltypesystem import lltype, rffi +from pypy.rlib.rarithmetic import LONG_BIT, r_longlong, r_ulonglong UNSIGNEDLTR = "u" SIGNEDLTR = "i" BOOLLTR = "b" FLOATINGLTR = "f" - - -VOID_STORAGE = lltype.Array(lltype.Char, hints={'nolength': True, 'render_as_void': True}) +VOIDLTR = 'V' +STRINGLTR = 'S' +UNICODELTR = 'U' class W_Dtype(Wrappable): _immutable_fields_ = ["itemtype", 
"num", "kind"] - def __init__(self, itemtype, num, kind, name, char, w_box_type, alternate_constructors=[], aliases=[]): + def __init__(self, itemtype, num, kind, name, char, w_box_type, + alternate_constructors=[], aliases=[], + fields=None, fieldnames=None, native=True): self.itemtype = itemtype self.num = num self.kind = kind @@ -29,53 +32,28 @@ self.w_box_type = w_box_type self.alternate_constructors = alternate_constructors self.aliases = aliases - - def malloc(self, length): - # XXX find out why test_zjit explodes with tracking of allocations - return lltype.malloc(VOID_STORAGE, self.itemtype.get_element_size() * length, - zero=True, flavor="raw", - track_allocation=False, add_memory_pressure=True - ) + self.fields = fields + self.fieldnames = fieldnames + self.native = native @specialize.argtype(1) def box(self, value): return self.itemtype.box(value) def coerce(self, space, w_item): - return self.itemtype.coerce(space, w_item) + return self.itemtype.coerce(space, self, w_item) - def getitem(self, storage, i): - return self.itemtype.read(storage, self.itemtype.get_element_size(), i, 0) + def getitem(self, arr, i): + return self.itemtype.read(arr, 1, i, 0) - def getitem_bool(self, storage, i): - isize = self.itemtype.get_element_size() - return self.itemtype.read_bool(storage, isize, i, 0) + def getitem_bool(self, arr, i): + return self.itemtype.read_bool(arr, 1, i, 0) - def setitem(self, storage, i, box): - self.itemtype.store(storage, self.itemtype.get_element_size(), i, 0, box) + def setitem(self, arr, i, box): + self.itemtype.store(arr, 1, i, 0, box) def fill(self, storage, box, start, stop): - self.itemtype.fill(storage, self.itemtype.get_element_size(), box, start, stop, 0) - - def descr__new__(space, w_subtype, w_dtype): - cache = get_dtype_cache(space) - - if space.is_w(w_dtype, space.w_None): - return cache.w_float64dtype - elif space.isinstance_w(w_dtype, w_subtype): - return w_dtype - elif space.isinstance_w(w_dtype, space.w_str): - name = 
space.str_w(w_dtype) - for dtype in cache.builtin_dtypes: - if dtype.name == name or dtype.char == name or name in dtype.aliases: - return dtype - else: - for dtype in cache.builtin_dtypes: - if w_dtype in dtype.alternate_constructors: - return dtype - if w_dtype is dtype.w_box_type: - return dtype - raise OperationError(space.w_TypeError, space.wrap("data type not understood")) + self.itemtype.fill(storage, self.get_size(), box, start, stop, 0) def descr_str(self, space): return space.wrap(self.name) @@ -86,6 +64,14 @@ def descr_get_itemsize(self, space): return space.wrap(self.itemtype.get_element_size()) + def descr_get_byteorder(self, space): + if self.native: + return space.wrap('=') + return space.wrap(nonnative_byteorder_prefix) + + def descr_get_alignment(self, space): + return space.wrap(self.itemtype.alignment) + def descr_get_shape(self, space): return space.newtuple([]) @@ -99,31 +85,193 @@ def descr_ne(self, space, w_other): return space.wrap(not self.eq(space, w_other)) + def descr_get_fields(self, space): + if self.fields is None: + return space.w_None + w_d = space.newdict() + for name, (offset, subdtype) in self.fields.iteritems(): + space.setitem(w_d, space.wrap(name), space.newtuple([subdtype, + space.wrap(offset)])) + return w_d + + def descr_get_names(self, space): + if self.fieldnames is None: + return space.w_None + return space.newtuple([space.wrap(name) for name in self.fieldnames]) + + @unwrap_spec(item=str) + def descr_getitem(self, space, item): + if self.fields is None: + raise OperationError(space.w_KeyError, space.wrap("There are no keys in dtypes %s" % self.name)) + try: + return self.fields[item][1] + except KeyError: + raise OperationError(space.w_KeyError, space.wrap("Field named %s not found" % item)) + def is_int_type(self): return (self.kind == SIGNEDLTR or self.kind == UNSIGNEDLTR or self.kind == BOOLLTR) + def is_signed(self): + return self.kind == SIGNEDLTR + def is_bool_type(self): return self.kind == BOOLLTR + def 
is_record_type(self): + return self.fields is not None + + def __repr__(self): + if self.fields is not None: + return '' % self.fields + return '' % self.itemtype + + def get_size(self): + return self.itemtype.get_element_size() + +def dtype_from_list(space, w_lst): + lst_w = space.listview(w_lst) + fields = {} + offset = 0 + ofs_and_items = [] + fieldnames = [] + for w_elem in lst_w: + w_fldname, w_flddesc = space.fixedview(w_elem, 2) + subdtype = descr__new__(space, space.gettypefor(W_Dtype), w_flddesc) + fldname = space.str_w(w_fldname) + if fldname in fields: + raise OperationError(space.w_ValueError, space.wrap("two fields with the same name")) + assert isinstance(subdtype, W_Dtype) + fields[fldname] = (offset, subdtype) + ofs_and_items.append((offset, subdtype.itemtype)) + offset += subdtype.itemtype.get_element_size() + fieldnames.append(fldname) + itemtype = types.RecordType(ofs_and_items, offset) + return W_Dtype(itemtype, 20, VOIDLTR, "void" + str(8 * itemtype.get_element_size()), + "V", space.gettypefor(interp_boxes.W_VoidBox), fields=fields, + fieldnames=fieldnames) + +def dtype_from_dict(space, w_dict): + raise OperationError(space.w_NotImplementedError, space.wrap( + "dtype from dict")) + +def variable_dtype(space, name): + if name[0] in '<>=': + name = name[1:] + char = name[0] + if len(name) == 1: + size = 0 + else: + try: + size = int(name[1:]) + except ValueError: + raise OperationError(space.w_TypeError, space.wrap("data type not understood")) + if char == 'S': + itemtype = types.StringType(size) + basename = 'string' + num = 18 + w_box_type = space.gettypefor(interp_boxes.W_StringBox) + elif char == 'V': + num = 20 + basename = 'void' + w_box_type = space.gettypefor(interp_boxes.W_VoidBox) + raise OperationError(space.w_NotImplementedError, space.wrap( + "pure void dtype")) + else: + assert char == 'U' + basename = 'unicode' + itemtype = types.UnicodeType(size) + num = 19 + w_box_type = space.gettypefor(interp_boxes.W_UnicodeBox) + return 
W_Dtype(itemtype, num, char, + basename + str(8 * itemtype.get_element_size()), + char, w_box_type) + +def dtype_from_spec(space, name): + raise OperationError(space.w_NotImplementedError, space.wrap( + "dtype from spec")) + +def descr__new__(space, w_subtype, w_dtype): + cache = get_dtype_cache(space) + + if space.is_w(w_dtype, space.w_None): + return cache.w_float64dtype + elif space.isinstance_w(w_dtype, w_subtype): + return w_dtype + elif space.isinstance_w(w_dtype, space.w_str): + name = space.str_w(w_dtype) + if ',' in name: + return dtype_from_spec(space, name) + try: + return cache.dtypes_by_name[name] + except KeyError: + pass + if name[0] in 'VSU' or name[0] in '<>=' and name[1] in 'VSU': + return variable_dtype(space, name) + elif space.isinstance_w(w_dtype, space.w_list): + return dtype_from_list(space, w_dtype) + elif space.isinstance_w(w_dtype, space.w_dict): + return dtype_from_dict(space, w_dtype) + else: + for dtype in cache.builtin_dtypes: + if w_dtype in dtype.alternate_constructors: + return dtype + if w_dtype is dtype.w_box_type: + return dtype + raise OperationError(space.w_TypeError, space.wrap("data type not understood")) + W_Dtype.typedef = TypeDef("dtype", __module__ = "numpypy", - __new__ = interp2app(W_Dtype.descr__new__.im_func), + __new__ = interp2app(descr__new__), __str__= interp2app(W_Dtype.descr_str), __repr__ = interp2app(W_Dtype.descr_repr), __eq__ = interp2app(W_Dtype.descr_eq), __ne__ = interp2app(W_Dtype.descr_ne), + __getitem__ = interp2app(W_Dtype.descr_getitem), num = interp_attrproperty("num", cls=W_Dtype), kind = interp_attrproperty("kind", cls=W_Dtype), + char = interp_attrproperty("char", cls=W_Dtype), type = interp_attrproperty_w("w_box_type", cls=W_Dtype), + byteorder = GetSetProperty(W_Dtype.descr_get_byteorder), itemsize = GetSetProperty(W_Dtype.descr_get_itemsize), + alignment = GetSetProperty(W_Dtype.descr_get_alignment), shape = GetSetProperty(W_Dtype.descr_get_shape), name = interp_attrproperty('name', 
cls=W_Dtype), + fields = GetSetProperty(W_Dtype.descr_get_fields), + names = GetSetProperty(W_Dtype.descr_get_names), ) W_Dtype.typedef.acceptable_as_base_class = False +if sys.byteorder == 'little': + byteorder_prefix = '<' + nonnative_byteorder_prefix = '>' +else: + byteorder_prefix = '>' + nonnative_byteorder_prefix = '<' + +def new_string_dtype(space, size): + return W_Dtype( + types.StringType(size), + num=18, + kind=STRINGLTR, + name='string', + char='S' + str(size), + w_box_type = space.gettypefor(interp_boxes.W_StringBox), + ) + +def new_unicode_dtype(space, size): + return W_Dtype( + types.UnicodeType(size), + num=19, + kind=UNICODELTR, + name='unicode', + char='U' + str(size), + w_box_type = space.gettypefor(interp_boxes.W_UnicodeBox), + ) + + class DtypeCache(object): def __init__(self, space): self.w_booldtype = W_Dtype( @@ -239,18 +387,134 @@ alternate_constructors=[space.w_float], aliases=["float"], ) - + self.w_stringdtype = W_Dtype( + types.StringType(1), + num=18, + kind=STRINGLTR, + name='string', + char='S', + w_box_type = space.gettypefor(interp_boxes.W_StringBox), + alternate_constructors=[space.w_str], + ) + self.w_unicodedtype = W_Dtype( + types.UnicodeType(1), + num=19, + kind=UNICODELTR, + name='unicode', + char='U', + w_box_type = space.gettypefor(interp_boxes.W_UnicodeBox), + alternate_constructors=[space.w_unicode], + ) + self.w_voiddtype = W_Dtype( + types.VoidType(0), + num=20, + kind=VOIDLTR, + name='void', + char='V', + w_box_type = space.gettypefor(interp_boxes.W_VoidBox), + #alternate_constructors=[space.w_buffer], + # XXX no buffer in space + ) self.builtin_dtypes = [ self.w_booldtype, self.w_int8dtype, self.w_uint8dtype, self.w_int16dtype, self.w_uint16dtype, self.w_int32dtype, self.w_uint32dtype, self.w_longdtype, self.w_ulongdtype, - self.w_int64dtype, self.w_uint64dtype, self.w_float32dtype, - self.w_float64dtype + self.w_int64dtype, self.w_uint64dtype, + self.w_float32dtype, + self.w_float64dtype, self.w_stringdtype, 
self.w_unicodedtype, + self.w_voiddtype, ] - self.dtypes_by_num_bytes = sorted( + self.float_dtypes_by_num_bytes = sorted( (dtype.itemtype.get_element_size(), dtype) - for dtype in self.builtin_dtypes + for dtype in [self.w_float32dtype, self.w_float64dtype] ) + self.dtypes_by_name = {} + # we reverse, so the stuff with lower numbers override stuff with + # higher numbers + for dtype in reversed(self.builtin_dtypes): + self.dtypes_by_name[dtype.name] = dtype + can_name = dtype.kind + str(dtype.itemtype.get_element_size()) + self.dtypes_by_name[can_name] = dtype + self.dtypes_by_name[byteorder_prefix + can_name] = dtype + self.dtypes_by_name['=' + can_name] = dtype + new_name = nonnative_byteorder_prefix + can_name + itemtypename = dtype.itemtype.__class__.__name__ + itemtype = getattr(types, 'NonNative' + itemtypename)() + self.dtypes_by_name[new_name] = W_Dtype( + itemtype, + dtype.num, dtype.kind, new_name, dtype.char, dtype.w_box_type, + native=False) + for alias in dtype.aliases: + self.dtypes_by_name[alias] = dtype + self.dtypes_by_name[dtype.char] = dtype + + typeinfo_full = { + 'LONGLONG': self.w_int64dtype, + 'SHORT': self.w_int16dtype, + 'VOID': self.w_voiddtype, + #'LONGDOUBLE':, + 'UBYTE': self.w_uint8dtype, + 'UINTP': self.w_ulongdtype, + 'ULONG': self.w_ulongdtype, + 'LONG': self.w_longdtype, + 'UNICODE': self.w_unicodedtype, + #'OBJECT', + 'ULONGLONG': self.w_uint64dtype, + 'STRING': self.w_stringdtype, + #'CDOUBLE', + #'DATETIME', + 'UINT': self.w_uint32dtype, + 'INTP': self.w_longdtype, + #'HALF', + 'BYTE': self.w_int8dtype, + #'CFLOAT': , + #'TIMEDELTA', + 'INT': self.w_int32dtype, + 'DOUBLE': self.w_float64dtype, + 'USHORT': self.w_uint16dtype, + 'FLOAT': self.w_float32dtype, + 'BOOL': self.w_booldtype, + #, 'CLONGDOUBLE'] + } + typeinfo_partial = { + 'Generic': interp_boxes.W_GenericBox, + 'Character': interp_boxes.W_CharacterBox, + 'Flexible': interp_boxes.W_FlexibleBox, + 'Inexact': interp_boxes.W_InexactBox, + 'Integer': 
interp_boxes.W_IntegerBox, + 'SignedInteger': interp_boxes.W_SignedIntegerBox, + 'UnsignedInteger': interp_boxes.W_UnsignedIntegerBox, + #'ComplexFloating', + 'Number': interp_boxes.W_NumberBox, + 'Floating': interp_boxes.W_FloatingBox + } + w_typeinfo = space.newdict() + for k, v in typeinfo_partial.iteritems(): + space.setitem(w_typeinfo, space.wrap(k), space.gettypefor(v)) + for k, dtype in typeinfo_full.iteritems(): + itemsize = dtype.itemtype.get_element_size() + items_w = [space.wrap(dtype.char), + space.wrap(dtype.num), + space.wrap(itemsize * 8), # in case of changing + # number of bits per byte in the future + space.wrap(itemsize or 1)] + if dtype.is_int_type(): + if dtype.kind == BOOLLTR: + w_maxobj = space.wrap(1) + w_minobj = space.wrap(0) + elif dtype.is_signed(): + w_maxobj = space.wrap(r_longlong((1 << (itemsize*8 - 1)) + - 1)) + w_minobj = space.wrap(r_longlong(-1) << (itemsize*8 - 1)) + else: + w_maxobj = space.wrap(r_ulonglong(1 << (itemsize*8)) - 1) + w_minobj = space.wrap(0) + items_w = items_w + [w_maxobj, w_minobj] + items_w = items_w + [dtype.w_box_type] + + w_tuple = space.newtuple(items_w) + space.setitem(w_typeinfo, space.wrap(k), w_tuple) + self.w_typeinfo = w_typeinfo def get_dtype_cache(space): return space.fromcache(DtypeCache) diff --git a/pypy/module/micronumpy/interp_iter.py b/pypy/module/micronumpy/interp_iter.py --- a/pypy/module/micronumpy/interp_iter.py +++ b/pypy/module/micronumpy/interp_iter.py @@ -2,7 +2,7 @@ from pypy.rlib import jit from pypy.rlib.objectmodel import instantiate from pypy.module.micronumpy.strides import calculate_broadcast_strides,\ - calculate_slice_strides, calculate_dot_strides + calculate_slice_strides, calculate_dot_strides, enumerate_chunks """ This is a mini-tutorial on iterators, strides, and memory layout. It assumes you are familiar with the terms, see @@ -42,25 +42,67 @@ we can go faster. 
All the calculations happen in next() -next_step_x() tries to do the iteration for a number of steps at once, +next_skip_x() tries to do the iteration for a number of steps at once, but then we cannot gaurentee that we only overflow one single shape dimension, perhaps we could overflow times in one big step. """ # structures to describe slicing -class Chunk(object): +class BaseChunk(object): + pass + +class RecordChunk(BaseChunk): + def __init__(self, name): + self.name = name + + def apply(self, arr): + from pypy.module.micronumpy.interp_numarray import W_NDimSlice + + arr = arr.get_concrete() + ofs, subdtype = arr.dtype.fields[self.name] + # strides backstrides are identical, ofs only changes start + return W_NDimSlice(arr.start + ofs, arr.strides[:], arr.backstrides[:], + arr.shape[:], arr, subdtype) + +class Chunks(BaseChunk): + def __init__(self, l): + self.l = l + + @jit.unroll_safe + def extend_shape(self, old_shape): + shape = [] + i = -1 + for i, c in enumerate_chunks(self.l): + if c.step != 0: + shape.append(c.lgt) + s = i + 1 + assert s >= 0 + return shape[:] + old_shape[s:] + + def apply(self, arr): + from pypy.module.micronumpy.interp_numarray import W_NDimSlice,\ + VirtualSlice, ConcreteArray + + shape = self.extend_shape(arr.shape) + if not isinstance(arr, ConcreteArray): + return VirtualSlice(arr, self, shape) + r = calculate_slice_strides(arr.shape, arr.start, arr.strides, + arr.backstrides, self.l) + _, start, strides, backstrides = r + return W_NDimSlice(start, strides[:], backstrides[:], + shape[:], arr) + + +class Chunk(BaseChunk): axis_step = 1 + def __init__(self, start, stop, step, lgt): self.start = start self.stop = stop self.step = step self.lgt = lgt - def extend_shape(self, shape): - if self.step != 0: - shape.append(self.lgt) - def __repr__(self): return 'Chunk(%d, %d, %d, %d)' % (self.start, self.stop, self.step, self.lgt) @@ -106,17 +148,19 @@ raise NotImplementedError class ArrayIterator(BaseIterator): - def __init__(self, size): + 
def __init__(self, size, element_size): self.offset = 0 self.size = size + self.element_size = element_size def next(self, shapelen): return self.next_skip_x(1) - def next_skip_x(self, ofs): + def next_skip_x(self, x): arr = instantiate(ArrayIterator) arr.size = self.size - arr.offset = self.offset + ofs + arr.offset = self.offset + x * self.element_size + arr.element_size = self.element_size return arr def next_no_increase(self, shapelen): @@ -163,7 +207,7 @@ elif isinstance(t, ViewTransform): r = calculate_slice_strides(self.res_shape, self.offset, self.strides, - self.backstrides, t.chunks) + self.backstrides, t.chunks.l) return ViewIterator(r[1], r[2], r[3], r[0]) @jit.unroll_safe diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -7,10 +7,10 @@ from pypy.module.micronumpy.appbridge import get_appbridge_cache from pypy.module.micronumpy.dot import multidim_dot, match_dot_shapes from pypy.module.micronumpy.interp_iter import (ArrayIterator, - SkipLastAxisIterator, Chunk, NewAxisChunk, ViewIterator) -from pypy.module.micronumpy.strides import (calculate_slice_strides, - shape_agreement, find_shape_and_elems, get_shape_from_iterable, - calc_new_strides, to_coords, enumerate_chunks) + SkipLastAxisIterator, Chunk, ViewIterator, Chunks, RecordChunk, + NewAxisChunk) +from pypy.module.micronumpy.strides import (shape_agreement, + find_shape_and_elems, get_shape_from_iterable, calc_new_strides, to_coords) from pypy.rlib import jit from pypy.rlib.rstring import StringBuilder from pypy.rpython.lltypesystem import lltype, rffi @@ -47,7 +47,7 @@ ) flat_set_driver = jit.JitDriver( greens=['shapelen', 'base'], - reds=['step', 'ai', 'lngth', 'arr', 'basei'], + reds=['step', 'lngth', 'ri', 'arr', 'basei'], name='numpy_flatset', ) @@ -79,8 +79,8 @@ dtype = space.interp_w(interp_dtype.W_Dtype, 
space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) ) - size, shape = _find_size_and_shape(space, w_size) - return space.wrap(W_NDimArray(size, shape[:], dtype=dtype)) + shape = _find_shape(space, w_size) + return space.wrap(W_NDimArray(shape[:], dtype=dtype)) def _unaryop_impl(ufunc_name): def impl(self, space): @@ -225,8 +225,7 @@ return scalar_w(space, dtype, space.wrap(0)) # Do the dims match? out_shape, other_critical_dim = match_dot_shapes(space, self, other) - out_size = support.product(out_shape) - result = W_NDimArray(out_size, out_shape, dtype) + result = W_NDimArray(out_shape, dtype) # This is the place to add fpypy and blas return multidim_dot(space, self.get_concrete(), other.get_concrete(), result, dtype, @@ -245,7 +244,7 @@ return space.wrap(self.find_dtype().itemtype.get_element_size()) def descr_get_nbytes(self, space): - return space.wrap(self.size * self.find_dtype().itemtype.get_element_size()) + return space.wrap(self.size) @jit.unroll_safe def descr_get_shape(self, space): @@ -253,13 +252,16 @@ def descr_set_shape(self, space, w_iterable): new_shape = get_shape_from_iterable(space, - self.size, w_iterable) + support.product(self.shape), w_iterable) if isinstance(self, Scalar): return self.get_concrete().setshape(space, new_shape) def descr_get_size(self, space): - return space.wrap(self.size) + return space.wrap(self.get_size()) + + def get_size(self): + return self.size // self.find_dtype().get_size() def descr_copy(self, space): return self.copy(space) @@ -279,7 +281,7 @@ def empty_copy(self, space, dtype): shape = self.shape - return W_NDimArray(support.product(shape), shape[:], dtype, 'C') + return W_NDimArray(shape[:], dtype, 'C') def descr_len(self, space): if len(self.shape): @@ -320,13 +322,15 @@ """ The result of getitem/setitem is a single item if w_idx is a list of scalars that match the size of shape """ + if space.isinstance_w(w_idx, space.w_str): + return False shape_len = len(self.shape) if 
space.isinstance_w(w_idx, space.w_tuple): for w_item in space.fixedview(w_idx): if (space.isinstance_w(w_item, space.w_slice) or - space.isinstance_w(w_item, space.w_NoneType)): + space.is_w(w_item, space.w_None)): return False - elif space.isinstance_w(w_idx, space.w_NoneType): + elif space.is_w(w_idx, space.w_None): return False if shape_len == 0: raise OperationError(space.w_IndexError, space.wrap( @@ -347,44 +351,51 @@ @jit.unroll_safe def _prepare_slice_args(self, space, w_idx): + if space.isinstance_w(w_idx, space.w_str): + idx = space.str_w(w_idx) + dtype = self.find_dtype() + if not dtype.is_record_type() or idx not in dtype.fields: + raise OperationError(space.w_ValueError, space.wrap( + "field named %s not defined" % idx)) + return RecordChunk(idx) if (space.isinstance_w(w_idx, space.w_int) or space.isinstance_w(w_idx, space.w_slice)): - return [Chunk(*space.decode_index4(w_idx, self.shape[0]))] - elif space.isinstance_w(w_idx, space.w_NoneType): - return [NewAxisChunk()] + return Chunks([Chunk(*space.decode_index4(w_idx, self.shape[0]))]) + elif space.is_w(w_idx, space.w_None): + return Chunks([NewAxisChunk()]) result = [] i = 0 for w_item in space.fixedview(w_idx): - if space.isinstance_w(w_item, space.w_NoneType): + if space.is_w(w_item, space.w_None): result.append(NewAxisChunk()) else: result.append(Chunk(*space.decode_index4(w_item, self.shape[i]))) i += 1 - return result + return Chunks(result) - def count_all_true(self, arr): - sig = arr.find_sig() - frame = sig.create_frame(arr) - shapelen = len(arr.shape) + def count_all_true(self): + sig = self.find_sig() + frame = sig.create_frame(self) + shapelen = len(self.shape) s = 0 iter = None while not frame.done(): - count_driver.jit_merge_point(arr=arr, frame=frame, iter=iter, s=s, + count_driver.jit_merge_point(arr=self, frame=frame, iter=iter, s=s, shapelen=shapelen) iter = frame.get_final_iter() - s += arr.dtype.getitem_bool(arr.storage, iter.offset) + s += self.dtype.getitem_bool(self, 
iter.offset) frame.next(shapelen) return s def getitem_filter(self, space, arr): concr = arr.get_concrete() - if concr.size > self.size: + if concr.get_size() > self.get_size(): raise OperationError(space.w_IndexError, space.wrap("index out of range for array")) - size = self.count_all_true(concr) - res = W_NDimArray(size, [size], self.find_dtype()) - ri = ArrayIterator(size) + size = concr.count_all_true() + res = W_NDimArray([size], self.find_dtype()) + ri = res.create_iter() shapelen = len(self.shape) argi = concr.create_iter() sig = self.find_sig() @@ -394,7 +405,7 @@ filter_driver.jit_merge_point(concr=concr, argi=argi, ri=ri, frame=frame, v=v, res=res, sig=sig, shapelen=shapelen, self=self) - if concr.dtype.getitem_bool(concr.storage, argi.offset): + if concr.dtype.getitem_bool(concr, argi.offset): v = sig.eval(frame, self) res.setitem(ri.offset, v) ri = ri.next(1) @@ -404,23 +415,6 @@ frame.next(shapelen) return res - def setitem_filter(self, space, idx, val): - size = self.count_all_true(idx) - arr = SliceArray([size], self.dtype, self, val) - sig = arr.find_sig() - shapelen = len(self.shape) - frame = sig.create_frame(arr) - idxi = idx.create_iter() - while not frame.done(): - filter_set_driver.jit_merge_point(idx=idx, idxi=idxi, sig=sig, - frame=frame, arr=arr, - shapelen=shapelen) - if idx.dtype.getitem_bool(idx.storage, idxi.offset): - sig.eval(frame, arr) - frame.next_from_second(1) - frame.next_first(shapelen) - idxi = idxi.next(shapelen) - def descr_getitem(self, space, w_idx): if (isinstance(w_idx, BaseArray) and w_idx.shape == self.shape and w_idx.find_dtype().is_bool_type()): @@ -430,7 +424,24 @@ item = concrete._index_of_single_item(space, w_idx) return concrete.getitem(item) chunks = self._prepare_slice_args(space, w_idx) - return self.create_slice(chunks) + return chunks.apply(self) + + def setitem_filter(self, space, idx, val): + size = idx.count_all_true() + arr = SliceArray([size], self.dtype, self, val) + sig = arr.find_sig() + shapelen = 
len(self.shape) + frame = sig.create_frame(arr) + idxi = idx.create_iter() + while not frame.done(): + filter_set_driver.jit_merge_point(idx=idx, idxi=idxi, sig=sig, + frame=frame, arr=arr, + shapelen=shapelen) + if idx.dtype.getitem_bool(idx, idxi.offset): + sig.eval(frame, arr) + frame.next_from_second(1) + frame.next_first(shapelen) + idxi = idxi.next(shapelen) def descr_setitem(self, space, w_idx, w_value): self.invalidated() @@ -448,26 +459,9 @@ if not isinstance(w_value, BaseArray): w_value = convert_to_array(space, w_value) chunks = self._prepare_slice_args(space, w_idx) - view = self.create_slice(chunks).get_concrete() + view = chunks.apply(self).get_concrete() view.setslice(space, w_value) - @jit.unroll_safe - def create_slice(self, chunks): - shape = [] - i = -1 - for i, chunk in enumerate_chunks(chunks): - chunk.extend_shape(shape) - s = i + 1 - assert s >= 0 - shape += self.shape[s:] - if not isinstance(self, ConcreteArray): - return VirtualSlice(self, chunks, shape) - r = calculate_slice_strides(self.shape, self.start, self.strides, - self.backstrides, chunks) - _, start, strides, backstrides = r - return W_NDimSlice(start, strides[:], backstrides[:], - shape[:], self) - def descr_reshape(self, space, args_w): """reshape(...) 
a.reshape(shape) @@ -484,7 +478,8 @@ w_shape = args_w[0] else: w_shape = space.newtuple(args_w) - new_shape = get_shape_from_iterable(space, self.size, w_shape) + new_shape = get_shape_from_iterable(space, support.product(self.shape), + w_shape) return self.reshape(space, new_shape) def reshape(self, space, new_shape): @@ -522,7 +517,7 @@ def descr_mean(self, space, w_axis=None): if space.is_w(w_axis, space.w_None): w_axis = space.wrap(-1) - w_denom = space.wrap(self.size) + w_denom = space.wrap(support.product(self.shape)) else: dim = space.int_w(w_axis) w_denom = space.wrap(self.shape[dim]) @@ -541,7 +536,7 @@ concr.fill(space, w_value) def descr_nonzero(self, space): - if self.size > 1: + if self.get_size() > 1: raise OperationError(space.w_ValueError, space.wrap( "The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()")) concr = self.get_concrete_or_scalar() @@ -620,8 +615,7 @@ space.wrap("axis unsupported for take")) index_i = index.create_iter() res_shape = index.shape - size = support.product(res_shape) - res = W_NDimArray(size, res_shape[:], concr.dtype, concr.order) + res = W_NDimArray(res_shape[:], concr.dtype, concr.order) res_i = res.create_iter() shapelen = len(index.shape) sig = concr.find_sig() @@ -660,6 +654,11 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "non-int arg not supported")) + def descr_tostring(self, space): + ra = ToStringArray(self) + loop.compute(ra) + return space.wrap(ra.s.build()) + def compute_first_step(self, sig, frame): pass @@ -681,8 +680,7 @@ """ Intermediate class representing a literal. 
""" - size = 1 - _attrs_ = ["dtype", "value", "shape"] + _attrs_ = ["dtype", "value", "shape", "size"] def __init__(self, dtype, value): self.shape = [] @@ -690,6 +688,7 @@ self.dtype = dtype assert isinstance(value, interp_boxes.W_GenericBox) self.value = value + self.size = dtype.get_size() def find_dtype(self): return self.dtype @@ -707,8 +706,7 @@ return self def reshape(self, space, new_shape): - size = support.product(new_shape) - res = W_NDimArray(size, new_shape, self.dtype, 'C') + res = W_NDimArray(new_shape, self.dtype, 'C') res.setitem(0, self.value) return res @@ -721,6 +719,7 @@ self.forced_result = None self.res_dtype = res_dtype self.name = name + self.size = support.product(self.shape) * res_dtype.get_size() def _del_sources(self): # Function for deleting references to source arrays, @@ -728,7 +727,7 @@ raise NotImplementedError def compute(self): - ra = ResultArray(self, self.size, self.shape, self.res_dtype) + ra = ResultArray(self, self.shape, self.res_dtype) loop.compute(ra) return ra.left @@ -756,7 +755,6 @@ def __init__(self, child, chunks, shape): self.child = child self.chunks = chunks - self.size = support.product(shape) VirtualArray.__init__(self, 'slice', shape, child.find_dtype()) def create_sig(self): @@ -768,7 +766,7 @@ def force_if_needed(self): if self.forced_result is None: concr = self.child.get_concrete() - self.forced_result = concr.create_slice(self.chunks) + self.forced_result = self.chunks.apply(concr) def _del_sources(self): self.child = None @@ -801,7 +799,6 @@ self.left = left self.right = right self.calc_dtype = calc_dtype - self.size = support.product(self.shape) def _del_sources(self): self.left = None @@ -829,15 +826,30 @@ self.left.create_sig(), self.right.create_sig()) class ResultArray(Call2): - def __init__(self, child, size, shape, dtype, res=None, order='C'): + def __init__(self, child, shape, dtype, res=None, order='C'): if res is None: - res = W_NDimArray(size, shape, dtype, order) + res = W_NDimArray(shape, 
dtype, order) Call2.__init__(self, None, 'assign', shape, dtype, dtype, res, child) def create_sig(self): return signature.ResultSignature(self.res_dtype, self.left.create_sig(), self.right.create_sig()) +class ToStringArray(Call1): + def __init__(self, child): + dtype = child.find_dtype() + self.item_size = dtype.itemtype.get_element_size() + self.s = StringBuilder(child.size * self.item_size) + Call1.__init__(self, None, 'tostring', child.shape, dtype, dtype, + child) + self.res = W_NDimArray([1], dtype, 'C') + self.res_casted = rffi.cast(rffi.CArrayPtr(lltype.Char), + self.res.storage) + + def create_sig(self): + return signature.ToStringSignature(self.calc_dtype, + self.values.create_sig()) + def done_if_true(dtype, val): return dtype.itemtype.bool(val) @@ -909,13 +921,13 @@ """ _immutable_fields_ = ['storage'] - def __init__(self, size, shape, dtype, order='C', parent=None): - self.size = size + def __init__(self, shape, dtype, order='C', parent=None): self.parent = parent + self.size = support.product(shape) * dtype.get_size() if parent is not None: self.storage = parent.storage else: - self.storage = dtype.malloc(size) + self.storage = dtype.itemtype.malloc(self.size) self.order = order self.dtype = dtype if self.strides is None: @@ -934,13 +946,14 @@ return self.dtype def getitem(self, item): - return self.dtype.getitem(self.storage, item) + return self.dtype.getitem(self, item) def setitem(self, item, value): self.invalidated() - self.dtype.setitem(self.storage, item, value) + self.dtype.setitem(self, item, value) def calc_strides(self, shape): + dtype = self.find_dtype() strides = [] backstrides = [] s = 1 @@ -948,8 +961,8 @@ if self.order == 'C': shape_rev.reverse() for sh in shape_rev: - strides.append(s) - backstrides.append(s * (sh - 1)) + strides.append(s * dtype.get_size()) + backstrides.append(s * (sh - 1) * dtype.get_size()) s *= sh if self.order == 'C': strides.reverse() @@ -997,9 +1010,9 @@ shapelen = len(self.shape) if shapelen == 1: 
rffi.c_memcpy( - rffi.ptradd(self.storage, self.start * itemsize), - rffi.ptradd(w_value.storage, w_value.start * itemsize), - self.size * itemsize + rffi.ptradd(self.storage, self.start), + rffi.ptradd(w_value.storage, w_value.start), + self.size ) else: dest = SkipLastAxisIterator(self) @@ -1014,7 +1027,7 @@ dest.next() def copy(self, space): - array = W_NDimArray(self.size, self.shape[:], self.dtype, self.order) + array = W_NDimArray(self.shape[:], self.dtype, self.order) array.setslice(space, self) return array @@ -1028,14 +1041,15 @@ class W_NDimSlice(ViewArray): - def __init__(self, start, strides, backstrides, shape, parent): + def __init__(self, start, strides, backstrides, shape, parent, dtype=None): assert isinstance(parent, ConcreteArray) if isinstance(parent, W_NDimSlice): parent = parent.parent self.strides = strides self.backstrides = backstrides - ViewArray.__init__(self, support.product(shape), shape, parent.dtype, - parent.order, parent) + if dtype is None: + dtype = parent.dtype + ViewArray.__init__(self, shape, dtype, parent.order, parent) self.start = start def create_iter(self, transforms=None): @@ -1050,12 +1064,13 @@ # but then calc_strides would have to accept a stepping factor strides = [] backstrides = [] - s = self.strides[0] + dtype = self.find_dtype() + s = self.strides[0] // dtype.get_size() if self.order == 'C': new_shape.reverse() for sh in new_shape: - strides.append(s) - backstrides.append(s * (sh - 1)) + strides.append(s * dtype.get_size()) + backstrides.append(s * (sh - 1) * dtype.get_size()) s *= max(1, sh) if self.order == 'C': strides.reverse() @@ -1083,14 +1098,16 @@ """ def setitem(self, item, value): self.invalidated() - self.dtype.setitem(self.storage, item, value) + self.dtype.setitem(self, item, value) def setshape(self, space, new_shape): self.shape = new_shape self.calc_strides(new_shape) def create_iter(self, transforms=None): - return ArrayIterator(self.size).apply_transformations(self, transforms) + esize = 
self.find_dtype().get_size() + return ArrayIterator(self.size, esize).apply_transformations(self, + transforms) def create_sig(self): return signature.ArraySignature(self.dtype) @@ -1098,22 +1115,18 @@ def __del__(self): lltype.free(self.storage, flavor='raw', track_allocation=False) -def _find_size_and_shape(space, w_size): +def _find_shape(space, w_size): if space.isinstance_w(w_size, space.w_int): - size = space.int_w(w_size) - shape = [size] - else: - size = 1 - shape = [] - for w_item in space.fixedview(w_size): - item = space.int_w(w_item) - size *= item - shape.append(item) - return size, shape + return [space.int_w(w_size)] + shape = [] + for w_item in space.fixedview(w_size): + shape.append(space.int_w(w_item)) + return shape @unwrap_spec(subok=bool, copy=bool, ownmaskna=bool) def array(space, w_item_or_iterable, w_dtype=None, w_order=None, - subok=True, copy=True, w_maskna=None, ownmaskna=False): + subok=True, copy=True, w_maskna=None, ownmaskna=False, + w_ndmin=None): # find scalar if w_maskna is None: w_maskna = space.w_None @@ -1143,28 +1156,33 @@ if copy: return w_item_or_iterable.copy(space) return w_item_or_iterable - shape, elems_w = find_shape_and_elems(space, w_item_or_iterable) + if w_dtype is None or space.is_w(w_dtype, space.w_None): + dtype = None + else: + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) + shape, elems_w = find_shape_and_elems(space, w_item_or_iterable, dtype) # they come back in C order - size = len(elems_w) - if w_dtype is None or space.is_w(w_dtype, space.w_None): - w_dtype = None + if dtype is None: for w_elem in elems_w: - w_dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, - w_dtype) - if w_dtype is interp_dtype.get_dtype_cache(space).w_float64dtype: + dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, + dtype) + if dtype is interp_dtype.get_dtype_cache(space).w_float64dtype: break - if w_dtype is None: - w_dtype = space.w_None - dtype 
= space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) - ) - arr = W_NDimArray(size, shape[:], dtype=dtype, order=order) + if dtype is None: + dtype = interp_dtype.get_dtype_cache(space).w_float64dtype shapelen = len(shape) - arr_iter = ArrayIterator(arr.size) + if w_ndmin is not None and not space.is_w(w_ndmin, space.w_None): + ndmin = space.int_w(w_ndmin) + if ndmin > shapelen: + shape = [1] * (ndmin - shapelen) + shape + shapelen = ndmin + arr = W_NDimArray(shape[:], dtype=dtype, order=order) + arr_iter = arr.create_iter() # XXX we might want to have a jitdriver here for i in range(len(elems_w)): w_elem = elems_w[i] - dtype.setitem(arr.storage, arr_iter.offset, + dtype.setitem(arr, arr_iter.offset, dtype.coerce(space, w_elem)) arr_iter = arr_iter.next(shapelen) return arr @@ -1173,22 +1191,22 @@ dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) ) - size, shape = _find_size_and_shape(space, w_size) + shape = _find_shape(space, w_size) if not shape: return scalar_w(space, dtype, space.wrap(0)) - return space.wrap(W_NDimArray(size, shape[:], dtype=dtype)) + return space.wrap(W_NDimArray(shape[:], dtype=dtype)) def ones(space, w_size, w_dtype=None): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) ) - size, shape = _find_size_and_shape(space, w_size) + shape = _find_shape(space, w_size) if not shape: return scalar_w(space, dtype, space.wrap(1)) - arr = W_NDimArray(size, shape[:], dtype=dtype) + arr = W_NDimArray(shape[:], dtype=dtype) one = dtype.box(1) - arr.dtype.fill(arr.storage, one, 0, size) + arr.dtype.fill(arr.storage, one, 0, arr.size) return space.wrap(arr) @unwrap_spec(arr=BaseArray, skipna=bool, keepdims=bool) @@ -1236,13 +1254,13 @@ "array dimensions must agree except for axis being concatenated")) elif i == axis: shape[i] += axis_size - res = 
W_NDimArray(support.product(shape), shape, dtype, 'C') + res = W_NDimArray(shape, dtype, 'C') chunks = [Chunk(0, i, 1, i) for i in shape] axis_start = 0 for arr in args_w: chunks[axis] = Chunk(axis_start, axis_start + arr.shape[axis], 1, arr.shape[axis]) - res.create_slice(chunks).setslice(space, arr) + Chunks(chunks).apply(res).setslice(space, arr) axis_start += arr.shape[axis] return res @@ -1330,6 +1348,7 @@ std = interp2app(BaseArray.descr_std), fill = interp2app(BaseArray.descr_fill), + tostring = interp2app(BaseArray.descr_tostring), copy = interp2app(BaseArray.descr_copy), flatten = interp2app(BaseArray.descr_flatten), @@ -1352,7 +1371,7 @@ self.iter = sig.create_frame(arr).get_final_iter() self.base = arr self.index = 0 - ViewArray.__init__(self, arr.size, [arr.size], arr.dtype, arr.order, + ViewArray.__init__(self, [arr.get_size()], arr.dtype, arr.order, arr) def descr_next(self, space): @@ -1367,7 +1386,7 @@ return self def descr_len(self, space): - return space.wrap(self.size) + return space.wrap(self.get_size()) def descr_index(self, space): return space.wrap(self.index) @@ -1385,28 +1404,26 @@ raise OperationError(space.w_IndexError, space.wrap('unsupported iterator index')) base = self.base - start, stop, step, lngth = space.decode_index4(w_idx, base.size) + start, stop, step, lngth = space.decode_index4(w_idx, base.get_size()) # setslice would have been better, but flat[u:v] for arbitrary # shapes of array a cannot be represented as a[x1:x2, y1:y2] basei = ViewIterator(base.start, base.strides, - base.backstrides,base.shape) + base.backstrides, base.shape) shapelen = len(base.shape) basei = basei.next_skip_x(shapelen, start) if lngth <2: return base.getitem(basei.offset) - ri = ArrayIterator(lngth) - res = W_NDimArray(lngth, [lngth], base.dtype, - base.order) + res = W_NDimArray([lngth], base.dtype, base.order) + ri = res.create_iter() while not ri.done(): flat_get_driver.jit_merge_point(shapelen=shapelen, base=base, basei=basei, step=step, res=res, 
- ri=ri, - ) + ri=ri) w_val = base.getitem(basei.offset) - res.setitem(ri.offset,w_val) + res.setitem(ri.offset, w_val) basei = basei.next_skip_x(shapelen, step) ri = ri.next(shapelen) return res @@ -1417,27 +1434,28 @@ raise OperationError(space.w_IndexError, space.wrap('unsupported iterator index')) base = self.base - start, stop, step, lngth = space.decode_index4(w_idx, base.size) + start, stop, step, lngth = space.decode_index4(w_idx, base.get_size()) arr = convert_to_array(space, w_value) - ai = 0 + ri = arr.create_iter() basei = ViewIterator(base.start, base.strides, - base.backstrides,base.shape) + base.backstrides, base.shape) shapelen = len(base.shape) basei = basei.next_skip_x(shapelen, start) while lngth > 0: flat_set_driver.jit_merge_point(shapelen=shapelen, - basei=basei, - base=base, - step=step, - arr=arr, - ai=ai, - lngth=lngth, - ) - v = arr.getitem(ai).convert_to(base.dtype) + basei=basei, + base=base, + step=step, + arr=arr, + lngth=lngth, + ri=ri) + v = arr.getitem(ri.offset).convert_to(base.dtype) base.setitem(basei.offset, v) # need to repeat input values until all assignments are done - ai = (ai + 1) % arr.size basei = basei.next_skip_x(shapelen, step) + ri = ri.next(shapelen) + # WTF is numpy thinking? 
+ ri.offset %= arr.size lngth -= 1 def create_sig(self): @@ -1445,9 +1463,9 @@ def create_iter(self, transforms=None): return ViewIterator(self.base.start, self.base.strides, - self.base.backstrides, - self.base.shape).apply_transformations(self.base, - transforms) + self.base.backstrides, + self.base.shape).apply_transformations(self.base, + transforms) def descr_base(self, space): return space.wrap(self.base) diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -51,9 +51,11 @@ raise OperationError(space.w_ValueError, space.wrap( "string is smaller than requested size")) - a = W_NDimArray(num_items, [num_items], dtype=dtype) - for i, val in enumerate(items): - a.dtype.setitem(a.storage, i, val) + a = W_NDimArray([num_items], dtype=dtype) + ai = a.create_iter() + for val in items: + a.dtype.setitem(a, ai.offset, val) + ai = ai.next(1) return space.wrap(a) @@ -61,6 +63,7 @@ from pypy.module.micronumpy.interp_numarray import W_NDimArray itemsize = dtype.itemtype.get_element_size() + assert itemsize >= 0 if count == -1: count = length / itemsize if length % itemsize != 0: @@ -71,20 +74,23 @@ raise OperationError(space.w_ValueError, space.wrap( "string is smaller than requested size")) - a = W_NDimArray(count, [count], dtype=dtype) - fromstring_loop(a, count, dtype, itemsize, s) + a = W_NDimArray([count], dtype=dtype) + fromstring_loop(a, dtype, itemsize, s) return space.wrap(a) -fromstring_driver = jit.JitDriver(greens=[], reds=['count', 'i', 'itemsize', - 'dtype', 's', 'a']) +fromstring_driver = jit.JitDriver(greens=[], reds=['i', 'itemsize', + 'dtype', 'ai', 's', 'a']) -def fromstring_loop(a, count, dtype, itemsize, s): +def fromstring_loop(a, dtype, itemsize, s): i = 0 - while i < count: - fromstring_driver.jit_merge_point(a=a, count=count, dtype=dtype, - itemsize=itemsize, s=s, i=i) + ai = a.create_iter() + while not 
ai.done(): + fromstring_driver.jit_merge_point(a=a, dtype=dtype, + itemsize=itemsize, s=s, i=i, + ai=ai) val = dtype.itemtype.runpack_str(s[i*itemsize:i*itemsize + itemsize]) - a.dtype.setitem(a.storage, i, val) + a.dtype.setitem(a, ai.offset, val) + ai = ai.next(1) i += 1 @unwrap_spec(s=str, count=int, sep=str) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -156,7 +156,7 @@ shape = obj.shape[:dim] + [1] + obj.shape[dim + 1:] else: shape = obj.shape[:dim] + obj.shape[dim + 1:] - result = W_NDimArray(support.product(shape), shape, dtype) + result = W_NDimArray(shape, dtype) arr = AxisReduce(self.func, self.name, self.identity, obj.shape, dtype, result, obj, dim) loop.compute(arr) @@ -314,7 +314,7 @@ return dt if dt.num >= 5: return interp_dtype.get_dtype_cache(space).w_float64dtype - for bytes, dtype in interp_dtype.get_dtype_cache(space).dtypes_by_num_bytes: + for bytes, dtype in interp_dtype.get_dtype_cache(space).float_dtypes_by_num_bytes: if (dtype.kind == interp_dtype.FLOATINGLTR and dtype.itemtype.get_element_size() > dt.itemtype.get_element_size()): return dtype diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -4,6 +4,7 @@ ViewTransform, BroadcastTransform from pypy.tool.pairtype import extendabletype from pypy.module.micronumpy.loop import ComputationDone +from pypy.rlib import jit """ Signature specifies both the numpy expression that has been constructed and the assembler to be compiled. 
This is a very important observation - @@ -142,11 +143,10 @@ from pypy.module.micronumpy.interp_numarray import ConcreteArray concr = arr.get_concrete() assert isinstance(concr, ConcreteArray) - storage = concr.storage if self.iter_no >= len(iterlist): iterlist.append(concr.create_iter(transforms)) if self.array_no >= len(arraylist): - arraylist.append(storage) + arraylist.append(concr) def eval(self, frame, arr): iter = frame.iterators[self.iter_no] @@ -318,6 +318,20 @@ offset = frame.get_final_iter().offset arr.left.setitem(offset, self.right.eval(frame, arr.right)) +class ToStringSignature(Call1): + def __init__(self, dtype, child): + Call1.__init__(self, None, 'tostring', dtype, child) + + @jit.unroll_safe + def eval(self, frame, arr): + from pypy.module.micronumpy.interp_numarray import ToStringArray + + assert isinstance(arr, ToStringArray) + arr.res.setitem(0, self.child.eval(frame, arr.values).convert_to( + self.dtype)) + for i in range(arr.item_size): + arr.s.append(arr.res_casted[i]) + class BroadcastLeft(Call2): def _invent_numbering(self, cache, allnumbers): self.left._invent_numbering(new_cache(), allnumbers) diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -46,22 +46,31 @@ rbackstrides = [0] * (len(res_shape) - len(orig_shape)) + rbackstrides return rstrides, rbackstrides -def find_shape_and_elems(space, w_iterable): +def is_single_elem(space, w_elem, is_rec_type): + if (is_rec_type and space.isinstance_w(w_elem, space.w_tuple)): + return True + if space.issequence_w(w_elem): + return False + return True + +def find_shape_and_elems(space, w_iterable, dtype): shape = [space.len_w(w_iterable)] batch = space.listview(w_iterable) + is_rec_type = dtype is not None and dtype.is_record_type() while True: new_batch = [] if not batch: return shape, [] - if not space.issequence_w(batch[0]): - for elem in batch: - if space.issequence_w(elem): + if 
is_single_elem(space, batch[0], is_rec_type): + for w_elem in batch: + if not is_single_elem(space, w_elem, is_rec_type): raise OperationError(space.w_ValueError, space.wrap( "setting an array element with a sequence")) return shape, batch size = space.len_w(batch[0]) for w_elem in batch: - if not space.issequence_w(w_elem) or space.len_w(w_elem) != size: + if (is_single_elem(space, w_elem, is_rec_type) or + space.len_w(w_elem) != size): raise OperationError(space.w_ValueError, space.wrap( "setting an array element with a sequence")) new_batch += space.listview(w_elem) diff --git a/pypy/module/micronumpy/test/test_base.py b/pypy/module/micronumpy/test/test_base.py --- a/pypy/module/micronumpy/test/test_base.py +++ b/pypy/module/micronumpy/test/test_base.py @@ -4,6 +4,8 @@ from pypy.module.micronumpy.interp_ufuncs import (find_binop_result_dtype, find_unaryop_result_dtype) from pypy.module.micronumpy.interp_boxes import W_Float64Box +from pypy.module.micronumpy.interp_dtype import nonnative_byteorder_prefix,\ + byteorder_prefix from pypy.conftest import option import sys @@ -15,14 +17,16 @@ sys.modules['numpypy'] = numpy sys.modules['_numpypy'] = numpy cls.space = gettestobjspace(usemodules=['micronumpy']) + cls.w_non_native_prefix = cls.space.wrap(nonnative_byteorder_prefix) + cls.w_native_prefix = cls.space.wrap(byteorder_prefix) class TestSignature(object): def test_binop_signature(self, space): float64_dtype = get_dtype_cache(space).w_float64dtype bool_dtype = get_dtype_cache(space).w_booldtype - ar = W_NDimArray(10, [10], dtype=float64_dtype) - ar2 = W_NDimArray(10, [10], dtype=float64_dtype) + ar = W_NDimArray([10], dtype=float64_dtype) + ar2 = W_NDimArray([10], dtype=float64_dtype) v1 = ar.descr_add(space, ar) v2 = ar.descr_add(space, Scalar(float64_dtype, W_Float64Box(2.0))) sig1 = v1.find_sig() @@ -40,7 +44,7 @@ v4 = ar.descr_add(space, ar) assert v1.find_sig() is v4.find_sig() - bool_ar = W_NDimArray(10, [10], dtype=bool_dtype) + bool_ar = 
W_NDimArray([10], dtype=bool_dtype) v5 = ar.descr_add(space, bool_ar) assert v5.find_sig() is not v1.find_sig() assert v5.find_sig() is not v2.find_sig() @@ -57,7 +61,7 @@ def test_slice_signature(self, space): float64_dtype = get_dtype_cache(space).w_float64dtype - ar = W_NDimArray(10, [10], dtype=float64_dtype) + ar = W_NDimArray([10], dtype=float64_dtype) v1 = ar.descr_getitem(space, space.wrap(slice(1, 3, 1))) v2 = ar.descr_getitem(space, space.wrap(slice(4, 6, 1))) assert v1.find_sig() is v2.find_sig() diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -1,5 +1,7 @@ +import py +from pypy.conftest import option from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest - +from pypy.interpreter.gateway import interp2app class AppTestDtypes(BaseNumpyAppTest): def test_dtype(self): @@ -12,7 +14,10 @@ assert dtype(d) is d assert dtype(None) is dtype(float) assert dtype('int8').name == 'int8' + assert dtype(int).fields is None + assert dtype(int).names is None raises(TypeError, dtype, 1042) + raises(KeyError, 'dtype(int)["asdasd"]') def test_dtype_eq(self): from _numpypy import dtype @@ -53,13 +58,13 @@ assert a[i] is True_ def test_copy_array_with_dtype(self): - from _numpypy import array, False_, True_, int64 + from _numpypy import array, False_, longlong a = array([0, 1, 2, 3], dtype=long) # int on 64-bit, long in 32-bit - assert isinstance(a[0], int64) + assert isinstance(a[0], longlong) b = a.copy() - assert isinstance(b[0], int64) + assert isinstance(b[0], longlong) a = array([0, 1, 2, 3], dtype=bool) assert a[0] is False_ @@ -81,17 +86,17 @@ assert a[i] is True_ def test_zeros_long(self): - from _numpypy import zeros, int64 + from _numpypy import zeros, longlong a = zeros(10, dtype=long) for i in range(10): - assert isinstance(a[i], int64) + assert isinstance(a[i], longlong) assert a[1] == 0 def 
test_ones_long(self): - from _numpypy import ones, int64 + from _numpypy import ones, longlong a = ones(10, dtype=long) for i in range(10): - assert isinstance(a[i], int64) + assert isinstance(a[i], longlong) assert a[1] == 1 def test_overflow(self): @@ -181,17 +186,18 @@ assert dtype("float") is dtype(float) -class AppTestTypes(BaseNumpyAppTest): +class AppTestTypes(BaseNumpyAppTest): def test_abstract_types(self): import _numpypy as numpy raises(TypeError, numpy.generic, 0) raises(TypeError, numpy.number, 0) raises(TypeError, numpy.integer, 0) exc = raises(TypeError, numpy.signedinteger, 0) - assert str(exc.value) == "cannot create 'signedinteger' instances" + assert 'cannot create' in str(exc.value) + assert 'signedinteger' in str(exc.value) exc = raises(TypeError, numpy.unsignedinteger, 0) - assert str(exc.value) == "cannot create 'unsignedinteger' instances" - + assert 'cannot create' in str(exc.value) + assert 'unsignedinteger' in str(exc.value) raises(TypeError, numpy.floating, 0) raises(TypeError, numpy.inexact, 0) @@ -296,6 +302,7 @@ else: raises(OverflowError, numpy.int32, 2147483648) raises(OverflowError, numpy.int32, '2147483648') + assert numpy.dtype('int32') is numpy.dtype(numpy.int32) def test_uint32(self): import sys @@ -327,15 +334,11 @@ assert numpy.dtype(numpy.int64).type is numpy.int64 assert numpy.int64(3) == 3 - if sys.maxint >= 2 ** 63 - 1: - assert numpy.int64(9223372036854775807) == 9223372036854775807 - assert numpy.int64('9223372036854775807') == 9223372036854775807 - else: - raises(OverflowError, numpy.int64, 9223372036854775807) - raises(OverflowError, numpy.int64, '9223372036854775807') + assert numpy.int64(9223372036854775807) == 9223372036854775807 + assert numpy.int64(9223372036854775807) == 9223372036854775807 raises(OverflowError, numpy.int64, 9223372036854775808) - raises(OverflowError, numpy.int64, '9223372036854775808') + raises(OverflowError, numpy.int64, 9223372036854775808L) def test_uint64(self): import sys @@ -404,10 
+407,29 @@ assert issubclass(int64, int) assert int_ is int64 + def test_various_types(self): + import _numpypy as numpy + import sys + + assert numpy.int16 is numpy.short + assert numpy.int8 is numpy.byte + assert numpy.bool_ is numpy.bool8 + if sys.maxint == (1 << 63) - 1: + assert numpy.intp is numpy.int64 + else: + assert numpy.intp is numpy.int32 + + def test_mro(self): + import _numpypy as numpy + + assert numpy.int16.__mro__ == (numpy.int16, numpy.signedinteger, + numpy.integer, numpy.number, + numpy.generic, object) + assert numpy.bool_.__mro__ == (numpy.bool_, numpy.generic, object) + def test_operators(self): from operator import truediv from _numpypy import float64, int_, True_, False_ - assert 5 / int_(2) == int_(2) assert truediv(int_(3), int_(2)) == float64(1.5) assert truediv(3, int_(2)) == float64(1.5) @@ -427,9 +449,115 @@ assert int_(3) ^ int_(5) == int_(6) assert True_ ^ False_ is True_ assert 5 ^ int_(3) == int_(6) - assert +int_(3) == int_(3) assert ~int_(3) == int_(-4) - raises(TypeError, lambda: float64(3) & 1) + def test_alternate_constructs(self): + from _numpypy import dtype + nnp = self.non_native_prefix + byteorder = self.native_prefix + assert dtype('i8') == dtype(byteorder + 'i8') == dtype('=i8') # XXX should be equal == dtype(long) + assert dtype(nnp + 'i8') != dtype('i8') + assert dtype(nnp + 'i8').byteorder == nnp + assert dtype('=i8').byteorder == '=' + assert dtype(byteorder + 'i8').byteorder == '=' + + def test_alignment(self): + from _numpypy import dtype + assert dtype('i4').alignment == 4 + + def test_typeinfo(self): + from _numpypy import typeinfo, void, number, int64, bool_ + assert typeinfo['Number'] == number + assert typeinfo['LONGLONG'] == ('q', 9, 64, 8, 9223372036854775807L, -9223372036854775808L, int64) + assert typeinfo['VOID'] == ('V', 20, 0, 1, void) + assert typeinfo['BOOL'] == ('?', 0, 8, 1, 1, 0, bool_) + +class AppTestStrUnicodeDtypes(BaseNumpyAppTest): + def test_str_unicode(self): + from _numpypy import str_, 
unicode_, character, flexible, generic + + assert str_.mro() == [str_, str, basestring, character, flexible, generic, object] + assert unicode_.mro() == [unicode_, unicode, basestring, character, flexible, generic, object] + + def test_str_dtype(self): + from _numpypy import dtype, str_ + + raises(TypeError, "dtype('Sx')") + d = dtype('S8') + assert d.itemsize == 8 + assert dtype(str) == dtype('S') + assert d.kind == 'S' + assert d.type is str_ + assert d.name == "string64" + assert d.num == 18 + + def test_unicode_dtype(self): + from _numpypy import dtype, unicode_ + + raises(TypeError, "dtype('Ux')") + d = dtype('U8') + assert d.itemsize == 8 * 4 + assert dtype(unicode) == dtype('U') + assert d.kind == 'U' + assert d.type is unicode_ + assert d.name == "unicode256" + assert d.num == 19 + + def test_string_boxes(self): + from _numpypy import str_ + assert isinstance(str_(3), str_) + + def test_unicode_boxes(self): + from _numpypy import unicode_ + assert isinstance(unicode_(3), unicode) + +class AppTestRecordDtypes(BaseNumpyAppTest): + def test_create(self): + from _numpypy import dtype, void + + raises(ValueError, "dtype([('x', int), ('x', float)])") + d = dtype([("x", "int32"), ("y", "int32"), ("z", "int32"), ("value", float)]) + assert d.fields['x'] == (dtype('int32'), 0) + assert d.fields['value'] == (dtype(float), 12) + assert d['x'] == dtype('int32') + assert d.name == "void160" + assert d.num == 20 + assert d.itemsize == 20 + assert d.kind == 'V' + assert d.type is void + assert d.char == 'V' + assert d.names == ("x", "y", "z", "value") + raises(KeyError, 'd["xyz"]') + raises(KeyError, 'd.fields["xyz"]') + + def test_create_from_dict(self): + skip("not yet") + from _numpypy import dtype + d = dtype({'names': ['a', 'b', 'c'], + }) + +class AppTestNotDirect(BaseNumpyAppTest): + def setup_class(cls): + BaseNumpyAppTest.setup_class.im_func(cls) + def check_non_native(w_obj, w_obj2): + assert w_obj.storage[0] == w_obj2.storage[1] + assert w_obj.storage[1] == 
w_obj2.storage[0] + if w_obj.storage[0] == '\x00': + assert w_obj2.storage[1] == '\x00' + assert w_obj2.storage[0] == '\x01' + else: + assert w_obj2.storage[1] == '\x01' + assert w_obj2.storage[0] == '\x00' + cls.w_check_non_native = cls.space.wrap(interp2app(check_non_native)) + if option.runappdirect: + py.test.skip("not a direct test") + + def test_non_native(self): + from _numpypy import array + a = array([1, 2, 3], dtype=self.non_native_prefix + 'i2') + assert a[0] == 1 + assert (a + a)[1] == 4 + self.check_non_native(a, array([1, 2, 3], 'i2')) + diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -5,15 +5,23 @@ from pypy.interpreter.error import OperationError from pypy.module.micronumpy import signature from pypy.module.micronumpy.appbridge import get_appbridge_cache -from pypy.module.micronumpy.interp_iter import Chunk +from pypy.module.micronumpy.interp_iter import Chunk, Chunks from pypy.module.micronumpy.interp_numarray import W_NDimArray, shape_agreement from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest class MockDtype(object): - def malloc(self, size): - return None + class itemtype(object): + @staticmethod + def malloc(size): + return None + def get_size(self): + return 1 + + +def create_slice(a, chunks): + return Chunks(chunks).apply(a) class TestNumArrayDirect(object): def newslice(self, *args): @@ -29,116 +37,116 @@ return self.space.newtuple(args_w) def test_strides_f(self): - a = W_NDimArray(100, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray([10, 5, 3], MockDtype(), 'F') assert a.strides == [1, 10, 50] assert a.backstrides == [9, 40, 100] def test_strides_c(self): - a = W_NDimArray(100, [10, 5, 3], MockDtype(), 'C') + a = W_NDimArray([10, 5, 3], MockDtype(), 'C') assert a.strides == [15, 3, 1] assert a.backstrides == [135, 12, 2] def test_create_slice_f(self): - a = 
W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') - s = a.create_slice([Chunk(3, 0, 0, 1)]) + a = W_NDimArray([10, 5, 3], MockDtype(), 'F') + s = create_slice(a, [Chunk(3, 0, 0, 1)]) assert s.start == 3 assert s.strides == [10, 50] assert s.backstrides == [40, 100] - s = a.create_slice([Chunk(1, 9, 2, 4)]) + s = create_slice(a, [Chunk(1, 9, 2, 4)]) assert s.start == 1 assert s.strides == [2, 10, 50] assert s.backstrides == [6, 40, 100] - s = a.create_slice([Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), Chunk(1, 0, 0, 1)]) + s = create_slice(a, [Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), Chunk(1, 0, 0, 1)]) assert s.shape == [2, 1] assert s.strides == [3, 10] assert s.backstrides == [3, 0] - s = a.create_slice([Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) + s = create_slice(a, [Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) assert s.start == 20 assert s.shape == [10, 3] def test_create_slice_c(self): - a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') - s = a.create_slice([Chunk(3, 0, 0, 1)]) + a = W_NDimArray([10, 5, 3], MockDtype(), 'C') + s = create_slice(a, [Chunk(3, 0, 0, 1)]) assert s.start == 45 assert s.strides == [3, 1] assert s.backstrides == [12, 2] - s = a.create_slice([Chunk(1, 9, 2, 4)]) + s = create_slice(a, [Chunk(1, 9, 2, 4)]) assert s.start == 15 assert s.strides == [30, 3, 1] assert s.backstrides == [90, 12, 2] - s = a.create_slice([Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), + s = create_slice(a, [Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), Chunk(1, 0, 0, 1)]) assert s.start == 19 assert s.shape == [2, 1] assert s.strides == [45, 3] assert s.backstrides == [45, 0] - s = a.create_slice([Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) + s = create_slice(a, [Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) assert s.start == 6 assert s.shape == [10, 3] def test_slice_of_slice_f(self): - a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') - s = a.create_slice([Chunk(5, 0, 0, 1)]) + a = W_NDimArray([10, 5, 3], MockDtype(), 'F') + s = create_slice(a, [Chunk(5, 0, 0, 1)]) assert 
s.start == 5 - s2 = s.create_slice([Chunk(3, 0, 0, 1)]) + s2 = create_slice(s, [Chunk(3, 0, 0, 1)]) assert s2.shape == [3] assert s2.strides == [50] assert s2.parent is a assert s2.backstrides == [100] assert s2.start == 35 - s = a.create_slice([Chunk(1, 5, 3, 2)]) - s2 = s.create_slice([Chunk(0, 2, 1, 2), Chunk(2, 0, 0, 1)]) + s = create_slice(a, [Chunk(1, 5, 3, 2)]) + s2 = create_slice(s, [Chunk(0, 2, 1, 2), Chunk(2, 0, 0, 1)]) assert s2.shape == [2, 3] assert s2.strides == [3, 50] assert s2.backstrides == [3, 100] assert s2.start == 1 * 15 + 2 * 3 def test_slice_of_slice_c(self): - a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') - s = a.create_slice([Chunk(5, 0, 0, 1)]) + a = W_NDimArray([10, 5, 3], MockDtype(), order='C') + s = create_slice(a, [Chunk(5, 0, 0, 1)]) assert s.start == 15 * 5 - s2 = s.create_slice([Chunk(3, 0, 0, 1)]) + s2 = create_slice(s, [Chunk(3, 0, 0, 1)]) assert s2.shape == [3] assert s2.strides == [1] assert s2.parent is a assert s2.backstrides == [2] assert s2.start == 5 * 15 + 3 * 3 - s = a.create_slice([Chunk(1, 5, 3, 2)]) - s2 = s.create_slice([Chunk(0, 2, 1, 2), Chunk(2, 0, 0, 1)]) + s = create_slice(a, [Chunk(1, 5, 3, 2)]) + s2 = create_slice(s, [Chunk(0, 2, 1, 2), Chunk(2, 0, 0, 1)]) assert s2.shape == [2, 3] assert s2.strides == [45, 1] assert s2.backstrides == [45, 2] assert s2.start == 1 * 15 + 2 * 3 def test_negative_step_f(self): - a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') - s = a.create_slice([Chunk(9, -1, -2, 5)]) + a = W_NDimArray([10, 5, 3], MockDtype(), 'F') + s = create_slice(a, [Chunk(9, -1, -2, 5)]) assert s.start == 9 assert s.strides == [-2, 10, 50] assert s.backstrides == [-8, 40, 100] def test_negative_step_c(self): - a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') - s = a.create_slice([Chunk(9, -1, -2, 5)]) + a = W_NDimArray([10, 5, 3], MockDtype(), order='C') + s = create_slice(a, [Chunk(9, -1, -2, 5)]) assert s.start == 135 assert s.strides == [-30, 3, 1] assert 
s.backstrides == [-120, 12, 2] def test_index_of_single_item_f(self): - a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray([10, 5, 3], MockDtype(), 'F') r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) assert r == 1 + 2 * 10 + 2 * 50 - s = a.create_slice([Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) + s = create_slice(a, [Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) r = s._index_of_single_item(self.space, self.newtuple(1, 0)) assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 0)) r = s._index_of_single_item(self.space, self.newtuple(1, 1)) assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 1)) def test_index_of_single_item_c(self): - a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') + a = W_NDimArray([10, 5, 3], MockDtype(), 'C') r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) assert r == 1 * 3 * 5 + 2 * 3 + 2 - s = a.create_slice([Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) + s = create_slice(a, [Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) r = s._index_of_single_item(self.space, self.newtuple(1, 0)) assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 0)) r = s._index_of_single_item(self.space, self.newtuple(1, 1)) @@ -203,6 +211,18 @@ assert a.shape == (3,) assert a.dtype is dtype(int) + def test_ndmin(self): + from _numpypy import array + + arr = array([[[1]]], ndmin=1) + assert arr.shape == (1, 1, 1) + + def test_noop_ndmin(self): + from _numpypy import array + + arr = array([1], ndmin=3) + assert arr.shape == (1, 1, 1) + def test_type(self): from _numpypy import array ar = array(range(5)) @@ -416,6 +436,7 @@ from numpypy.core.numeric import newaxis a = array(range(5)) b = a[newaxis] + assert b.shape == (1, 5) assert (b[0,1:] == a[1:]).all() def test_slice_then_newaxis(self): @@ -1140,7 +1161,7 @@ assert array([True, False]).dtype is dtype(bool) assert array([True, 1]).dtype is dtype(int) assert array([1, 2, 3]).dtype is dtype(int) - assert array([1L, 2, 3]).dtype 
is dtype(long) + #assert array([1L, 2, 3]).dtype is dtype(long) assert array([1.2, True]).dtype is dtype(float) assert array([1.2, 5]).dtype is dtype(float) assert array([]).dtype is dtype(float) @@ -1647,6 +1668,7 @@ a = arange(12).reshape(3,4) b = a.T.flat b[6::2] = [-1, -2] + print a == [[0, 1, -1, 3], [4, 5, 6, -1], [8, 9, -2, 11]] assert (a == [[0, 1, -1, 3], [4, 5, 6, -1], [8, 9, -2, 11]]).all() b[0:2] = [[[100]]] assert(a[0,0] == 100) @@ -1921,6 +1943,12 @@ #5 bytes is larger than 3 bytes raises(ValueError, fromstring, "\x01\x02\x03", count=5, dtype=uint8) + def test_tostring(self): + from _numpypy import array + assert array([1, 2, 3], 'i2').tostring() == '\x01\x00\x02\x00\x03\x00' + assert array([1, 2, 3], 'i2')[::2].tostring() == '\x01\x00\x03\x00' + assert array([1, 2, 3], 'i2')[::2].tostring() == '\x00\x01\x00\x03' class AppTestRanges(BaseNumpyAppTest): def test_arange(self): @@ -1966,3 +1994,57 @@ cache = get_appbridge_cache(cls.space) cache.w_array_repr = cls.old_array_repr cache.w_array_str = cls.old_array_str + +class AppTestRecordDtype(BaseNumpyAppTest): + def test_zeros(self): + from _numpypy import zeros + a = zeros(2, dtype=[('x', int), ('y', float)]) + raises(IndexError, 'a[0]["xyz"]') + assert a[0]['x'] == 0 + assert a[0]['y'] == 0 + raises(ValueError, "a[0] = (1, 2, 3)") + a[0]['x'] = 13 + assert a[0]['x'] == 13 + a[1] = (1, 2) + assert a[1]['y'] == 2 + b = zeros(2, dtype=[('x', int), ('y', float)]) + b[1] = a[1] + assert a[1]['y'] == 2 + + def test_views(self): + from _numpypy import array + a = array([(1, 2), (3, 4)], dtype=[('x', int), ('y', float)]) + raises(ValueError, 'array([1])["x"]') + raises(ValueError, 'a["z"]') + assert a['x'][1] == 3 + assert a['y'][1] == 4 + a['x'][0] = 15 + assert a['x'][0] == 15 + b = a['x'] + a['y'] + assert (b == [15+2, 3+4]).all() + assert b.dtype == float + + def test_assign_tuple(self): + from _numpypy import zeros + a = zeros((2, 3), dtype=[('x', int), ('y', float)]) + a[1, 2] = (1, 2) + assert a['x'][1, 
2] == 1 + assert a['y'][1, 2] == 2 + + def test_creation_and_repr(self): + from _numpypy import array + a = array([(1, 2), (3, 4)], dtype=[('x', int), ('y', float)]) + assert repr(a[0]) == '(1, 2.0)' + + def test_nested_dtype(self): + from _numpypy import zeros + a = [('x', int), ('y', float)] + b = [('x', int), ('y', a)] + arr = zeros(3, dtype=b) + arr[1]['x'] = 15 + assert arr[1]['x'] == 15 + arr[1]['y']['y'] = 3.5 + assert arr[1]['y']['y'] == 3.5 + assert arr[1]['y']['x'] == 0.0 + assert arr[1]['x'] == 15 + diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -197,7 +197,6 @@ def test_signbit(self): from _numpypy import signbit, copysign - import struct assert (signbit([0, 0.0, 1, 1.0, float('inf'), float('nan')]) == [False, False, False, False, False, False]).all() diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1,15 +1,20 @@ import functools import math +import struct from pypy.interpreter.error import OperationError from pypy.module.micronumpy import interp_boxes from pypy.objspace.std.floatobject import float2string from pypy.rlib import rfloat, libffi, clibffi -from pypy.rlib.objectmodel import specialize -from pypy.rlib.rarithmetic import LONG_BIT, widen +from pypy.rlib.objectmodel import specialize, we_are_translated +from pypy.rlib.rarithmetic import widen, byteswap from pypy.rpython.lltypesystem import lltype, rffi from pypy.rlib.rstruct.runpack import runpack +from pypy.tool.sourcetools import func_with_new_name +from pypy.rlib import jit +VOID_STORAGE = lltype.Array(lltype.Char, hints={'nolength': True, + 'render_as_void': True}) degToRad = math.pi / 180.0 log2 = math.log(2) @@ -59,9 +64,20 @@ return dispatcher class BaseType(object): + _attrs_ = () + def _unimplemented_ufunc(self, *args): raise 
NotImplementedError + def malloc(self, size): + # XXX find out why test_zjit explodes with tracking of allocations + return lltype.malloc(VOID_STORAGE, size, + zero=True, flavor="raw", + track_allocation=False, add_memory_pressure=True) + + def __repr__(self): + return self.__class__.__name__ + class Primitive(object): _mixin_ = True @@ -76,7 +92,7 @@ assert isinstance(box, self.BoxType) return box.value - def coerce(self, space, w_item): + def coerce(self, space, dtype, w_item): if isinstance(w_item, self.BoxType): return w_item return self.coerce_subtype(space, space.gettypefor(self.BoxType), w_item) @@ -97,32 +113,41 @@ def default_fromstring(self, space): raise NotImplementedError - def read(self, storage, width, i, offset): - return self.box(libffi.array_getitem(clibffi.cast_type_to_ffitype(self.T), - width, storage, i, offset - )) + def _read(self, storage, width, i, offset): + if we_are_translated(): + return libffi.array_getitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset) + else: + return libffi.array_getitem_T(self.T, width, storage, i, offset) - def read_bool(self, storage, width, i, offset): - return bool(self.for_computation( - libffi.array_getitem(clibffi.cast_type_to_ffitype(self.T), - width, storage, i, offset))) + def read(self, arr, width, i, offset, dtype=None): + return self.box(self._read(arr.storage, width, i, offset)) - def store(self, storage, width, i, offset, box): - value = self.unbox(box) - libffi.array_setitem(clibffi.cast_type_to_ffitype(self.T), - width, storage, i, offset, value - ) + def read_bool(self, arr, width, i, offset): + return bool(self.for_computation(self._read(arr.storage, width, i, offset))) + + def _write(self, storage, width, i, offset, value): + if we_are_translated(): + libffi.array_setitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset, value) + else: + libffi.array_setitem_T(self.T, width, storage, i, offset, value) + + + def store(self, arr, width, i, offset, box): + 
self._write(arr.storage, width, i, offset, self.unbox(box)) def fill(self, storage, width, box, start, stop, offset): value = self.unbox(box) - for i in xrange(start, stop): - libffi.array_setitem(clibffi.cast_type_to_ffitype(self.T), - width, storage, i, offset, value - ) + for i in xrange(start, stop, width): + self._write(storage, 1, i, offset, value) def runpack_str(self, s): return self.box(runpack(self.format_code, s)) + def pack_str(self, box): + return struct.pack(self.format_code, self.unbox(box)) + @simple_binary_op def add(self, v1, v2): return v1 + v2 @@ -214,8 +239,31 @@ def min(self, v1, v2): return min(v1, v2) +class NonNativePrimitive(Primitive): + _mixin_ = True + + def _read(self, storage, width, i, offset): + if we_are_translated(): + res = libffi.array_getitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset) + else: + res = libffi.array_getitem_T(self.T, width, storage, i, offset) + return byteswap(res) + + def _write(self, storage, width, i, offset, value): + value = byteswap(value) + if we_are_translated(): + libffi.array_setitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset, value) + else: + libffi.array_setitem_T(self.T, width, storage, i, offset, value) + + def pack_str(self, box): + return struct.pack(self.format_code, byteswap(self.unbox(box))) class Bool(BaseType, Primitive): + _attrs_ = () + T = lltype.Bool BoxType = interp_boxes.W_BoolBox format_code = "?" 
@@ -242,8 +290,7 @@ return space.wrap(self.unbox(w_item)) def str_format(self, box): - value = self.unbox(box) - return "True" if value else "False" + return "True" if self.unbox(box) else "False" def for_computation(self, v): return int(v) @@ -267,15 +314,18 @@ def invert(self, v): return ~v +NonNativeBool = Bool + class Integer(Primitive): _mixin_ = True + def _base_coerce(self, space, w_item): + return self.box(space.int_w(space.call_function(space.w_int, w_item))) def _coerce(self, space, w_item): - return self.box(space.int_w(space.call_function(space.w_int, w_item))) + return self._base_coerce(space, w_item) def str_format(self, box): - value = self.unbox(box) - return str(self.for_computation(value)) + return str(self.for_computation(self.unbox(box))) def for_computation(self, v): return widen(v) @@ -347,68 +397,170 @@ def invert(self, v): return ~v +class NonNativeInteger(NonNativePrimitive, Integer): + _mixin_ = True + class Int8(BaseType, Integer): + _attrs_ = () + T = rffi.SIGNEDCHAR BoxType = interp_boxes.W_Int8Box format_code = "b" +NonNativeInt8 = Int8 class UInt8(BaseType, Integer): + _attrs_ = () + T = rffi.UCHAR BoxType = interp_boxes.W_UInt8Box format_code = "B" +NonNativeUInt8 = UInt8 class Int16(BaseType, Integer): + _attrs_ = () + + T = rffi.SHORT + BoxType = interp_boxes.W_Int16Box + format_code = "h" + +class NonNativeInt16(BaseType, NonNativeInteger): + _attrs_ = () + T = rffi.SHORT BoxType = interp_boxes.W_Int16Box format_code = "h" class UInt16(BaseType, Integer): + _attrs_ = () + + T = rffi.USHORT + BoxType = interp_boxes.W_UInt16Box + format_code = "H" + +class NonNativeUInt16(BaseType, NonNativeInteger): + _attrs_ = () + T = rffi.USHORT BoxType = interp_boxes.W_UInt16Box format_code = "H" class Int32(BaseType, Integer): + _attrs_ = () + + T = rffi.INT + BoxType = interp_boxes.W_Int32Box + format_code = "i" + +class NonNativeInt32(BaseType, NonNativeInteger): + _attrs_ = () + T = rffi.INT BoxType = interp_boxes.W_Int32Box format_code = 
"i" class UInt32(BaseType, Integer): + _attrs_ = () + + T = rffi.UINT + BoxType = interp_boxes.W_UInt32Box + format_code = "I" + +class NonNativeUInt32(BaseType, NonNativeInteger): + _attrs_ = () + T = rffi.UINT BoxType = interp_boxes.W_UInt32Box format_code = "I" class Long(BaseType, Integer): + _attrs_ = () + + T = rffi.LONG + BoxType = interp_boxes.W_LongBox + format_code = "l" + +class NonNativeLong(BaseType, NonNativeInteger): + _attrs_ = () + T = rffi.LONG BoxType = interp_boxes.W_LongBox format_code = "l" class ULong(BaseType, Integer): + _attrs_ = () + T = rffi.ULONG BoxType = interp_boxes.W_ULongBox format_code = "L" +class NonNativeULong(BaseType, NonNativeInteger): + _attrs_ = () + + T = rffi.ULONG + BoxType = interp_boxes.W_ULongBox + format_code = "L" + +def _int64_coerce(self, space, w_item): + try: + return self._base_coerce(space, w_item) + except OperationError, e: + if not e.match(space, space.w_OverflowError): + raise + bigint = space.bigint_w(w_item) + try: + value = bigint.tolonglong() + except OverflowError: + raise OperationError(space.w_OverflowError, space.w_None) + return self.box(value) + class Int64(BaseType, Integer): + _attrs_ = () + T = rffi.LONGLONG BoxType = interp_boxes.W_Int64Box format_code = "q" + _coerce = func_with_new_name(_int64_coerce, '_coerce') + +class NonNativeInt64(BaseType, NonNativeInteger): + _attrs_ = () + + T = rffi.LONGLONG + BoxType = interp_boxes.W_Int64Box + format_code = "q" + + _coerce = func_with_new_name(_int64_coerce, '_coerce') + +def _uint64_coerce(self, space, w_item): + try: + return self._base_coerce(space, w_item) + except OperationError, e: + if not e.match(space, space.w_OverflowError): + raise + bigint = space.bigint_w(w_item) + try: + value = bigint.toulonglong() + except OverflowError: + raise OperationError(space.w_OverflowError, space.w_None) + return self.box(value) + class UInt64(BaseType, Integer): + _attrs_ = () + T = rffi.ULONGLONG BoxType = interp_boxes.W_UInt64Box format_code = "Q" - 
def _coerce(self, space, w_item): - try: - return Integer._coerce(self, space, w_item) - except OperationError, e: - if not e.match(space, space.w_OverflowError): - raise - bigint = space.bigint_w(w_item) - try: - value = bigint.toulonglong() - except OverflowError: - raise OperationError(space.w_OverflowError, space.w_None) - return self.box(value) + _coerce = func_with_new_name(_uint64_coerce, '_coerce') + +class NonNativeUInt64(BaseType, NonNativeInteger): + _attrs_ = () + + T = rffi.ULONGLONG + BoxType = interp_boxes.W_UInt64Box + format_code = "Q" + + _coerce = func_with_new_name(_uint64_coerce, '_coerce') class Float(Primitive): _mixin_ = True @@ -417,8 +569,8 @@ return self.box(space.float_w(space.call_function(space.w_float, w_item))) def str_format(self, box): - value = self.unbox(box) - return float2string(self.for_computation(value), "g", rfloat.DTSF_STR_PRECISION) + return float2string(self.for_computation(self.unbox(box)), "g", + rfloat.DTSF_STR_PRECISION) def for_computation(self, v): return float(v) @@ -702,13 +854,158 @@ return -rfloat.INFINITY return rfloat.NAN +class NonNativeFloat(NonNativePrimitive, Float): + _mixin_ = True + + def _read(self, storage, width, i, offset): + if we_are_translated(): + res = libffi.array_getitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset) + else: + res = libffi.array_getitem_T(self.T, width, storage, i, offset) + #return byteswap(res) + return res + + def _write(self, storage, width, i, offset, value): + #value = byteswap(value) XXX + if we_are_translated(): + libffi.array_setitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset, value) + else: + libffi.array_setitem_T(self.T, width, storage, i, offset, value) + + def pack_str(self, box): + # XXX byteswap + return struct.pack(self.format_code, self.unbox(box)) + class Float32(BaseType, Float): + _attrs_ = () + T = rffi.FLOAT BoxType = interp_boxes.W_Float32Box format_code = "f" +class NonNativeFloat32(BaseType, 
NonNativeFloat): + _attrs_ = () + + T = rffi.FLOAT + BoxType = interp_boxes.W_Float32Box + format_code = "f" + class Float64(BaseType, Float): + _attrs_ = () + T = rffi.DOUBLE BoxType = interp_boxes.W_Float64Box format_code = "d" + +class NonNativeFloat64(BaseType, NonNativeFloat): + _attrs_ = () + + T = rffi.DOUBLE + BoxType = interp_boxes.W_Float64Box + format_code = "d" + +class BaseStringType(object): + _mixin_ = True + + def __init__(self, size=0): + self.size = size + + def get_element_size(self): + return self.size * rffi.sizeof(self.T) + +class StringType(BaseType, BaseStringType): + T = lltype.Char + +class VoidType(BaseType, BaseStringType): + T = lltype.Char + +NonNativeVoidType = VoidType +NonNativeStringType = StringType + +class UnicodeType(BaseType, BaseStringType): + T = lltype.UniChar + +NonNativeUnicodeType = UnicodeType + +class RecordType(BaseType): + + T = lltype.Char + + def __init__(self, offsets_and_fields, size): + self.offsets_and_fields = offsets_and_fields + self.size = size + + def get_element_size(self): + return self.size + + def read(self, arr, width, i, offset, dtype=None): + if dtype is None: + dtype = arr.dtype + return interp_boxes.W_VoidBox(arr, i + offset, dtype) + + @jit.unroll_safe + def coerce(self, space, dtype, w_item): + from pypy.module.micronumpy.interp_numarray import W_NDimArray + + if isinstance(w_item, interp_boxes.W_VoidBox): + return w_item + # we treat every sequence as sequence, no special support + # for arrays + if not space.issequence_w(w_item): + raise OperationError(space.w_TypeError, space.wrap( + "expected sequence")) + if len(self.offsets_and_fields) != space.int_w(space.len(w_item)): + raise OperationError(space.w_ValueError, space.wrap( + "wrong length")) + items_w = space.fixedview(w_item) + # XXX optimize it out one day, but for now we just allocate an + # array + arr = W_NDimArray([1], dtype) + for i in range(len(items_w)): + subdtype = dtype.fields[dtype.fieldnames[i]][1] + ofs, itemtype = 
self.offsets_and_fields[i] + w_item = items_w[i] + w_box = itemtype.coerce(space, subdtype, w_item) + itemtype.store(arr, 1, 0, ofs, w_box) + return interp_boxes.W_VoidBox(arr, 0, arr.dtype) + + @jit.unroll_safe + def store(self, arr, _, i, ofs, box): + assert isinstance(box, interp_boxes.W_VoidBox) + for k in range(self.get_element_size()): + arr.storage[k + i] = box.arr.storage[k + box.ofs] + + @jit.unroll_safe + def str_format(self, box): + assert isinstance(box, interp_boxes.W_VoidBox) + pieces = ["("] + first = True + for ofs, tp in self.offsets_and_fields: + if first: + first = False + else: + pieces.append(", ") + pieces.append(tp.str_format(tp.read(box.arr, 1, box.ofs, ofs))) + pieces.append(")") + return "".join(pieces) + +for tp in [Int32, Int64]: + if tp.T == lltype.Signed: + IntP = tp + break +for tp in [UInt32, UInt64]: + if tp.T == lltype.Unsigned: + UIntP = tp + break +del tp + +def _setup(): + # compute alignment + for tp in globals().values(): + if isinstance(tp, type) and hasattr(tp, 'T'): + tp.alignment = clibffi.cast_type_to_ffitype(tp.T).c_alignment +_setup() +del _setup diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -14,10 +14,10 @@ def setup_module(mod): if os.name != 'nt': - mod.space = gettestobjspace(usemodules=['posix', 'fcntl']) + mod.space = gettestobjspace(usemodules=['posix', 'fcntl', 'struct']) else: # On windows, os.popen uses the subprocess module - mod.space = gettestobjspace(usemodules=['posix', '_rawffi', 'thread']) + mod.space = gettestobjspace(usemodules=['posix', '_rawffi', 'thread', 'struct']) mod.path = udir.join('posixtestfile.txt') mod.path.write("this is a test") mod.path2 = udir.join('test_posix2-') diff --git a/pypy/module/rctime/test/test_rctime.py b/pypy/module/rctime/test/test_rctime.py --- a/pypy/module/rctime/test/test_rctime.py +++ b/pypy/module/rctime/test/test_rctime.py @@ -3,7 
+3,7 @@ class AppTestRCTime: def setup_class(cls): - space = gettestobjspace(usemodules=('rctime',)) + space = gettestobjspace(usemodules=('rctime', 'struct')) cls.space = space def test_attributes(self): diff --git a/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py b/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py --- a/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py +++ b/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py @@ -34,7 +34,7 @@ assert repr(a) == "array([], dtype=float64)" a = zeros(1001) assert repr(a) == "array([ 0., 0., 0., ..., 0., 0., 0.])" - a = array(range(5), long) + a = array(range(5), int) if a.dtype.itemsize == int_size: assert repr(a) == "array([0, 1, 2, 3, 4])" else: @@ -142,3 +142,39 @@ assert str(b) == "[7 8 9]" b = a[2:1, ] assert str(b) == "[]" + + def test_equal(self): + from _numpypy import array + from numpypy import array_equal + + a = [1, 2, 3] + b = [1, 2, 3] + + assert array_equal(a, b) + assert array_equal(a, array(b)) + assert array_equal(array(a), b) + assert array_equal(array(a), array(b)) + + def test_not_equal(self): + from _numpypy import array + from numpypy import array_equal + + a = [1, 2, 3] + b = [1, 2, 4] + + assert not array_equal(a, b) + assert not array_equal(a, array(b)) + assert not array_equal(array(a), b) + assert not array_equal(array(a), array(b)) + + def test_mismatched_shape(self): + from _numpypy import array + from numpypy import array_equal + + a = [1, 2, 3] + b = [[1, 2, 3], [1, 2, 3]] + + assert not array_equal(a, b) + assert not array_equal(a, array(b)) + assert not array_equal(array(a), b) + assert not array_equal(array(a), array(b)) diff --git a/pypy/module/test_lib_pypy/test_binascii.py b/pypy/module/test_lib_pypy/test_binascii.py deleted file mode 100644 --- a/pypy/module/test_lib_pypy/test_binascii.py +++ /dev/null @@ -1,8 +0,0 @@ - -""" Some more binascii.py tests -""" - -class AppTestBinAscii: - def test_incorrect_padding(self): - import binascii - 
raises(binascii.Error, "'x'.decode('base64')") diff --git a/pypy/module/zipimport/test/test_undocumented.py b/pypy/module/zipimport/test/test_undocumented.py --- a/pypy/module/zipimport/test/test_undocumented.py +++ b/pypy/module/zipimport/test/test_undocumented.py @@ -19,7 +19,7 @@ class AppTestZipImport: def setup_class(cls): - space = gettestobjspace(usemodules=['zipimport', 'rctime']) + space = gettestobjspace(usemodules=['zipimport', 'rctime', 'struct']) cls.space = space cls.w_created_paths = space.wrap(created_paths) diff --git a/pypy/module/zipimport/test/test_zipimport.py b/pypy/module/zipimport/test/test_zipimport.py --- a/pypy/module/zipimport/test/test_zipimport.py +++ b/pypy/module/zipimport/test/test_zipimport.py @@ -47,9 +47,9 @@ """).compile() if cls.compression == ZIP_DEFLATED: - space = gettestobjspace(usemodules=['zipimport', 'zlib', 'rctime']) + space = gettestobjspace(usemodules=['zipimport', 'zlib', 'rctime', 'struct']) else: - space = gettestobjspace(usemodules=['zipimport', 'rctime']) + space = gettestobjspace(usemodules=['zipimport', 'rctime', 'struct']) cls.space = space tmpdir = udir.ensure('zipimport_%s' % cls.__name__, dir=1) diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -207,6 +207,11 @@ is_arguments(args) return w_some_obj() + def get_and_call_function(space, w_descr, w_obj, *args_w): + args = argument.Arguments(space, list(args_w)) + w_impl = space.get(w_descr, w_obj) + return space.call_args(w_impl, args) + def gettypefor(self, cls): return self.gettypeobject(cls.typedef) diff --git a/pypy/objspace/flow/model.py b/pypy/objspace/flow/model.py --- a/pypy/objspace/flow/model.py +++ b/pypy/objspace/flow/model.py @@ -7,8 +7,7 @@ from pypy.tool.uid import uid, Hashable from pypy.tool.descriptor import roproperty from pypy.tool.sourcetools import PY_IDENTIFIER, nice_repr_for_func -from pypy.tool.identity_dict import identity_dict 
-from pypy.rlib.rarithmetic import is_valid_int +from pypy.rlib.rarithmetic import is_valid_int, r_longlong, r_ulonglong, r_uint """ @@ -546,6 +545,8 @@ for n in cases[:len(cases)-has_default]: if is_valid_int(n): continue + if isinstance(n, (r_longlong, r_ulonglong, r_uint)): + continue if isinstance(n, (str, unicode)) and len(n) == 1: continue assert n != 'default', ( diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -9,6 +9,7 @@ from pypy.rlib.rfloat import ( formatd, DTSF_STR_PRECISION, isinf, isnan, copysign) from pypy.rlib import jit +from pypy.rlib.rarithmetic import intmask import math @@ -173,7 +174,7 @@ def hash__Complex(space, w_value): hashreal = _hash_float(space, w_value.realval) hashimg = _hash_float(space, w_value.imagval) - combined = hashreal + 1000003 * hashimg + combined = intmask(hashreal + 1000003 * hashimg) return space.newint(combined) def add__Complex_Complex(space, w_complex1, w_complex2): diff --git a/pypy/objspace/std/iterobject.py b/pypy/objspace/std/iterobject.py --- a/pypy/objspace/std/iterobject.py +++ b/pypy/objspace/std/iterobject.py @@ -22,7 +22,7 @@ index = self.index w_length = space.len(self.w_seq) w_len = space.sub(w_length, space.wrap(index)) - if space.is_true(space.lt(w_len,space.wrap(0))): + if space.is_true(space.lt(w_len, space.wrap(0))): w_len = space.wrap(0) return w_len @@ -30,21 +30,21 @@ """Sequence iterator implementation for general sequences.""" class W_FastListIterObject(W_AbstractSeqIterObject): - """Sequence iterator specialized for lists, accessing - directly their RPython-level list of wrapped objects. + """Sequence iterator specialized for lists, accessing directly their + RPython-level list of wrapped objects. """ class W_FastTupleIterObject(W_AbstractSeqIterObject): - """Sequence iterator specialized for tuples, accessing - directly their RPython-level list of wrapped objects. 
- """ - def __init__(w_self, w_seq, wrappeditems): + """Sequence iterator specialized for tuples, accessing directly + their RPython-level list of wrapped objects. + """ + def __init__(w_self, w_seq, wrappeditems): W_AbstractSeqIterObject.__init__(w_self, w_seq) w_self.tupleitems = wrappeditems class W_ReverseSeqIterObject(W_Object): from pypy.objspace.std.itertype import reverse_iter_typedef as typedef - + def __init__(w_self, space, w_seq, index=-1): w_self.w_seq = w_seq w_self.w_len = space.len(w_seq) @@ -61,15 +61,15 @@ def next__SeqIter(space, w_seqiter): if w_seqiter.w_seq is None: - raise OperationError(space.w_StopIteration, space.w_None) + raise OperationError(space.w_StopIteration, space.w_None) try: w_item = space.getitem(w_seqiter.w_seq, space.wrap(w_seqiter.index)) except OperationError, e: w_seqiter.w_seq = None if not e.match(space, space.w_IndexError): raise - raise OperationError(space.w_StopIteration, space.w_None) - w_seqiter.index += 1 + raise OperationError(space.w_StopIteration, space.w_None) + w_seqiter.index += 1 return w_item # XXX __length_hint__() @@ -89,7 +89,7 @@ except IndexError: w_seqiter.tupleitems = None w_seqiter.w_seq = None - raise OperationError(space.w_StopIteration, space.w_None) + raise OperationError(space.w_StopIteration, space.w_None) w_seqiter.index = index + 1 return w_item @@ -112,7 +112,7 @@ w_item = w_seq.getitem(index) except IndexError: w_seqiter.w_seq = None - raise OperationError(space.w_StopIteration, space.w_None) + raise OperationError(space.w_StopIteration, space.w_None) w_seqiter.index = index + 1 return w_item @@ -126,15 +126,15 @@ def next__ReverseSeqIter(space, w_seqiter): if w_seqiter.w_seq is None or w_seqiter.index < 0: - raise OperationError(space.w_StopIteration, space.w_None) + raise OperationError(space.w_StopIteration, space.w_None) try: w_item = space.getitem(w_seqiter.w_seq, space.wrap(w_seqiter.index)) - w_seqiter.index -= 1 + w_seqiter.index -= 1 except OperationError, e: w_seqiter.w_seq = 
None if not e.match(space, space.w_IndexError): raise - raise OperationError(space.w_StopIteration, space.w_None) + raise OperationError(space.w_StopIteration, space.w_None) return w_item # XXX __length_hint__() diff --git a/pypy/objspace/std/ropeobject.py b/pypy/objspace/std/ropeobject.py --- a/pypy/objspace/std/ropeobject.py +++ b/pypy/objspace/std/ropeobject.py @@ -41,11 +41,6 @@ return w_self return W_RopeObject(w_self._node) - def unicode_w(w_self, space): - # XXX should this use the default encoding? - from pypy.objspace.std.unicodetype import plain_str2unicode - return plain_str2unicode(space, w_self._node.flatten_string()) - W_RopeObject.EMPTY = W_RopeObject(rope.LiteralStringNode.EMPTY) W_RopeObject.PREBUILT = [W_RopeObject(rope.LiteralStringNode.PREBUILT[i]) for i in range(256)] diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -37,6 +37,20 @@ return None return space.wrap(compute_unique_id(space.str_w(self))) + def unicode_w(w_self, space): + # Use the default encoding. + from pypy.objspace.std.unicodetype import unicode_from_string, \ + decode_object + w_defaultencoding = space.call_function(space.sys.get( + 'getdefaultencoding')) + from pypy.objspace.std.unicodetype import _get_encoding_and_errors, \ + unicode_from_string, decode_object + encoding, errors = _get_encoding_and_errors(space, w_defaultencoding, + space.w_None) + if encoding is None and errors is None: + return space.unicode_w(unicode_from_string(space, w_self)) + return space.unicode_w(decode_object(space, w_self, encoding, errors)) + class W_StringObject(W_AbstractStringObject): from pypy.objspace.std.stringtype import str_typedef as typedef @@ -55,20 +69,6 @@ def str_w(w_self, space): return w_self._value - def unicode_w(w_self, space): - # Use the default encoding. 
- from pypy.objspace.std.unicodetype import unicode_from_string, \ - decode_object - w_defaultencoding = space.call_function(space.sys.get( - 'getdefaultencoding')) - from pypy.objspace.std.unicodetype import _get_encoding_and_errors, \ - unicode_from_string, decode_object - encoding, errors = _get_encoding_and_errors(space, w_defaultencoding, - space.w_None) - if encoding is None and errors is None: - return space.unicode_w(unicode_from_string(space, w_self)) - return space.unicode_w(decode_object(space, w_self, encoding, errors)) - registerimplementation(W_StringObject) W_StringObject.EMPTY = W_StringObject('') diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py --- a/pypy/objspace/std/test/test_obj.py +++ b/pypy/objspace/std/test/test_obj.py @@ -265,4 +265,7 @@ space = objspace.StdObjSpace() w_a = space.wrap("a") space.type = None + # if it crashes, it means that space._type_isinstance didn't go through + # the fast path, and tries to call type() (which is set to None just + # above) space.isinstance_w(w_a, space.w_str) # does not crash diff --git a/pypy/rlib/bitmanipulation.py b/pypy/rlib/bitmanipulation.py --- a/pypy/rlib/bitmanipulation.py +++ b/pypy/rlib/bitmanipulation.py @@ -1,5 +1,6 @@ from pypy.rlib import unroll + class BitSplitter(dict): def __getitem__(self, lengths): if isinstance(lengths, int): diff --git a/pypy/rlib/clibffi.py b/pypy/rlib/clibffi.py --- a/pypy/rlib/clibffi.py +++ b/pypy/rlib/clibffi.py @@ -233,6 +233,7 @@ (rffi.LONGLONG, _signed_type_for(rffi.LONGLONG)), (lltype.UniChar, _unsigned_type_for(lltype.UniChar)), (lltype.Bool, _unsigned_type_for(lltype.Bool)), + (lltype.Char, _signed_type_for(lltype.Char)), ] __float_type_map = [ diff --git a/pypy/rlib/libffi.py b/pypy/rlib/libffi.py --- a/pypy/rlib/libffi.py +++ b/pypy/rlib/libffi.py @@ -429,6 +429,11 @@ return rffi.cast(rffi.CArrayPtr(TYPE), addr)[0] assert False +def array_getitem_T(TYPE, width, addr, index, offset): + addr = rffi.ptradd(addr, index * 
width) + addr = rffi.ptradd(addr, offset) + return rffi.cast(rffi.CArrayPtr(TYPE), addr)[0] + @specialize.call_location() @jit.oopspec("libffi_array_setitem(ffitype, width, addr, index, offset, value)") def array_setitem(ffitype, width, addr, index, offset, value): @@ -439,3 +444,8 @@ rffi.cast(rffi.CArrayPtr(TYPE), addr)[0] = value return assert False + +def array_setitem_T(TYPE, width, addr, index, offset, value): + addr = rffi.ptradd(addr, index * width) + addr = rffi.ptradd(addr, offset) + rffi.cast(rffi.CArrayPtr(TYPE), addr)[0] = value diff --git a/pypy/rlib/longlong2float.py b/pypy/rlib/longlong2float.py --- a/pypy/rlib/longlong2float.py +++ b/pypy/rlib/longlong2float.py @@ -5,7 +5,12 @@ long long to a float and back to a long long. There are corner cases in which it does not work. """ + +from pypy.annotation import model as annmodel +from pypy.rlib.rarithmetic import r_int64 from pypy.rpython.lltypesystem import lltype, rffi +from pypy.rpython.extregistry import ExtRegistryEntry +from pypy.translator.tool.cbuild import ExternalCompilationInfo # -------- implement longlong2float and float2longlong -------- @@ -16,38 +21,33 @@ # these definitions are used only in tests, when not translated def longlong2float_emulator(llval): - d_array = lltype.malloc(DOUBLE_ARRAY_PTR.TO, 1, flavor='raw') - ll_array = rffi.cast(LONGLONG_ARRAY_PTR, d_array) - ll_array[0] = llval - floatval = d_array[0] - lltype.free(d_array, flavor='raw') - return floatval + with lltype.scoped_alloc(DOUBLE_ARRAY_PTR.TO, 1) as d_array: + ll_array = rffi.cast(LONGLONG_ARRAY_PTR, d_array) + ll_array[0] = llval + floatval = d_array[0] + return floatval -def float2longlong_emulator(floatval): - d_array = lltype.malloc(DOUBLE_ARRAY_PTR.TO, 1, flavor='raw') - ll_array = rffi.cast(LONGLONG_ARRAY_PTR, d_array) - d_array[0] = floatval - llval = ll_array[0] - lltype.free(d_array, flavor='raw') - return llval +def float2longlong(floatval): + with lltype.scoped_alloc(DOUBLE_ARRAY_PTR.TO, 1) as d_array: + 
ll_array = rffi.cast(LONGLONG_ARRAY_PTR, d_array) + d_array[0] = floatval + llval = ll_array[0] + return llval def uint2singlefloat_emulator(ival): - f_array = lltype.malloc(FLOAT_ARRAY_PTR.TO, 1, flavor='raw') - i_array = rffi.cast(UINT_ARRAY_PTR, f_array) - i_array[0] = ival - singlefloatval = f_array[0] - lltype.free(f_array, flavor='raw') - return singlefloatval + with lltype.scoped_alloc(FLOAT_ARRAY_PTR.TO, 1) as f_array: + i_array = rffi.cast(UINT_ARRAY_PTR, f_array) + i_array[0] = ival + singlefloatval = f_array[0] + return singlefloatval def singlefloat2uint_emulator(singlefloatval): - f_array = lltype.malloc(FLOAT_ARRAY_PTR.TO, 1, flavor='raw') - i_array = rffi.cast(UINT_ARRAY_PTR, f_array) - f_array[0] = singlefloatval - ival = i_array[0] - lltype.free(f_array, flavor='raw') - return ival + with lltype.scoped_alloc(FLOAT_ARRAY_PTR.TO, 1) as f_array: + i_array = rffi.cast(UINT_ARRAY_PTR, f_array) + f_array[0] = singlefloatval + ival = i_array[0] + return ival -from pypy.translator.tool.cbuild import ExternalCompilationInfo eci = ExternalCompilationInfo(includes=['string.h', 'assert.h'], post_include_bits=[""" static double pypy__longlong2float(long long x) { @@ -56,12 +56,6 @@ memcpy(&dd, &x, 8); return dd; } -static long long pypy__float2longlong(double x) { - long long ll; - assert(sizeof(double) == 8 && sizeof(long long) == 8); - memcpy(&ll, &x, 8); - return ll; -} static float pypy__uint2singlefloat(unsigned int x) { float ff; assert(sizeof(float) == 4 && sizeof(unsigned int) == 4); @@ -82,12 +76,6 @@ _nowrapper=True, elidable_function=True, sandboxsafe=True, oo_primitive="pypy__longlong2float") -float2longlong = rffi.llexternal( - "pypy__float2longlong", [rffi.DOUBLE], rffi.LONGLONG, - _callable=float2longlong_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True, sandboxsafe=True, - oo_primitive="pypy__float2longlong") - uint2singlefloat = rffi.llexternal( "pypy__uint2singlefloat", [rffi.UINT], rffi.FLOAT, 
_callable=uint2singlefloat_emulator, compilation_info=eci, @@ -99,3 +87,15 @@ _callable=singlefloat2uint_emulator, compilation_info=eci, _nowrapper=True, elidable_function=True, sandboxsafe=True, oo_primitive="pypy__singlefloat2uint") + + +class Float2LongLongEntry(ExtRegistryEntry): + _about_ = float2longlong + + def compute_result_annotation(self, s_float): + assert annmodel.SomeFloat().contains(s_float) + return annmodel.SomeInteger(knowntype=r_int64) + + def specialize_call(self, hop): + [v_float] = hop.inputargs(lltype.Float) + return hop.genop("convert_float_bytes_to_longlong", [v_float], resulttype=hop.r_result) diff --git a/pypy/rlib/rarithmetic.py b/pypy/rlib/rarithmetic.py --- a/pypy/rlib/rarithmetic.py +++ b/pypy/rlib/rarithmetic.py @@ -569,3 +569,37 @@ if not objectmodel.we_are_translated(): assert n <= p return llop.int_between(lltype.Bool, n, m, p) + + at objectmodel.specialize.ll() +def byteswap(arg): + """ Convert little->big endian and the opposite + """ + from pypy.rpython.lltypesystem import lltype, rffi + + T = lltype.typeOf(arg) + # XXX we cannot do arithmetics on small ints + if isinstance(arg, base_int): + arg = widen(arg) + if rffi.sizeof(T) == 1: + res = arg + elif rffi.sizeof(T) == 2: + a, b = arg & 0xFF, arg & 0xFF00 + res = (a << 8) | (b >> 8) + elif rffi.sizeof(T) == 4: + FF = r_uint(0xFF) + arg = r_uint(arg) + a, b, c, d = (arg & FF, arg & (FF << 8), arg & (FF << 16), + arg & (FF << 24)) + res = (a << 24) | (b << 8) | (c >> 8) | (d >> 24) + elif rffi.sizeof(T) == 8: + FF = r_ulonglong(0xFF) + arg = r_ulonglong(arg) + a, b, c, d = (arg & FF, arg & (FF << 8), arg & (FF << 16), + arg & (FF << 24)) + e, f, g, h = (arg & (FF << 32), arg & (FF << 40), arg & (FF << 48), + arg & (FF << 56)) + res = ((a << 56) | (b << 40) | (c << 24) | (d << 8) | (e >> 8) | + (f >> 24) | (g >> 40) | (h >> 56)) + else: + assert False # unreachable code + return rffi.cast(T, res) diff --git a/pypy/rlib/rfloat.py b/pypy/rlib/rfloat.py --- a/pypy/rlib/rfloat.py +++ 
b/pypy/rlib/rfloat.py @@ -1,11 +1,13 @@ """Float constants""" import math + +from pypy.annotation.model import SomeString +from pypy.rlib import objectmodel +from pypy.rpython.extfunc import register_external from pypy.rpython.tool import rffi_platform from pypy.translator.tool.cbuild import ExternalCompilationInfo -from pypy.rlib import objectmodel -from pypy.rpython.extfunc import register_external -from pypy.annotation.model import SomeString + USE_SHORT_FLOAT_REPR = True # XXX make it a translation option? @@ -74,7 +76,7 @@ while i < len(s) and s[i] in '0123456789': after_point += s[i] i += 1 - + if i == len(s): return sign, before_point, after_point, exponent @@ -91,7 +93,7 @@ if i == len(s): raise ValueError - + while i < len(s) and s[i] in '0123456789': exponent += s[i] i += 1 diff --git a/pypy/rlib/rmmap.py b/pypy/rlib/rmmap.py --- a/pypy/rlib/rmmap.py +++ b/pypy/rlib/rmmap.py @@ -228,6 +228,7 @@ # XXX should be propagate the real type, allowing # for 2*sys.maxint? high = high_ref[0] + high = rffi.cast(lltype.Signed, high) # low might just happen to have the value INVALID_FILE_SIZE # so we need to check the last error also INVALID_FILE_SIZE = -1 @@ -550,7 +551,7 @@ FILE_BEGIN = 0 high_ref = lltype.malloc(PLONG.TO, 1, flavor='raw') try: - high_ref[0] = newsize_high + high_ref[0] = rffi.cast(LONG, newsize_high) SetFilePointer(self.file_handle, newsize_low, high_ref, FILE_BEGIN) finally: @@ -712,7 +713,9 @@ free = c_munmap_safe elif _MS_WINDOWS: - def mmap(fileno, length, tagname="", access=_ACCESS_DEFAULT, offset=0): + def mmap(fileno, length, flags=0, tagname="", access=_ACCESS_DEFAULT, offset=0): + # XXX flags is or-ed into access by now. 
+ # check size boundaries _check_map_size(length) map_size = length @@ -794,6 +797,7 @@ offset_hi = 0 offset_lo = offset + flProtect |= flags m.map_handle = CreateFileMapping(m.file_handle, NULL, flProtect, size_hi, size_lo, m.tagname) @@ -811,6 +815,11 @@ m.map_handle = INVALID_HANDLE raise winerror + class Hint: + pos = -0x4fff0000 # for reproducible results + hint = Hint() + # XXX this has no effect on windows + def alloc(map_size): """Allocate memory. This is intended to be used by the JIT, so the memory has the executable bit set. diff --git a/pypy/rlib/rstruct/nativefmttable.py b/pypy/rlib/rstruct/nativefmttable.py --- a/pypy/rlib/rstruct/nativefmttable.py +++ b/pypy/rlib/rstruct/nativefmttable.py @@ -3,14 +3,17 @@ The table 'native_fmttable' is also used by pypy.module.array.interp_array. """ import struct -from pypy.rlib import jit + +from pypy.rlib import jit, longlong2float +from pypy.rlib.objectmodel import specialize +from pypy.rlib.rarithmetic import r_singlefloat, widen from pypy.rlib.rstruct import standardfmttable as std from pypy.rlib.rstruct.error import StructError +from pypy.rlib.unroll import unrolling_iterable +from pypy.rpython.lltypesystem import lltype, rffi from pypy.rpython.tool import rffi_platform -from pypy.rpython.lltypesystem import lltype, rffi -from pypy.rlib.rarithmetic import r_singlefloat from pypy.translator.tool.cbuild import ExternalCompilationInfo -from pypy.rlib.objectmodel import specialize + native_is_bigendian = struct.pack("=i", 1) == struct.pack(">i", 1) @@ -23,18 +26,24 @@ # ____________________________________________________________ + double_buf = lltype.malloc(rffi.DOUBLEP.TO, 1, flavor='raw', immortal=True) float_buf = lltype.malloc(rffi.FLOATP.TO, 1, flavor='raw', immortal=True) - at jit.dont_look_inside -def double_to_ccharp(doubleval): - double_buf[0] = doubleval - return rffi.cast(rffi.CCHARP, double_buf) +range_8_unroll = unrolling_iterable(list(reversed(range(8)))) +range_4_unroll = 
unrolling_iterable(list(reversed(range(4)))) def pack_double(fmtiter): doubleval = fmtiter.accept_float_arg() - p = double_to_ccharp(doubleval) - fmtiter.result.append_charpsize(p, rffi.sizeof(rffi.DOUBLE)) + value = longlong2float.float2longlong(doubleval) + if fmtiter.bigendian: + for i in range_8_unroll: + x = (value >> (8*i)) & 0xff + fmtiter.result.append(chr(x)) + else: + for i in range_8_unroll: + fmtiter.result.append(chr(value & 0xff)) + value >>= 8 @specialize.argtype(0) def unpack_double(fmtiter): @@ -45,16 +54,19 @@ doubleval = double_buf[0] fmtiter.appendobj(doubleval) - at jit.dont_look_inside -def float_to_ccharp(floatval): - float_buf[0] = floatval - return rffi.cast(rffi.CCHARP, float_buf) - def pack_float(fmtiter): doubleval = fmtiter.accept_float_arg() floatval = r_singlefloat(doubleval) - p = float_to_ccharp(floatval) - fmtiter.result.append_charpsize(p, rffi.sizeof(rffi.FLOAT)) + value = longlong2float.singlefloat2uint(floatval) + value = widen(value) + if fmtiter.bigendian: + for i in range_4_unroll: + x = (value >> (8*i)) & 0xff + fmtiter.result.append(chr(x)) + else: + for i in range_4_unroll: + fmtiter.result.append(chr(value & 0xff)) + value >>= 8 @specialize.argtype(0) def unpack_float(fmtiter): diff --git a/pypy/rlib/rstruct/runpack.py b/pypy/rlib/rstruct/runpack.py --- a/pypy/rlib/rstruct/runpack.py +++ b/pypy/rlib/rstruct/runpack.py @@ -4,11 +4,10 @@ """ import py -from struct import pack, unpack +from struct import unpack from pypy.rlib.rstruct.formatiterator import FormatIterator from pypy.rlib.rstruct.error import StructError from pypy.rlib.rstruct.nativefmttable import native_is_bigendian -from pypy.rpython.extregistry import ExtRegistryEntry class MasterReader(object): def __init__(self, s): diff --git a/pypy/rlib/rstruct/standardfmttable.py b/pypy/rlib/rstruct/standardfmttable.py --- a/pypy/rlib/rstruct/standardfmttable.py +++ b/pypy/rlib/rstruct/standardfmttable.py @@ -6,11 +6,12 @@ # values when packing. 
import struct + +from pypy.rlib.objectmodel import specialize +from pypy.rlib.rarithmetic import r_uint, r_longlong, r_ulonglong +from pypy.rlib.rstruct import ieee from pypy.rlib.rstruct.error import StructError, StructOverflowError -from pypy.rlib.rstruct import ieee from pypy.rlib.unroll import unrolling_iterable -from pypy.rlib.rarithmetic import r_uint, r_longlong, r_ulonglong -from pypy.rlib.objectmodel import specialize # In the CPython struct module, pack() unconsistently accepts inputs # that are out-of-range or floats instead of ints. Should we emulate diff --git a/pypy/rlib/rzipfile.py b/pypy/rlib/rzipfile.py --- a/pypy/rlib/rzipfile.py +++ b/pypy/rlib/rzipfile.py @@ -12,8 +12,7 @@ rzlib = None # XXX hack to get crc32 to work -from pypy.tool.lib_pypy import import_from_lib_pypy -crc_32_tab = import_from_lib_pypy('binascii').crc_32_tab +from pypy.module.binascii.interp_crc32 import crc_32_tab rcrc_32_tab = [r_uint(i) for i in crc_32_tab] diff --git a/pypy/rlib/test/test_rarithmetic.py b/pypy/rlib/test/test_rarithmetic.py --- a/pypy/rlib/test/test_rarithmetic.py +++ b/pypy/rlib/test/test_rarithmetic.py @@ -383,3 +383,9 @@ assert not int_between(1, 2, 2) assert not int_between(1, 1, 1) +def test_byteswap(): + from pypy.rpython.lltypesystem import rffi + + assert byteswap(rffi.cast(rffi.USHORT, 0x0102)) == 0x0201 + assert byteswap(rffi.cast(rffi.INT, 0x01020304)) == 0x04030201 + assert byteswap(rffi.cast(rffi.ULONGLONG, 0x0102030405060708L)) == 0x0807060504030201L diff --git a/pypy/rpython/llinterp.py b/pypy/rpython/llinterp.py --- a/pypy/rpython/llinterp.py +++ b/pypy/rpython/llinterp.py @@ -770,6 +770,10 @@ checkadr(adr) return llmemory.cast_adr_to_int(adr, mode) + def op_convert_float_bytes_to_longlong(self, f): + from pypy.rlib import longlong2float + return longlong2float.float2longlong(f) + def op_weakref_create(self, v_obj): def objgetter(): # special support for gcwrapper.py return self.getval(v_obj) diff --git 
a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py --- a/pypy/rpython/lltypesystem/lloperation.py +++ b/pypy/rpython/lltypesystem/lloperation.py @@ -349,6 +349,7 @@ 'cast_float_to_ulonglong':LLOp(canfold=True), 'truncate_longlong_to_int':LLOp(canfold=True), 'force_cast': LLOp(sideeffects=False), # only for rffi.cast() + 'convert_float_bytes_to_longlong': LLOp(canfold=True), # __________ pointer operations __________ diff --git a/pypy/rpython/module/ll_time.py b/pypy/rpython/module/ll_time.py --- a/pypy/rpython/module/ll_time.py +++ b/pypy/rpython/module/ll_time.py @@ -9,7 +9,7 @@ from pypy.rpython.lltypesystem import lltype from pypy.rpython.extfunc import BaseLazyRegistering, registering, extdef from pypy.rlib import rposix -from pypy.rlib.rarithmetic import intmask +from pypy.rlib.rarithmetic import intmask, maxint32 from pypy.translator.tool.cbuild import ExternalCompilationInfo if sys.platform == 'win32': @@ -177,7 +177,7 @@ @registering(time.sleep) def register_time_sleep(self): if sys.platform == 'win32': - MAX = sys.maxint + MAX = maxint32 Sleep = self.llexternal('Sleep', [rffi.ULONG], lltype.Void) def time_sleep_llimpl(secs): millisecs = secs * 1000.0 diff --git a/pypy/rpython/ootypesystem/ootype.py b/pypy/rpython/ootypesystem/ootype.py --- a/pypy/rpython/ootypesystem/ootype.py +++ b/pypy/rpython/ootypesystem/ootype.py @@ -1295,6 +1295,8 @@ for meth in self.overloadings: ARGS = meth._TYPE.ARGS if ARGS in signatures: + # XXX Conflict on 'Signed' vs 'SignedLongLong' on win64. + # XXX note that this partially works if this error is ignored. 
raise TypeError, 'Bad overloading' signatures.add(ARGS) diff --git a/pypy/tool/test/test_lib_pypy.py b/pypy/tool/test/test_lib_pypy.py --- a/pypy/tool/test/test_lib_pypy.py +++ b/pypy/tool/test/test_lib_pypy.py @@ -11,7 +11,7 @@ assert lib_pypy.LIB_PYTHON_MODIFIED.check(dir=1) def test_import_from_lib_pypy(): - binascii = lib_pypy.import_from_lib_pypy('binascii') - assert type(binascii) is type(lib_pypy) - assert binascii.__name__ == 'lib_pypy.binascii' - assert hasattr(binascii, 'crc_32_tab') + _functools = lib_pypy.import_from_lib_pypy('_functools') + assert type(_functools) is type(lib_pypy) + assert _functools.__name__ == 'lib_pypy._functools' + assert hasattr(_functools, 'partial') diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -484,7 +484,7 @@ 'shl', 'shr', 'sal', 'sar', 'rol', 'ror', 'mul', 'imul', 'div', 'idiv', 'bswap', 'bt', 'rdtsc', 'punpck', 'pshufd', 'pcmp', 'pand', 'psllw', 'pslld', 'psllq', - 'paddq', 'pinsr', 'pmul', 'psrl', + 'paddq', 'pinsr', 'pmul', 'psrl', 'vmul', # sign-extending moves should not produce GC pointers 'cbtw', 'cwtl', 'cwtd', 'cltd', 'cltq', 'cqto', # zero-extending moves should not produce GC pointers diff --git a/pypy/translator/c/src/float.h b/pypy/translator/c/src/float.h --- a/pypy/translator/c/src/float.h +++ b/pypy/translator/c/src/float.h @@ -27,7 +27,7 @@ #define OP_FLOAT_SUB(x,y,r) r = x - y #define OP_FLOAT_MUL(x,y,r) r = x * y #define OP_FLOAT_TRUEDIV(x,y,r) r = x / y -#define OP_FLOAT_POW(x,y,r) r = pow(x, y) +#define OP_FLOAT_POW(x,y,r) r = pow(x, y) /*** conversions ***/ @@ -42,5 +42,6 @@ #ifdef HAVE_LONG_LONG #define OP_CAST_FLOAT_TO_LONGLONG(x,r) r = (long long)(x) #define OP_CAST_FLOAT_TO_ULONGLONG(x,r) r = (unsigned long long)(x) +#define OP_CONVERT_FLOAT_BYTES_TO_LONGLONG(x,r) memcpy(&r, &x, sizeof(double)) #endif diff --git a/pypy/translator/cli/sdk.py b/pypy/translator/cli/sdk.py 
--- a/pypy/translator/cli/sdk.py +++ b/pypy/translator/cli/sdk.py @@ -103,6 +103,11 @@ mono_bin = find_mono_on_windows() if mono_bin is not None: SDK.ILASM = os.path.join(mono_bin, 'ilasm2.bat') + # XXX the failing tests are boring, and the SDK is usually installed + # on windows. I do not care right now, because the Linux buildbots + # don't test this at all... + if platform.architecture()[0] == '64bit': + py.test.skip('mono on 64bit is not well enough supported') else: SDK = MonoSDK return SDK diff --git a/pypy/translator/driver.py b/pypy/translator/driver.py --- a/pypy/translator/driver.py +++ b/pypy/translator/driver.py @@ -585,22 +585,6 @@ # task_compile_c = taskdef(task_compile_c, ['source_c'], "Compiling c source") - def backend_run(self, backend): - c_entryp = self.c_entryp - standalone = self.standalone - if standalone: - os.system(c_entryp) - else: - runner = self.extra.get('run', lambda f: f()) - runner(c_entryp) - - def task_run_c(self): - self.backend_run('c') - # - task_run_c = taskdef(task_run_c, ['compile_c'], - "Running compiled c source", - idemp=True) - def task_llinterpret_lltype(self): from pypy.rpython.llinterp import LLInterpreter py.log.setconsumer("llinterp operation", None) @@ -710,11 +694,6 @@ shutil.copy(main_exe, '.') self.log.info("Copied to %s" % os.path.join(os.getcwd(), dllname)) - def task_run_cli(self): - pass - task_run_cli = taskdef(task_run_cli, ['compile_cli'], - 'XXX') - def task_source_jvm(self): from pypy.translator.jvm.genjvm import GenJvm from pypy.translator.jvm.node import EntryPoint diff --git a/pypy/translator/goal/translate.py b/pypy/translator/goal/translate.py --- a/pypy/translator/goal/translate.py +++ b/pypy/translator/goal/translate.py @@ -31,7 +31,6 @@ ("backendopt", "do backend optimizations", "--backendopt", ""), ("source", "create source", "-s --source", ""), ("compile", "compile", "-c --compile", " (default goal)"), - ("run", "run the resulting binary", "--run", ""), ("llinterpret", "interpret the rtyped 
flow graphs", "--llinterpret", ""), ] def goal_options(): @@ -78,7 +77,7 @@ defaultfactory=list), # xxx default goals ['annotate', 'rtype', 'backendopt', 'source', 'compile'] ArbitraryOption("skipped_goals", "XXX", - defaultfactory=lambda: ['run']), + defaultfactory=list), OptionDescription("goal_options", "Goals that should be reached during translation", goal_options()), diff --git a/pypy/translator/jvm/opcodes.py b/pypy/translator/jvm/opcodes.py --- a/pypy/translator/jvm/opcodes.py +++ b/pypy/translator/jvm/opcodes.py @@ -241,4 +241,6 @@ 'cast_ulonglong_to_float': jvm.PYPYULONGTODOUBLE, 'cast_primitive': [PushAllArgs, CastPrimitive, StoreResult], 'force_cast': [PushAllArgs, CastPrimitive, StoreResult], + + 'convert_float_bytes_to_longlong': jvm.PYPYDOUBLEBYTESTOLONG, }) diff --git a/pypy/translator/jvm/typesystem.py b/pypy/translator/jvm/typesystem.py --- a/pypy/translator/jvm/typesystem.py +++ b/pypy/translator/jvm/typesystem.py @@ -941,6 +941,7 @@ PYPYDOUBLETOULONG = Method.s(jPyPy, 'double_to_ulong', (jDouble,), jLong) PYPYULONGTODOUBLE = Method.s(jPyPy, 'ulong_to_double', (jLong,), jDouble) PYPYLONGBITWISENEGATE = Method.v(jPyPy, 'long_bitwise_negate', (jLong,), jLong) +PYPYDOUBLEBYTESTOLONG = Method.v(jPyPy, 'pypy__float2longlong', (jDouble,), jLong) PYPYSTRTOINT = Method.v(jPyPy, 'str_to_int', (jString,), jInt) PYPYSTRTOUINT = Method.v(jPyPy, 'str_to_uint', (jString,), jInt) PYPYSTRTOLONG = Method.v(jPyPy, 'str_to_long', (jString,), jLong) diff --git a/pypy/translator/test/test_driver.py b/pypy/translator/test/test_driver.py --- a/pypy/translator/test/test_driver.py +++ b/pypy/translator/test/test_driver.py @@ -6,7 +6,7 @@ def test_ctr(): td = TranslationDriver() expected = ['annotate', 'backendopt', 'llinterpret', 'rtype', 'source', - 'compile', 'run', 'pyjitpl'] + 'compile', 'pyjitpl'] assert set(td.exposed) == set(expected) assert td.backend_select_goals(['compile_c']) == ['compile_c'] @@ -33,7 +33,6 @@ 'rtype_ootype', 'rtype_lltype', 'source_cli', 
'source_c', 'compile_cli', 'compile_c', - 'run_c', 'run_cli', 'compile_jvm', 'source_jvm', 'run_jvm', 'pyjitpl_lltype', 'pyjitpl_ootype'] @@ -50,6 +49,6 @@ 'backendopt_lltype'] expected = ['annotate', 'backendopt', 'llinterpret', 'rtype', 'source_c', - 'compile_c', 'run_c', 'pyjitpl'] + 'compile_c', 'pyjitpl'] assert set(td.exposed) == set(expected) diff --git a/pytest.py b/pytest.py --- a/pytest.py +++ b/pytest.py @@ -4,6 +4,20 @@ """ __all__ = ['main'] +# XXX hack for win64: +# This patch must stay here until the END OF STAGE 1 +# When all tests work, this branch will be merged +# and the branch stage 2 is started, where we remove this patch. +import sys +if hasattr(sys, "maxsize"): + if sys.maxint != sys.maxsize: + sys.maxint = sys.maxsize + import warnings + warnings.warn("""\n +---> This win64 port is now in stage 1: sys.maxint was modified. +---> When pypy/__init__.py becomes empty again, we have reached stage 2. +""") + from _pytest.core import main, UsageError, _preloadplugins from _pytest import core as cmdline from _pytest import __version__ From noreply at buildbot.pypy.org Fri Mar 23 17:29:13 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 23 Mar 2012 17:29:13 +0100 (CET) Subject: [pypy-commit] pypy default: kill a relative import Message-ID: <20120323162913.6854C82112@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r53945:ab2e3beecbde Date: 2012-03-23 12:27 -0400 http://bitbucket.org/pypy/pypy/changeset/ab2e3beecbde/ Log: kill a relative import diff --git a/pypy/rlib/parsing/pypackrat.py b/pypy/rlib/parsing/pypackrat.py --- a/pypy/rlib/parsing/pypackrat.py +++ b/pypy/rlib/parsing/pypackrat.py @@ -1,6 +1,8 @@ from pypy.rlib.parsing.tree import Nonterminal, Symbol -from makepackrat import PackratParser, BacktrackException, Status +from pypy.rlib.parsing.makepackrat import PackratParser, BacktrackException, Status + + class Parser(object): def NAME(self): return self._NAME().result From noreply at buildbot.pypy.org 
Fri Mar 23 17:29:14 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 23 Mar 2012 17:29:14 +0100 (CET) Subject: [pypy-commit] pypy default: merged upstream Message-ID: <20120323162914.C9BE082112@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r53946:6b7f1e0d0f31 Date: 2012-03-23 12:28 -0400 http://bitbucket.org/pypy/pypy/changeset/6b7f1e0d0f31/ Log: merged upstream diff --git a/pypy/doc/sandbox.rst b/pypy/doc/sandbox.rst --- a/pypy/doc/sandbox.rst +++ b/pypy/doc/sandbox.rst @@ -82,7 +82,10 @@ In pypy/translator/goal:: - ./translate.py --sandbox targetpypystandalone.py + ./translate.py -O2 --sandbox targetpypystandalone.py + +If you don't have a regular PyPy installed, you should, because it's +faster to translate, but you can also run ``python translate.py`` instead. To run it, use the tools in the pypy/translator/sandbox directory:: diff --git a/pypy/doc/you-want-to-help.rst b/pypy/doc/you-want-to-help.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/you-want-to-help.rst @@ -0,0 +1,70 @@ + +You want to help with PyPy, now what? +===================================== + +PyPy is a very large project that has a reputation of being hard to dive into. +Some of this fame is warranted, some of it is purely accidental. There are three +important lessons that everyone willing to contribute should learn: + +* PyPy has layers. There are many pieces of architecture that are very well + separated from each other. More about this below, but often the manifestation + of this is that things are at a different layer than you would expect them + to be. For example if you are looking for the JIT implementation, you will + not find it in the implementation of the Python programming language. + +* Because of the above, we are very serious about Test Driven Development. + It's not only what we believe in, but also that PyPy's architecture is + working very well with TDD in mind and not so well without it. 
Often + the development means progressing in an unrelated corner, one unittest + at a time; and then flipping a giant switch, bringing it all together. + (It generally works out of the box. If it doesn't, then we didn't + write enough unit tests.) It's worth repeating - PyPy + approach is great if you do TDD, not so great otherwise. + +* PyPy uses an entirely different set of tools - most of them included + in the PyPy repository. There is no Makefile, nor autoconf. More below + +Architecture +============ + +PyPy has layers. The 100 miles view: + +* `RPython`_ is the language in which we write interpreters. Not the entire + PyPy project is written in RPython, only the parts that are compiled in + the translation process. The interesting point is that RPython has no parser, + it's compiled from the live python objects, which make it possible to do + all kinds of metaprogramming during import time. In short, Python is a meta + programming language for RPython. + + The RPython standard library is to be found in the ``rlib`` subdirectory. + +.. _`RPython`: coding-guide.html#RPython + +* The translation toolchain - this is the part that takes care about translating + RPython to flow graphs and then to C. There is more in the `architecture`_ + document written about it. + + It mostly lives in ``rpython``, ``annotator`` and ``objspace/flow``. + +.. 
_`architecture`: architecture.html + +* Python Interpreter + + xxx + +* Python modules + + xxx + +* JIT + + xxx + +* Garbage Collectors + + xxx + +Toolset +======= + +xxx diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py --- a/pypy/interpreter/test/test_objspace.py +++ b/pypy/interpreter/test/test_objspace.py @@ -312,8 +312,8 @@ mods = space.get_builtinmodule_to_install() assert '__pypy__' in mods # real builtin - assert 'array' not in mods # in lib_pypy - assert 'faked+array' not in mods # in lib_pypy + assert '_functools' not in mods # in lib_pypy + assert 'faked+_functools' not in mods # in lib_pypy assert 'this_doesnt_exist' not in mods # not in lib_pypy assert 'faked+this_doesnt_exist' in mods # not in lib_pypy, but in # ALL_BUILTIN_MODULES diff --git a/pypy/interpreter/test/test_zzpickle_and_slow.py b/pypy/interpreter/test/test_zzpickle_and_slow.py --- a/pypy/interpreter/test/test_zzpickle_and_slow.py +++ b/pypy/interpreter/test/test_zzpickle_and_slow.py @@ -75,6 +75,7 @@ class AppTestInterpObjectPickling: pytestmark = py.test.mark.skipif("config.option.runappdirect") def setup_class(cls): + cls.space = gettestobjspace(usemodules=['struct']) _attach_helpers(cls.space) def teardown_class(cls): diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py --- a/pypy/jit/backend/x86/rx86.py +++ b/pypy/jit/backend/x86/rx86.py @@ -601,7 +601,9 @@ CVTSS2SD_xb = xmminsn('\xF3', rex_nw, '\x0F\x5A', register(1, 8), stack_bp(2)) - # These work on machine sized registers. + # These work on machine sized registers, so MOVD is actually MOVQ + # when running on 64 bits. 
Note a bug in the Intel documentation: + # http://lists.gnu.org/archive/html/bug-binutils/2007-07/msg00095.html MOVD_rx = xmminsn('\x66', rex_w, '\x0F\x7E', register(2, 8), register(1), '\xC0') MOVD_xr = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), register(2), '\xC0') MOVD_xb = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), stack_bp(2)) diff --git a/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py b/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py --- a/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py +++ b/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py @@ -182,6 +182,12 @@ filename = str(testdir.join(FILENAME % methname)) g = open(inputname, 'w') g.write('\x09.string "%s"\n' % BEGIN_TAG) + # + if instrname == 'MOVD' and self.WORD == 8: + instrname = 'MOVQ' + if argmodes == 'xb': + py.test.skip('"as" uses an undocumented alternate encoding??') + # for args in args_lists: suffix = "" ## all = instr.as_all_suffixes @@ -229,9 +235,6 @@ # movq $xxx, %rax => movl $xxx, %eax suffix = 'l' ops[1] = reduce_to_32bit(ops[1]) - if instrname.lower() == 'movd': - ops[0] = reduce_to_32bit(ops[0]) - ops[1] = reduce_to_32bit(ops[1]) # op = '\t%s%s %s%s' % (instrname.lower(), suffix, ', '.join(ops), following) diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -1,9 +1,10 @@ import py - +from pypy.conftest import gettestobjspace class AppTestAST: def setup_class(cls): + cls.space = gettestobjspace(usemodules=['struct']) cls.w_ast = cls.space.appexec([], """(): import _ast return _ast""") diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -4,7 +4,7 @@ class AppTestCodecs: def setup_class(cls): - space = gettestobjspace(usemodules=('unicodedata',)) + space = 
gettestobjspace(usemodules=('unicodedata', 'struct')) cls.space = space def test_register_noncallable(self): diff --git a/pypy/module/_continuation/test/test_zpickle.py b/pypy/module/_continuation/test/test_zpickle.py --- a/pypy/module/_continuation/test/test_zpickle.py +++ b/pypy/module/_continuation/test/test_zpickle.py @@ -106,8 +106,9 @@ version = 0 def setup_class(cls): - cls.space = gettestobjspace(usemodules=('_continuation',), + cls.space = gettestobjspace(usemodules=('_continuation', 'struct'), CALL_METHOD=True) + cls.space.config.translation.continuation = True cls.space.appexec([], """(): global continulet, A, __name__ diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -92,7 +92,8 @@ class AppTestSocketConnection(BaseConnectionTest): def setup_class(cls): - space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal')) + space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal', + 'struct', 'array')) cls.space = space cls.w_connections = space.newlist([]) diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -6,7 +6,7 @@ from pypy.rpython.lltypesystem import lltype, rffi def setup_module(mod): - mod.space = gettestobjspace(usemodules=['_socket', 'array']) + mod.space = gettestobjspace(usemodules=['_socket', 'array', 'struct']) global socket import socket mod.w_socket = space.appexec([], "(): import _socket as m; return m") diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -90,7 +90,7 @@ class AppTestConnectedSSL: def setup_class(cls): - space = gettestobjspace(usemodules=('_ssl', 
'_socket')) + space = gettestobjspace(usemodules=('_ssl', '_socket', 'struct')) cls.space = space def setup_method(self, method): @@ -179,7 +179,7 @@ # to exercise the poll() calls def setup_class(cls): - space = gettestobjspace(usemodules=('_ssl', '_socket')) + space = gettestobjspace(usemodules=('_ssl', '_socket', 'struct')) cls.space = space cls.space.appexec([], """(): import socket; socket.setdefaulttimeout(1) diff --git a/pypy/module/cpyext/test/conftest.py b/pypy/module/cpyext/test/conftest.py --- a/pypy/module/cpyext/test/conftest.py +++ b/pypy/module/cpyext/test/conftest.py @@ -10,7 +10,7 @@ return False def pytest_funcarg__space(request): - return gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi']) + return gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array']) def pytest_funcarg__api(request): return request.cls.api diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -19,7 +19,8 @@ class BaseApiTest(LeakCheckingTest): def setup_class(cls): - cls.space = space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi']) + cls.space = space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', + 'array']) # warm up reference counts: # - the posix module allocates a HCRYPTPROV on Windows diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -1,3 +1,4 @@ +from pypy.conftest import gettestobjspace from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase import py diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -35,7 +35,7 @@ class AppTestApi: def setup_class(cls): - cls.space = 
gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi']) + cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array']) from pypy.rlib.libffi import get_libc_name cls.w_libc = cls.space.wrap(get_libc_name()) @@ -165,8 +165,9 @@ return leaking class AppTestCpythonExtensionBase(LeakCheckingTest): + def setup_class(cls): - cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi']) + cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array']) cls.space.getbuiltinmodule("cpyext") from pypy.module.imp.importing import importhook importhook(cls.space, "os") # warm up reference counts diff --git a/pypy/module/cpyext/test/test_import.py b/pypy/module/cpyext/test/test_import.py --- a/pypy/module/cpyext/test/test_import.py +++ b/pypy/module/cpyext/test/test_import.py @@ -19,7 +19,7 @@ space.wrap('__name__'))) == 'foobar' def test_getmoduledict(self, space, api): - testmod = "binascii" + testmod = "_functools" w_pre_dict = api.PyImport_GetModuleDict() assert not space.is_true(space.contains(w_pre_dict, space.wrap(testmod))) diff --git a/pypy/module/fcntl/test/test_fcntl.py b/pypy/module/fcntl/test/test_fcntl.py --- a/pypy/module/fcntl/test/test_fcntl.py +++ b/pypy/module/fcntl/test/test_fcntl.py @@ -13,7 +13,7 @@ class AppTestFcntl: def setup_class(cls): - space = gettestobjspace(usemodules=('fcntl', 'array')) + space = gettestobjspace(usemodules=('fcntl', 'array', 'struct')) cls.space = space tmpprefix = str(udir.ensure('test_fcntl', dir=1).join('tmp_')) cls.w_tmp = space.wrap(tmpprefix) diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -987,6 +987,10 @@ os.environ['LANG'] = oldlang class AppTestImportHooks(object): + + def setup_class(cls): + cls.space = gettestobjspace(usemodules=('struct',)) + def test_meta_path(self): tried_imports = [] class Importer(object): diff --git 
a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py --- a/pypy/module/itertools/test/test_itertools.py +++ b/pypy/module/itertools/test/test_itertools.py @@ -891,7 +891,7 @@ class AppTestItertools27: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['itertools']) + cls.space = gettestobjspace(usemodules=['itertools', 'struct']) if cls.space.is_true(cls.space.appexec([], """(): import sys; return sys.version_info < (2, 7) """)): diff --git a/pypy/module/math/test/test_math.py b/pypy/module/math/test/test_math.py --- a/pypy/module/math/test/test_math.py +++ b/pypy/module/math/test/test_math.py @@ -6,7 +6,7 @@ class AppTestMath: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['math']) + cls.space = gettestobjspace(usemodules=['math', 'struct']) cls.w_cases = cls.space.wrap(test_direct.MathTests.TESTCASES) cls.w_consistent_host = cls.space.wrap(test_direct.consistent_host) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -359,6 +359,7 @@ name="int64", char="q", w_box_type=space.gettypefor(interp_boxes.W_Int64Box), + alternate_constructors=[space.w_long], ) self.w_uint64dtype = W_Dtype( types.UInt64(), @@ -386,23 +387,6 @@ alternate_constructors=[space.w_float], aliases=["float"], ) - self.w_longlongdtype = W_Dtype( - types.Int64(), - num=9, - kind=SIGNEDLTR, - name='int64', - char='q', - w_box_type = space.gettypefor(interp_boxes.W_LongLongBox), - alternate_constructors=[space.w_long], - ) - self.w_ulonglongdtype = W_Dtype( - types.UInt64(), - num=10, - kind=UNSIGNEDLTR, - name='uint64', - char='Q', - w_box_type = space.gettypefor(interp_boxes.W_ULongLongBox), - ) self.w_stringdtype = W_Dtype( types.StringType(1), num=18, @@ -435,17 +419,19 @@ self.w_booldtype, self.w_int8dtype, self.w_uint8dtype, self.w_int16dtype, self.w_uint16dtype, self.w_int32dtype, 
self.w_uint32dtype, self.w_longdtype, self.w_ulongdtype, - self.w_longlongdtype, self.w_ulonglongdtype, + self.w_int64dtype, self.w_uint64dtype, self.w_float32dtype, self.w_float64dtype, self.w_stringdtype, self.w_unicodedtype, self.w_voiddtype, ] - self.dtypes_by_num_bytes = sorted( + self.float_dtypes_by_num_bytes = sorted( (dtype.itemtype.get_element_size(), dtype) - for dtype in self.builtin_dtypes + for dtype in [self.w_float32dtype, self.w_float64dtype] ) self.dtypes_by_name = {} - for dtype in self.builtin_dtypes: + # we reverse, so the stuff with lower numbers override stuff with + # higher numbers + for dtype in reversed(self.builtin_dtypes): self.dtypes_by_name[dtype.name] = dtype can_name = dtype.kind + str(dtype.itemtype.get_element_size()) self.dtypes_by_name[can_name] = dtype @@ -473,7 +459,7 @@ 'LONG': self.w_longdtype, 'UNICODE': self.w_unicodedtype, #'OBJECT', - 'ULONGLONG': self.w_ulonglongdtype, + 'ULONGLONG': self.w_uint64dtype, 'STRING': self.w_stringdtype, #'CDOUBLE', #'DATETIME', diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1125,7 +1125,8 @@ @unwrap_spec(subok=bool, copy=bool, ownmaskna=bool) def array(space, w_item_or_iterable, w_dtype=None, w_order=None, - subok=True, copy=True, w_maskna=None, ownmaskna=False): + subok=True, copy=True, w_maskna=None, ownmaskna=False, + w_ndmin=None): # find scalar if w_maskna is None: w_maskna = space.w_None @@ -1170,8 +1171,13 @@ break if dtype is None: dtype = interp_dtype.get_dtype_cache(space).w_float64dtype + shapelen = len(shape) + if w_ndmin is not None and not space.is_w(w_ndmin, space.w_None): + ndmin = space.int_w(w_ndmin) + if ndmin > shapelen: + shape = [1] * (ndmin - shapelen) + shape + shapelen = ndmin arr = W_NDimArray(shape[:], dtype=dtype, order=order) - shapelen = len(shape) arr_iter = arr.create_iter() # XXX we might want to have a 
jitdriver here for i in range(len(elems_w)): diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -314,7 +314,7 @@ return dt if dt.num >= 5: return interp_dtype.get_dtype_cache(space).w_float64dtype - for bytes, dtype in interp_dtype.get_dtype_cache(space).dtypes_by_num_bytes: + for bytes, dtype in interp_dtype.get_dtype_cache(space).float_dtypes_by_num_bytes: if (dtype.kind == interp_dtype.FLOATINGLTR and dtype.itemtype.get_element_size() > dt.itemtype.get_element_size()): return dtype diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -302,6 +302,7 @@ else: raises(OverflowError, numpy.int32, 2147483648) raises(OverflowError, numpy.int32, '2147483648') + assert numpy.dtype('int32') is numpy.dtype(numpy.int32) def test_uint32(self): import sys @@ -333,15 +334,11 @@ assert numpy.dtype(numpy.int64).type is numpy.int64 assert numpy.int64(3) == 3 - if sys.maxint >= 2 ** 63 - 1: - assert numpy.int64(9223372036854775807) == 9223372036854775807 - assert numpy.int64('9223372036854775807') == 9223372036854775807 - else: - raises(OverflowError, numpy.int64, 9223372036854775807) - raises(OverflowError, numpy.int64, '9223372036854775807') + assert numpy.int64(9223372036854775807) == 9223372036854775807 + assert numpy.int64(9223372036854775807) == 9223372036854775807 raises(OverflowError, numpy.int64, 9223372036854775808) - raises(OverflowError, numpy.int64, '9223372036854775808') + raises(OverflowError, numpy.int64, 9223372036854775808L) def test_uint64(self): import sys diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -211,6 +211,18 @@ 
assert a.shape == (3,) assert a.dtype is dtype(int) + def test_ndmin(self): + from _numpypy import array + + arr = array([[[1]]], ndmin=1) + assert arr.shape == (1, 1, 1) + + def test_noop_ndmin(self): + from _numpypy import array + + arr = array([1], ndmin=3) + assert arr.shape == (1, 1, 1) + def test_type(self): from _numpypy import array ar = array(range(5)) diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -197,7 +197,6 @@ def test_signbit(self): from _numpypy import signbit, copysign - import struct assert (signbit([0, 0.0, 1, 1.0, float('inf'), float('nan')]) == [False, False, False, False, False, False]).all() diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -500,6 +500,19 @@ BoxType = interp_boxes.W_ULongBox format_code = "L" +def _int64_coerce(self, space, w_item): + try: + return self._base_coerce(space, w_item) + except OperationError, e: + if not e.match(space, space.w_OverflowError): + raise + bigint = space.bigint_w(w_item) + try: + value = bigint.tolonglong() + except OverflowError: + raise OperationError(space.w_OverflowError, space.w_None) + return self.box(value) + class Int64(BaseType, Integer): _attrs_ = () @@ -507,6 +520,8 @@ BoxType = interp_boxes.W_Int64Box format_code = "q" + _coerce = func_with_new_name(_int64_coerce, '_coerce') + class NonNativeInt64(BaseType, NonNativeInteger): _attrs_ = () @@ -514,6 +529,8 @@ BoxType = interp_boxes.W_Int64Box format_code = "q" + _coerce = func_with_new_name(_int64_coerce, '_coerce') + def _uint64_coerce(self, space, w_item): try: return self._base_coerce(space, w_item) diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ 
-14,10 +14,10 @@ def setup_module(mod): if os.name != 'nt': - mod.space = gettestobjspace(usemodules=['posix', 'fcntl']) + mod.space = gettestobjspace(usemodules=['posix', 'fcntl', 'struct']) else: # On windows, os.popen uses the subprocess module - mod.space = gettestobjspace(usemodules=['posix', '_rawffi', 'thread']) + mod.space = gettestobjspace(usemodules=['posix', '_rawffi', 'thread', 'struct']) mod.path = udir.join('posixtestfile.txt') mod.path.write("this is a test") mod.path2 = udir.join('test_posix2-') diff --git a/pypy/module/rctime/test/test_rctime.py b/pypy/module/rctime/test/test_rctime.py --- a/pypy/module/rctime/test/test_rctime.py +++ b/pypy/module/rctime/test/test_rctime.py @@ -3,7 +3,7 @@ class AppTestRCTime: def setup_class(cls): - space = gettestobjspace(usemodules=('rctime',)) + space = gettestobjspace(usemodules=('rctime', 'struct')) cls.space = space def test_attributes(self): diff --git a/pypy/module/test_lib_pypy/test_binascii.py b/pypy/module/test_lib_pypy/test_binascii.py deleted file mode 100644 --- a/pypy/module/test_lib_pypy/test_binascii.py +++ /dev/null @@ -1,8 +0,0 @@ - -""" Some more binascii.py tests -""" - -class AppTestBinAscii: - def test_incorrect_padding(self): - import binascii - raises(binascii.Error, "'x'.decode('base64')") diff --git a/pypy/module/zipimport/test/test_undocumented.py b/pypy/module/zipimport/test/test_undocumented.py --- a/pypy/module/zipimport/test/test_undocumented.py +++ b/pypy/module/zipimport/test/test_undocumented.py @@ -19,7 +19,7 @@ class AppTestZipImport: def setup_class(cls): - space = gettestobjspace(usemodules=['zipimport', 'rctime']) + space = gettestobjspace(usemodules=['zipimport', 'rctime', 'struct']) cls.space = space cls.w_created_paths = space.wrap(created_paths) diff --git a/pypy/module/zipimport/test/test_zipimport.py b/pypy/module/zipimport/test/test_zipimport.py --- a/pypy/module/zipimport/test/test_zipimport.py +++ b/pypy/module/zipimport/test/test_zipimport.py @@ -47,9 +47,9 @@ 
""").compile() if cls.compression == ZIP_DEFLATED: - space = gettestobjspace(usemodules=['zipimport', 'zlib', 'rctime']) + space = gettestobjspace(usemodules=['zipimport', 'zlib', 'rctime', 'struct']) else: - space = gettestobjspace(usemodules=['zipimport', 'rctime']) + space = gettestobjspace(usemodules=['zipimport', 'rctime', 'struct']) cls.space = space tmpdir = udir.ensure('zipimport_%s' % cls.__name__, dir=1) diff --git a/pypy/objspace/flow/model.py b/pypy/objspace/flow/model.py --- a/pypy/objspace/flow/model.py +++ b/pypy/objspace/flow/model.py @@ -7,8 +7,7 @@ from pypy.tool.uid import uid, Hashable from pypy.tool.descriptor import roproperty from pypy.tool.sourcetools import PY_IDENTIFIER, nice_repr_for_func -from pypy.tool.identity_dict import identity_dict -from pypy.rlib.rarithmetic import is_valid_int, r_longlong, r_ulonglong +from pypy.rlib.rarithmetic import is_valid_int, r_longlong, r_ulonglong, r_uint """ @@ -546,7 +545,7 @@ for n in cases[:len(cases)-has_default]: if is_valid_int(n): continue - if isinstance(n, (r_longlong, r_ulonglong)): + if isinstance(n, (r_longlong, r_ulonglong, r_uint)): continue if isinstance(n, (str, unicode)) and len(n) == 1: continue diff --git a/pypy/objspace/std/ropeobject.py b/pypy/objspace/std/ropeobject.py --- a/pypy/objspace/std/ropeobject.py +++ b/pypy/objspace/std/ropeobject.py @@ -41,11 +41,6 @@ return w_self return W_RopeObject(w_self._node) - def unicode_w(w_self, space): - # XXX should this use the default encoding? 
- from pypy.objspace.std.unicodetype import plain_str2unicode - return plain_str2unicode(space, w_self._node.flatten_string()) - W_RopeObject.EMPTY = W_RopeObject(rope.LiteralStringNode.EMPTY) W_RopeObject.PREBUILT = [W_RopeObject(rope.LiteralStringNode.PREBUILT[i]) for i in range(256)] diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -37,6 +37,20 @@ return None return space.wrap(compute_unique_id(space.str_w(self))) + def unicode_w(w_self, space): + # Use the default encoding. + from pypy.objspace.std.unicodetype import unicode_from_string, \ + decode_object + w_defaultencoding = space.call_function(space.sys.get( + 'getdefaultencoding')) + from pypy.objspace.std.unicodetype import _get_encoding_and_errors, \ + unicode_from_string, decode_object + encoding, errors = _get_encoding_and_errors(space, w_defaultencoding, + space.w_None) + if encoding is None and errors is None: + return space.unicode_w(unicode_from_string(space, w_self)) + return space.unicode_w(decode_object(space, w_self, encoding, errors)) + class W_StringObject(W_AbstractStringObject): from pypy.objspace.std.stringtype import str_typedef as typedef @@ -55,20 +69,6 @@ def str_w(w_self, space): return w_self._value - def unicode_w(w_self, space): - # Use the default encoding. 
- from pypy.objspace.std.unicodetype import unicode_from_string, \ - decode_object - w_defaultencoding = space.call_function(space.sys.get( - 'getdefaultencoding')) - from pypy.objspace.std.unicodetype import _get_encoding_and_errors, \ - unicode_from_string, decode_object - encoding, errors = _get_encoding_and_errors(space, w_defaultencoding, - space.w_None) - if encoding is None and errors is None: - return space.unicode_w(unicode_from_string(space, w_self)) - return space.unicode_w(decode_object(space, w_self, encoding, errors)) - registerimplementation(W_StringObject) W_StringObject.EMPTY = W_StringObject('') diff --git a/pypy/rlib/rzipfile.py b/pypy/rlib/rzipfile.py --- a/pypy/rlib/rzipfile.py +++ b/pypy/rlib/rzipfile.py @@ -12,8 +12,7 @@ rzlib = None # XXX hack to get crc32 to work -from pypy.tool.lib_pypy import import_from_lib_pypy -crc_32_tab = import_from_lib_pypy('binascii').crc_32_tab +from pypy.module.binascii.interp_crc32 import crc_32_tab rcrc_32_tab = [r_uint(i) for i in crc_32_tab] diff --git a/pypy/tool/test/test_lib_pypy.py b/pypy/tool/test/test_lib_pypy.py --- a/pypy/tool/test/test_lib_pypy.py +++ b/pypy/tool/test/test_lib_pypy.py @@ -11,7 +11,7 @@ assert lib_pypy.LIB_PYTHON_MODIFIED.check(dir=1) def test_import_from_lib_pypy(): - binascii = lib_pypy.import_from_lib_pypy('binascii') - assert type(binascii) is type(lib_pypy) - assert binascii.__name__ == 'lib_pypy.binascii' - assert hasattr(binascii, 'crc_32_tab') + _functools = lib_pypy.import_from_lib_pypy('_functools') + assert type(_functools) is type(lib_pypy) + assert _functools.__name__ == 'lib_pypy._functools' + assert hasattr(_functools, 'partial') diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -484,7 +484,7 @@ 'shl', 'shr', 'sal', 'sar', 'rol', 'ror', 'mul', 'imul', 'div', 'idiv', 'bswap', 'bt', 'rdtsc', 'punpck', 'pshufd', 'pcmp', 'pand', 'psllw', 'pslld', 
'psllq', - 'paddq', 'pinsr', 'pmul', 'psrl', + 'paddq', 'pinsr', 'pmul', 'psrl', 'vmul', # sign-extending moves should not produce GC pointers 'cbtw', 'cwtl', 'cwtd', 'cltd', 'cltq', 'cqto', # zero-extending moves should not produce GC pointers diff --git a/pypy/translator/test/test_driver.py b/pypy/translator/test/test_driver.py --- a/pypy/translator/test/test_driver.py +++ b/pypy/translator/test/test_driver.py @@ -6,7 +6,7 @@ def test_ctr(): td = TranslationDriver() expected = ['annotate', 'backendopt', 'llinterpret', 'rtype', 'source', - 'compile', 'run', 'pyjitpl'] + 'compile', 'pyjitpl'] assert set(td.exposed) == set(expected) assert td.backend_select_goals(['compile_c']) == ['compile_c'] @@ -33,7 +33,6 @@ 'rtype_ootype', 'rtype_lltype', 'source_cli', 'source_c', 'compile_cli', 'compile_c', - 'run_c', 'run_cli', 'compile_jvm', 'source_jvm', 'run_jvm', 'pyjitpl_lltype', 'pyjitpl_ootype'] @@ -50,6 +49,6 @@ 'backendopt_lltype'] expected = ['annotate', 'backendopt', 'llinterpret', 'rtype', 'source_c', - 'compile_c', 'run_c', 'pyjitpl'] + 'compile_c', 'pyjitpl'] assert set(td.exposed) == set(expected) From noreply at buildbot.pypy.org Fri Mar 23 18:12:57 2012 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 23 Mar 2012 18:12:57 +0100 (CET) Subject: [pypy-commit] pypy default: a missing hop.exception_is_here Message-ID: <20120323171257.1413E82112@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r53947:773fac2f7006 Date: 2012-03-23 19:12 +0200 http://bitbucket.org/pypy/pypy/changeset/773fac2f7006/ Log: a missing hop.exception_is_here diff --git a/pypy/rpython/rstr.py b/pypy/rpython/rstr.py --- a/pypy/rpython/rstr.py +++ b/pypy/rpython/rstr.py @@ -165,6 +165,7 @@ v_char = hop.inputarg(rstr.char_repr, arg=1) v_left = hop.inputconst(Bool, left) v_right = hop.inputconst(Bool, right) + hop.exception_is_here() return hop.gendirectcall(self.ll.ll_strip, v_str, v_char, v_left, v_right) def rtype_method_lstrip(self, hop): From noreply at 
buildbot.pypy.org Fri Mar 23 18:20:34 2012 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 23 Mar 2012 18:20:34 +0100 (CET) Subject: [pypy-commit] pypy default: test all the things Message-ID: <20120323172034.93EA382112@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r53948:deac6fc76676 Date: 2012-03-23 19:20 +0200 http://bitbucket.org/pypy/pypy/changeset/deac6fc76676/ Log: test all the things diff --git a/pypy/rpython/test/test_rstr.py b/pypy/rpython/test/test_rstr.py --- a/pypy/rpython/test/test_rstr.py +++ b/pypy/rpython/test/test_rstr.py @@ -637,13 +637,16 @@ def _make_split_test(self, split_fn): const = self.const def fn(i): - s = [const(''), const('0.1.2.4.8'), const('.1.2'), const('1.2.'), const('.1.2.4.')][i] - l = getattr(s, split_fn)(const('.')) - sum = 0 - for num in l: - if len(num): - sum += ord(num[0]) - ord(const('0')[0]) - return sum + len(l) * 100 + try: + s = [const(''), const('0.1.2.4.8'), const('.1.2'), const('1.2.'), const('.1.2.4.')][i] + l = getattr(s, split_fn)(const('.')) + sum = 0 + for num in l: + if len(num): + sum += ord(num[0]) - ord(const('0')[0]) + return sum + len(l) * 100 + except MemoryError: + return 42 return fn def test_split(self): From noreply at buildbot.pypy.org Fri Mar 23 18:38:34 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 23 Mar 2012 18:38:34 +0100 (CET) Subject: [pypy-commit] pypy py3k: disable IntDictStrategy for now: we cannot use it, since now 'ints' are acutally 'longs' Message-ID: <20120323173834.2E3C682112@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53949:88bedcfe28c5 Date: 2012-03-23 17:45 +0100 http://bitbucket.org/pypy/pypy/changeset/88bedcfe28c5/ Log: disable IntDictStrategy for now: we cannot use it, since now 'ints' are acutally 'longs' diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -176,9 +176,11 
@@ self.switch_to_string_strategy(w_dict) return w_type = self.space.type(w_key) - if self.space.is_w(w_type, self.space.w_int): - self.switch_to_int_strategy(w_dict) - elif withidentitydict and w_type.compares_by_identity(): + # XXX: disable IntDictStrategy for now, because in py3k ints are + # actually long + ## if self.space.is_w(w_type, self.space.w_int): + ## self.switch_to_int_strategy(w_dict) + if withidentitydict and w_type.compares_by_identity(): self.switch_to_identity_strategy(w_dict) else: self.switch_to_object_strategy(w_dict) diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -754,6 +754,7 @@ assert "StringDictStrategy" in self.get_strategy(d) def test_empty_to_int(self): + skip('IntDictStrategy is disabled for now, re-enable it!') import sys d = {} d[1] = "hi" From noreply at buildbot.pypy.org Fri Mar 23 18:38:35 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 23 Mar 2012 18:38:35 +0100 (CET) Subject: [pypy-commit] pypy py3k: add some TODO list Message-ID: <20120323173835.6BE5682112@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53950:677b89e22d6f Date: 2012-03-23 17:46 +0100 http://bitbucket.org/pypy/pypy/changeset/677b89e22d6f/ Log: add some TODO list diff --git a/TODO b/TODO new file mode 100644 --- /dev/null +++ b/TODO @@ -0,0 +1,8 @@ +kill Exception.message + +what to do with the ListRangeStrategy? 
We also have __builtin__.functional.W_Range + +run coverage against the parser/astbuilder/astcompiler: it's probably full of +dead code because the grammar changed + +re-enable IntDictStrategy From noreply at buildbot.pypy.org Fri Mar 23 18:38:36 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 23 Mar 2012 18:38:36 +0100 (CET) Subject: [pypy-commit] pypy py3k: add w_unicode to the FakeObjSpace, it's needed for translating itertools Message-ID: <20120323173836.B0EDA82112@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53951:e491dca2c28c Date: 2012-03-23 17:56 +0100 http://bitbucket.org/pypy/pypy/changeset/e491dca2c28c/ Log: add w_unicode to the FakeObjSpace, it's needed for translating itertools diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -290,7 +290,7 @@ ObjSpace.ExceptionTable + ['int', 'str', 'float', 'long', 'tuple', 'list', 'dict', 'bytes', 'complex', 'slice', 'bool', - 'type', 'text', 'object']): + 'type', 'text', 'object', 'unicode']): setattr(FakeObjSpace, 'w_' + name, w_some_obj()) # for (name, _, arity, _) in ObjSpace.MethodTable: From noreply at buildbot.pypy.org Fri Mar 23 18:38:38 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 23 Mar 2012 18:38:38 +0100 (CET) Subject: [pypy-commit] pypy py3k: dtypes are now passed around as unicode Message-ID: <20120323173838.CEFD482112@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53952:32925569e989 Date: 2012-03-23 18:07 +0100 http://bitbucket.org/pypy/pypy/changeset/32925569e989/ Log: dtypes are now passed around as unicode diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -198,7 +198,7 @@ return cache.w_float64dtype elif space.isinstance_w(w_dtype, w_subtype): return w_dtype - elif 
space.isinstance_w(w_dtype, space.w_str): + elif space.isinstance_w(w_dtype, space.w_unicode): name = space.str_w(w_dtype) if ',' in name: return dtype_from_spec(space, name) From noreply at buildbot.pypy.org Fri Mar 23 18:38:41 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 23 Mar 2012 18:38:41 +0100 (CET) Subject: [pypy-commit] pypy py3k: __nonzero__ has been renamed to __bool__ in py3k Message-ID: <20120323173841.569BE82112@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53953:84582006f0d7 Date: 2012-03-23 18:17 +0100 http://bitbucket.org/pypy/pypy/changeset/84582006f0d7/ Log: __nonzero__ has been renamed to __bool__ in py3k diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -268,7 +268,7 @@ __format__ = interp2app(W_GenericBox.descr_format), __int__ = interp2app(W_GenericBox.descr_int), __float__ = interp2app(W_GenericBox.descr_float), - __nonzero__ = interp2app(W_GenericBox.descr_nonzero), + __bool__ = interp2app(W_GenericBox.descr_nonzero), __add__ = interp2app(W_GenericBox.descr_add), __sub__ = interp2app(W_GenericBox.descr_sub), From noreply at buildbot.pypy.org Fri Mar 23 18:38:42 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 23 Mar 2012 18:38:42 +0100 (CET) Subject: [pypy-commit] pypy py3k: s/buffer/memoryview Message-ID: <20120323173842.A279782112@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53954:adbb724b5943 Date: 2012-03-23 18:20 +0100 http://bitbucket.org/pypy/pypy/changeset/adbb724b5943/ Log: s/buffer/memoryview diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -695,7 +695,7 @@ assert count > 0 s = s[count:] writeall(b'hello, ') - writeall(buffer(b'world!\n')) + writeall(memoryview(b'world!\n')) res 
= os.lseek(fd, 0, 0) assert res == 0 data = b'' From noreply at buildbot.pypy.org Fri Mar 23 18:38:44 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 23 Mar 2012 18:38:44 +0100 (CET) Subject: [pypy-commit] pypy py3k: skip these numpypy tests for now, the applevel part of numpy has to been ported to python3 Message-ID: <20120323173844.C1DC982112@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53955:f35ca49d1e27 Date: 2012-03-23 18:28 +0100 http://bitbucket.org/pypy/pypy/changeset/f35ca49d1e27/ Log: skip these numpypy tests for now, the applevel part of numpy has to been ported to python3 diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -6,3 +6,5 @@ dead code because the grammar changed re-enable IntDictStrategy + +unskip numpypy tests in module/test_lib_pypy/numpypy/ diff --git a/pypy/module/test_lib_pypy/numpypy/core/test_fromnumeric.py b/pypy/module/test_lib_pypy/numpypy/core/test_fromnumeric.py --- a/pypy/module/test_lib_pypy/numpypy/core/test_fromnumeric.py +++ b/pypy/module/test_lib_pypy/numpypy/core/test_fromnumeric.py @@ -2,6 +2,11 @@ class AppTestFromNumeric(BaseNumpyAppTest): + + def setup_class(cls): + import py + py.test.skip('the applevel parts are not ready for py3k') + def test_argmax(self): # tests taken from numpy/core/fromnumeric.py docstring from numpypy import array, arange, argmax diff --git a/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py b/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py --- a/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py +++ b/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py @@ -3,6 +3,11 @@ class AppTestBaseRepr(BaseNumpyAppTest): + + def setup_class(cls): + import py + py.test.skip('the applevel parts are not ready for py3k') + def test_base3(self): from numpypy import base_repr assert base_repr(3**5, 3) == '100000' @@ -21,6 +26,11 @@ assert base_repr(-12, 4) == '-30' class AppTestRepr(BaseNumpyAppTest): + + def setup_class(cls): + import py + 
py.test.skip('the applevel parts are not ready for py3k') + def test_repr(self): from numpypy import array assert repr(array([1, 2, 3, 4])) == 'array([1, 2, 3, 4])' diff --git a/pypy/module/test_lib_pypy/numpypy/test_numpy.py b/pypy/module/test_lib_pypy/numpypy/test_numpy.py --- a/pypy/module/test_lib_pypy/numpypy/test_numpy.py +++ b/pypy/module/test_lib_pypy/numpypy/test_numpy.py @@ -2,6 +2,9 @@ class AppTestNumpy: def setup_class(cls): + import py + py.test.skip('the applevel parts are not ready for py3k') + cls.space = gettestobjspace(usemodules=['micronumpy']) def test_imports(self): From noreply at buildbot.pypy.org Fri Mar 23 18:43:51 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 23 Mar 2012 18:43:51 +0100 (CET) Subject: [pypy-commit] pypy py3k: kill unicode_w from W_AbstractStringObject: in py3k, byte strings are not convertible to unicode using the default econding Message-ID: <20120323174351.2C15782112@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: py3k Changeset: r53956:3c8ac35c653a Date: 2012-03-23 18:43 +0100 http://bitbucket.org/pypy/pypy/changeset/3c8ac35c653a/ Log: kill unicode_w from W_AbstractStringObject: in py3k, byte strings are not convertible to unicode using the default econding diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -36,20 +36,6 @@ return None return space.wrap(compute_unique_id(space.bytes_w(self))) - def unicode_w(w_self, space): - # Use the default encoding. 
- from pypy.objspace.std.unicodetype import unicode_from_string, \ - decode_object - w_defaultencoding = space.call_function(space.sys.get( - 'getdefaultencoding')) - from pypy.objspace.std.unicodetype import _get_encoding_and_errors, \ - unicode_from_string, decode_object - encoding, errors = _get_encoding_and_errors(space, w_defaultencoding, - space.w_None) - if encoding is None and errors is None: - return space.unicode_w(unicode_from_string(space, w_self)) - return space.unicode_w(decode_object(space, w_self, encoding, errors)) - class W_StringObject(W_AbstractStringObject): from pypy.objspace.std.stringtype import str_typedef as typedef From noreply at buildbot.pypy.org Fri Mar 23 20:10:51 2012 From: noreply at buildbot.pypy.org (ctismer) Date: Fri, 23 Mar 2012 20:10:51 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: Merge with default Message-ID: <20120323191051.4FE6082112@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r53957:fd7331abbb59 Date: 2012-03-23 20:09 +0100 http://bitbucket.org/pypy/pypy/changeset/fd7331abbb59/ Log: Merge with default diff --git a/pypy/doc/you-want-to-help.rst b/pypy/doc/you-want-to-help.rst --- a/pypy/doc/you-want-to-help.rst +++ b/pypy/doc/you-want-to-help.rst @@ -16,7 +16,9 @@ It's not only what we believe in, but also that PyPy's architecture is working very well with TDD in mind and not so well without it. Often the development means progressing in an unrelated corner, one unittest - at a time and then flipping a giant switch. It's worth repeating - PyPy + at a time; and then flipping a giant switch, bringing it all together. + (It generally works out of the box. If it doesn't, then we didn't + write enough unit tests.) It's worth repeating - PyPy approach is great if you do TDD, not so great otherwise. * PyPy uses an entirely different set of tools - most of them included @@ -25,21 +27,21 @@ Architecture ============ -PyPy has layers. The 100 mile view: +PyPy has layers. 
The 100 miles view: -* `RPython`_ is a language in which we write interpreter in PyPy. Not the entire - PyPy project is written in RPython, only parts that are compiled in +* `RPython`_ is the language in which we write interpreters. Not the entire + PyPy project is written in RPython, only the parts that are compiled in the translation process. The interesting point is that RPython has no parser, it's compiled from the live python objects, which make it possible to do all kinds of metaprogramming during import time. In short, Python is a meta programming language for RPython. - RPython standard library is to be found in ``rlib`` subdirectory. + The RPython standard library is to be found in the ``rlib`` subdirectory. .. _`RPython`: coding-guide.html#RPython -* Translation toolchain - this is the part that takes care about translating - RPython to flow graphs and then to C. There is more in `architecture`_ +* The translation toolchain - this is the part that takes care about translating + RPython to flow graphs and then to C. There is more in the `architecture`_ document written about it. It mostly lives in ``rpython``, ``annotator`` and ``objspace/flow``. 
diff --git a/pypy/rlib/parsing/pypackrat.py b/pypy/rlib/parsing/pypackrat.py --- a/pypy/rlib/parsing/pypackrat.py +++ b/pypy/rlib/parsing/pypackrat.py @@ -1,6 +1,8 @@ from pypy.rlib.parsing.tree import Nonterminal, Symbol -from makepackrat import PackratParser, BacktrackException, Status +from pypy.rlib.parsing.makepackrat import PackratParser, BacktrackException, Status + + class Parser(object): def NAME(self): return self._NAME().result diff --git a/pypy/rpython/rstr.py b/pypy/rpython/rstr.py --- a/pypy/rpython/rstr.py +++ b/pypy/rpython/rstr.py @@ -165,6 +165,7 @@ v_char = hop.inputarg(rstr.char_repr, arg=1) v_left = hop.inputconst(Bool, left) v_right = hop.inputconst(Bool, right) + hop.exception_is_here() return hop.gendirectcall(self.ll.ll_strip, v_str, v_char, v_left, v_right) def rtype_method_lstrip(self, hop): diff --git a/pypy/rpython/test/test_rstr.py b/pypy/rpython/test/test_rstr.py --- a/pypy/rpython/test/test_rstr.py +++ b/pypy/rpython/test/test_rstr.py @@ -637,13 +637,16 @@ def _make_split_test(self, split_fn): const = self.const def fn(i): - s = [const(''), const('0.1.2.4.8'), const('.1.2'), const('1.2.'), const('.1.2.4.')][i] - l = getattr(s, split_fn)(const('.')) - sum = 0 - for num in l: - if len(num): - sum += ord(num[0]) - ord(const('0')[0]) - return sum + len(l) * 100 + try: + s = [const(''), const('0.1.2.4.8'), const('.1.2'), const('1.2.'), const('.1.2.4.')][i] + l = getattr(s, split_fn)(const('.')) + sum = 0 + for num in l: + if len(num): + sum += ord(num[0]) - ord(const('0')[0]) + return sum + len(l) * 100 + except MemoryError: + return 42 return fn def test_split(self): From noreply at buildbot.pypy.org Fri Mar 23 20:33:19 2012 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 23 Mar 2012 20:33:19 +0100 (CET) Subject: [pypy-commit] pypy default: we cannot call self.rand() when using -A Message-ID: <20120323193319.E870282112@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r53958:ea2751a04d47 Date: 2012-03-23 
20:32 +0100 http://bitbucket.org/pypy/pypy/changeset/ea2751a04d47/ Log: we cannot call self.rand() when using -A diff --git a/pypy/objspace/std/test/test_userobject.py b/pypy/objspace/std/test/test_userobject.py --- a/pypy/objspace/std/test/test_userobject.py +++ b/pypy/objspace/std/test/test_userobject.py @@ -244,8 +244,12 @@ skip("disabled") if self.runappdirect: total = 500000 + def rand(): + import random + return random.randrange(0, 5) else: total = 50 + rand = self.rand # class A(object): hash = None @@ -256,7 +260,7 @@ a = A() a.next = tail.next tail.next = a - for j in range(self.rand()): + for j in range(rand()): any = any.next if any.hash is None: any.hash = hash(any) From noreply at buildbot.pypy.org Sat Mar 24 00:59:22 2012 From: noreply at buildbot.pypy.org (ctismer) Date: Sat, 24 Mar 2012 00:59:22 +0100 (CET) Subject: [pypy-commit] pypy default: On Windows, tests using os.open with mode 0 create a read-only file, which then is harder to remove. Message-ID: <20120323235922.CF00682112@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: Changeset: r53959:fb4a283cefc4 Date: 2012-03-24 00:58 +0100 http://bitbucket.org/pypy/pypy/changeset/fb4a283cefc4/ Log: On Windows, tests using os.open with mode 0 create a read-only file, which then is harder to remove. 
diff --git a/pypy/translator/test/test_unsimplify.py b/pypy/translator/test/test_unsimplify.py --- a/pypy/translator/test/test_unsimplify.py +++ b/pypy/translator/test/test_unsimplify.py @@ -78,7 +78,7 @@ return x * 6 def hello_world(): if we_are_translated(): - fd = os.open(tmpfile, os.O_WRONLY | os.O_CREAT, 0) + fd = os.open(tmpfile, os.O_WRONLY | os.O_CREAT, 0644) os.close(fd) graph, t = translate(f, [int], type_system) call_initial_function(t, hello_world) @@ -97,7 +97,7 @@ return x * 6 def goodbye_world(): if we_are_translated(): - fd = os.open(tmpfile, os.O_WRONLY | os.O_CREAT, 0) + fd = os.open(tmpfile, os.O_WRONLY | os.O_CREAT, 0644) os.close(fd) graph, t = translate(f, [int], type_system) call_final_function(t, goodbye_world) From noreply at buildbot.pypy.org Sat Mar 24 01:04:18 2012 From: noreply at buildbot.pypy.org (ctismer) Date: Sat, 24 Mar 2012 01:04:18 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: hg merge default Message-ID: <20120324000418.BDECD82112@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r53960:f3ad45946763 Date: 2012-03-24 01:03 +0100 http://bitbucket.org/pypy/pypy/changeset/f3ad45946763/ Log: hg merge default diff --git a/pypy/objspace/std/test/test_userobject.py b/pypy/objspace/std/test/test_userobject.py --- a/pypy/objspace/std/test/test_userobject.py +++ b/pypy/objspace/std/test/test_userobject.py @@ -244,8 +244,12 @@ skip("disabled") if self.runappdirect: total = 500000 + def rand(): + import random + return random.randrange(0, 5) else: total = 50 + rand = self.rand # class A(object): hash = None @@ -256,7 +260,7 @@ a = A() a.next = tail.next tail.next = a - for j in range(self.rand()): + for j in range(rand()): any = any.next if any.hash is None: any.hash = hash(any) diff --git a/pypy/translator/test/test_unsimplify.py b/pypy/translator/test/test_unsimplify.py --- a/pypy/translator/test/test_unsimplify.py +++ b/pypy/translator/test/test_unsimplify.py @@ -78,7 +78,7 @@ return x * 6 def 
hello_world(): if we_are_translated(): - fd = os.open(tmpfile, os.O_WRONLY | os.O_CREAT, 0) + fd = os.open(tmpfile, os.O_WRONLY | os.O_CREAT, 0644) os.close(fd) graph, t = translate(f, [int], type_system) call_initial_function(t, hello_world) @@ -97,7 +97,7 @@ return x * 6 def goodbye_world(): if we_are_translated(): - fd = os.open(tmpfile, os.O_WRONLY | os.O_CREAT, 0) + fd = os.open(tmpfile, os.O_WRONLY | os.O_CREAT, 0644) os.close(fd) graph, t = translate(f, [int], type_system) call_final_function(t, goodbye_world) From pullrequests-noreply at bitbucket.org Sat Mar 24 03:58:07 2012 From: pullrequests-noreply at bitbucket.org (Michael Blume) Date: Sat, 24 Mar 2012 02:58:07 -0000 Subject: [pypy-commit] [pypy/pypy] Allow "from numpy.core.numeric import shape" (pull request #66) Message-ID: <4d76889ee89e7bdf7c38d8e3ba5ccd20@bitbucket.org> A new pull request has been opened by Michael Blume. MichaelBlume/pypy/shape_in_numeric has changes to be pulled into pypy/pypy/default. https://bitbucket.org/pypy/pypy/pull-request/66/allow-from-numpycorenumeric-import-shape Title: Allow "from numpy.core.numeric import shape" Numpy imports thus in some libs, so users may too. Changes to be pulled: 25ef5ec91c76 by Michael Blume: "Allow for "from numpy.core.numeric import shape" This is used internally in Num?" -- This is an issue notification from bitbucket.org. You are receiving this either because you are the participating in a pull request, or you are following it. 
From pullrequests-noreply at bitbucket.org Sat Mar 24 04:11:18 2012 From: pullrequests-noreply at bitbucket.org (Alex Gaynor) Date: Sat, 24 Mar 2012 03:11:18 -0000 Subject: [pypy-commit] [pypy/pypy] Allow "from numpy.core.numeric import shape" (pull request #66) In-Reply-To: <4d76889ee89e7bdf7c38d8e3ba5ccd20@bitbucket.org> References: <4d76889ee89e7bdf7c38d8e3ba5ccd20@bitbucket.org> Message-ID: <20120324031118.27214.51496@bitbucket01.managed.contegix.com> New comment on pull request: https://bitbucket.org/pypy/pypy/pull-request/66/allow-from-numpycorenumeric-import-shape#comment-4307 Alex Gaynor (alex_gaynor) said: NumPy actually uses an import * there, so we may as well copy that. -- This is a pull request comment notification from bitbucket.org. You are receiving this either because you are participating in a pull request, or you are following it. From noreply at buildbot.pypy.org Sat Mar 24 15:43:23 2012 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 24 Mar 2012 15:43:23 +0100 (CET) Subject: [pypy-commit] pypy default: Write section about GC. Message-ID: <20120324144323.88C358445A@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r53961:0b2663cd686d Date: 2012-03-24 15:39 +0100 http://bitbucket.org/pypy/pypy/changeset/0b2663cd686d/ Log: Write section about GC. diff --git a/pypy/doc/you-want-to-help.rst b/pypy/doc/you-want-to-help.rst --- a/pypy/doc/you-want-to-help.rst +++ b/pypy/doc/you-want-to-help.rst @@ -60,9 +60,14 @@ xxx -* Garbage Collectors +* Garbage Collectors: as you can notice, there are no ``Py_INCREF/Py_DECREF`` + equivalents in RPython code. `Garbage collection in PyPy`_ is inserted + during translation. Moreover, this is not reference counting; it is a real + GC written as more RPython code. The best one we have so far is in + ``rpython/memory/gc/minimark.py``. - xxx +.. 
_`Garbage collection in PyPy`: garbage_collection.html + Toolset ======= From noreply at buildbot.pypy.org Sat Mar 24 17:12:41 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 24 Mar 2012 17:12:41 +0100 (CET) Subject: [pypy-commit] pypy default: merged upstream Message-ID: <20120324161241.BEF398445B@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r53963:0e91fc5fa900 Date: 2012-03-24 12:12 -0400 http://bitbucket.org/pypy/pypy/changeset/0e91fc5fa900/ Log: merged upstream diff --git a/pypy/doc/you-want-to-help.rst b/pypy/doc/you-want-to-help.rst --- a/pypy/doc/you-want-to-help.rst +++ b/pypy/doc/you-want-to-help.rst @@ -60,9 +60,14 @@ xxx -* Garbage Collectors +* Garbage Collectors: as you can notice, there are no ``Py_INCREF/Py_DECREF`` + equivalents in RPython code. `Garbage collection in PyPy`_ is inserted + during translation. Moreover, this is not reference counting; it is a real + GC written as more RPython code. The best one we have so far is in + ``rpython/memory/gc/minimark.py``. - xxx +.. 
_`Garbage collection in PyPy`: garbage_collection.html + Toolset ======= diff --git a/pypy/translator/test/test_unsimplify.py b/pypy/translator/test/test_unsimplify.py --- a/pypy/translator/test/test_unsimplify.py +++ b/pypy/translator/test/test_unsimplify.py @@ -78,7 +78,7 @@ return x * 6 def hello_world(): if we_are_translated(): - fd = os.open(tmpfile, os.O_WRONLY | os.O_CREAT, 0) + fd = os.open(tmpfile, os.O_WRONLY | os.O_CREAT, 0644) os.close(fd) graph, t = translate(f, [int], type_system) call_initial_function(t, hello_world) @@ -97,7 +97,7 @@ return x * 6 def goodbye_world(): if we_are_translated(): - fd = os.open(tmpfile, os.O_WRONLY | os.O_CREAT, 0) + fd = os.open(tmpfile, os.O_WRONLY | os.O_CREAT, 0644) os.close(fd) graph, t = translate(f, [int], type_system) call_final_function(t, goodbye_world) From noreply at buildbot.pypy.org Sat Mar 24 17:12:40 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 24 Mar 2012 17:12:40 +0100 (CET) Subject: [pypy-commit] pypy default: fix a failing test Message-ID: <20120324161240.2053C8445A@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r53962:40fccb9514b8 Date: 2012-03-24 12:06 -0400 http://bitbucket.org/pypy/pypy/changeset/40fccb9514b8/ Log: fix a failing test diff --git a/pypy/rpython/lltypesystem/opimpl.py b/pypy/rpython/lltypesystem/opimpl.py --- a/pypy/rpython/lltypesystem/opimpl.py +++ b/pypy/rpython/lltypesystem/opimpl.py @@ -427,6 +427,10 @@ ## assert type(x) is int ## return llmemory.cast_int_to_adr(x) +def op_convert_float_bytes_to_longlong(a): + from pypy.rlib.longlong2float import float2longlong + return float2longlong(a) + def op_unichar_eq(x, y): assert isinstance(x, unicode) and len(x) == 1 From noreply at buildbot.pypy.org Sat Mar 24 17:28:23 2012 From: noreply at buildbot.pypy.org (ctismer) Date: Sat, 24 Mar 2012 17:28:23 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: Merge with default Message-ID: <20120324162823.145798445A@wyvern.cs.uni-duesseldorf.de> 
Author: Christian Tismer Branch: win64-stage1 Changeset: r53964:3ca10036f306 Date: 2012-03-24 17:27 +0100 http://bitbucket.org/pypy/pypy/changeset/3ca10036f306/ Log: Merge with default diff --git a/pypy/doc/you-want-to-help.rst b/pypy/doc/you-want-to-help.rst --- a/pypy/doc/you-want-to-help.rst +++ b/pypy/doc/you-want-to-help.rst @@ -60,9 +60,14 @@ xxx -* Garbage Collectors +* Garbage Collectors: as you can notice, there are no ``Py_INCREF/Py_DECREF`` + equivalents in RPython code. `Garbage collection in PyPy`_ is inserted + during translation. Moreover, this is not reference counting; it is a real + GC written as more RPython code. The best one we have so far is in + ``rpython/memory/gc/minimark.py``. - xxx +.. _`Garbage collection in PyPy`: garbage_collection.html + Toolset ======= diff --git a/pypy/rpython/lltypesystem/opimpl.py b/pypy/rpython/lltypesystem/opimpl.py --- a/pypy/rpython/lltypesystem/opimpl.py +++ b/pypy/rpython/lltypesystem/opimpl.py @@ -427,6 +427,10 @@ ## assert type(x) is int ## return llmemory.cast_int_to_adr(x) +def op_convert_float_bytes_to_longlong(a): + from pypy.rlib.longlong2float import float2longlong + return float2longlong(a) + def op_unichar_eq(x, y): assert isinstance(x, unicode) and len(x) == 1 From noreply at buildbot.pypy.org Sat Mar 24 18:01:16 2012 From: noreply at buildbot.pypy.org (ctismer) Date: Sat, 24 Mar 2012 18:01:16 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: small cleanup Message-ID: <20120324170116.BC56D82E4D@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r53965:6aee56b07e15 Date: 2012-03-24 18:00 +0100 http://bitbucket.org/pypy/pypy/changeset/6aee56b07e15/ Log: small cleanup diff --git a/pypy/translator/tool/cbuild.py b/pypy/translator/tool/cbuild.py --- a/pypy/translator/tool/cbuild.py +++ b/pypy/translator/tool/cbuild.py @@ -321,8 +321,8 @@ /* Windows: winsock/winsock2 mess */ #define WIN32_LEAN_AND_MEAN #ifdef _WIN64 - typedef long long Signed; - typedef unsigned 
long long Unsigned; + typedef __int64 Signed; + typedef unsigned __int64 Unsigned; # define SIGNED_MIN LLONG_MIN #else typedef long Signed; From noreply at buildbot.pypy.org Sat Mar 24 20:52:19 2012 From: noreply at buildbot.pypy.org (ctismer) Date: Sat, 24 Mar 2012 20:52:19 +0100 (CET) Subject: [pypy-commit] pypy default: Merge with win64-stage1 Message-ID: <20120324195219.D036182E4D@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: Changeset: r53966:85b0ab7ff9a1 Date: 2012-03-24 20:42 +0100 http://bitbucket.org/pypy/pypy/changeset/85b0ab7ff9a1/ Log: Merge with win64-stage1 diff --git a/pypy/doc/discussion/win64_todo.txt b/pypy/doc/discussion/win64_todo.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/discussion/win64_todo.txt @@ -0,0 +1,9 @@ +2011-11-04 +ll_os.py has a problem with the file rwin32.py. +Temporarily disabled for the win64_gborg branch. This needs to be +investigated and re-enabled. +Resolved, enabled. + +2011-11-05 +test_typed.py needs explicit tests to ensure that we +handle word sizes right. \ No newline at end of file diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -18,7 +18,8 @@ Edition. Other configurations may work as well. The translation scripts will set up the appropriate environment variables -for the compiler. They will attempt to locate the same compiler version that +for the compiler, so you do not need to run vcvars before translation. +They will attempt to locate the same compiler version that was used to build the Python interpreter doing the translation. Failing that, they will pick the most recent Visual Studio compiler they can find. In addition, the target architecture @@ -26,7 +27,7 @@ using a 32 bit Python and vice versa. **Note:** PyPy is currently not supported for 64 bit Windows, and translation -will be aborted in this case. +will fail in this case. 
The compiler is all you need to build pypy-c, but it will miss some modules that relies on third-party libraries. See below how to get @@ -57,7 +58,8 @@ install third-party libraries. We chose to install them in the parent directory of the pypy checkout. For example, if you installed pypy in ``d:\pypy\trunk\`` (This directory contains a README file), the base -directory is ``d:\pypy``. +directory is ``d:\pypy``. You may choose different values by setting the +INCLUDE, LIB and PATH (for DLLs) The Boehm garbage collector ~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -126,18 +128,54 @@ ------------------------ You can compile pypy with the mingw compiler, using the --cc=mingw32 option; -mingw.exe must be on the PATH. +gcc.exe must be on the PATH. If the -cc flag does not begin with "ming", it should be +the name of a valid gcc-derivative compiler, i.e. x86_64-w64-mingw32-gcc for the 64 bit +compiler creating a 64 bit target. -libffi for the mingw32 compiler +libffi for the mingw compiler ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -To enable the _rawffi (and ctypes) module, you need to compile a mingw32 -version of libffi. I downloaded the `libffi source files`_, and extracted -them in the base directory. Then run:: +To enable the _rawffi (and ctypes) module, you need to compile a mingw +version of libffi. Here is one way to do this, wich should allow you to try +to build for win64 or win32: + +#. Download and unzip a `mingw32 build`_ or `mingw64 build`_, say into c:\mingw +#. If you do not use cygwin, you will need msys to provide make, + autoconf tools and other goodies. + + #. Download and unzip a `msys for mingw`_, say into c:\msys + #. Edit the c:\msys\etc\fstab file to mount c:\mingw + +#. Download and unzip the `libffi source files`_, and extract + them in the base directory. +#. Run c:\msys\msys.bat or a cygwin shell which should make you + feel better since it is a shell prompt with shell tools. +#. 
From inside the shell, cd to the libffi directory and do:: sh ./configure make cp .libs/libffi-5.dll +If you can't find the dll, and the libtool issued a warning about +"undefined symbols not allowed", you will need to edit the libffi +Makefile in the toplevel directory. Add the flag -no-undefined to +the definition of libffi_la_LDFLAGS + +If you wish to experiment with win64, you must run configure with flags:: + + sh ./configure --build=x86_64-w64-mingw32 --host=x86_64-w64-mingw32 + +or such, depending on your mingw64 download. + +hacking on Pypy with the mingw compiler +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Since hacking on Pypy means running tests, you will need a way to specify +the mingw compiler when hacking (as opposed to translating). As of +March 2012, --cc is not a valid option for pytest.py. However if you set an +environment variable CC it will allow you to choose a compiler. + +.. _'mingw32 build': http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win32/Automated%20Builds +.. _`mingw64 build`: http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win64/Automated%20Builds +.. _`msys for mingw`: http://sourceforge.net/projects/mingw-w64/files/External%20binary%20packages%20%28Win64%20hosted%29/MSYS%20%2832-bit%29 .. _`libffi source files`: http://sourceware.org/libffi/ .. 
_`RPython translation toolchain`: translation.html diff --git a/pypy/rlib/_rffi_stacklet.py b/pypy/rlib/_rffi_stacklet.py --- a/pypy/rlib/_rffi_stacklet.py +++ b/pypy/rlib/_rffi_stacklet.py @@ -14,7 +14,7 @@ includes = ['src/stacklet/stacklet.h'], separate_module_sources = ['#include "src/stacklet/stacklet.c"\n'], ) -if sys.platform == 'win32': +if 'masm' in dir(eci.platform): # Microsoft compiler if is_emulated_long: asmsrc = 'switch_x64_msvc.asm' else: diff --git a/pypy/rlib/_rsocket_rffi.py b/pypy/rlib/_rsocket_rffi.py --- a/pypy/rlib/_rsocket_rffi.py +++ b/pypy/rlib/_rsocket_rffi.py @@ -58,12 +58,12 @@ header_lines = [ '#include ', '#include ', + '#include ', # winsock2 defines AF_UNIX, but not sockaddr_un '#undef AF_UNIX', ] if _MSVC: header_lines.extend([ - '#include ', # these types do not exist on microsoft compilers 'typedef int ssize_t;', 'typedef unsigned __int16 uint16_t;', @@ -71,6 +71,7 @@ ]) else: # MINGW includes = ('stdint.h',) + """ header_lines.extend([ '''\ #ifndef _WIN32_WINNT @@ -88,6 +89,7 @@ u_long keepaliveinterval; };''' ]) + """ HEADER = '\n'.join(header_lines) COND_HEADER = '' constants = {} diff --git a/pypy/rlib/clibffi.py b/pypy/rlib/clibffi.py --- a/pypy/rlib/clibffi.py +++ b/pypy/rlib/clibffi.py @@ -114,9 +114,10 @@ ) eci = rffi_platform.configure_external_library( - 'libffi', eci, + 'libffi-5', eci, [dict(prefix='libffi-', include_dir='include', library_dir='.libs'), + dict(prefix=r'c:\mingw64', include_dir='include', library_dir='lib'), ]) else: libffidir = py.path.local(pypydir).join('translator', 'c', 'src', 'libffi_msvc') diff --git a/pypy/rlib/rwin32.py b/pypy/rlib/rwin32.py --- a/pypy/rlib/rwin32.py +++ b/pypy/rlib/rwin32.py @@ -141,6 +141,10 @@ cfile = udir.join('dosmaperr.c') cfile.write(r''' #include + #include + #ifdef __GNUC__ + #define _dosmaperr mingw_dosmaperr + #endif int main() { int i; diff --git a/pypy/rlib/test/autopath.py b/pypy/rlib/test/autopath.py new file mode 100644 --- /dev/null +++ 
b/pypy/rlib/test/autopath.py @@ -0,0 +1,131 @@ +""" +self cloning, automatic path configuration + +copy this into any subdirectory of pypy from which scripts need +to be run, typically all of the test subdirs. +The idea is that any such script simply issues + + import autopath + +and this will make sure that the parent directory containing "pypy" +is in sys.path. + +If you modify the master "autopath.py" version (in pypy/tool/autopath.py) +you can directly run it which will copy itself on all autopath.py files +it finds under the pypy root directory. + +This module always provides these attributes: + + pypydir pypy root directory path + this_dir directory where this autopath.py resides + +""" + +def __dirinfo(part): + """ return (partdir, this_dir) and insert parent of partdir + into sys.path. If the parent directories don't have the part + an EnvironmentError is raised.""" + + import sys, os + try: + head = this_dir = os.path.realpath(os.path.dirname(__file__)) + except NameError: + head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0])) + + error = None + while head: + partdir = head + head, tail = os.path.split(head) + if tail == part: + checkfile = os.path.join(partdir, os.pardir, 'pypy', '__init__.py') + if not os.path.exists(checkfile): + error = "Cannot find %r" % (os.path.normpath(checkfile),) + break + else: + error = "Cannot find the parent directory %r of the path %r" % ( + partdir, this_dir) + if not error: + # check for bogus end-of-line style (e.g. files checked out on + # Windows and moved to Unix) + f = open(__file__.replace('.pyc', '.py'), 'r') + data = f.read() + f.close() + if data.endswith('\r\n') or data.endswith('\r'): + error = ("Bad end-of-line style in the .py files. Typically " + "caused by a zip file or a checkout done on Windows and " + "moved to Unix or vice-versa.") + if error: + raise EnvironmentError("Invalid source tree - bogus checkout! 
" + + error) + + pypy_root = os.path.join(head, '') + try: + sys.path.remove(head) + except ValueError: + pass + sys.path.insert(0, head) + + munged = {} + for name, mod in sys.modules.items(): + if '.' in name: + continue + fn = getattr(mod, '__file__', None) + if not isinstance(fn, str): + continue + newname = os.path.splitext(os.path.basename(fn))[0] + if not newname.startswith(part + '.'): + continue + path = os.path.join(os.path.dirname(os.path.realpath(fn)), '') + if path.startswith(pypy_root) and newname != part: + modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep) + if newname != '__init__': + modpaths.append(newname) + modpath = '.'.join(modpaths) + if modpath not in sys.modules: + munged[modpath] = mod + + for name, mod in munged.iteritems(): + if name not in sys.modules: + sys.modules[name] = mod + if '.' in name: + prename = name[:name.rfind('.')] + postname = name[len(prename)+1:] + if prename not in sys.modules: + __import__(prename) + if not hasattr(sys.modules[prename], postname): + setattr(sys.modules[prename], postname, mod) + + return partdir, this_dir + +def __clone(): + """ clone master version of autopath.py into all subdirs """ + from os.path import join, walk + if not this_dir.endswith(join('pypy','tool')): + raise EnvironmentError("can only clone master version " + "'%s'" % join(pypydir, 'tool',_myname)) + + + def sync_walker(arg, dirname, fnames): + if _myname in fnames: + fn = join(dirname, _myname) + f = open(fn, 'rwb+') + try: + if f.read() == arg: + print "checkok", fn + else: + print "syncing", fn + f = open(fn, 'w') + f.write(arg) + finally: + f.close() + s = open(join(pypydir, 'tool', _myname), 'rb').read() + walk(pypydir, sync_walker, s) + +_myname = 'autopath.py' + +# set guaranteed attributes + +pypydir, this_dir = __dirinfo('pypy') + +if __name__ == '__main__': + __clone() diff --git a/pypy/rpython/tool/rffi_platform.py b/pypy/rpython/tool/rffi_platform.py --- a/pypy/rpython/tool/rffi_platform.py +++ 
b/pypy/rpython/tool/rffi_platform.py @@ -660,8 +660,8 @@ if isinstance(fieldtype, lltype.FixedSizeArray): size, _ = expected_size_and_sign return lltype.FixedSizeArray(fieldtype.OF, size/_sizeof(fieldtype.OF)) - raise TypeError("conflicting field type %r for %r" % (fieldtype, - fieldname)) + raise TypeError("conflict between translating python and compiler field" + " type %r for %r" % (fieldtype, fieldname)) def expose_value_as_rpython(value): if intmask(value) == value: diff --git a/pypy/translator/c/src/libffi_msvc/win64.asm b/pypy/translator/c/src/libffi_msvc/win64.asm new file mode 100644 --- /dev/null +++ b/pypy/translator/c/src/libffi_msvc/win64.asm @@ -0,0 +1,156 @@ +PUBLIC ffi_call_AMD64 + +EXTRN __chkstk:NEAR +EXTRN ffi_closure_SYSV:NEAR + +_TEXT SEGMENT + +;;; ffi_closure_OUTER will be called with these registers set: +;;; rax points to 'closure' +;;; r11 contains a bit mask that specifies which of the +;;; first four parameters are float or double +;;; +;;; It must move the parameters passed in registers to their stack location, +;;; call ffi_closure_SYSV for the actual work, then return the result. +;;; +ffi_closure_OUTER PROC FRAME + ;; save actual arguments to their stack space. 
+ test r11, 1 + jne first_is_float + mov QWORD PTR [rsp+8], rcx + jmp second +first_is_float: + movlpd QWORD PTR [rsp+8], xmm0 + +second: + test r11, 2 + jne second_is_float + mov QWORD PTR [rsp+16], rdx + jmp third +second_is_float: + movlpd QWORD PTR [rsp+16], xmm1 + +third: + test r11, 4 + jne third_is_float + mov QWORD PTR [rsp+24], r8 + jmp forth +third_is_float: + movlpd QWORD PTR [rsp+24], xmm2 + +forth: + test r11, 8 + jne forth_is_float + mov QWORD PTR [rsp+32], r9 + jmp done +forth_is_float: + movlpd QWORD PTR [rsp+32], xmm3 + +done: +.ALLOCSTACK 40 + sub rsp, 40 +.ENDPROLOG + mov rcx, rax ; context is first parameter + mov rdx, rsp ; stack is second parameter + add rdx, 40 ; correct our own area + mov rax, ffi_closure_SYSV + call rax ; call the real closure function + ;; Here, code is missing that handles float return values + add rsp, 40 + movd xmm0, rax ; In case the closure returned a float. + ret 0 +ffi_closure_OUTER ENDP + + +;;; ffi_call_AMD64 + +stack$ = 0 +prepfunc$ = 32 +ecif$ = 40 +bytes$ = 48 +flags$ = 56 +rvalue$ = 64 +fn$ = 72 + +ffi_call_AMD64 PROC FRAME + + mov QWORD PTR [rsp+32], r9 + mov QWORD PTR [rsp+24], r8 + mov QWORD PTR [rsp+16], rdx + mov QWORD PTR [rsp+8], rcx +.PUSHREG rbp + push rbp +.ALLOCSTACK 48 + sub rsp, 48 ; 00000030H +.SETFRAME rbp, 32 + lea rbp, QWORD PTR [rsp+32] +.ENDPROLOG + + mov eax, DWORD PTR bytes$[rbp] + add rax, 15 + and rax, -16 + call __chkstk + sub rsp, rax + lea rax, QWORD PTR [rsp+32] + mov QWORD PTR stack$[rbp], rax + + mov rdx, QWORD PTR ecif$[rbp] + mov rcx, QWORD PTR stack$[rbp] + call QWORD PTR prepfunc$[rbp] + + mov rsp, QWORD PTR stack$[rbp] + + movlpd xmm3, QWORD PTR [rsp+24] + movd r9, xmm3 + + movlpd xmm2, QWORD PTR [rsp+16] + movd r8, xmm2 + + movlpd xmm1, QWORD PTR [rsp+8] + movd rdx, xmm1 + + movlpd xmm0, QWORD PTR [rsp] + movd rcx, xmm0 + + call QWORD PTR fn$[rbp] +ret_int$: + cmp DWORD PTR flags$[rbp], 1 ; FFI_TYPE_INT + jne ret_float$ + + mov rcx, QWORD PTR rvalue$[rbp] + mov DWORD PTR 
[rcx], eax + jmp SHORT ret_nothing$ + +ret_float$: + cmp DWORD PTR flags$[rbp], 2 ; FFI_TYPE_FLOAT + jne SHORT ret_double$ + + mov rax, QWORD PTR rvalue$[rbp] + movlpd QWORD PTR [rax], xmm0 + jmp SHORT ret_nothing$ + +ret_double$: + cmp DWORD PTR flags$[rbp], 3 ; FFI_TYPE_DOUBLE + jne SHORT ret_int64$ + + mov rax, QWORD PTR rvalue$[rbp] + movlpd QWORD PTR [rax], xmm0 + jmp SHORT ret_nothing$ + +ret_int64$: + cmp DWORD PTR flags$[rbp], 12 ; FFI_TYPE_SINT64 + jne ret_nothing$ + + mov rcx, QWORD PTR rvalue$[rbp] + mov QWORD PTR [rcx], rax + jmp SHORT ret_nothing$ + +ret_nothing$: + xor eax, eax + + lea rsp, QWORD PTR [rbp+16] + pop rbp + ret 0 +ffi_call_AMD64 ENDP +_TEXT ENDS +END diff --git a/pypy/translator/platform/__init__.py b/pypy/translator/platform/__init__.py --- a/pypy/translator/platform/__init__.py +++ b/pypy/translator/platform/__init__.py @@ -301,6 +301,8 @@ global platform log.msg("Setting platform to %r cc=%s" % (new_platform,cc)) platform = pick_platform(new_platform, cc) + if not platform: + raise ValueError("pick_platform failed") if new_platform == 'host': global host diff --git a/pypy/translator/platform/windows.py b/pypy/translator/platform/windows.py --- a/pypy/translator/platform/windows.py +++ b/pypy/translator/platform/windows.py @@ -7,15 +7,27 @@ from pypy.translator.platform import log, _run_subprocess from pypy.translator.platform import Platform, posix +def _get_compiler_type(cc, x64_flag): + import subprocess + if not cc: + cc = os.environ.get('CC','') + if not cc: + return MsvcPlatform(cc=cc, x64=x64_flag) + elif cc.startswith('mingw'): + return MingwPlatform(cc) + try: + subprocess.check_output([cc, '--version']) + except: + raise ValueError,"Could not find compiler specified by cc option" + \ + " '%s', it must be a valid exe file on your path"%cc + return MingwPlatform(cc) + def Windows(cc=None): - if cc == 'mingw32': - return MingwPlatform(cc) - else: - return MsvcPlatform(cc, False) + return _get_compiler_type(cc, False) + +def 
Windows_x64(cc=None): + return _get_compiler_type(cc, True) -def Windows_x64(cc=None): - return MsvcPlatform(cc, True) - def _get_msvc_env(vsver, x64flag): try: toolsdir = os.environ['VS%sCOMNTOOLS' % vsver] @@ -31,14 +43,16 @@ vcvars = os.path.join(toolsdir, 'vsvars32.bat') import subprocess - popen = subprocess.Popen('"%s" & set' % (vcvars,), + try: + popen = subprocess.Popen('"%s" & set' % (vcvars,), stdout=subprocess.PIPE, stderr=subprocess.PIPE) - stdout, stderr = popen.communicate() - if popen.wait() != 0: - return - + stdout, stderr = popen.communicate() + if popen.wait() != 0: + return None + except: + return None env = {} stdout = stdout.replace("\r\n", "\n") @@ -395,7 +409,9 @@ so_ext = 'dll' def __init__(self, cc=None): - Platform.__init__(self, 'gcc') + if not cc: + cc = 'gcc' + Platform.__init__(self, cc) def _args_for_shared(self, args): return ['-shared'] + args From noreply at buildbot.pypy.org Sat Mar 24 20:55:17 2012 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 24 Mar 2012 20:55:17 +0100 (CET) Subject: [pypy-commit] pypy numpypy-out: fix translation error caused by reuse of self.res attribute by both out arg processing and ToStringArray Message-ID: <20120324195517.A495C82E4D@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: numpypy-out Changeset: r53967:cda3f143e279 Date: 2012-03-24 21:44 +0200 http://bitbucket.org/pypy/pypy/changeset/cda3f143e279/ Log: fix translation error caused by reuse of self.res attribute by both out arg processing and ToStringArray diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -871,9 +871,9 @@ self.s = StringBuilder(child.size * self.item_size) Call1.__init__(self, None, 'tostring', child.shape, dtype, dtype, child) - self.res = W_NDimArray([1], dtype, 'C') - self.res_casted = rffi.cast(rffi.CArrayPtr(lltype.Char), - self.res.storage) + self.res_str = 
W_NDimArray([1], dtype, order='C') + self.res_str_casted = rffi.cast(rffi.CArrayPtr(lltype.Char), + self.res_str.storage) def create_sig(self): return signature.ToStringSignature(self.calc_dtype, diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -361,10 +361,10 @@ from pypy.module.micronumpy.interp_numarray import ToStringArray assert isinstance(arr, ToStringArray) - arr.res.setitem(0, self.child.eval(frame, arr.values).convert_to( + arr.res_str.setitem(0, self.child.eval(frame, arr.values).convert_to( self.dtype)) for i in range(arr.item_size): - arr.s.append(arr.res_casted[i]) + arr.s.append(arr.res_str_casted[i]) class BroadcastLeft(Call2): def _invent_numbering(self, cache, allnumbers): From noreply at buildbot.pypy.org Sat Mar 24 20:55:19 2012 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 24 Mar 2012 20:55:19 +0100 (CET) Subject: [pypy-commit] pypy numpypy-out: merge from default, ready for review Message-ID: <20120324195519.AEE8C82E4D@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: numpypy-out Changeset: r53968:0add464679de Date: 2012-03-24 21:44 +0200 http://bitbucket.org/pypy/pypy/changeset/0add464679de/ Log: merge from default, ready for review diff --git a/pypy/doc/you-want-to-help.rst b/pypy/doc/you-want-to-help.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/you-want-to-help.rst @@ -0,0 +1,75 @@ + +You want to help with PyPy, now what? +===================================== + +PyPy is a very large project that has a reputation of being hard to dive into. +Some of this fame is warranted, some of it is purely accidental. There are three +important lessons that everyone willing to contribute should learn: + +* PyPy has layers. There are many pieces of architecture that are very well + separated from each other. 
More about this below, but often the manifestation + of this is that things are at a different layer than you would expect them + to be. For example if you are looking for the JIT implementation, you will + not find it in the implementation of the Python programming language. + +* Because of the above, we are very serious about Test Driven Development. + It's not only what we believe in, but also that PyPy's architecture is + working very well with TDD in mind and not so well without it. Often + the development means progressing in an unrelated corner, one unittest + at a time; and then flipping a giant switch, bringing it all together. + (It generally works out of the box. If it doesn't, then we didn't + write enough unit tests.) It's worth repeating - PyPy + approach is great if you do TDD, not so great otherwise. + +* PyPy uses an entirely different set of tools - most of them included + in the PyPy repository. There is no Makefile, nor autoconf. More below + +Architecture +============ + +PyPy has layers. The 100 miles view: + +* `RPython`_ is the language in which we write interpreters. Not the entire + PyPy project is written in RPython, only the parts that are compiled in + the translation process. The interesting point is that RPython has no parser, + it's compiled from the live python objects, which make it possible to do + all kinds of metaprogramming during import time. In short, Python is a meta + programming language for RPython. + + The RPython standard library is to be found in the ``rlib`` subdirectory. + +.. _`RPython`: coding-guide.html#RPython + +* The translation toolchain - this is the part that takes care about translating + RPython to flow graphs and then to C. There is more in the `architecture`_ + document written about it. + + It mostly lives in ``rpython``, ``annotator`` and ``objspace/flow``. + +.. 
_`architecture`: architecture.html + +* Python Interpreter + + xxx + +* Python modules + + xxx + +* JIT + + xxx + +* Garbage Collectors: as you can notice, there are no ``Py_INCREF/Py_DECREF`` + equivalents in RPython code. `Garbage collection in PyPy`_ is inserted + during translation. Moreover, this is not reference counting; it is a real + GC written as more RPython code. The best one we have so far is in + ``rpython/memory/gc/minimark.py``. + +.. _`Garbage collection in PyPy`: garbage_collection.html + + +Toolset +======= + +xxx diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py --- a/pypy/jit/backend/x86/rx86.py +++ b/pypy/jit/backend/x86/rx86.py @@ -601,7 +601,9 @@ CVTSS2SD_xb = xmminsn('\xF3', rex_nw, '\x0F\x5A', register(1, 8), stack_bp(2)) - # These work on machine sized registers. + # These work on machine sized registers, so MOVD is actually MOVQ + # when running on 64 bits. Note a bug in the Intel documentation: + # http://lists.gnu.org/archive/html/bug-binutils/2007-07/msg00095.html MOVD_rx = xmminsn('\x66', rex_w, '\x0F\x7E', register(2, 8), register(1), '\xC0') MOVD_xr = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), register(2), '\xC0') MOVD_xb = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), stack_bp(2)) diff --git a/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py b/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py --- a/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py +++ b/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py @@ -182,6 +182,12 @@ filename = str(testdir.join(FILENAME % methname)) g = open(inputname, 'w') g.write('\x09.string "%s"\n' % BEGIN_TAG) + # + if instrname == 'MOVD' and self.WORD == 8: + instrname = 'MOVQ' + if argmodes == 'xb': + py.test.skip('"as" uses an undocumented alternate encoding??') + # for args in args_lists: suffix = "" ## all = instr.as_all_suffixes @@ -229,9 +235,6 @@ # movq $xxx, %rax => movl $xxx, %eax suffix = 'l' ops[1] = reduce_to_32bit(ops[1]) - if 
instrname.lower() == 'movd': - ops[0] = reduce_to_32bit(ops[0]) - ops[1] = reduce_to_32bit(ops[1]) # op = '\t%s%s %s%s' % (instrname.lower(), suffix, ', '.join(ops), following) diff --git a/pypy/module/_continuation/test/test_zpickle.py b/pypy/module/_continuation/test/test_zpickle.py --- a/pypy/module/_continuation/test/test_zpickle.py +++ b/pypy/module/_continuation/test/test_zpickle.py @@ -108,6 +108,7 @@ def setup_class(cls): cls.space = gettestobjspace(usemodules=('_continuation', 'struct'), CALL_METHOD=True) + cls.space.config.translation.continuation = True cls.space.appexec([], """(): global continulet, A, __name__ diff --git a/pypy/module/cpyext/test/conftest.py b/pypy/module/cpyext/test/conftest.py --- a/pypy/module/cpyext/test/conftest.py +++ b/pypy/module/cpyext/test/conftest.py @@ -10,7 +10,7 @@ return False def pytest_funcarg__space(request): - return gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi']) + return gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array']) def pytest_funcarg__api(request): return request.cls.api diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -19,7 +19,8 @@ class BaseApiTest(LeakCheckingTest): def setup_class(cls): - cls.space = space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi']) + cls.space = space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', + 'array']) # warm up reference counts: # - the posix module allocates a HCRYPTPROV on Windows diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -1,3 +1,4 @@ +from pypy.conftest import gettestobjspace from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase import py diff --git a/pypy/module/cpyext/test/test_cpyext.py 
b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -35,7 +35,7 @@ class AppTestApi: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi']) + cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array']) from pypy.rlib.libffi import get_libc_name cls.w_libc = cls.space.wrap(get_libc_name()) @@ -165,6 +165,7 @@ return leaking class AppTestCpythonExtensionBase(LeakCheckingTest): + def setup_class(cls): cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array']) cls.space.getbuiltinmodule("cpyext") diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -987,6 +987,10 @@ os.environ['LANG'] = oldlang class AppTestImportHooks(object): + + def setup_class(cls): + cls.space = gettestobjspace(usemodules=('struct',)) + def test_meta_path(self): tried_imports = [] class Importer(object): diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -359,6 +359,7 @@ name="int64", char="q", w_box_type=space.gettypefor(interp_boxes.W_Int64Box), + alternate_constructors=[space.w_long], ) self.w_uint64dtype = W_Dtype( types.UInt64(), @@ -386,23 +387,6 @@ alternate_constructors=[space.w_float], aliases=["float"], ) - self.w_longlongdtype = W_Dtype( - types.Int64(), - num=9, - kind=SIGNEDLTR, - name='int64', - char='q', - w_box_type = space.gettypefor(interp_boxes.W_LongLongBox), - alternate_constructors=[space.w_long], - ) - self.w_ulonglongdtype = W_Dtype( - types.UInt64(), - num=10, - kind=UNSIGNEDLTR, - name='uint64', - char='Q', - w_box_type = space.gettypefor(interp_boxes.W_ULongLongBox), - ) self.w_stringdtype = W_Dtype( types.StringType(1), num=18, @@ -435,17 +419,19 @@ 
self.w_booldtype, self.w_int8dtype, self.w_uint8dtype, self.w_int16dtype, self.w_uint16dtype, self.w_int32dtype, self.w_uint32dtype, self.w_longdtype, self.w_ulongdtype, - self.w_longlongdtype, self.w_ulonglongdtype, + self.w_int64dtype, self.w_uint64dtype, self.w_float32dtype, self.w_float64dtype, self.w_stringdtype, self.w_unicodedtype, self.w_voiddtype, ] - self.dtypes_by_num_bytes = sorted( + self.float_dtypes_by_num_bytes = sorted( (dtype.itemtype.get_element_size(), dtype) - for dtype in self.builtin_dtypes + for dtype in [self.w_float32dtype, self.w_float64dtype] ) self.dtypes_by_name = {} - for dtype in self.builtin_dtypes: + # we reverse, so the stuff with lower numbers override stuff with + # higher numbers + for dtype in reversed(self.builtin_dtypes): self.dtypes_by_name[dtype.name] = dtype can_name = dtype.kind + str(dtype.itemtype.get_element_size()) self.dtypes_by_name[can_name] = dtype @@ -473,7 +459,7 @@ 'LONG': self.w_longdtype, 'UNICODE': self.w_unicodedtype, #'OBJECT', - 'ULONGLONG': self.w_ulonglongdtype, + 'ULONGLONG': self.w_uint64dtype, 'STRING': self.w_stringdtype, #'CDOUBLE', #'DATETIME', diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1154,7 +1154,8 @@ @unwrap_spec(subok=bool, copy=bool, ownmaskna=bool) def array(space, w_item_or_iterable, w_dtype=None, w_order=None, - subok=True, copy=True, w_maskna=None, ownmaskna=False): + subok=True, copy=True, w_maskna=None, ownmaskna=False, + w_ndmin=None): # find scalar if w_maskna is None: w_maskna = space.w_None @@ -1199,8 +1200,13 @@ break if dtype is None: dtype = interp_dtype.get_dtype_cache(space).w_float64dtype + shapelen = len(shape) + if w_ndmin is not None and not space.is_w(w_ndmin, space.w_None): + ndmin = space.int_w(w_ndmin) + if ndmin > shapelen: + shape = [1] * (ndmin - shapelen) + shape + shapelen = ndmin arr = W_NDimArray(shape[:], 
dtype=dtype, order=order) - shapelen = len(shape) arr_iter = arr.create_iter() # XXX we might want to have a jitdriver here for i in range(len(elems_w)): diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -425,7 +425,7 @@ return dt if dt.num >= 5: return interp_dtype.get_dtype_cache(space).w_float64dtype - for bytes, dtype in interp_dtype.get_dtype_cache(space).dtypes_by_num_bytes: + for bytes, dtype in interp_dtype.get_dtype_cache(space).float_dtypes_by_num_bytes: if (dtype.kind == interp_dtype.FLOATINGLTR and dtype.itemtype.get_element_size() > dt.itemtype.get_element_size()): return dtype diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -302,6 +302,7 @@ else: raises(OverflowError, numpy.int32, 2147483648) raises(OverflowError, numpy.int32, '2147483648') + assert numpy.dtype('int32') is numpy.dtype(numpy.int32) def test_uint32(self): import sys @@ -333,15 +334,11 @@ assert numpy.dtype(numpy.int64).type is numpy.int64 assert numpy.int64(3) == 3 - if sys.maxint >= 2 ** 63 - 1: - assert numpy.int64(9223372036854775807) == 9223372036854775807 - assert numpy.int64('9223372036854775807') == 9223372036854775807 - else: - raises(OverflowError, numpy.int64, 9223372036854775807) - raises(OverflowError, numpy.int64, '9223372036854775807') + assert numpy.int64(9223372036854775807) == 9223372036854775807 + assert numpy.int64(9223372036854775807) == 9223372036854775807 raises(OverflowError, numpy.int64, 9223372036854775808) - raises(OverflowError, numpy.int64, '9223372036854775808') + raises(OverflowError, numpy.int64, 9223372036854775808L) def test_uint64(self): import sys diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- 
a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -211,6 +211,18 @@ assert a.shape == (3,) assert a.dtype is dtype(int) + def test_ndmin(self): + from _numpypy import array + + arr = array([[[1]]], ndmin=1) + assert arr.shape == (1, 1, 1) + + def test_noop_ndmin(self): + from _numpypy import array + + arr = array([1], ndmin=3) + assert arr.shape == (1, 1, 1) + def test_type(self): from _numpypy import array ar = array(range(5)) diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -500,6 +500,19 @@ BoxType = interp_boxes.W_ULongBox format_code = "L" +def _int64_coerce(self, space, w_item): + try: + return self._base_coerce(space, w_item) + except OperationError, e: + if not e.match(space, space.w_OverflowError): + raise + bigint = space.bigint_w(w_item) + try: + value = bigint.tolonglong() + except OverflowError: + raise OperationError(space.w_OverflowError, space.w_None) + return self.box(value) + class Int64(BaseType, Integer): _attrs_ = () @@ -507,6 +520,8 @@ BoxType = interp_boxes.W_Int64Box format_code = "q" + _coerce = func_with_new_name(_int64_coerce, '_coerce') + class NonNativeInt64(BaseType, NonNativeInteger): _attrs_ = () @@ -514,6 +529,8 @@ BoxType = interp_boxes.W_Int64Box format_code = "q" + _coerce = func_with_new_name(_int64_coerce, '_coerce') + def _uint64_coerce(self, space, w_item): try: return self._base_coerce(space, w_item) diff --git a/pypy/objspace/flow/model.py b/pypy/objspace/flow/model.py --- a/pypy/objspace/flow/model.py +++ b/pypy/objspace/flow/model.py @@ -7,8 +7,7 @@ from pypy.tool.uid import uid, Hashable from pypy.tool.descriptor import roproperty from pypy.tool.sourcetools import PY_IDENTIFIER, nice_repr_for_func -from pypy.tool.identity_dict import identity_dict -from pypy.rlib.rarithmetic import is_valid_int, r_longlong, r_ulonglong +from pypy.rlib.rarithmetic import 
is_valid_int, r_longlong, r_ulonglong, r_uint """ @@ -546,7 +545,7 @@ for n in cases[:len(cases)-has_default]: if is_valid_int(n): continue - if isinstance(n, (r_longlong, r_ulonglong)): + if isinstance(n, (r_longlong, r_ulonglong, r_uint)): continue if isinstance(n, (str, unicode)) and len(n) == 1: continue diff --git a/pypy/objspace/std/ropeobject.py b/pypy/objspace/std/ropeobject.py --- a/pypy/objspace/std/ropeobject.py +++ b/pypy/objspace/std/ropeobject.py @@ -41,11 +41,6 @@ return w_self return W_RopeObject(w_self._node) - def unicode_w(w_self, space): - # XXX should this use the default encoding? - from pypy.objspace.std.unicodetype import plain_str2unicode - return plain_str2unicode(space, w_self._node.flatten_string()) - W_RopeObject.EMPTY = W_RopeObject(rope.LiteralStringNode.EMPTY) W_RopeObject.PREBUILT = [W_RopeObject(rope.LiteralStringNode.PREBUILT[i]) for i in range(256)] diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -37,6 +37,20 @@ return None return space.wrap(compute_unique_id(space.str_w(self))) + def unicode_w(w_self, space): + # Use the default encoding. + from pypy.objspace.std.unicodetype import unicode_from_string, \ + decode_object + w_defaultencoding = space.call_function(space.sys.get( + 'getdefaultencoding')) + from pypy.objspace.std.unicodetype import _get_encoding_and_errors, \ + unicode_from_string, decode_object + encoding, errors = _get_encoding_and_errors(space, w_defaultencoding, + space.w_None) + if encoding is None and errors is None: + return space.unicode_w(unicode_from_string(space, w_self)) + return space.unicode_w(decode_object(space, w_self, encoding, errors)) + class W_StringObject(W_AbstractStringObject): from pypy.objspace.std.stringtype import str_typedef as typedef @@ -55,20 +69,6 @@ def str_w(w_self, space): return w_self._value - def unicode_w(w_self, space): - # Use the default encoding. 
- from pypy.objspace.std.unicodetype import unicode_from_string, \ - decode_object - w_defaultencoding = space.call_function(space.sys.get( - 'getdefaultencoding')) - from pypy.objspace.std.unicodetype import _get_encoding_and_errors, \ - unicode_from_string, decode_object - encoding, errors = _get_encoding_and_errors(space, w_defaultencoding, - space.w_None) - if encoding is None and errors is None: - return space.unicode_w(unicode_from_string(space, w_self)) - return space.unicode_w(decode_object(space, w_self, encoding, errors)) - registerimplementation(W_StringObject) W_StringObject.EMPTY = W_StringObject('') diff --git a/pypy/objspace/std/test/test_userobject.py b/pypy/objspace/std/test/test_userobject.py --- a/pypy/objspace/std/test/test_userobject.py +++ b/pypy/objspace/std/test/test_userobject.py @@ -244,8 +244,12 @@ skip("disabled") if self.runappdirect: total = 500000 + def rand(): + import random + return random.randrange(0, 5) else: total = 50 + rand = self.rand # class A(object): hash = None @@ -256,7 +260,7 @@ a = A() a.next = tail.next tail.next = a - for j in range(self.rand()): + for j in range(rand()): any = any.next if any.hash is None: any.hash = hash(any) diff --git a/pypy/rlib/parsing/pypackrat.py b/pypy/rlib/parsing/pypackrat.py --- a/pypy/rlib/parsing/pypackrat.py +++ b/pypy/rlib/parsing/pypackrat.py @@ -1,6 +1,8 @@ from pypy.rlib.parsing.tree import Nonterminal, Symbol -from makepackrat import PackratParser, BacktrackException, Status +from pypy.rlib.parsing.makepackrat import PackratParser, BacktrackException, Status + + class Parser(object): def NAME(self): return self._NAME().result diff --git a/pypy/rpython/lltypesystem/opimpl.py b/pypy/rpython/lltypesystem/opimpl.py --- a/pypy/rpython/lltypesystem/opimpl.py +++ b/pypy/rpython/lltypesystem/opimpl.py @@ -427,6 +427,10 @@ ## assert type(x) is int ## return llmemory.cast_int_to_adr(x) +def op_convert_float_bytes_to_longlong(a): + from pypy.rlib.longlong2float import float2longlong + return 
float2longlong(a) + def op_unichar_eq(x, y): assert isinstance(x, unicode) and len(x) == 1 diff --git a/pypy/rpython/rstr.py b/pypy/rpython/rstr.py --- a/pypy/rpython/rstr.py +++ b/pypy/rpython/rstr.py @@ -165,6 +165,7 @@ v_char = hop.inputarg(rstr.char_repr, arg=1) v_left = hop.inputconst(Bool, left) v_right = hop.inputconst(Bool, right) + hop.exception_is_here() return hop.gendirectcall(self.ll.ll_strip, v_str, v_char, v_left, v_right) def rtype_method_lstrip(self, hop): diff --git a/pypy/rpython/test/test_rstr.py b/pypy/rpython/test/test_rstr.py --- a/pypy/rpython/test/test_rstr.py +++ b/pypy/rpython/test/test_rstr.py @@ -637,13 +637,16 @@ def _make_split_test(self, split_fn): const = self.const def fn(i): - s = [const(''), const('0.1.2.4.8'), const('.1.2'), const('1.2.'), const('.1.2.4.')][i] - l = getattr(s, split_fn)(const('.')) - sum = 0 - for num in l: - if len(num): - sum += ord(num[0]) - ord(const('0')[0]) - return sum + len(l) * 100 + try: + s = [const(''), const('0.1.2.4.8'), const('.1.2'), const('1.2.'), const('.1.2.4.')][i] + l = getattr(s, split_fn)(const('.')) + sum = 0 + for num in l: + if len(num): + sum += ord(num[0]) - ord(const('0')[0]) + return sum + len(l) * 100 + except MemoryError: + return 42 return fn def test_split(self): diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -484,7 +484,7 @@ 'shl', 'shr', 'sal', 'sar', 'rol', 'ror', 'mul', 'imul', 'div', 'idiv', 'bswap', 'bt', 'rdtsc', 'punpck', 'pshufd', 'pcmp', 'pand', 'psllw', 'pslld', 'psllq', - 'paddq', 'pinsr', 'pmul', 'psrl', + 'paddq', 'pinsr', 'pmul', 'psrl', 'vmul', # sign-extending moves should not produce GC pointers 'cbtw', 'cwtl', 'cwtd', 'cltd', 'cltq', 'cqto', # zero-extending moves should not produce GC pointers diff --git a/pypy/translator/test/test_driver.py b/pypy/translator/test/test_driver.py --- a/pypy/translator/test/test_driver.py +++ 
b/pypy/translator/test/test_driver.py @@ -6,7 +6,7 @@ def test_ctr(): td = TranslationDriver() expected = ['annotate', 'backendopt', 'llinterpret', 'rtype', 'source', - 'compile', 'run', 'pyjitpl'] + 'compile', 'pyjitpl'] assert set(td.exposed) == set(expected) assert td.backend_select_goals(['compile_c']) == ['compile_c'] @@ -33,7 +33,6 @@ 'rtype_ootype', 'rtype_lltype', 'source_cli', 'source_c', 'compile_cli', 'compile_c', - 'run_c', 'run_cli', 'compile_jvm', 'source_jvm', 'run_jvm', 'pyjitpl_lltype', 'pyjitpl_ootype'] @@ -50,6 +49,6 @@ 'backendopt_lltype'] expected = ['annotate', 'backendopt', 'llinterpret', 'rtype', 'source_c', - 'compile_c', 'run_c', 'pyjitpl'] + 'compile_c', 'pyjitpl'] assert set(td.exposed) == set(expected) diff --git a/pypy/translator/test/test_unsimplify.py b/pypy/translator/test/test_unsimplify.py --- a/pypy/translator/test/test_unsimplify.py +++ b/pypy/translator/test/test_unsimplify.py @@ -78,7 +78,7 @@ return x * 6 def hello_world(): if we_are_translated(): - fd = os.open(tmpfile, os.O_WRONLY | os.O_CREAT, 0) + fd = os.open(tmpfile, os.O_WRONLY | os.O_CREAT, 0644) os.close(fd) graph, t = translate(f, [int], type_system) call_initial_function(t, hello_world) @@ -97,7 +97,7 @@ return x * 6 def goodbye_world(): if we_are_translated(): - fd = os.open(tmpfile, os.O_WRONLY | os.O_CREAT, 0) + fd = os.open(tmpfile, os.O_WRONLY | os.O_CREAT, 0644) os.close(fd) graph, t = translate(f, [int], type_system) call_final_function(t, goodbye_world) From noreply at buildbot.pypy.org Sat Mar 24 20:55:20 2012 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 24 Mar 2012 20:55:20 +0100 (CET) Subject: [pypy-commit] pypy numpypy-out: fix zjit arraylen_gc count Message-ID: <20120324195520.ECA7A82E4D@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: numpypy-out Changeset: r53969:7fb6f88a0063 Date: 2012-03-24 21:53 +0200 http://bitbucket.org/pypy/pypy/changeset/7fb6f88a0063/ Log: fix zjit arraylen_gc count diff --git 
a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -131,7 +131,7 @@ # bogus. We need to improve the situation somehow. self.check_simple_loop({'getinteriorfield_raw': 2, 'setinteriorfield_raw': 1, - 'arraylen_gc': 1, + 'arraylen_gc': 2, 'guard_true': 1, 'int_lt': 1, 'jump': 1, From noreply at buildbot.pypy.org Sat Mar 24 21:19:16 2012 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 24 Mar 2012 21:19:16 +0100 (CET) Subject: [pypy-commit] pypy default: v+stuff does not produce pointers Message-ID: <20120324201916.E15A382E4D@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r53970:03bbbea4dbff Date: 2012-03-24 22:18 +0200 http://bitbucket.org/pypy/pypy/changeset/03bbbea4dbff/ Log: v+stuff does not produce pointers diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -484,7 +484,9 @@ 'shl', 'shr', 'sal', 'sar', 'rol', 'ror', 'mul', 'imul', 'div', 'idiv', 'bswap', 'bt', 'rdtsc', 'punpck', 'pshufd', 'pcmp', 'pand', 'psllw', 'pslld', 'psllq', - 'paddq', 'pinsr', 'pmul', 'psrl', 'vmul', + 'paddq', 'pinsr', 'pmul', 'psrl', + # all vectors don't produce pointers + 'v', # sign-extending moves should not produce GC pointers 'cbtw', 'cwtl', 'cwtd', 'cltd', 'cltq', 'cqto', # zero-extending moves should not produce GC pointers From noreply at buildbot.pypy.org Sat Mar 24 23:59:51 2012 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 24 Mar 2012 23:59:51 +0100 (CET) Subject: [pypy-commit] pypy default: remove flags from mmap signature on windows Message-ID: <20120324225951.CA89882E4D@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r53971:2a83c08dcb0e Date: 2012-03-25 00:59 +0200 http://bitbucket.org/pypy/pypy/changeset/2a83c08dcb0e/ Log: remove flags from mmap signature on 
windows diff --git a/pypy/rlib/rmmap.py b/pypy/rlib/rmmap.py --- a/pypy/rlib/rmmap.py +++ b/pypy/rlib/rmmap.py @@ -711,9 +711,9 @@ free = c_munmap_safe elif _MS_WINDOWS: - def mmap(fileno, length, flags=0, tagname="", access=_ACCESS_DEFAULT, offset=0): + def mmap(fileno, length, tagname="", access=_ACCESS_DEFAULT, offset=0): # XXX flags is or-ed into access by now. - + flags = 0 # check size boundaries _check_map_size(length) map_size = length From noreply at buildbot.pypy.org Sun Mar 25 09:59:46 2012 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 25 Mar 2012 09:59:46 +0200 (CEST) Subject: [pypy-commit] pypy win64-stage1: avoid py.path.local problem with empty cdrom drive Message-ID: <20120325075946.D49DE820D9@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: win64-stage1 Changeset: r53972:0993bdbb1d59 Date: 2012-03-25 09:59 +0200 http://bitbucket.org/pypy/pypy/changeset/0993bdbb1d59/ Log: avoid py.path.local problem with empty cdrom drive diff --git a/pypy/rpython/tool/rffi_platform.py b/pypy/rpython/tool/rffi_platform.py --- a/pypy/rpython/tool/rffi_platform.py +++ b/pypy/rpython/tool/rffi_platform.py @@ -707,14 +707,14 @@ # ____________________________________________________________ -PYPY_EXTERNAL_DIR = py.path.local(pypydir).join('..', '..') +PYPY_EXTERNAL_DIR = os.path.abspath(os.path.join(pypydir,'../..')) # XXX make this configurable if sys.platform == 'win32': for libdir in [ - py.path.local('c:/buildslave/support'), # on the bigboard buildbot - py.path.local('d:/myslave'), # on the snakepit buildbot + 'c:/buildslave/support', # on the bigboard buildbot + 'd:/myslave', # on the snakepit buildbot ]: - if libdir.check(): + if os.path.exists(libdir): PYPY_EXTERNAL_DIR = libdir break @@ -759,7 +759,7 @@ if prefix and not os.path.isabs(prefix): import glob - entries = glob.glob(str(PYPY_EXTERNAL_DIR.join(prefix + '*'))) + entries = glob.glob(os.path.join(PYPY_EXTERNAL_DIR,'*')) if entries: # Get last version prefix = sorted(entries)[-1] From noreply 
at buildbot.pypy.org Sun Mar 25 10:29:29 2012 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 25 Mar 2012 10:29:29 +0200 (CEST) Subject: [pypy-commit] pypy win64-stage1: whoops (amaury_) Message-ID: <20120325082929.71B65820D9@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: win64-stage1 Changeset: r53973:3e677b357851 Date: 2012-03-25 10:28 +0200 http://bitbucket.org/pypy/pypy/changeset/3e677b357851/ Log: whoops (amaury_) diff --git a/pypy/rpython/tool/rffi_platform.py b/pypy/rpython/tool/rffi_platform.py --- a/pypy/rpython/tool/rffi_platform.py +++ b/pypy/rpython/tool/rffi_platform.py @@ -758,8 +758,8 @@ if prefix and not os.path.isabs(prefix): import glob - - entries = glob.glob(os.path.join(PYPY_EXTERNAL_DIR,'*')) + testdir = os.path.join(PYPY_EXTERNAL_DIR,prefix) + entries = glob.glob(os.path.join(testdir,'*')) if entries: # Get last version prefix = sorted(entries)[-1] From noreply at buildbot.pypy.org Sun Mar 25 14:17:51 2012 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 25 Mar 2012 14:17:51 +0200 (CEST) Subject: [pypy-commit] pypy default: Section about the JIT. Message-ID: <20120325121751.C6557820D9@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r53974:10f089778aae Date: 2012-03-25 14:17 +0200 http://bitbucket.org/pypy/pypy/changeset/10f089778aae/ Log: Section about the JIT. diff --git a/pypy/doc/you-want-to-help.rst b/pypy/doc/you-want-to-help.rst --- a/pypy/doc/you-want-to-help.rst +++ b/pypy/doc/you-want-to-help.rst @@ -56,12 +56,18 @@ xxx -* JIT +* Just-in-Time Compiler (JIT): `we have a tracing JIT`_ that traces the + interpreter written in RPython, rather than the user program that it + interprets. As a result it applies to any interpreter, i.e. any language. + But getting it to work correctly is not trivial: it requires a small + number of precise "hints" and possibly some small refactorings of the + interpreter. - xxx +.. 
_`we have a tracing JIT`: jit/index.html -* Garbage Collectors: as you can notice, there are no ``Py_INCREF/Py_DECREF`` - equivalents in RPython code. `Garbage collection in PyPy`_ is inserted +* Garbage Collectors (GC): as you can notice if you are used to CPython's + C code, there are no ``Py_INCREF/Py_DECREF`` equivalents in RPython code. + `Garbage collection in PyPy`_ is inserted during translation. Moreover, this is not reference counting; it is a real GC written as more RPython code. The best one we have so far is in ``rpython/memory/gc/minimark.py``. From noreply at buildbot.pypy.org Sun Mar 25 15:02:51 2012 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 25 Mar 2012 15:02:51 +0200 (CEST) Subject: [pypy-commit] pypy default: Complete. Message-ID: <20120325130251.E6557820D9@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r53975:8ae92dbdda48 Date: 2012-03-25 14:23 +0200 http://bitbucket.org/pypy/pypy/changeset/8ae92dbdda48/ Log: Complete. diff --git a/pypy/doc/you-want-to-help.rst b/pypy/doc/you-want-to-help.rst --- a/pypy/doc/you-want-to-help.rst +++ b/pypy/doc/you-want-to-help.rst @@ -58,10 +58,15 @@ * Just-in-Time Compiler (JIT): `we have a tracing JIT`_ that traces the interpreter written in RPython, rather than the user program that it - interprets. As a result it applies to any interpreter, i.e. any language. - But getting it to work correctly is not trivial: it requires a small - number of precise "hints" and possibly some small refactorings of the - interpreter. + interprets. As a result it applies to any interpreter, i.e. any + language. But getting it to work correctly is not trivial: it + requires a small number of precise "hints" and possibly some small + refactorings of the interpreter. 
The JIT itself also has several + almost-independent parts: the tracer itself in ``jit/metainterp``, the + optimizer in ``jit/metainterp/optimizer`` that optimizes a list of + residual operations, and the backend in ``jit/backend/`` + that turns it into machine code. Writing a new backend is a + traditional way to get into the project. .. _`we have a tracing JIT`: jit/index.html From mattip at wyvern.cs.uni-duesseldorf.de Sun Mar 25 16:41:39 2012 From: mattip at wyvern.cs.uni-duesseldorf.de (mattip at wyvern.cs.uni-duesseldorf.de) Date: Sun, 25 Mar 2012 16:41:39 +0200 (CEST) Subject: [pypy-commit] pypy win64-stage1: Backed out changeset: 3e677b357851 Message-ID: <20120325144139.96495820D9@wyvern.cs.uni-duesseldorf.de> Author: mattip> Branch: win64-stage1 Changeset: r53976:1b4254e68bcb Date: 2012-03-25 16:28 +0200 http://bitbucket.org/pypy/pypy/changeset/1b4254e68bcb/ Log: Backed out changeset: 3e677b357851 diff --git a/pypy/rpython/tool/rffi_platform.py b/pypy/rpython/tool/rffi_platform.py --- a/pypy/rpython/tool/rffi_platform.py +++ b/pypy/rpython/tool/rffi_platform.py @@ -758,8 +758,8 @@ if prefix and not os.path.isabs(prefix): import glob - testdir = os.path.join(PYPY_EXTERNAL_DIR,prefix) - entries = glob.glob(os.path.join(testdir,'*')) + + entries = glob.glob(os.path.join(PYPY_EXTERNAL_DIR,'*')) if entries: # Get last version prefix = sorted(entries)[-1] From mattip at wyvern.cs.uni-duesseldorf.de Sun Mar 25 16:41:40 2012 From: mattip at wyvern.cs.uni-duesseldorf.de (mattip at wyvern.cs.uni-duesseldorf.de) Date: Sun, 25 Mar 2012 16:41:40 +0200 (CEST) Subject: [pypy-commit] pypy win64-stage1: Backed out changeset: 0993bdbb1d59 Message-ID: <20120325144140.CE10D820D9@wyvern.cs.uni-duesseldorf.de> Author: mattip> Branch: win64-stage1 Changeset: r53977:47f38b0ee33e Date: 2012-03-25 16:30 +0200 http://bitbucket.org/pypy/pypy/changeset/47f38b0ee33e/ Log: Backed out changeset: 0993bdbb1d59 diff --git a/pypy/rpython/tool/rffi_platform.py 
b/pypy/rpython/tool/rffi_platform.py --- a/pypy/rpython/tool/rffi_platform.py +++ b/pypy/rpython/tool/rffi_platform.py @@ -707,14 +707,14 @@ # ____________________________________________________________ -PYPY_EXTERNAL_DIR = os.path.abspath(os.path.join(pypydir,'../..')) +PYPY_EXTERNAL_DIR = py.path.local(pypydir).join('..', '..') # XXX make this configurable if sys.platform == 'win32': for libdir in [ - 'c:/buildslave/support', # on the bigboard buildbot - 'd:/myslave', # on the snakepit buildbot + py.path.local('c:/buildslave/support'), # on the bigboard buildbot + py.path.local('d:/myslave'), # on the snakepit buildbot ]: - if os.path.exists(libdir): + if libdir.check(): PYPY_EXTERNAL_DIR = libdir break @@ -759,7 +759,7 @@ if prefix and not os.path.isabs(prefix): import glob - entries = glob.glob(os.path.join(PYPY_EXTERNAL_DIR,'*')) + entries = glob.glob(str(PYPY_EXTERNAL_DIR.join(prefix + '*'))) if entries: # Get last version prefix = sorted(entries)[-1] From mattip at wyvern.cs.uni-duesseldorf.de Sun Mar 25 16:41:42 2012 From: mattip at wyvern.cs.uni-duesseldorf.de (mattip at wyvern.cs.uni-duesseldorf.de) Date: Sun, 25 Mar 2012 16:41:42 +0200 (CEST) Subject: [pypy-commit] pypy win64-stage1: merge with default Message-ID: <20120325144142.33CB1820D9@wyvern.cs.uni-duesseldorf.de> Author: mattip> Branch: win64-stage1 Changeset: r53978:73c306cfbddc Date: 2012-03-25 16:41 +0200 http://bitbucket.org/pypy/pypy/changeset/73c306cfbddc/ Log: merge with default diff --git a/pypy/rlib/rmmap.py b/pypy/rlib/rmmap.py --- a/pypy/rlib/rmmap.py +++ b/pypy/rlib/rmmap.py @@ -711,9 +711,9 @@ free = c_munmap_safe elif _MS_WINDOWS: - def mmap(fileno, length, flags=0, tagname="", access=_ACCESS_DEFAULT, offset=0): + def mmap(fileno, length, tagname="", access=_ACCESS_DEFAULT, offset=0): # XXX flags is or-ed into access by now. 
- + flags = 0 # check size boundaries _check_map_size(length) map_size = length diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -484,7 +484,9 @@ 'shl', 'shr', 'sal', 'sar', 'rol', 'ror', 'mul', 'imul', 'div', 'idiv', 'bswap', 'bt', 'rdtsc', 'punpck', 'pshufd', 'pcmp', 'pand', 'psllw', 'pslld', 'psllq', - 'paddq', 'pinsr', 'pmul', 'psrl', 'vmul', + 'paddq', 'pinsr', 'pmul', 'psrl', + # all vectors don't produce pointers + 'v', # sign-extending moves should not produce GC pointers 'cbtw', 'cwtl', 'cwtd', 'cltd', 'cltq', 'cqto', # zero-extending moves should not produce GC pointers From noreply at buildbot.pypy.org Mon Mar 26 00:08:25 2012 From: noreply at buildbot.pypy.org (Matti Picus) Date: Mon, 26 Mar 2012 00:08:25 +0200 (CEST) Subject: [pypy-commit] pypy default: try hard to package correct runtime Message-ID: <20120325220825.DAF3B820D9@wyvern.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r53979:35e8484be187 Date: 2012-03-26 00:07 +0200 http://bitbucket.org/pypy/pypy/changeset/35e8484be187/ Log: try hard to package correct runtime diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -58,9 +58,33 @@ binaries = [(pypy_c, rename_pypy_c)] # if sys.platform == 'win32': + #What runtime do we need? 
+ msvc_runtime = 'msvcr80.dll' #default is studio 2005 vc8 + try: + import subprocess + out,err = subprocess.Popen([str(pypy_c), '-c', + 'import sys; print sys.version'], + stdout=subprocess.PIPE).communicate() + indx=out.find('MSC v.') + 6 + if indx> 10: + if out[indx:].startswith('1600'): + msvc_runtime = 'msvcr100.dll' #studio 2010 vc10 + elif out[indx:].startwith('1500'): + msvc_runtime = 'msvcr90.dll' #studio 2009 vc9 + elif out[indx:].startswith('1400'): + msvc_runtime = 'msvcr80.dll' #studio 2005 vc8 + else: + print 'Cannot determine runtime dll for pypy' \ + ' version "%s"'%out + else: + print 'Cannot determine runtime dll for pypy' \ + ' version "%s"'%out + except : + pass # Can't rename a DLL: it is always called 'libpypy-c.dll' + for extra in ['libpypy-c.dll', - 'libexpat.dll', 'sqlite3.dll', 'msvcr100.dll', + 'libexpat.dll', 'sqlite3.dll', msvc_runtime, 'libeay32.dll', 'ssleay32.dll']: p = pypy_c.dirpath().join(extra) if not p.check(): From noreply at buildbot.pypy.org Mon Mar 26 05:40:55 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 26 Mar 2012 05:40:55 +0200 (CEST) Subject: [pypy-commit] pypy default: If any of these are None, don't export them. Message-ID: <20120326034055.58B709B6007@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r53980:345f790b2da0 Date: 2012-03-25 23:40 -0400 http://bitbucket.org/pypy/pypy/changeset/345f790b2da0/ Log: If any of these are None, don't export them. 
diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -16,13 +16,15 @@ appleveldefs = {} interpleveldefs = {} if sys.platform.startswith("linux"): + from pypy.module.__pypy__ import interp_time interpleveldefs["clock_gettime"] = "interp_time.clock_gettime" interpleveldefs["clock_getres"] = "interp_time.clock_getres" for name in [ "CLOCK_REALTIME", "CLOCK_MONOTONIC", "CLOCK_MONOTONIC_RAW", "CLOCK_PROCESS_CPUTIME_ID", "CLOCK_THREAD_CPUTIME_ID" ]: - interpleveldefs[name] = "space.wrap(interp_time.%s)" % name + if getattr(interp_time, name) is not None: + interpleveldefs[name] = "space.wrap(interp_time.%s)" % name class Module(MixedModule): From noreply at buildbot.pypy.org Mon Mar 26 05:41:02 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 26 Mar 2012 05:41:02 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged upstream. Message-ID: <20120326034102.D56839B6007@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r53981:96aed8c23574 Date: 2012-03-25 23:40 -0400 http://bitbucket.org/pypy/pypy/changeset/96aed8c23574/ Log: Merged upstream. diff --git a/pypy/doc/discussion/win64_todo.txt b/pypy/doc/discussion/win64_todo.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/discussion/win64_todo.txt @@ -0,0 +1,9 @@ +2011-11-04 +ll_os.py has a problem with the file rwin32.py. +Temporarily disabled for the win64_gborg branch. This needs to be +investigated and re-enabled. +Resolved, enabled. + +2011-11-05 +test_typed.py needs explicit tests to ensure that we +handle word sizes right. \ No newline at end of file diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -18,7 +18,8 @@ Edition. Other configurations may work as well. The translation scripts will set up the appropriate environment variables -for the compiler. 
They will attempt to locate the same compiler version that +for the compiler, so you do not need to run vcvars before translation. +They will attempt to locate the same compiler version that was used to build the Python interpreter doing the translation. Failing that, they will pick the most recent Visual Studio compiler they can find. In addition, the target architecture @@ -26,7 +27,7 @@ using a 32 bit Python and vice versa. **Note:** PyPy is currently not supported for 64 bit Windows, and translation -will be aborted in this case. +will fail in this case. The compiler is all you need to build pypy-c, but it will miss some modules that relies on third-party libraries. See below how to get @@ -57,7 +58,8 @@ install third-party libraries. We chose to install them in the parent directory of the pypy checkout. For example, if you installed pypy in ``d:\pypy\trunk\`` (This directory contains a README file), the base -directory is ``d:\pypy``. +directory is ``d:\pypy``. You may choose different values by setting the +INCLUDE, LIB and PATH (for DLLs) The Boehm garbage collector ~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -126,18 +128,54 @@ ------------------------ You can compile pypy with the mingw compiler, using the --cc=mingw32 option; -mingw.exe must be on the PATH. +gcc.exe must be on the PATH. If the -cc flag does not begin with "ming", it should be +the name of a valid gcc-derivative compiler, i.e. x86_64-w64-mingw32-gcc for the 64 bit +compiler creating a 64 bit target. -libffi for the mingw32 compiler +libffi for the mingw compiler ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -To enable the _rawffi (and ctypes) module, you need to compile a mingw32 -version of libffi. I downloaded the `libffi source files`_, and extracted -them in the base directory. Then run:: +To enable the _rawffi (and ctypes) module, you need to compile a mingw +version of libffi. Here is one way to do this, wich should allow you to try +to build for win64 or win32: + +#. 
Download and unzip a `mingw32 build`_ or `mingw64 build`_, say into c:\mingw +#. If you do not use cygwin, you will need msys to provide make, + autoconf tools and other goodies. + + #. Download and unzip a `msys for mingw`_, say into c:\msys + #. Edit the c:\msys\etc\fstab file to mount c:\mingw + +#. Download and unzip the `libffi source files`_, and extract + them in the base directory. +#. Run c:\msys\msys.bat or a cygwin shell which should make you + feel better since it is a shell prompt with shell tools. +#. From inside the shell, cd to the libffi directory and do:: sh ./configure make cp .libs/libffi-5.dll +If you can't find the dll, and the libtool issued a warning about +"undefined symbols not allowed", you will need to edit the libffi +Makefile in the toplevel directory. Add the flag -no-undefined to +the definition of libffi_la_LDFLAGS + +If you wish to experiment with win64, you must run configure with flags:: + + sh ./configure --build=x86_64-w64-mingw32 --host=x86_64-w64-mingw32 + +or such, depending on your mingw64 download. + +hacking on Pypy with the mingw compiler +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Since hacking on Pypy means running tests, you will need a way to specify +the mingw compiler when hacking (as opposed to translating). As of +March 2012, --cc is not a valid option for pytest.py. However if you set an +environment variable CC it will allow you to choose a compiler. + +.. _'mingw32 build': http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win32/Automated%20Builds +.. _`mingw64 build`: http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win64/Automated%20Builds +.. _`msys for mingw`: http://sourceforge.net/projects/mingw-w64/files/External%20binary%20packages%20%28Win64%20hosted%29/MSYS%20%2832-bit%29 .. _`libffi source files`: http://sourceware.org/libffi/ .. 
_`RPython translation toolchain`: translation.html diff --git a/pypy/doc/you-want-to-help.rst b/pypy/doc/you-want-to-help.rst --- a/pypy/doc/you-want-to-help.rst +++ b/pypy/doc/you-want-to-help.rst @@ -56,12 +56,23 @@ xxx -* JIT +* Just-in-Time Compiler (JIT): `we have a tracing JIT`_ that traces the + interpreter written in RPython, rather than the user program that it + interprets. As a result it applies to any interpreter, i.e. any + language. But getting it to work correctly is not trivial: it + requires a small number of precise "hints" and possibly some small + refactorings of the interpreter. The JIT itself also has several + almost-independent parts: the tracer itself in ``jit/metainterp``, the + optimizer in ``jit/metainterp/optimizer`` that optimizes a list of + residual operations, and the backend in ``jit/backend/`` + that turns it into machine code. Writing a new backend is a + traditional way to get into the project. - xxx +.. _`we have a tracing JIT`: jit/index.html -* Garbage Collectors: as you can notice, there are no ``Py_INCREF/Py_DECREF`` - equivalents in RPython code. `Garbage collection in PyPy`_ is inserted +* Garbage Collectors (GC): as you can notice if you are used to CPython's + C code, there are no ``Py_INCREF/Py_DECREF`` equivalents in RPython code. + `Garbage collection in PyPy`_ is inserted during translation. Moreover, this is not reference counting; it is a real GC written as more RPython code. The best one we have so far is in ``rpython/memory/gc/minimark.py``. 
diff --git a/pypy/rlib/_rffi_stacklet.py b/pypy/rlib/_rffi_stacklet.py --- a/pypy/rlib/_rffi_stacklet.py +++ b/pypy/rlib/_rffi_stacklet.py @@ -14,7 +14,7 @@ includes = ['src/stacklet/stacklet.h'], separate_module_sources = ['#include "src/stacklet/stacklet.c"\n'], ) -if sys.platform == 'win32': +if 'masm' in dir(eci.platform): # Microsoft compiler if is_emulated_long: asmsrc = 'switch_x64_msvc.asm' else: diff --git a/pypy/rlib/_rsocket_rffi.py b/pypy/rlib/_rsocket_rffi.py --- a/pypy/rlib/_rsocket_rffi.py +++ b/pypy/rlib/_rsocket_rffi.py @@ -58,12 +58,12 @@ header_lines = [ '#include ', '#include ', + '#include ', # winsock2 defines AF_UNIX, but not sockaddr_un '#undef AF_UNIX', ] if _MSVC: header_lines.extend([ - '#include ', # these types do not exist on microsoft compilers 'typedef int ssize_t;', 'typedef unsigned __int16 uint16_t;', @@ -71,6 +71,7 @@ ]) else: # MINGW includes = ('stdint.h',) + """ header_lines.extend([ '''\ #ifndef _WIN32_WINNT @@ -88,6 +89,7 @@ u_long keepaliveinterval; };''' ]) + """ HEADER = '\n'.join(header_lines) COND_HEADER = '' constants = {} diff --git a/pypy/rlib/clibffi.py b/pypy/rlib/clibffi.py --- a/pypy/rlib/clibffi.py +++ b/pypy/rlib/clibffi.py @@ -114,9 +114,10 @@ ) eci = rffi_platform.configure_external_library( - 'libffi', eci, + 'libffi-5', eci, [dict(prefix='libffi-', include_dir='include', library_dir='.libs'), + dict(prefix=r'c:\mingw64', include_dir='include', library_dir='lib'), ]) else: libffidir = py.path.local(pypydir).join('translator', 'c', 'src', 'libffi_msvc') diff --git a/pypy/rlib/rmmap.py b/pypy/rlib/rmmap.py --- a/pypy/rlib/rmmap.py +++ b/pypy/rlib/rmmap.py @@ -711,9 +711,9 @@ free = c_munmap_safe elif _MS_WINDOWS: - def mmap(fileno, length, flags=0, tagname="", access=_ACCESS_DEFAULT, offset=0): + def mmap(fileno, length, tagname="", access=_ACCESS_DEFAULT, offset=0): # XXX flags is or-ed into access by now. 
- + flags = 0 # check size boundaries _check_map_size(length) map_size = length diff --git a/pypy/rlib/rwin32.py b/pypy/rlib/rwin32.py --- a/pypy/rlib/rwin32.py +++ b/pypy/rlib/rwin32.py @@ -141,6 +141,10 @@ cfile = udir.join('dosmaperr.c') cfile.write(r''' #include + #include + #ifdef __GNUC__ + #define _dosmaperr mingw_dosmaperr + #endif int main() { int i; diff --git a/pypy/rlib/test/autopath.py b/pypy/rlib/test/autopath.py new file mode 100644 --- /dev/null +++ b/pypy/rlib/test/autopath.py @@ -0,0 +1,131 @@ +""" +self cloning, automatic path configuration + +copy this into any subdirectory of pypy from which scripts need +to be run, typically all of the test subdirs. +The idea is that any such script simply issues + + import autopath + +and this will make sure that the parent directory containing "pypy" +is in sys.path. + +If you modify the master "autopath.py" version (in pypy/tool/autopath.py) +you can directly run it which will copy itself on all autopath.py files +it finds under the pypy root directory. + +This module always provides these attributes: + + pypydir pypy root directory path + this_dir directory where this autopath.py resides + +""" + +def __dirinfo(part): + """ return (partdir, this_dir) and insert parent of partdir + into sys.path. If the parent directories don't have the part + an EnvironmentError is raised.""" + + import sys, os + try: + head = this_dir = os.path.realpath(os.path.dirname(__file__)) + except NameError: + head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0])) + + error = None + while head: + partdir = head + head, tail = os.path.split(head) + if tail == part: + checkfile = os.path.join(partdir, os.pardir, 'pypy', '__init__.py') + if not os.path.exists(checkfile): + error = "Cannot find %r" % (os.path.normpath(checkfile),) + break + else: + error = "Cannot find the parent directory %r of the path %r" % ( + partdir, this_dir) + if not error: + # check for bogus end-of-line style (e.g. 
files checked out on + # Windows and moved to Unix) + f = open(__file__.replace('.pyc', '.py'), 'r') + data = f.read() + f.close() + if data.endswith('\r\n') or data.endswith('\r'): + error = ("Bad end-of-line style in the .py files. Typically " + "caused by a zip file or a checkout done on Windows and " + "moved to Unix or vice-versa.") + if error: + raise EnvironmentError("Invalid source tree - bogus checkout! " + + error) + + pypy_root = os.path.join(head, '') + try: + sys.path.remove(head) + except ValueError: + pass + sys.path.insert(0, head) + + munged = {} + for name, mod in sys.modules.items(): + if '.' in name: + continue + fn = getattr(mod, '__file__', None) + if not isinstance(fn, str): + continue + newname = os.path.splitext(os.path.basename(fn))[0] + if not newname.startswith(part + '.'): + continue + path = os.path.join(os.path.dirname(os.path.realpath(fn)), '') + if path.startswith(pypy_root) and newname != part: + modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep) + if newname != '__init__': + modpaths.append(newname) + modpath = '.'.join(modpaths) + if modpath not in sys.modules: + munged[modpath] = mod + + for name, mod in munged.iteritems(): + if name not in sys.modules: + sys.modules[name] = mod + if '.' 
in name: + prename = name[:name.rfind('.')] + postname = name[len(prename)+1:] + if prename not in sys.modules: + __import__(prename) + if not hasattr(sys.modules[prename], postname): + setattr(sys.modules[prename], postname, mod) + + return partdir, this_dir + +def __clone(): + """ clone master version of autopath.py into all subdirs """ + from os.path import join, walk + if not this_dir.endswith(join('pypy','tool')): + raise EnvironmentError("can only clone master version " + "'%s'" % join(pypydir, 'tool',_myname)) + + + def sync_walker(arg, dirname, fnames): + if _myname in fnames: + fn = join(dirname, _myname) + f = open(fn, 'rwb+') + try: + if f.read() == arg: + print "checkok", fn + else: + print "syncing", fn + f = open(fn, 'w') + f.write(arg) + finally: + f.close() + s = open(join(pypydir, 'tool', _myname), 'rb').read() + walk(pypydir, sync_walker, s) + +_myname = 'autopath.py' + +# set guaranteed attributes + +pypydir, this_dir = __dirinfo('pypy') + +if __name__ == '__main__': + __clone() diff --git a/pypy/rpython/tool/rffi_platform.py b/pypy/rpython/tool/rffi_platform.py --- a/pypy/rpython/tool/rffi_platform.py +++ b/pypy/rpython/tool/rffi_platform.py @@ -660,8 +660,8 @@ if isinstance(fieldtype, lltype.FixedSizeArray): size, _ = expected_size_and_sign return lltype.FixedSizeArray(fieldtype.OF, size/_sizeof(fieldtype.OF)) - raise TypeError("conflicting field type %r for %r" % (fieldtype, - fieldname)) + raise TypeError("conflict between translating python and compiler field" + " type %r for %r" % (fieldtype, fieldname)) def expose_value_as_rpython(value): if intmask(value) == value: diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -58,9 +58,33 @@ binaries = [(pypy_c, rename_pypy_c)] # if sys.platform == 'win32': + #What runtime do we need? 
+ msvc_runtime = 'msvcr80.dll' #default is studio 2005 vc8 + try: + import subprocess + out,err = subprocess.Popen([str(pypy_c), '-c', + 'import sys; print sys.version'], + stdout=subprocess.PIPE).communicate() + indx=out.find('MSC v.') + 6 + if indx> 10: + if out[indx:].startswith('1600'): + msvc_runtime = 'msvcr100.dll' #studio 2010 vc10 + elif out[indx:].startwith('1500'): + msvc_runtime = 'msvcr90.dll' #studio 2009 vc9 + elif out[indx:].startswith('1400'): + msvc_runtime = 'msvcr80.dll' #studio 2005 vc8 + else: + print 'Cannot determine runtime dll for pypy' \ + ' version "%s"'%out + else: + print 'Cannot determine runtime dll for pypy' \ + ' version "%s"'%out + except : + pass # Can't rename a DLL: it is always called 'libpypy-c.dll' + for extra in ['libpypy-c.dll', - 'libexpat.dll', 'sqlite3.dll', 'msvcr100.dll', + 'libexpat.dll', 'sqlite3.dll', msvc_runtime, 'libeay32.dll', 'ssleay32.dll']: p = pypy_c.dirpath().join(extra) if not p.check(): diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -484,7 +484,9 @@ 'shl', 'shr', 'sal', 'sar', 'rol', 'ror', 'mul', 'imul', 'div', 'idiv', 'bswap', 'bt', 'rdtsc', 'punpck', 'pshufd', 'pcmp', 'pand', 'psllw', 'pslld', 'psllq', - 'paddq', 'pinsr', 'pmul', 'psrl', 'vmul', + 'paddq', 'pinsr', 'pmul', 'psrl', + # all vectors don't produce pointers + 'v', # sign-extending moves should not produce GC pointers 'cbtw', 'cwtl', 'cwtd', 'cltd', 'cltq', 'cqto', # zero-extending moves should not produce GC pointers diff --git a/pypy/translator/c/src/libffi_msvc/win64.asm b/pypy/translator/c/src/libffi_msvc/win64.asm new file mode 100644 --- /dev/null +++ b/pypy/translator/c/src/libffi_msvc/win64.asm @@ -0,0 +1,156 @@ +PUBLIC ffi_call_AMD64 + +EXTRN __chkstk:NEAR +EXTRN ffi_closure_SYSV:NEAR + +_TEXT SEGMENT + +;;; ffi_closure_OUTER will be called with these registers set: +;;; rax points to 'closure' +;;; r11 
contains a bit mask that specifies which of the +;;; first four parameters are float or double +;;; +;;; It must move the parameters passed in registers to their stack location, +;;; call ffi_closure_SYSV for the actual work, then return the result. +;;; +ffi_closure_OUTER PROC FRAME + ;; save actual arguments to their stack space. + test r11, 1 + jne first_is_float + mov QWORD PTR [rsp+8], rcx + jmp second +first_is_float: + movlpd QWORD PTR [rsp+8], xmm0 + +second: + test r11, 2 + jne second_is_float + mov QWORD PTR [rsp+16], rdx + jmp third +second_is_float: + movlpd QWORD PTR [rsp+16], xmm1 + +third: + test r11, 4 + jne third_is_float + mov QWORD PTR [rsp+24], r8 + jmp forth +third_is_float: + movlpd QWORD PTR [rsp+24], xmm2 + +forth: + test r11, 8 + jne forth_is_float + mov QWORD PTR [rsp+32], r9 + jmp done +forth_is_float: + movlpd QWORD PTR [rsp+32], xmm3 + +done: +.ALLOCSTACK 40 + sub rsp, 40 +.ENDPROLOG + mov rcx, rax ; context is first parameter + mov rdx, rsp ; stack is second parameter + add rdx, 40 ; correct our own area + mov rax, ffi_closure_SYSV + call rax ; call the real closure function + ;; Here, code is missing that handles float return values + add rsp, 40 + movd xmm0, rax ; In case the closure returned a float. 
+ ret 0 +ffi_closure_OUTER ENDP + + +;;; ffi_call_AMD64 + +stack$ = 0 +prepfunc$ = 32 +ecif$ = 40 +bytes$ = 48 +flags$ = 56 +rvalue$ = 64 +fn$ = 72 + +ffi_call_AMD64 PROC FRAME + + mov QWORD PTR [rsp+32], r9 + mov QWORD PTR [rsp+24], r8 + mov QWORD PTR [rsp+16], rdx + mov QWORD PTR [rsp+8], rcx +.PUSHREG rbp + push rbp +.ALLOCSTACK 48 + sub rsp, 48 ; 00000030H +.SETFRAME rbp, 32 + lea rbp, QWORD PTR [rsp+32] +.ENDPROLOG + + mov eax, DWORD PTR bytes$[rbp] + add rax, 15 + and rax, -16 + call __chkstk + sub rsp, rax + lea rax, QWORD PTR [rsp+32] + mov QWORD PTR stack$[rbp], rax + + mov rdx, QWORD PTR ecif$[rbp] + mov rcx, QWORD PTR stack$[rbp] + call QWORD PTR prepfunc$[rbp] + + mov rsp, QWORD PTR stack$[rbp] + + movlpd xmm3, QWORD PTR [rsp+24] + movd r9, xmm3 + + movlpd xmm2, QWORD PTR [rsp+16] + movd r8, xmm2 + + movlpd xmm1, QWORD PTR [rsp+8] + movd rdx, xmm1 + + movlpd xmm0, QWORD PTR [rsp] + movd rcx, xmm0 + + call QWORD PTR fn$[rbp] +ret_int$: + cmp DWORD PTR flags$[rbp], 1 ; FFI_TYPE_INT + jne ret_float$ + + mov rcx, QWORD PTR rvalue$[rbp] + mov DWORD PTR [rcx], eax + jmp SHORT ret_nothing$ + +ret_float$: + cmp DWORD PTR flags$[rbp], 2 ; FFI_TYPE_FLOAT + jne SHORT ret_double$ + + mov rax, QWORD PTR rvalue$[rbp] + movlpd QWORD PTR [rax], xmm0 + jmp SHORT ret_nothing$ + +ret_double$: + cmp DWORD PTR flags$[rbp], 3 ; FFI_TYPE_DOUBLE + jne SHORT ret_int64$ + + mov rax, QWORD PTR rvalue$[rbp] + movlpd QWORD PTR [rax], xmm0 + jmp SHORT ret_nothing$ + +ret_int64$: + cmp DWORD PTR flags$[rbp], 12 ; FFI_TYPE_SINT64 + jne ret_nothing$ + + mov rcx, QWORD PTR rvalue$[rbp] + mov QWORD PTR [rcx], rax + jmp SHORT ret_nothing$ + +ret_nothing$: + xor eax, eax + + lea rsp, QWORD PTR [rbp+16] + pop rbp + ret 0 +ffi_call_AMD64 ENDP +_TEXT ENDS +END diff --git a/pypy/translator/platform/__init__.py b/pypy/translator/platform/__init__.py --- a/pypy/translator/platform/__init__.py +++ b/pypy/translator/platform/__init__.py @@ -301,6 +301,8 @@ global platform log.msg("Setting platform 
to %r cc=%s" % (new_platform,cc)) platform = pick_platform(new_platform, cc) + if not platform: + raise ValueError("pick_platform failed") if new_platform == 'host': global host diff --git a/pypy/translator/platform/windows.py b/pypy/translator/platform/windows.py --- a/pypy/translator/platform/windows.py +++ b/pypy/translator/platform/windows.py @@ -7,15 +7,27 @@ from pypy.translator.platform import log, _run_subprocess from pypy.translator.platform import Platform, posix +def _get_compiler_type(cc, x64_flag): + import subprocess + if not cc: + cc = os.environ.get('CC','') + if not cc: + return MsvcPlatform(cc=cc, x64=x64_flag) + elif cc.startswith('mingw'): + return MingwPlatform(cc) + try: + subprocess.check_output([cc, '--version']) + except: + raise ValueError,"Could not find compiler specified by cc option" + \ + " '%s', it must be a valid exe file on your path"%cc + return MingwPlatform(cc) + def Windows(cc=None): - if cc == 'mingw32': - return MingwPlatform(cc) - else: - return MsvcPlatform(cc, False) + return _get_compiler_type(cc, False) + +def Windows_x64(cc=None): + return _get_compiler_type(cc, True) -def Windows_x64(cc=None): - return MsvcPlatform(cc, True) - def _get_msvc_env(vsver, x64flag): try: toolsdir = os.environ['VS%sCOMNTOOLS' % vsver] @@ -31,14 +43,16 @@ vcvars = os.path.join(toolsdir, 'vsvars32.bat') import subprocess - popen = subprocess.Popen('"%s" & set' % (vcvars,), + try: + popen = subprocess.Popen('"%s" & set' % (vcvars,), stdout=subprocess.PIPE, stderr=subprocess.PIPE) - stdout, stderr = popen.communicate() - if popen.wait() != 0: - return - + stdout, stderr = popen.communicate() + if popen.wait() != 0: + return None + except: + return None env = {} stdout = stdout.replace("\r\n", "\n") @@ -395,7 +409,9 @@ so_ext = 'dll' def __init__(self, cc=None): - Platform.__init__(self, 'gcc') + if not cc: + cc = 'gcc' + Platform.__init__(self, cc) def _args_for_shared(self, args): return ['-shared'] + args From noreply at buildbot.pypy.org Mon 
Mar 26 13:31:50 2012 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 26 Mar 2012 13:31:50 +0200 (CEST) Subject: [pypy-commit] pypy default: Python 2.5 compat Message-ID: <20120326113150.07C14820D9@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r53982:db33a634cb09 Date: 2012-03-26 13:16 +0200 http://bitbucket.org/pypy/pypy/changeset/db33a634cb09/ Log: Python 2.5 compat diff --git a/pypy/module/__pypy__/interp_time.py b/pypy/module/__pypy__/interp_time.py --- a/pypy/module/__pypy__/interp_time.py +++ b/pypy/module/__pypy__/interp_time.py @@ -1,3 +1,4 @@ +from __future__ import with_statement import sys from pypy.interpreter.error import exception_from_errno diff --git a/pypy/rlib/longlong2float.py b/pypy/rlib/longlong2float.py --- a/pypy/rlib/longlong2float.py +++ b/pypy/rlib/longlong2float.py @@ -6,6 +6,7 @@ in which it does not work. """ +from __future__ import with_statement from pypy.annotation import model as annmodel from pypy.rlib.rarithmetic import r_int64 from pypy.rpython.lltypesystem import lltype, rffi From noreply at buildbot.pypy.org Mon Mar 26 13:32:16 2012 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 26 Mar 2012 13:32:16 +0200 (CEST) Subject: [pypy-commit] pypy set-strategies: hg merge default Message-ID: <20120326113216.CC425820D9@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: set-strategies Changeset: r53983:a6cefb709499 Date: 2012-03-26 13:22 +0200 http://bitbucket.org/pypy/pypy/changeset/a6cefb709499/ Log: hg merge default diff too long, truncating to 10000 out of 177558 lines diff --git a/ctypes_configure/cbuild.py b/ctypes_configure/cbuild.py --- a/ctypes_configure/cbuild.py +++ b/ctypes_configure/cbuild.py @@ -206,8 +206,9 @@ cfiles += eci.separate_module_files include_dirs = list(eci.include_dirs) library_dirs = list(eci.library_dirs) - if sys.platform == 'darwin': # support Fink & Darwinports - for s in ('/sw/', '/opt/local/'): + if (sys.platform == 'darwin' or # support Fink & Darwinports + 
sys.platform.startswith('freebsd')): + for s in ('/sw/', '/opt/local/', '/usr/local/'): if s + 'include' not in include_dirs and \ os.path.exists(s + 'include'): include_dirs.append(s + 'include') @@ -380,9 +381,9 @@ self.link_extra += ['-pthread'] if sys.platform == 'win32': self.link_extra += ['/DEBUG'] # generate .pdb file - if sys.platform == 'darwin': - # support Fink & Darwinports - for s in ('/sw/', '/opt/local/'): + if (sys.platform == 'darwin' or # support Fink & Darwinports + sys.platform.startswith('freebsd')): + for s in ('/sw/', '/opt/local/', '/usr/local/'): if s + 'include' not in self.include_dirs and \ os.path.exists(s + 'include'): self.include_dirs.append(s + 'include') @@ -395,7 +396,6 @@ self.outputfilename = py.path.local(cfilenames[0]).new(ext=ext) else: self.outputfilename = py.path.local(outputfilename) - self.eci = eci def build(self, noerr=False): basename = self.outputfilename.new(ext='') @@ -436,7 +436,7 @@ old = cfile.dirpath().chdir() try: res = compiler.compile([cfile.basename], - include_dirs=self.eci.include_dirs, + include_dirs=self.include_dirs, extra_preargs=self.compile_extra) assert len(res) == 1 cobjfile = py.path.local(res[0]) @@ -445,9 +445,9 @@ finally: old.chdir() compiler.link_executable(objects, str(self.outputfilename), - libraries=self.eci.libraries, + libraries=self.libraries, extra_preargs=self.link_extra, - library_dirs=self.eci.library_dirs) + library_dirs=self.library_dirs) def build_executable(*args, **kwds): noerr = kwds.pop('noerr', False) diff --git a/lib-python/2.7/BaseHTTPServer.py b/lib-python/2.7/BaseHTTPServer.py --- a/lib-python/2.7/BaseHTTPServer.py +++ b/lib-python/2.7/BaseHTTPServer.py @@ -310,7 +310,13 @@ """ try: - self.raw_requestline = self.rfile.readline() + self.raw_requestline = self.rfile.readline(65537) + if len(self.raw_requestline) > 65536: + self.requestline = '' + self.request_version = '' + self.command = '' + self.send_error(414) + return if not self.raw_requestline: 
self.close_connection = 1 return diff --git a/lib-python/2.7/ConfigParser.py b/lib-python/2.7/ConfigParser.py --- a/lib-python/2.7/ConfigParser.py +++ b/lib-python/2.7/ConfigParser.py @@ -545,6 +545,38 @@ if isinstance(val, list): options[name] = '\n'.join(val) +import UserDict as _UserDict + +class _Chainmap(_UserDict.DictMixin): + """Combine multiple mappings for successive lookups. + + For example, to emulate Python's normal lookup sequence: + + import __builtin__ + pylookup = _Chainmap(locals(), globals(), vars(__builtin__)) + """ + + def __init__(self, *maps): + self._maps = maps + + def __getitem__(self, key): + for mapping in self._maps: + try: + return mapping[key] + except KeyError: + pass + raise KeyError(key) + + def keys(self): + result = [] + seen = set() + for mapping in self_maps: + for key in mapping: + if key not in seen: + result.append(key) + seen.add(key) + return result + class ConfigParser(RawConfigParser): def get(self, section, option, raw=False, vars=None): @@ -559,16 +591,18 @@ The section DEFAULT is special. 
""" - d = self._defaults.copy() + sectiondict = {} try: - d.update(self._sections[section]) + sectiondict = self._sections[section] except KeyError: if section != DEFAULTSECT: raise NoSectionError(section) # Update with the entry specific variables + vardict = {} if vars: for key, value in vars.items(): - d[self.optionxform(key)] = value + vardict[self.optionxform(key)] = value + d = _Chainmap(vardict, sectiondict, self._defaults) option = self.optionxform(option) try: value = d[option] diff --git a/lib-python/2.7/Cookie.py b/lib-python/2.7/Cookie.py --- a/lib-python/2.7/Cookie.py +++ b/lib-python/2.7/Cookie.py @@ -258,6 +258,11 @@ '\033' : '\\033', '\034' : '\\034', '\035' : '\\035', '\036' : '\\036', '\037' : '\\037', + # Because of the way browsers really handle cookies (as opposed + # to what the RFC says) we also encode , and ; + + ',' : '\\054', ';' : '\\073', + '"' : '\\"', '\\' : '\\\\', '\177' : '\\177', '\200' : '\\200', '\201' : '\\201', diff --git a/lib-python/2.7/HTMLParser.py b/lib-python/2.7/HTMLParser.py --- a/lib-python/2.7/HTMLParser.py +++ b/lib-python/2.7/HTMLParser.py @@ -26,7 +26,7 @@ tagfind = re.compile('[a-zA-Z][-.a-zA-Z0-9:_]*') attrfind = re.compile( r'\s*([a-zA-Z_][-.:a-zA-Z_0-9]*)(\s*=\s*' - r'(\'[^\']*\'|"[^"]*"|[-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~@]*))?') + r'(\'[^\']*\'|"[^"]*"|[^\s"\'=<>`]*))?') locatestarttagend = re.compile(r""" <[a-zA-Z][-.a-zA-Z0-9:_]* # tag name @@ -99,7 +99,7 @@ markupbase.ParserBase.reset(self) def feed(self, data): - """Feed data to the parser. + r"""Feed data to the parser. Call this as often as you want, with as little or as much text as you want (may include '\n'). 
@@ -367,13 +367,16 @@ return s def replaceEntities(s): s = s.groups()[0] - if s[0] == "#": - s = s[1:] - if s[0] in ['x','X']: - c = int(s[1:], 16) - else: - c = int(s) - return unichr(c) + try: + if s[0] == "#": + s = s[1:] + if s[0] in ['x','X']: + c = int(s[1:], 16) + else: + c = int(s) + return unichr(c) + except ValueError: + return '&#'+s+';' else: # Cannot use name2codepoint directly, because HTMLParser supports apos, # which is not part of HTML 4 diff --git a/lib-python/2.7/SimpleHTTPServer.py b/lib-python/2.7/SimpleHTTPServer.py --- a/lib-python/2.7/SimpleHTTPServer.py +++ b/lib-python/2.7/SimpleHTTPServer.py @@ -15,6 +15,7 @@ import BaseHTTPServer import urllib import cgi +import sys import shutil import mimetypes try: @@ -131,7 +132,8 @@ length = f.tell() f.seek(0) self.send_response(200) - self.send_header("Content-type", "text/html") + encoding = sys.getfilesystemencoding() + self.send_header("Content-type", "text/html; charset=%s" % encoding) self.send_header("Content-Length", str(length)) self.end_headers() return f diff --git a/lib-python/2.7/SimpleXMLRPCServer.py b/lib-python/2.7/SimpleXMLRPCServer.py --- a/lib-python/2.7/SimpleXMLRPCServer.py +++ b/lib-python/2.7/SimpleXMLRPCServer.py @@ -246,7 +246,7 @@ marshalled data. For backwards compatibility, a dispatch function can be provided as an argument (see comment in SimpleXMLRPCRequestHandler.do_POST) but overriding the - existing method through subclassing is the prefered means + existing method through subclassing is the preferred means of changing method dispatch behavior. 
""" @@ -486,7 +486,10 @@ L = [] while size_remaining: chunk_size = min(size_remaining, max_chunk_size) - L.append(self.rfile.read(chunk_size)) + chunk = self.rfile.read(chunk_size) + if not chunk: + break + L.append(chunk) size_remaining -= len(L[-1]) data = ''.join(L) diff --git a/lib-python/2.7/SocketServer.py b/lib-python/2.7/SocketServer.py --- a/lib-python/2.7/SocketServer.py +++ b/lib-python/2.7/SocketServer.py @@ -675,7 +675,7 @@ # A timeout to apply to the request socket, if not None. timeout = None - # Disable nagle algoritm for this socket, if True. + # Disable nagle algorithm for this socket, if True. # Use only when wbufsize != 0, to avoid small packets. disable_nagle_algorithm = False diff --git a/lib-python/2.7/StringIO.py b/lib-python/2.7/StringIO.py --- a/lib-python/2.7/StringIO.py +++ b/lib-python/2.7/StringIO.py @@ -266,6 +266,7 @@ 8th bit) will cause a UnicodeError to be raised when getvalue() is called. """ + _complain_ifclosed(self.closed) if self.buflist: self.buf += ''.join(self.buflist) self.buflist = [] diff --git a/lib-python/2.7/_abcoll.py b/lib-python/2.7/_abcoll.py --- a/lib-python/2.7/_abcoll.py +++ b/lib-python/2.7/_abcoll.py @@ -82,7 +82,7 @@ @classmethod def __subclasshook__(cls, C): if cls is Iterator: - if _hasattr(C, "next"): + if _hasattr(C, "next") and _hasattr(C, "__iter__"): return True return NotImplemented diff --git a/lib-python/2.7/_pyio.py b/lib-python/2.7/_pyio.py --- a/lib-python/2.7/_pyio.py +++ b/lib-python/2.7/_pyio.py @@ -16,6 +16,7 @@ import io from io import (__all__, SEEK_SET, SEEK_CUR, SEEK_END) +from errno import EINTR __metaclass__ = type @@ -559,7 +560,11 @@ if not data: break res += data - return bytes(res) + if res: + return bytes(res) + else: + # b'' or None + return data def readinto(self, b): """Read up to len(b) bytes into b. 
@@ -678,7 +683,7 @@ """ def __init__(self, raw): - self.raw = raw + self._raw = raw ### Positioning ### @@ -722,8 +727,8 @@ if self.raw is None: raise ValueError("raw stream already detached") self.flush() - raw = self.raw - self.raw = None + raw = self._raw + self._raw = None return raw ### Inquiries ### @@ -738,6 +743,10 @@ return self.raw.writable() @property + def raw(self): + return self._raw + + @property def closed(self): return self.raw.closed @@ -933,7 +942,12 @@ current_size = 0 while True: # Read until EOF or until read() would block. - chunk = self.raw.read() + try: + chunk = self.raw.read() + except IOError as e: + if e.errno != EINTR: + raise + continue if chunk in empty_values: nodata_val = chunk break @@ -952,7 +966,12 @@ chunks = [buf[pos:]] wanted = max(self.buffer_size, n) while avail < n: - chunk = self.raw.read(wanted) + try: + chunk = self.raw.read(wanted) + except IOError as e: + if e.errno != EINTR: + raise + continue if chunk in empty_values: nodata_val = chunk break @@ -981,7 +1000,14 @@ have = len(self._read_buf) - self._read_pos if have < want or have <= 0: to_read = self.buffer_size - have - current = self.raw.read(to_read) + while True: + try: + current = self.raw.read(to_read) + except IOError as e: + if e.errno != EINTR: + raise + continue + break if current: self._read_buf = self._read_buf[self._read_pos:] + current self._read_pos = 0 @@ -1088,7 +1114,12 @@ written = 0 try: while self._write_buf: - n = self.raw.write(self._write_buf) + try: + n = self.raw.write(self._write_buf) + except IOError as e: + if e.errno != EINTR: + raise + continue if n > len(self._write_buf) or n < 0: raise IOError("write() returned incorrect number of bytes") del self._write_buf[:n] @@ -1456,7 +1487,7 @@ if not isinstance(errors, basestring): raise ValueError("invalid errors: %r" % errors) - self.buffer = buffer + self._buffer = buffer self._line_buffering = line_buffering self._encoding = encoding self._errors = errors @@ -1511,6 +1542,10 @@ def 
line_buffering(self): return self._line_buffering + @property + def buffer(self): + return self._buffer + def seekable(self): return self._seekable @@ -1724,8 +1759,8 @@ if self.buffer is None: raise ValueError("buffer is already detached") self.flush() - buffer = self.buffer - self.buffer = None + buffer = self._buffer + self._buffer = None return buffer def seek(self, cookie, whence=0): diff --git a/lib-python/2.7/_weakrefset.py b/lib-python/2.7/_weakrefset.py --- a/lib-python/2.7/_weakrefset.py +++ b/lib-python/2.7/_weakrefset.py @@ -66,7 +66,11 @@ return sum(x() is not None for x in self.data) def __contains__(self, item): - return ref(item) in self.data + try: + wr = ref(item) + except TypeError: + return False + return wr in self.data def __reduce__(self): return (self.__class__, (list(self),), diff --git a/lib-python/2.7/anydbm.py b/lib-python/2.7/anydbm.py --- a/lib-python/2.7/anydbm.py +++ b/lib-python/2.7/anydbm.py @@ -29,17 +29,8 @@ list = d.keys() # return a list of all existing keys (slow!) Future versions may change the order in which implementations are -tested for existence, add interfaces to other dbm-like +tested for existence, and add interfaces to other dbm-like implementations. - -The open function has an optional second argument. This can be 'r', -for read-only access, 'w', for read-write access of an existing -database, 'c' for read-write access to a new or existing database, and -'n' for read-write access to a new database. The default is 'r'. - -Note: 'r' and 'w' fail if the database doesn't exist; 'c' creates it -only if it doesn't exist; and 'n' always creates a new database. - """ class error(Exception): @@ -63,7 +54,18 @@ error = tuple(_errors) -def open(file, flag = 'r', mode = 0666): +def open(file, flag='r', mode=0666): + """Open or create database at path given by *file*. 
+ + Optional argument *flag* can be 'r' (default) for read-only access, 'w' + for read-write access of an existing database, 'c' for read-write access + to a new or existing database, and 'n' for read-write access to a new + database. + + Note: 'r' and 'w' fail if the database doesn't exist; 'c' creates it + only if it doesn't exist; and 'n' always creates a new database. + """ + # guess the type of an existing database from whichdb import whichdb result=whichdb(file) diff --git a/lib-python/2.7/argparse.py b/lib-python/2.7/argparse.py --- a/lib-python/2.7/argparse.py +++ b/lib-python/2.7/argparse.py @@ -82,6 +82,7 @@ ] +import collections as _collections import copy as _copy import os as _os import re as _re @@ -1037,7 +1038,7 @@ self._prog_prefix = prog self._parser_class = parser_class - self._name_parser_map = {} + self._name_parser_map = _collections.OrderedDict() self._choices_actions = [] super(_SubParsersAction, self).__init__( @@ -1080,7 +1081,7 @@ parser = self._name_parser_map[parser_name] except KeyError: tup = parser_name, ', '.join(self._name_parser_map) - msg = _('unknown parser %r (choices: %s)' % tup) + msg = _('unknown parser %r (choices: %s)') % tup raise ArgumentError(self, msg) # parse all the remaining options into the namespace @@ -1109,7 +1110,7 @@ the builtin open() function. 
""" - def __init__(self, mode='r', bufsize=None): + def __init__(self, mode='r', bufsize=-1): self._mode = mode self._bufsize = bufsize @@ -1121,18 +1122,19 @@ elif 'w' in self._mode: return _sys.stdout else: - msg = _('argument "-" with mode %r' % self._mode) + msg = _('argument "-" with mode %r') % self._mode raise ValueError(msg) # all other arguments are used as file names - if self._bufsize: + try: return open(string, self._mode, self._bufsize) - else: - return open(string, self._mode) + except IOError as e: + message = _("can't open '%s': %s") + raise ArgumentTypeError(message % (string, e)) def __repr__(self): - args = [self._mode, self._bufsize] - args_str = ', '.join([repr(arg) for arg in args if arg is not None]) + args = self._mode, self._bufsize + args_str = ', '.join(repr(arg) for arg in args if arg != -1) return '%s(%s)' % (type(self).__name__, args_str) # =========================== @@ -1275,13 +1277,20 @@ # create the action object, and add it to the parser action_class = self._pop_action_class(kwargs) if not _callable(action_class): - raise ValueError('unknown action "%s"' % action_class) + raise ValueError('unknown action "%s"' % (action_class,)) action = action_class(**kwargs) # raise an error if the action type is not callable type_func = self._registry_get('type', action.type, action.type) if not _callable(type_func): - raise ValueError('%r is not callable' % type_func) + raise ValueError('%r is not callable' % (type_func,)) + + # raise an error if the metavar does not match the type + if hasattr(self, "_get_formatter"): + try: + self._get_formatter()._format_args(action, None) + except TypeError: + raise ValueError("length of metavar tuple does not match nargs") return self._add_action(action) @@ -1481,6 +1490,7 @@ self._defaults = container._defaults self._has_negative_number_optionals = \ container._has_negative_number_optionals + self._mutually_exclusive_groups = container._mutually_exclusive_groups def _add_action(self, action): action = 
super(_ArgumentGroup, self)._add_action(action) diff --git a/lib-python/2.7/ast.py b/lib-python/2.7/ast.py --- a/lib-python/2.7/ast.py +++ b/lib-python/2.7/ast.py @@ -29,12 +29,12 @@ from _ast import __version__ -def parse(expr, filename='', mode='exec'): +def parse(source, filename='', mode='exec'): """ - Parse an expression into an AST node. - Equivalent to compile(expr, filename, mode, PyCF_ONLY_AST). + Parse the source into an AST node. + Equivalent to compile(source, filename, mode, PyCF_ONLY_AST). """ - return compile(expr, filename, mode, PyCF_ONLY_AST) + return compile(source, filename, mode, PyCF_ONLY_AST) def literal_eval(node_or_string): @@ -152,8 +152,6 @@ Increment the line number of each node in the tree starting at *node* by *n*. This is useful to "move code" to a different location in a file. """ - if 'lineno' in node._attributes: - node.lineno = getattr(node, 'lineno', 0) + n for child in walk(node): if 'lineno' in child._attributes: child.lineno = getattr(child, 'lineno', 0) + n @@ -204,9 +202,9 @@ def walk(node): """ - Recursively yield all child nodes of *node*, in no specified order. This is - useful if you only want to modify nodes in place and don't care about the - context. + Recursively yield all descendant nodes in the tree starting at *node* + (including *node* itself), in no specified order. This is useful if you + only want to modify nodes in place and don't care about the context. 
""" from collections import deque todo = deque([node]) diff --git a/lib-python/2.7/asyncore.py b/lib-python/2.7/asyncore.py --- a/lib-python/2.7/asyncore.py +++ b/lib-python/2.7/asyncore.py @@ -54,7 +54,11 @@ import os from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, EINVAL, \ - ENOTCONN, ESHUTDOWN, EINTR, EISCONN, EBADF, ECONNABORTED, errorcode + ENOTCONN, ESHUTDOWN, EINTR, EISCONN, EBADF, ECONNABORTED, EPIPE, EAGAIN, \ + errorcode + +_DISCONNECTED = frozenset((ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED, EPIPE, + EBADF)) try: socket_map @@ -109,7 +113,7 @@ if flags & (select.POLLHUP | select.POLLERR | select.POLLNVAL): obj.handle_close() except socket.error, e: - if e.args[0] not in (EBADF, ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED): + if e.args[0] not in _DISCONNECTED: obj.handle_error() else: obj.handle_close() @@ -353,7 +357,7 @@ except TypeError: return None except socket.error as why: - if why.args[0] in (EWOULDBLOCK, ECONNABORTED): + if why.args[0] in (EWOULDBLOCK, ECONNABORTED, EAGAIN): return None else: raise @@ -367,7 +371,7 @@ except socket.error, why: if why.args[0] == EWOULDBLOCK: return 0 - elif why.args[0] in (ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED): + elif why.args[0] in _DISCONNECTED: self.handle_close() return 0 else: @@ -385,7 +389,7 @@ return data except socket.error, why: # winsock sometimes throws ENOTCONN - if why.args[0] in [ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED]: + if why.args[0] in _DISCONNECTED: self.handle_close() return '' else: diff --git a/lib-python/2.7/bdb.py b/lib-python/2.7/bdb.py --- a/lib-python/2.7/bdb.py +++ b/lib-python/2.7/bdb.py @@ -250,6 +250,12 @@ list.append(lineno) bp = Breakpoint(filename, lineno, temporary, cond, funcname) + def _prune_breaks(self, filename, lineno): + if (filename, lineno) not in Breakpoint.bplist: + self.breaks[filename].remove(lineno) + if not self.breaks[filename]: + del self.breaks[filename] + def clear_break(self, filename, lineno): filename = 
self.canonic(filename) if not filename in self.breaks: @@ -261,10 +267,7 @@ # pair, then remove the breaks entry for bp in Breakpoint.bplist[filename, lineno][:]: bp.deleteMe() - if (filename, lineno) not in Breakpoint.bplist: - self.breaks[filename].remove(lineno) - if not self.breaks[filename]: - del self.breaks[filename] + self._prune_breaks(filename, lineno) def clear_bpbynumber(self, arg): try: @@ -277,7 +280,8 @@ return 'Breakpoint number (%d) out of range' % number if not bp: return 'Breakpoint (%d) already deleted' % number - self.clear_break(bp.file, bp.line) + bp.deleteMe() + self._prune_breaks(bp.file, bp.line) def clear_all_file_breaks(self, filename): filename = self.canonic(filename) diff --git a/lib-python/2.7/collections.py b/lib-python/2.7/collections.py --- a/lib-python/2.7/collections.py +++ b/lib-python/2.7/collections.py @@ -6,59 +6,38 @@ __all__ += _abcoll.__all__ from _collections import deque, defaultdict -from operator import itemgetter as _itemgetter, eq as _eq +from operator import itemgetter as _itemgetter from keyword import iskeyword as _iskeyword import sys as _sys import heapq as _heapq -from itertools import repeat as _repeat, chain as _chain, starmap as _starmap, \ - ifilter as _ifilter, imap as _imap +from itertools import repeat as _repeat, chain as _chain, starmap as _starmap + try: - from thread import get_ident + from thread import get_ident as _get_ident except ImportError: - from dummy_thread import get_ident - -def _recursive_repr(user_function): - 'Decorator to make a repr function return "..." for a recursive call' - repr_running = set() - - def wrapper(self): - key = id(self), get_ident() - if key in repr_running: - return '...' 
- repr_running.add(key) - try: - result = user_function(self) - finally: - repr_running.discard(key) - return result - - # Can't use functools.wraps() here because of bootstrap issues - wrapper.__module__ = getattr(user_function, '__module__') - wrapper.__doc__ = getattr(user_function, '__doc__') - wrapper.__name__ = getattr(user_function, '__name__') - return wrapper + from dummy_thread import get_ident as _get_ident ################################################################################ ### OrderedDict ################################################################################ -class OrderedDict(dict, MutableMapping): +class OrderedDict(dict): 'Dictionary that remembers insertion order' # An inherited dict maps keys to values. # The inherited dict provides __getitem__, __len__, __contains__, and get. # The remaining methods are order-aware. - # Big-O running times for all methods are the same as for regular dictionaries. + # Big-O running times for all methods are the same as regular dictionaries. - # The internal self.__map dictionary maps keys to links in a doubly linked list. + # The internal self.__map dict maps keys to links in a doubly linked list. # The circular doubly linked list starts and ends with a sentinel element. # The sentinel element never gets deleted (this simplifies the algorithm). # Each link is stored as a list of length three: [PREV, NEXT, KEY]. def __init__(self, *args, **kwds): - '''Initialize an ordered dictionary. Signature is the same as for - regular dictionaries, but keyword arguments are not recommended - because their insertion order is arbitrary. + '''Initialize an ordered dictionary. The signature is the same as + regular dictionaries, but keyword arguments are not recommended because + their insertion order is arbitrary. 
''' if len(args) > 1: @@ -66,17 +45,15 @@ try: self.__root except AttributeError: - self.__root = root = [None, None, None] # sentinel node - PREV = 0 - NEXT = 1 - root[PREV] = root[NEXT] = root + self.__root = root = [] # sentinel node + root[:] = [root, root, None] self.__map = {} - self.update(*args, **kwds) + self.__update(*args, **kwds) def __setitem__(self, key, value, PREV=0, NEXT=1, dict_setitem=dict.__setitem__): 'od.__setitem__(i, y) <==> od[i]=y' - # Setting a new item creates a new link which goes at the end of the linked - # list, and the inherited dictionary is updated with the new key/value pair. + # Setting a new item creates a new link at the end of the linked list, + # and the inherited dictionary is updated with the new key/value pair. if key not in self: root = self.__root last = root[PREV] @@ -85,65 +62,160 @@ def __delitem__(self, key, PREV=0, NEXT=1, dict_delitem=dict.__delitem__): 'od.__delitem__(y) <==> del od[y]' - # Deleting an existing item uses self.__map to find the link which is - # then removed by updating the links in the predecessor and successor nodes. + # Deleting an existing item uses self.__map to find the link which gets + # removed by updating the links in the predecessor and successor nodes. dict_delitem(self, key) - link = self.__map.pop(key) - link_prev = link[PREV] - link_next = link[NEXT] + link_prev, link_next, key = self.__map.pop(key) link_prev[NEXT] = link_next link_next[PREV] = link_prev - def __iter__(self, NEXT=1, KEY=2): + def __iter__(self): 'od.__iter__() <==> iter(od)' # Traverse the linked list in order. + NEXT, KEY = 1, 2 root = self.__root curr = root[NEXT] while curr is not root: yield curr[KEY] curr = curr[NEXT] - def __reversed__(self, PREV=0, KEY=2): + def __reversed__(self): 'od.__reversed__() <==> reversed(od)' # Traverse the linked list in reverse order. 
+ PREV, KEY = 0, 2 root = self.__root curr = root[PREV] while curr is not root: yield curr[KEY] curr = curr[PREV] + def clear(self): + 'od.clear() -> None. Remove all items from od.' + for node in self.__map.itervalues(): + del node[:] + root = self.__root + root[:] = [root, root, None] + self.__map.clear() + dict.clear(self) + + # -- the following methods do not depend on the internal structure -- + + def keys(self): + 'od.keys() -> list of keys in od' + return list(self) + + def values(self): + 'od.values() -> list of values in od' + return [self[key] for key in self] + + def items(self): + 'od.items() -> list of (key, value) pairs in od' + return [(key, self[key]) for key in self] + + def iterkeys(self): + 'od.iterkeys() -> an iterator over the keys in od' + return iter(self) + + def itervalues(self): + 'od.itervalues -> an iterator over the values in od' + for k in self: + yield self[k] + + def iteritems(self): + 'od.iteritems -> an iterator over the (key, value) pairs in od' + for k in self: + yield (k, self[k]) + + update = MutableMapping.update + + __update = update # let subclasses override update without breaking __init__ + + __marker = object() + + def pop(self, key, default=__marker): + '''od.pop(k[,d]) -> v, remove specified key and return the corresponding + value. If key is not found, d is returned if given, otherwise KeyError + is raised. + + ''' + if key in self: + result = self[key] + del self[key] + return result + if default is self.__marker: + raise KeyError(key) + return default + + def setdefault(self, key, default=None): + 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' + if key in self: + return self[key] + self[key] = default + return default + + def popitem(self, last=True): + '''od.popitem() -> (k, v), return and remove a (key, value) pair. + Pairs are returned in LIFO order if last is true or FIFO order if false. 
+ + ''' + if not self: + raise KeyError('dictionary is empty') + key = next(reversed(self) if last else iter(self)) + value = self.pop(key) + return key, value + + def __repr__(self, _repr_running={}): + 'od.__repr__() <==> repr(od)' + call_key = id(self), _get_ident() + if call_key in _repr_running: + return '...' + _repr_running[call_key] = 1 + try: + if not self: + return '%s()' % (self.__class__.__name__,) + return '%s(%r)' % (self.__class__.__name__, self.items()) + finally: + del _repr_running[call_key] + def __reduce__(self): 'Return state information for pickling' items = [[k, self[k]] for k in self] - tmp = self.__map, self.__root - del self.__map, self.__root inst_dict = vars(self).copy() - self.__map, self.__root = tmp + for k in vars(OrderedDict()): + inst_dict.pop(k, None) if inst_dict: return (self.__class__, (items,), inst_dict) return self.__class__, (items,) - def clear(self): - 'od.clear() -> None. Remove all items from od.' - try: - for node in self.__map.itervalues(): - del node[:] - self.__root[:] = [self.__root, self.__root, None] - self.__map.clear() - except AttributeError: - pass - dict.clear(self) + def copy(self): + 'od.copy() -> a shallow copy of od' + return self.__class__(self) - setdefault = MutableMapping.setdefault - update = MutableMapping.update - pop = MutableMapping.pop - keys = MutableMapping.keys - values = MutableMapping.values - items = MutableMapping.items - iterkeys = MutableMapping.iterkeys - itervalues = MutableMapping.itervalues - iteritems = MutableMapping.iteritems - __ne__ = MutableMapping.__ne__ + @classmethod + def fromkeys(cls, iterable, value=None): + '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S. + If not specified, the value defaults to None. + + ''' + self = cls() + for key in iterable: + self[key] = value + return self + + def __eq__(self, other): + '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive + while comparison to a regular mapping is order-insensitive. 
+ + ''' + if isinstance(other, OrderedDict): + return len(self)==len(other) and self.items() == other.items() + return dict.__eq__(self, other) + + def __ne__(self, other): + 'od.__ne__(y) <==> od!=y' + return not self == other + + # -- the following methods support python 3.x style dictionary views -- def viewkeys(self): "od.viewkeys() -> a set-like object providing a view on od's keys" @@ -157,49 +229,6 @@ "od.viewitems() -> a set-like object providing a view on od's items" return ItemsView(self) - def popitem(self, last=True): - '''od.popitem() -> (k, v), return and remove a (key, value) pair. - Pairs are returned in LIFO order if last is true or FIFO order if false. - - ''' - if not self: - raise KeyError('dictionary is empty') - key = next(reversed(self) if last else iter(self)) - value = self.pop(key) - return key, value - - @_recursive_repr - def __repr__(self): - 'od.__repr__() <==> repr(od)' - if not self: - return '%s()' % (self.__class__.__name__,) - return '%s(%r)' % (self.__class__.__name__, self.items()) - - def copy(self): - 'od.copy() -> a shallow copy of od' - return self.__class__(self) - - @classmethod - def fromkeys(cls, iterable, value=None): - '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S - and values equal to v (which defaults to None). - - ''' - d = cls() - for key in iterable: - d[key] = value - return d - - def __eq__(self, other): - '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive - while comparison to a regular mapping is order-insensitive. - - ''' - if isinstance(other, OrderedDict): - return len(self)==len(other) and \ - all(_imap(_eq, self.iteritems(), other.iteritems())) - return dict.__eq__(self, other) - ################################################################################ ### namedtuple @@ -328,16 +357,16 @@ or multiset. Elements are stored as dictionary keys and their counts are stored as dictionary values. 
- >>> c = Counter('abracadabra') # count elements from a string + >>> c = Counter('abcdeabcdabcaba') # count elements from a string >>> c.most_common(3) # three most common elements - [('a', 5), ('r', 2), ('b', 2)] + [('a', 5), ('b', 4), ('c', 3)] >>> sorted(c) # list all unique elements - ['a', 'b', 'c', 'd', 'r'] + ['a', 'b', 'c', 'd', 'e'] >>> ''.join(sorted(c.elements())) # list elements with repetitions - 'aaaaabbcdrr' + 'aaaaabbbbcccdde' >>> sum(c.values()) # total of all counts - 11 + 15 >>> c['a'] # count of letter 'a' 5 @@ -345,8 +374,8 @@ ... c[elem] += 1 # by adding 1 to each element's count >>> c['a'] # now there are seven 'a' 7 - >>> del c['r'] # remove all 'r' - >>> c['r'] # now there are zero 'r' + >>> del c['b'] # remove all 'b' + >>> c['b'] # now there are zero 'b' 0 >>> d = Counter('simsalabim') # make another counter @@ -385,6 +414,7 @@ >>> c = Counter(a=4, b=2) # a new counter from keyword args ''' + super(Counter, self).__init__() self.update(iterable, **kwds) def __missing__(self, key): @@ -396,8 +426,8 @@ '''List the n most common elements and their counts from the most common to the least. If n is None, then list all element counts. - >>> Counter('abracadabra').most_common(3) - [('a', 5), ('r', 2), ('b', 2)] + >>> Counter('abcdeabcdabcaba').most_common(3) + [('a', 5), ('b', 4), ('c', 3)] ''' # Emulate Bag.sortedByCount from Smalltalk @@ -463,7 +493,7 @@ for elem, count in iterable.iteritems(): self[elem] = self_get(elem, 0) + count else: - dict.update(self, iterable) # fast path when counter is empty + super(Counter, self).update(iterable) # fast path when counter is empty else: self_get = self.get for elem in iterable: @@ -499,13 +529,16 @@ self.subtract(kwds) def copy(self): - 'Like dict.copy() but returns a Counter instance instead of a dict.' - return Counter(self) + 'Return a shallow copy.' 
+ return self.__class__(self) + + def __reduce__(self): + return self.__class__, (dict(self),) def __delitem__(self, elem): 'Like dict.__delitem__() but does not raise KeyError for missing values.' if elem in self: - dict.__delitem__(self, elem) + super(Counter, self).__delitem__(elem) def __repr__(self): if not self: @@ -532,10 +565,13 @@ if not isinstance(other, Counter): return NotImplemented result = Counter() - for elem in set(self) | set(other): - newcount = self[elem] + other[elem] + for elem, count in self.items(): + newcount = count + other[elem] if newcount > 0: result[elem] = newcount + for elem, count in other.items(): + if elem not in self and count > 0: + result[elem] = count return result def __sub__(self, other): @@ -548,10 +584,13 @@ if not isinstance(other, Counter): return NotImplemented result = Counter() - for elem in set(self) | set(other): - newcount = self[elem] - other[elem] + for elem, count in self.items(): + newcount = count - other[elem] if newcount > 0: result[elem] = newcount + for elem, count in other.items(): + if elem not in self and count < 0: + result[elem] = 0 - count return result def __or__(self, other): @@ -564,11 +603,14 @@ if not isinstance(other, Counter): return NotImplemented result = Counter() - for elem in set(self) | set(other): - p, q = self[elem], other[elem] - newcount = q if p < q else p + for elem, count in self.items(): + other_count = other[elem] + newcount = other_count if count < other_count else count if newcount > 0: result[elem] = newcount + for elem, count in other.items(): + if elem not in self and count > 0: + result[elem] = count return result def __and__(self, other): @@ -581,11 +623,9 @@ if not isinstance(other, Counter): return NotImplemented result = Counter() - if len(self) < len(other): - self, other = other, self - for elem in _ifilter(self.__contains__, other): - p, q = self[elem], other[elem] - newcount = p if p < q else q + for elem, count in self.items(): + other_count = other[elem] + 
newcount = count if count < other_count else other_count if newcount > 0: result[elem] = newcount return result diff --git a/lib-python/2.7/compileall.py b/lib-python/2.7/compileall.py --- a/lib-python/2.7/compileall.py +++ b/lib-python/2.7/compileall.py @@ -9,7 +9,6 @@ packages -- for now, you'll have to deal with packages separately.) See module py_compile for details of the actual byte-compilation. - """ import os import sys @@ -31,7 +30,6 @@ directory name that will show up in error messages) force: if 1, force compilation, even if timestamps are up-to-date quiet: if 1, be quiet during compilation - """ if not quiet: print 'Listing', dir, '...' @@ -61,15 +59,16 @@ return success def compile_file(fullname, ddir=None, force=0, rx=None, quiet=0): - """Byte-compile file. - file: the file to byte-compile + """Byte-compile one file. + + Arguments (only fullname is required): + + fullname: the file to byte-compile ddir: if given, purported directory name (this is the directory name that will show up in error messages) force: if 1, force compilation, even if timestamps are up-to-date quiet: if 1, be quiet during compilation - """ - success = 1 name = os.path.basename(fullname) if ddir is not None: @@ -120,7 +119,6 @@ maxlevels: max recursion level (default 0) force: as for compile_dir() (default 0) quiet: as for compile_dir() (default 0) - """ success = 1 for dir in sys.path: diff --git a/lib-python/2.7/csv.py b/lib-python/2.7/csv.py --- a/lib-python/2.7/csv.py +++ b/lib-python/2.7/csv.py @@ -281,7 +281,7 @@ an all or nothing approach, so we allow for small variations in this number. 1) build a table of the frequency of each character on every line. - 2) build a table of freqencies of this frequency (meta-frequency?), + 2) build a table of frequencies of this frequency (meta-frequency?), e.g. 
'x occurred 5 times in 10 rows, 6 times in 1000 rows, 7 times in 2 rows' 3) use the mode of the meta-frequency to determine the /expected/ diff --git a/lib-python/2.7/ctypes/test/test_arrays.py b/lib-python/2.7/ctypes/test/test_arrays.py --- a/lib-python/2.7/ctypes/test/test_arrays.py +++ b/lib-python/2.7/ctypes/test/test_arrays.py @@ -37,7 +37,7 @@ values = [ia[i] for i in range(len(init))] self.assertEqual(values, [0] * len(init)) - # Too many in itializers should be caught + # Too many initializers should be caught self.assertRaises(IndexError, int_array, *range(alen*2)) CharArray = ARRAY(c_char, 3) diff --git a/lib-python/2.7/ctypes/test/test_as_parameter.py b/lib-python/2.7/ctypes/test/test_as_parameter.py --- a/lib-python/2.7/ctypes/test/test_as_parameter.py +++ b/lib-python/2.7/ctypes/test/test_as_parameter.py @@ -187,6 +187,18 @@ self.assertEqual((s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h), (9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9)) + def test_recursive_as_param(self): + from ctypes import c_int + + class A(object): + pass + + a = A() + a._as_parameter_ = a + with self.assertRaises(RuntimeError): + c_int.from_param(a) + + #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ class AsParamWrapper(object): diff --git a/lib-python/2.7/ctypes/test/test_callbacks.py b/lib-python/2.7/ctypes/test/test_callbacks.py --- a/lib-python/2.7/ctypes/test/test_callbacks.py +++ b/lib-python/2.7/ctypes/test/test_callbacks.py @@ -206,6 +206,42 @@ windll.user32.EnumWindows(EnumWindowsCallbackFunc, 0) + def test_callback_register_int(self): + # Issue #8275: buggy handling of callback args under Win64 + # NOTE: should be run on release builds as well + dll = CDLL(_ctypes_test.__file__) + CALLBACK = CFUNCTYPE(c_int, c_int, c_int, c_int, c_int, c_int) + # All this function does is call the callback with its args squared + func = dll._testfunc_cbk_reg_int + func.argtypes = (c_int, c_int, c_int, c_int, c_int, CALLBACK) + func.restype = c_int + + def callback(a, b, c, d, 
e): + return a + b + c + d + e + + result = func(2, 3, 4, 5, 6, CALLBACK(callback)) + self.assertEqual(result, callback(2*2, 3*3, 4*4, 5*5, 6*6)) + + def test_callback_register_double(self): + # Issue #8275: buggy handling of callback args under Win64 + # NOTE: should be run on release builds as well + dll = CDLL(_ctypes_test.__file__) + CALLBACK = CFUNCTYPE(c_double, c_double, c_double, c_double, + c_double, c_double) + # All this function does is call the callback with its args squared + func = dll._testfunc_cbk_reg_double + func.argtypes = (c_double, c_double, c_double, + c_double, c_double, CALLBACK) + func.restype = c_double + + def callback(a, b, c, d, e): + return a + b + c + d + e + + result = func(1.1, 2.2, 3.3, 4.4, 5.5, CALLBACK(callback)) + self.assertEqual(result, + callback(1.1*1.1, 2.2*2.2, 3.3*3.3, 4.4*4.4, 5.5*5.5)) + + ################################################################ if __name__ == '__main__': diff --git a/lib-python/2.7/ctypes/test/test_functions.py b/lib-python/2.7/ctypes/test/test_functions.py --- a/lib-python/2.7/ctypes/test/test_functions.py +++ b/lib-python/2.7/ctypes/test/test_functions.py @@ -116,7 +116,7 @@ self.assertEqual(result, 21) self.assertEqual(type(result), int) - # You cannot assing character format codes as restype any longer + # You cannot assign character format codes as restype any longer self.assertRaises(TypeError, setattr, f, "restype", "i") def test_floatresult(self): diff --git a/lib-python/2.7/ctypes/test/test_init.py b/lib-python/2.7/ctypes/test/test_init.py --- a/lib-python/2.7/ctypes/test/test_init.py +++ b/lib-python/2.7/ctypes/test/test_init.py @@ -27,7 +27,7 @@ self.assertEqual((y.x.a, y.x.b), (0, 0)) self.assertEqual(y.x.new_was_called, False) - # But explicitely creating an X structure calls __new__ and __init__, of course. + # But explicitly creating an X structure calls __new__ and __init__, of course. 
x = X() self.assertEqual((x.a, x.b), (9, 12)) self.assertEqual(x.new_was_called, True) diff --git a/lib-python/2.7/ctypes/test/test_numbers.py b/lib-python/2.7/ctypes/test/test_numbers.py --- a/lib-python/2.7/ctypes/test/test_numbers.py +++ b/lib-python/2.7/ctypes/test/test_numbers.py @@ -157,7 +157,7 @@ def test_int_from_address(self): from array import array for t in signed_types + unsigned_types: - # the array module doesn't suppport all format codes + # the array module doesn't support all format codes # (no 'q' or 'Q') try: array(t._type_) diff --git a/lib-python/2.7/ctypes/test/test_win32.py b/lib-python/2.7/ctypes/test/test_win32.py --- a/lib-python/2.7/ctypes/test/test_win32.py +++ b/lib-python/2.7/ctypes/test/test_win32.py @@ -17,7 +17,7 @@ # ValueError: Procedure probably called with not enough arguments (4 bytes missing) self.assertRaises(ValueError, IsWindow) - # This one should succeeed... + # This one should succeed... self.assertEqual(0, IsWindow(0)) # ValueError: Procedure probably called with too many arguments (8 bytes in excess) diff --git a/lib-python/2.7/curses/wrapper.py b/lib-python/2.7/curses/wrapper.py --- a/lib-python/2.7/curses/wrapper.py +++ b/lib-python/2.7/curses/wrapper.py @@ -43,7 +43,8 @@ return func(stdscr, *args, **kwds) finally: # Set everything back to normal - stdscr.keypad(0) - curses.echo() - curses.nocbreak() - curses.endwin() + if 'stdscr' in locals(): + stdscr.keypad(0) + curses.echo() + curses.nocbreak() + curses.endwin() diff --git a/lib-python/2.7/decimal.py b/lib-python/2.7/decimal.py --- a/lib-python/2.7/decimal.py +++ b/lib-python/2.7/decimal.py @@ -1068,14 +1068,16 @@ if ans: return ans - if not self: - # -Decimal('0') is Decimal('0'), not Decimal('-0') + if context is None: + context = getcontext() + + if not self and context.rounding != ROUND_FLOOR: + # -Decimal('0') is Decimal('0'), not Decimal('-0'), except + # in ROUND_FLOOR rounding mode. 
ans = self.copy_abs() else: ans = self.copy_negate() - if context is None: - context = getcontext() return ans._fix(context) def __pos__(self, context=None): @@ -1088,14 +1090,15 @@ if ans: return ans - if not self: - # + (-0) = 0 + if context is None: + context = getcontext() + + if not self and context.rounding != ROUND_FLOOR: + # + (-0) = 0, except in ROUND_FLOOR rounding mode. ans = self.copy_abs() else: ans = Decimal(self) - if context is None: - context = getcontext() return ans._fix(context) def __abs__(self, round=True, context=None): @@ -1680,7 +1683,7 @@ self = _dec_from_triple(self._sign, '1', exp_min-1) digits = 0 rounding_method = self._pick_rounding_function[context.rounding] - changed = getattr(self, rounding_method)(digits) + changed = rounding_method(self, digits) coeff = self._int[:digits] or '0' if changed > 0: coeff = str(int(coeff)+1) @@ -1720,8 +1723,6 @@ # here self was representable to begin with; return unchanged return Decimal(self) - _pick_rounding_function = {} - # for each of the rounding functions below: # self is a finite, nonzero Decimal # prec is an integer satisfying 0 <= prec < len(self._int) @@ -1788,6 +1789,17 @@ else: return -self._round_down(prec) + _pick_rounding_function = dict( + ROUND_DOWN = _round_down, + ROUND_UP = _round_up, + ROUND_HALF_UP = _round_half_up, + ROUND_HALF_DOWN = _round_half_down, + ROUND_HALF_EVEN = _round_half_even, + ROUND_CEILING = _round_ceiling, + ROUND_FLOOR = _round_floor, + ROUND_05UP = _round_05up, + ) + def fma(self, other, third, context=None): """Fused multiply-add. 
@@ -2492,8 +2504,8 @@ if digits < 0: self = _dec_from_triple(self._sign, '1', exp-1) digits = 0 - this_function = getattr(self, self._pick_rounding_function[rounding]) - changed = this_function(digits) + this_function = self._pick_rounding_function[rounding] + changed = this_function(self, digits) coeff = self._int[:digits] or '0' if changed == 1: coeff = str(int(coeff)+1) @@ -3705,18 +3717,6 @@ ##### Context class ####################################################### - -# get rounding method function: -rounding_functions = [name for name in Decimal.__dict__.keys() - if name.startswith('_round_')] -for name in rounding_functions: - # name is like _round_half_even, goes to the global ROUND_HALF_EVEN value. - globalname = name[1:].upper() - val = globals()[globalname] - Decimal._pick_rounding_function[val] = name - -del name, val, globalname, rounding_functions - class _ContextManager(object): """Context manager class to support localcontext(). @@ -5990,7 +5990,7 @@ def _format_align(sign, body, spec): """Given an unpadded, non-aligned numeric string 'body' and sign - string 'sign', add padding and aligment conforming to the given + string 'sign', add padding and alignment conforming to the given format specifier dictionary 'spec' (as produced by parse_format_specifier). 
diff --git a/lib-python/2.7/difflib.py b/lib-python/2.7/difflib.py --- a/lib-python/2.7/difflib.py +++ b/lib-python/2.7/difflib.py @@ -1140,6 +1140,21 @@ return ch in ws +######################################################################## +### Unified Diff +######################################################################## + +def _format_range_unified(start, stop): + 'Convert range to the "ed" format' + # Per the diff spec at http://www.unix.org/single_unix_specification/ + beginning = start + 1 # lines start numbering with one + length = stop - start + if length == 1: + return '{}'.format(beginning) + if not length: + beginning -= 1 # empty ranges begin at line just before the range + return '{},{}'.format(beginning, length) + def unified_diff(a, b, fromfile='', tofile='', fromfiledate='', tofiledate='', n=3, lineterm='\n'): r""" @@ -1184,25 +1199,45 @@ started = False for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n): if not started: - fromdate = '\t%s' % fromfiledate if fromfiledate else '' - todate = '\t%s' % tofiledate if tofiledate else '' - yield '--- %s%s%s' % (fromfile, fromdate, lineterm) - yield '+++ %s%s%s' % (tofile, todate, lineterm) started = True - i1, i2, j1, j2 = group[0][1], group[-1][2], group[0][3], group[-1][4] - yield "@@ -%d,%d +%d,%d @@%s" % (i1+1, i2-i1, j1+1, j2-j1, lineterm) + fromdate = '\t{}'.format(fromfiledate) if fromfiledate else '' + todate = '\t{}'.format(tofiledate) if tofiledate else '' + yield '--- {}{}{}'.format(fromfile, fromdate, lineterm) + yield '+++ {}{}{}'.format(tofile, todate, lineterm) + + first, last = group[0], group[-1] + file1_range = _format_range_unified(first[1], last[2]) + file2_range = _format_range_unified(first[3], last[4]) + yield '@@ -{} +{} @@{}'.format(file1_range, file2_range, lineterm) + for tag, i1, i2, j1, j2 in group: if tag == 'equal': for line in a[i1:i2]: yield ' ' + line continue - if tag == 'replace' or tag == 'delete': + if tag in ('replace', 'delete'): for line in 
a[i1:i2]: yield '-' + line - if tag == 'replace' or tag == 'insert': + if tag in ('replace', 'insert'): for line in b[j1:j2]: yield '+' + line + +######################################################################## +### Context Diff +######################################################################## + +def _format_range_context(start, stop): + 'Convert range to the "ed" format' + # Per the diff spec at http://www.unix.org/single_unix_specification/ + beginning = start + 1 # lines start numbering with one + length = stop - start + if not length: + beginning -= 1 # empty ranges begin at line just before the range + if length <= 1: + return '{}'.format(beginning) + return '{},{}'.format(beginning, beginning + length - 1) + # See http://www.unix.org/single_unix_specification/ def context_diff(a, b, fromfile='', tofile='', fromfiledate='', tofiledate='', n=3, lineterm='\n'): @@ -1247,38 +1282,36 @@ four """ + prefix = dict(insert='+ ', delete='- ', replace='! ', equal=' ') started = False - prefixmap = {'insert':'+ ', 'delete':'- ', 'replace':'! 
', 'equal':' '} for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n): if not started: - fromdate = '\t%s' % fromfiledate if fromfiledate else '' - todate = '\t%s' % tofiledate if tofiledate else '' - yield '*** %s%s%s' % (fromfile, fromdate, lineterm) - yield '--- %s%s%s' % (tofile, todate, lineterm) started = True + fromdate = '\t{}'.format(fromfiledate) if fromfiledate else '' + todate = '\t{}'.format(tofiledate) if tofiledate else '' + yield '*** {}{}{}'.format(fromfile, fromdate, lineterm) + yield '--- {}{}{}'.format(tofile, todate, lineterm) - yield '***************%s' % (lineterm,) - if group[-1][2] - group[0][1] >= 2: - yield '*** %d,%d ****%s' % (group[0][1]+1, group[-1][2], lineterm) - else: - yield '*** %d ****%s' % (group[-1][2], lineterm) - visiblechanges = [e for e in group if e[0] in ('replace', 'delete')] - if visiblechanges: + first, last = group[0], group[-1] + yield '***************' + lineterm + + file1_range = _format_range_context(first[1], last[2]) + yield '*** {} ****{}'.format(file1_range, lineterm) + + if any(tag in ('replace', 'delete') for tag, _, _, _, _ in group): for tag, i1, i2, _, _ in group: if tag != 'insert': for line in a[i1:i2]: - yield prefixmap[tag] + line + yield prefix[tag] + line - if group[-1][4] - group[0][3] >= 2: - yield '--- %d,%d ----%s' % (group[0][3]+1, group[-1][4], lineterm) - else: - yield '--- %d ----%s' % (group[-1][4], lineterm) - visiblechanges = [e for e in group if e[0] in ('replace', 'insert')] - if visiblechanges: + file2_range = _format_range_context(first[3], last[4]) + yield '--- {} ----{}'.format(file2_range, lineterm) + + if any(tag in ('replace', 'insert') for tag, _, _, _, _ in group): for tag, _, _, j1, j2 in group: if tag != 'delete': for line in b[j1:j2]: - yield prefixmap[tag] + line + yield prefix[tag] + line def ndiff(a, b, linejunk=None, charjunk=IS_CHARACTER_JUNK): r""" @@ -1714,7 +1747,7 @@ line = line.replace(' ','\0') # expand tabs into spaces line = 
line.expandtabs(self._tabsize) - # relace spaces from expanded tabs back into tab characters + # replace spaces from expanded tabs back into tab characters # (we'll replace them with markup after we do differencing) line = line.replace(' ','\t') return line.replace('\0',' ').rstrip('\n') diff --git a/lib-python/2.7/distutils/__init__.py b/lib-python/2.7/distutils/__init__.py --- a/lib-python/2.7/distutils/__init__.py +++ b/lib-python/2.7/distutils/__init__.py @@ -15,5 +15,5 @@ # Updated automatically by the Python release process. # #--start constants-- -__version__ = "2.7.1" +__version__ = "2.7.2" #--end constants-- diff --git a/lib-python/2.7/distutils/archive_util.py b/lib-python/2.7/distutils/archive_util.py --- a/lib-python/2.7/distutils/archive_util.py +++ b/lib-python/2.7/distutils/archive_util.py @@ -121,7 +121,7 @@ def make_zipfile(base_name, base_dir, verbose=0, dry_run=0): """Create a zip file from all the files under 'base_dir'. - The output zip file will be named 'base_dir' + ".zip". Uses either the + The output zip file will be named 'base_name' + ".zip". Uses either the "zipfile" Python module (if available) or the InfoZIP "zip" utility (if installed and found on the default search path). If neither tool is available, raises DistutilsExecError. 
Returns the name of the output zip diff --git a/lib-python/2.7/distutils/cmd.py b/lib-python/2.7/distutils/cmd.py --- a/lib-python/2.7/distutils/cmd.py +++ b/lib-python/2.7/distutils/cmd.py @@ -377,7 +377,7 @@ dry_run=self.dry_run) def move_file (self, src, dst, level=1): - """Move a file respectin dry-run flag.""" + """Move a file respecting dry-run flag.""" return file_util.move_file(src, dst, dry_run = self.dry_run) def spawn (self, cmd, search_path=1, level=1): diff --git a/lib-python/2.7/distutils/command/build_ext.py b/lib-python/2.7/distutils/command/build_ext.py --- a/lib-python/2.7/distutils/command/build_ext.py +++ b/lib-python/2.7/distutils/command/build_ext.py @@ -207,7 +207,7 @@ elif MSVC_VERSION == 8: self.library_dirs.append(os.path.join(sys.exec_prefix, - 'PC', 'VS8.0', 'win32release')) + 'PC', 'VS8.0')) elif MSVC_VERSION == 7: self.library_dirs.append(os.path.join(sys.exec_prefix, 'PC', 'VS7.1')) diff --git a/lib-python/2.7/distutils/command/sdist.py b/lib-python/2.7/distutils/command/sdist.py --- a/lib-python/2.7/distutils/command/sdist.py +++ b/lib-python/2.7/distutils/command/sdist.py @@ -306,17 +306,20 @@ rstrip_ws=1, collapse_join=1) - while 1: - line = template.readline() - if line is None: # end of file - break + try: + while 1: + line = template.readline() + if line is None: # end of file + break - try: - self.filelist.process_template_line(line) - except DistutilsTemplateError, msg: - self.warn("%s, line %d: %s" % (template.filename, - template.current_line, - msg)) + try: + self.filelist.process_template_line(line) + except DistutilsTemplateError, msg: + self.warn("%s, line %d: %s" % (template.filename, + template.current_line, + msg)) + finally: + template.close() def prune_file_list(self): """Prune off branches that might slip into the file list as created diff --git a/lib-python/2.7/distutils/command/upload.py b/lib-python/2.7/distutils/command/upload.py --- a/lib-python/2.7/distutils/command/upload.py +++ 
b/lib-python/2.7/distutils/command/upload.py @@ -176,6 +176,9 @@ result = urlopen(request) status = result.getcode() reason = result.msg + if self.show_response: + msg = '\n'.join(('-' * 75, r.read(), '-' * 75)) + self.announce(msg, log.INFO) except socket.error, e: self.announce(str(e), log.ERROR) return @@ -189,6 +192,3 @@ else: self.announce('Upload failed (%s): %s' % (status, reason), log.ERROR) - if self.show_response: - msg = '\n'.join(('-' * 75, r.read(), '-' * 75)) - self.announce(msg, log.INFO) diff --git a/lib-python/2.7/distutils/sysconfig.py b/lib-python/2.7/distutils/sysconfig.py --- a/lib-python/2.7/distutils/sysconfig.py +++ b/lib-python/2.7/distutils/sysconfig.py @@ -389,7 +389,7 @@ cur_target = os.getenv('MACOSX_DEPLOYMENT_TARGET', '') if cur_target == '': cur_target = cfg_target - os.putenv('MACOSX_DEPLOYMENT_TARGET', cfg_target) + os.environ['MACOSX_DEPLOYMENT_TARGET'] = cfg_target elif map(int, cfg_target.split('.')) > map(int, cur_target.split('.')): my_msg = ('$MACOSX_DEPLOYMENT_TARGET mismatch: now "%s" but "%s" during configure' % (cur_target, cfg_target)) diff --git a/lib-python/2.7/distutils/tests/__init__.py b/lib-python/2.7/distutils/tests/__init__.py --- a/lib-python/2.7/distutils/tests/__init__.py +++ b/lib-python/2.7/distutils/tests/__init__.py @@ -15,9 +15,10 @@ import os import sys import unittest +from test.test_support import run_unittest -here = os.path.dirname(__file__) +here = os.path.dirname(__file__) or os.curdir def test_suite(): @@ -32,4 +33,4 @@ if __name__ == "__main__": - unittest.main(defaultTest="test_suite") + run_unittest(test_suite()) diff --git a/lib-python/2.7/distutils/tests/test_archive_util.py b/lib-python/2.7/distutils/tests/test_archive_util.py --- a/lib-python/2.7/distutils/tests/test_archive_util.py +++ b/lib-python/2.7/distutils/tests/test_archive_util.py @@ -12,7 +12,7 @@ ARCHIVE_FORMATS) from distutils.spawn import find_executable, spawn from distutils.tests import support -from test.test_support import 
check_warnings +from test.test_support import check_warnings, run_unittest try: import grp @@ -281,4 +281,4 @@ return unittest.makeSuite(ArchiveUtilTestCase) if __name__ == "__main__": - unittest.main(defaultTest="test_suite") + run_unittest(test_suite()) diff --git a/lib-python/2.7/distutils/tests/test_bdist_msi.py b/lib-python/2.7/distutils/tests/test_bdist_msi.py --- a/lib-python/2.7/distutils/tests/test_bdist_msi.py +++ b/lib-python/2.7/distutils/tests/test_bdist_msi.py @@ -11,7 +11,7 @@ support.LoggingSilencer, unittest.TestCase): - def test_minial(self): + def test_minimal(self): # minimal test XXX need more tests from distutils.command.bdist_msi import bdist_msi pkg_pth, dist = self.create_dist() diff --git a/lib-python/2.7/distutils/tests/test_build.py b/lib-python/2.7/distutils/tests/test_build.py --- a/lib-python/2.7/distutils/tests/test_build.py +++ b/lib-python/2.7/distutils/tests/test_build.py @@ -2,6 +2,7 @@ import unittest import os import sys +from test.test_support import run_unittest from distutils.command.build import build from distutils.tests import support @@ -51,4 +52,4 @@ return unittest.makeSuite(BuildTestCase) if __name__ == "__main__": - unittest.main(defaultTest="test_suite") + run_unittest(test_suite()) diff --git a/lib-python/2.7/distutils/tests/test_build_clib.py b/lib-python/2.7/distutils/tests/test_build_clib.py --- a/lib-python/2.7/distutils/tests/test_build_clib.py +++ b/lib-python/2.7/distutils/tests/test_build_clib.py @@ -3,6 +3,8 @@ import os import sys +from test.test_support import run_unittest + from distutils.command.build_clib import build_clib from distutils.errors import DistutilsSetupError from distutils.tests import support @@ -140,4 +142,4 @@ return unittest.makeSuite(BuildCLibTestCase) if __name__ == "__main__": - unittest.main(defaultTest="test_suite") + run_unittest(test_suite()) diff --git a/lib-python/2.7/distutils/tests/test_build_ext.py b/lib-python/2.7/distutils/tests/test_build_ext.py --- 
a/lib-python/2.7/distutils/tests/test_build_ext.py +++ b/lib-python/2.7/distutils/tests/test_build_ext.py @@ -3,12 +3,13 @@ import tempfile import shutil from StringIO import StringIO +import textwrap from distutils.core import Extension, Distribution from distutils.command.build_ext import build_ext from distutils import sysconfig from distutils.tests import support -from distutils.errors import DistutilsSetupError +from distutils.errors import DistutilsSetupError, CompileError import unittest from test import test_support @@ -430,6 +431,59 @@ wanted = os.path.join(cmd.build_lib, 'UpdateManager', 'fdsend' + ext) self.assertEqual(ext_path, wanted) + @unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for MacOSX') + def test_deployment_target(self): + self._try_compile_deployment_target() + + orig_environ = os.environ + os.environ = orig_environ.copy() + self.addCleanup(setattr, os, 'environ', orig_environ) + + os.environ['MACOSX_DEPLOYMENT_TARGET']='10.1' + self._try_compile_deployment_target() + + + def _try_compile_deployment_target(self): + deptarget_c = os.path.join(self.tmp_dir, 'deptargetmodule.c') + + with open(deptarget_c, 'w') as fp: + fp.write(textwrap.dedent('''\ + #include + + int dummy; + + #if TARGET != MAC_OS_X_VERSION_MIN_REQUIRED + #error "Unexpected target" + #endif + + ''')) + + target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET') + target = tuple(map(int, target.split('.'))) + target = '%02d%01d0' % target + + deptarget_ext = Extension( + 'deptarget', + [deptarget_c], + extra_compile_args=['-DTARGET=%s'%(target,)], + ) + dist = Distribution({ + 'name': 'deptarget', + 'ext_modules': [deptarget_ext] + }) + dist.package_dir = self.tmp_dir + cmd = build_ext(dist) + cmd.build_lib = self.tmp_dir + cmd.build_temp = self.tmp_dir + + try: + old_stdout = sys.stdout + cmd.ensure_finalized() + cmd.run() + + except CompileError: + self.fail("Wrong deployment target during compilation") + def test_suite(): return 
unittest.makeSuite(BuildExtTestCase) diff --git a/lib-python/2.7/distutils/tests/test_build_py.py b/lib-python/2.7/distutils/tests/test_build_py.py --- a/lib-python/2.7/distutils/tests/test_build_py.py +++ b/lib-python/2.7/distutils/tests/test_build_py.py @@ -10,13 +10,14 @@ from distutils.errors import DistutilsFileError from distutils.tests import support +from test.test_support import run_unittest class BuildPyTestCase(support.TempdirManager, support.LoggingSilencer, unittest.TestCase): - def _setup_package_data(self): + def test_package_data(self): sources = self.mkdtemp() f = open(os.path.join(sources, "__init__.py"), "w") try: @@ -56,20 +57,15 @@ self.assertEqual(len(cmd.get_outputs()), 3) pkgdest = os.path.join(destination, "pkg") files = os.listdir(pkgdest) - return files + self.assertIn("__init__.py", files) + self.assertIn("README.txt", files) + # XXX even with -O, distutils writes pyc, not pyo; bug? + if sys.dont_write_bytecode: + self.assertNotIn("__init__.pyc", files) + else: + self.assertIn("__init__.pyc", files) - def test_package_data(self): - files = self._setup_package_data() - self.assertTrue("__init__.py" in files) - self.assertTrue("README.txt" in files) - - @unittest.skipIf(sys.flags.optimize >= 2, - "pyc files are not written with -O2 and above") - def test_package_data_pyc(self): - files = self._setup_package_data() - self.assertTrue("__init__.pyc" in files) - - def test_empty_package_dir (self): + def test_empty_package_dir(self): # See SF 1668596/1720897. 
cwd = os.getcwd() @@ -117,10 +113,10 @@ finally: sys.dont_write_bytecode = old_dont_write_bytecode - self.assertTrue('byte-compiling is disabled' in self.logs[0][1]) + self.assertIn('byte-compiling is disabled', self.logs[0][1]) def test_suite(): return unittest.makeSuite(BuildPyTestCase) if __name__ == "__main__": - unittest.main(defaultTest="test_suite") + run_unittest(test_suite()) diff --git a/lib-python/2.7/distutils/tests/test_build_scripts.py b/lib-python/2.7/distutils/tests/test_build_scripts.py --- a/lib-python/2.7/distutils/tests/test_build_scripts.py +++ b/lib-python/2.7/distutils/tests/test_build_scripts.py @@ -8,6 +8,7 @@ import sysconfig from distutils.tests import support +from test.test_support import run_unittest class BuildScriptsTestCase(support.TempdirManager, @@ -108,4 +109,4 @@ return unittest.makeSuite(BuildScriptsTestCase) if __name__ == "__main__": - unittest.main(defaultTest="test_suite") + run_unittest(test_suite()) diff --git a/lib-python/2.7/distutils/tests/test_check.py b/lib-python/2.7/distutils/tests/test_check.py --- a/lib-python/2.7/distutils/tests/test_check.py +++ b/lib-python/2.7/distutils/tests/test_check.py @@ -1,5 +1,6 @@ """Tests for distutils.command.check.""" import unittest +from test.test_support import run_unittest from distutils.command.check import check, HAS_DOCUTILS from distutils.tests import support @@ -95,4 +96,4 @@ return unittest.makeSuite(CheckTestCase) if __name__ == "__main__": - unittest.main(defaultTest="test_suite") + run_unittest(test_suite()) diff --git a/lib-python/2.7/distutils/tests/test_clean.py b/lib-python/2.7/distutils/tests/test_clean.py --- a/lib-python/2.7/distutils/tests/test_clean.py +++ b/lib-python/2.7/distutils/tests/test_clean.py @@ -6,6 +6,7 @@ from distutils.command.clean import clean from distutils.tests import support +from test.test_support import run_unittest class cleanTestCase(support.TempdirManager, support.LoggingSilencer, @@ -38,7 +39,7 @@ self.assertTrue(not 
os.path.exists(path), '%s was not removed' % path) - # let's run the command again (should spit warnings but suceed) + # let's run the command again (should spit warnings but succeed) cmd.all = 1 cmd.ensure_finalized() cmd.run() @@ -47,4 +48,4 @@ return unittest.makeSuite(cleanTestCase) if __name__ == "__main__": - unittest.main(defaultTest="test_suite") + run_unittest(test_suite()) diff --git a/lib-python/2.7/distutils/tests/test_cmd.py b/lib-python/2.7/distutils/tests/test_cmd.py --- a/lib-python/2.7/distutils/tests/test_cmd.py +++ b/lib-python/2.7/distutils/tests/test_cmd.py @@ -99,7 +99,7 @@ def test_ensure_dirname(self): cmd = self.cmd - cmd.option1 = os.path.dirname(__file__) + cmd.option1 = os.path.dirname(__file__) or os.curdir cmd.ensure_dirname('option1') cmd.option2 = 'xxx' self.assertRaises(DistutilsOptionError, cmd.ensure_dirname, 'option2') diff --git a/lib-python/2.7/distutils/tests/test_config.py b/lib-python/2.7/distutils/tests/test_config.py --- a/lib-python/2.7/distutils/tests/test_config.py +++ b/lib-python/2.7/distutils/tests/test_config.py @@ -11,6 +11,7 @@ from distutils.log import WARN from distutils.tests import support +from test.test_support import run_unittest PYPIRC = """\ [distutils] @@ -119,4 +120,4 @@ return unittest.makeSuite(PyPIRCCommandTestCase) if __name__ == "__main__": - unittest.main(defaultTest="test_suite") + run_unittest(test_suite()) diff --git a/lib-python/2.7/distutils/tests/test_config_cmd.py b/lib-python/2.7/distutils/tests/test_config_cmd.py --- a/lib-python/2.7/distutils/tests/test_config_cmd.py +++ b/lib-python/2.7/distutils/tests/test_config_cmd.py @@ -2,6 +2,7 @@ import unittest import os import sys +from test.test_support import run_unittest from distutils.command.config import dump_file, config from distutils.tests import support @@ -86,4 +87,4 @@ return unittest.makeSuite(ConfigTestCase) if __name__ == "__main__": - unittest.main(defaultTest="test_suite") + run_unittest(test_suite()) diff --git 
a/lib-python/2.7/distutils/tests/test_core.py b/lib-python/2.7/distutils/tests/test_core.py --- a/lib-python/2.7/distutils/tests/test_core.py +++ b/lib-python/2.7/distutils/tests/test_core.py @@ -6,7 +6,7 @@ import shutil import sys import test.test_support -from test.test_support import captured_stdout +from test.test_support import captured_stdout, run_unittest import unittest from distutils.tests import support @@ -105,4 +105,4 @@ return unittest.makeSuite(CoreTestCase) if __name__ == "__main__": - unittest.main(defaultTest="test_suite") + run_unittest(test_suite()) diff --git a/lib-python/2.7/distutils/tests/test_dep_util.py b/lib-python/2.7/distutils/tests/test_dep_util.py --- a/lib-python/2.7/distutils/tests/test_dep_util.py +++ b/lib-python/2.7/distutils/tests/test_dep_util.py @@ -6,6 +6,7 @@ from distutils.dep_util import newer, newer_pairwise, newer_group from distutils.errors import DistutilsFileError from distutils.tests import support +from test.test_support import run_unittest class DepUtilTestCase(support.TempdirManager, unittest.TestCase): @@ -77,4 +78,4 @@ return unittest.makeSuite(DepUtilTestCase) if __name__ == "__main__": - unittest.main(defaultTest="test_suite") + run_unittest(test_suite()) diff --git a/lib-python/2.7/distutils/tests/test_dir_util.py b/lib-python/2.7/distutils/tests/test_dir_util.py --- a/lib-python/2.7/distutils/tests/test_dir_util.py +++ b/lib-python/2.7/distutils/tests/test_dir_util.py @@ -10,6 +10,7 @@ from distutils import log from distutils.tests import support +from test.test_support import run_unittest class DirUtilTestCase(support.TempdirManager, unittest.TestCase): @@ -112,4 +113,4 @@ return unittest.makeSuite(DirUtilTestCase) if __name__ == "__main__": - unittest.main(defaultTest="test_suite") + run_unittest(test_suite()) diff --git a/lib-python/2.7/distutils/tests/test_dist.py b/lib-python/2.7/distutils/tests/test_dist.py --- a/lib-python/2.7/distutils/tests/test_dist.py +++ 
b/lib-python/2.7/distutils/tests/test_dist.py @@ -11,7 +11,7 @@ from distutils.dist import Distribution, fix_help_options, DistributionMetadata from distutils.cmd import Command import distutils.dist -from test.test_support import TESTFN, captured_stdout +from test.test_support import TESTFN, captured_stdout, run_unittest from distutils.tests import support class test_dist(Command): @@ -433,4 +433,4 @@ return suite if __name__ == "__main__": - unittest.main(defaultTest="test_suite") + run_unittest(test_suite()) diff --git a/lib-python/2.7/distutils/tests/test_file_util.py b/lib-python/2.7/distutils/tests/test_file_util.py --- a/lib-python/2.7/distutils/tests/test_file_util.py +++ b/lib-python/2.7/distutils/tests/test_file_util.py @@ -6,6 +6,7 @@ from distutils.file_util import move_file, write_file, copy_file from distutils import log from distutils.tests import support +from test.test_support import run_unittest class FileUtilTestCase(support.TempdirManager, unittest.TestCase): @@ -77,4 +78,4 @@ return unittest.makeSuite(FileUtilTestCase) if __name__ == "__main__": - unittest.main(defaultTest="test_suite") + run_unittest(test_suite()) diff --git a/lib-python/2.7/distutils/tests/test_filelist.py b/lib-python/2.7/distutils/tests/test_filelist.py --- a/lib-python/2.7/distutils/tests/test_filelist.py +++ b/lib-python/2.7/distutils/tests/test_filelist.py @@ -1,7 +1,7 @@ """Tests for distutils.filelist.""" from os.path import join import unittest -from test.test_support import captured_stdout +from test.test_support import captured_stdout, run_unittest from distutils.filelist import glob_to_re, FileList from distutils import debug @@ -82,4 +82,4 @@ return unittest.makeSuite(FileListTestCase) if __name__ == "__main__": - unittest.main(defaultTest="test_suite") + run_unittest(test_suite()) diff --git a/lib-python/2.7/distutils/tests/test_install.py b/lib-python/2.7/distutils/tests/test_install.py --- a/lib-python/2.7/distutils/tests/test_install.py +++ 
b/lib-python/2.7/distutils/tests/test_install.py @@ -3,6 +3,8 @@ import os import unittest +from test.test_support import run_unittest + from distutils.command.install import install from distutils.core import Distribution @@ -52,4 +54,4 @@ return unittest.makeSuite(InstallTestCase) if __name__ == "__main__": - unittest.main(defaultTest="test_suite") + run_unittest(test_suite()) diff --git a/lib-python/2.7/distutils/tests/test_install_data.py b/lib-python/2.7/distutils/tests/test_install_data.py --- a/lib-python/2.7/distutils/tests/test_install_data.py +++ b/lib-python/2.7/distutils/tests/test_install_data.py @@ -6,6 +6,7 @@ from distutils.command.install_data import install_data from distutils.tests import support +from test.test_support import run_unittest class InstallDataTestCase(support.TempdirManager, support.LoggingSilencer, @@ -73,4 +74,4 @@ return unittest.makeSuite(InstallDataTestCase) if __name__ == "__main__": - unittest.main(defaultTest="test_suite") + run_unittest(test_suite()) diff --git a/lib-python/2.7/distutils/tests/test_install_headers.py b/lib-python/2.7/distutils/tests/test_install_headers.py --- a/lib-python/2.7/distutils/tests/test_install_headers.py +++ b/lib-python/2.7/distutils/tests/test_install_headers.py @@ -6,6 +6,7 @@ from distutils.command.install_headers import install_headers from distutils.tests import support +from test.test_support import run_unittest class InstallHeadersTestCase(support.TempdirManager, support.LoggingSilencer, @@ -37,4 +38,4 @@ return unittest.makeSuite(InstallHeadersTestCase) if __name__ == "__main__": - unittest.main(defaultTest="test_suite") + run_unittest(test_suite()) diff --git a/lib-python/2.7/distutils/tests/test_install_lib.py b/lib-python/2.7/distutils/tests/test_install_lib.py --- a/lib-python/2.7/distutils/tests/test_install_lib.py +++ b/lib-python/2.7/distutils/tests/test_install_lib.py @@ -7,6 +7,7 @@ from distutils.extension import Extension from distutils.tests import support from 
distutils.errors import DistutilsOptionError +from test.test_support import run_unittest class InstallLibTestCase(support.TempdirManager, support.LoggingSilencer, @@ -103,4 +104,4 @@ return unittest.makeSuite(InstallLibTestCase) if __name__ == "__main__": - unittest.main(defaultTest="test_suite") + run_unittest(test_suite()) diff --git a/lib-python/2.7/distutils/tests/test_install_scripts.py b/lib-python/2.7/distutils/tests/test_install_scripts.py --- a/lib-python/2.7/distutils/tests/test_install_scripts.py +++ b/lib-python/2.7/distutils/tests/test_install_scripts.py @@ -7,6 +7,7 @@ from distutils.core import Distribution from distutils.tests import support +from test.test_support import run_unittest class InstallScriptsTestCase(support.TempdirManager, @@ -78,4 +79,4 @@ return unittest.makeSuite(InstallScriptsTestCase) if __name__ == "__main__": - unittest.main(defaultTest="test_suite") + run_unittest(test_suite()) diff --git a/lib-python/2.7/distutils/tests/test_msvc9compiler.py b/lib-python/2.7/distutils/tests/test_msvc9compiler.py --- a/lib-python/2.7/distutils/tests/test_msvc9compiler.py +++ b/lib-python/2.7/distutils/tests/test_msvc9compiler.py @@ -5,6 +5,7 @@ from distutils.errors import DistutilsPlatformError from distutils.tests import support +from test.test_support import run_unittest _MANIFEST = """\ @@ -137,4 +138,4 @@ return unittest.makeSuite(msvc9compilerTestCase) if __name__ == "__main__": - unittest.main(defaultTest="test_suite") + run_unittest(test_suite()) diff --git a/lib-python/2.7/distutils/tests/test_register.py b/lib-python/2.7/distutils/tests/test_register.py --- a/lib-python/2.7/distutils/tests/test_register.py +++ b/lib-python/2.7/distutils/tests/test_register.py @@ -7,7 +7,7 @@ import urllib2 import warnings -from test.test_support import check_warnings +from test.test_support import check_warnings, run_unittest from distutils.command import register as register_module from distutils.command.register import register @@ -138,7 +138,7 @@ # 
let's see what the server received : we should # have 2 similar requests - self.assertTrue(self.conn.reqs, 2) + self.assertEqual(len(self.conn.reqs), 2) req1 = dict(self.conn.reqs[0].headers) req2 = dict(self.conn.reqs[1].headers) self.assertEqual(req2['Content-length'], req1['Content-length']) @@ -168,7 +168,7 @@ del register_module.raw_input # we should have send a request - self.assertTrue(self.conn.reqs, 1) + self.assertEqual(len(self.conn.reqs), 1) req = self.conn.reqs[0] headers = dict(req.headers) self.assertEqual(headers['Content-length'], '608') @@ -186,7 +186,7 @@ del register_module.raw_input # we should have send a request - self.assertTrue(self.conn.reqs, 1) + self.assertEqual(len(self.conn.reqs), 1) req = self.conn.reqs[0] headers = dict(req.headers) self.assertEqual(headers['Content-length'], '290') @@ -258,4 +258,4 @@ return unittest.makeSuite(RegisterTestCase) if __name__ == "__main__": - unittest.main(defaultTest="test_suite") + run_unittest(test_suite()) diff --git a/lib-python/2.7/distutils/tests/test_sdist.py b/lib-python/2.7/distutils/tests/test_sdist.py --- a/lib-python/2.7/distutils/tests/test_sdist.py +++ b/lib-python/2.7/distutils/tests/test_sdist.py @@ -24,11 +24,9 @@ import tempfile import warnings -from test.test_support import check_warnings -from test.test_support import captured_stdout +from test.test_support import captured_stdout, check_warnings, run_unittest -from distutils.command.sdist import sdist -from distutils.command.sdist import show_formats +from distutils.command.sdist import sdist, show_formats from distutils.core import Distribution from distutils.tests.test_config import PyPIRCCommandTestCase from distutils.errors import DistutilsExecError, DistutilsOptionError @@ -372,7 +370,7 @@ # adding a file self.write_file((self.tmp_dir, 'somecode', 'doc2.txt'), '#') - # make sure build_py is reinitinialized, like a fresh run + # make sure build_py is reinitialized, like a fresh run build_py = dist.get_command_obj('build_py') 
build_py.finalized = False build_py.ensure_finalized() @@ -390,6 +388,7 @@ self.assertEqual(len(manifest2), 6) self.assertIn('doc2.txt', manifest2[-1]) + @unittest.skipUnless(zlib, "requires zlib") def test_manifest_marker(self): # check that autogenerated MANIFESTs have a marker dist, cmd = self.get_cmd() @@ -406,6 +405,7 @@ self.assertEqual(manifest[0], '# file GENERATED by distutils, do NOT edit') + @unittest.skipUnless(zlib, "requires zlib") def test_manual_manifest(self): # check that a MANIFEST without a marker is left alone dist, cmd = self.get_cmd() @@ -426,4 +426,4 @@ return unittest.makeSuite(SDistTestCase) if __name__ == "__main__": - unittest.main(defaultTest="test_suite") + run_unittest(test_suite()) diff --git a/lib-python/2.7/distutils/tests/test_spawn.py b/lib-python/2.7/distutils/tests/test_spawn.py --- a/lib-python/2.7/distutils/tests/test_spawn.py +++ b/lib-python/2.7/distutils/tests/test_spawn.py @@ -2,7 +2,7 @@ import unittest import os import time -from test.test_support import captured_stdout +from test.test_support import captured_stdout, run_unittest from distutils.spawn import _nt_quote_args from distutils.spawn import spawn, find_executable @@ -57,4 +57,4 @@ return unittest.makeSuite(SpawnTestCase) if __name__ == "__main__": - unittest.main(defaultTest="test_suite") + run_unittest(test_suite()) diff --git a/lib-python/2.7/distutils/tests/test_text_file.py b/lib-python/2.7/distutils/tests/test_text_file.py --- a/lib-python/2.7/distutils/tests/test_text_file.py +++ b/lib-python/2.7/distutils/tests/test_text_file.py @@ -3,6 +3,7 @@ import unittest from distutils.text_file import TextFile from distutils.tests import support +from test.test_support import run_unittest TEST_DATA = """# test file @@ -103,4 +104,4 @@ return unittest.makeSuite(TextFileTestCase) if __name__ == "__main__": - unittest.main(defaultTest="test_suite") + run_unittest(test_suite()) diff --git a/lib-python/2.7/distutils/tests/test_unixccompiler.py 
b/lib-python/2.7/distutils/tests/test_unixccompiler.py --- a/lib-python/2.7/distutils/tests/test_unixccompiler.py +++ b/lib-python/2.7/distutils/tests/test_unixccompiler.py @@ -1,6 +1,7 @@ """Tests for distutils.unixccompiler.""" import sys import unittest +from test.test_support import run_unittest from distutils import sysconfig from distutils.unixccompiler import UnixCCompiler @@ -126,4 +127,4 @@ return unittest.makeSuite(UnixCCompilerTestCase) if __name__ == "__main__": - unittest.main(defaultTest="test_suite") + run_unittest(test_suite()) diff --git a/lib-python/2.7/distutils/tests/test_upload.py b/lib-python/2.7/distutils/tests/test_upload.py --- a/lib-python/2.7/distutils/tests/test_upload.py +++ b/lib-python/2.7/distutils/tests/test_upload.py @@ -1,14 +1,13 @@ +# -*- encoding: utf8 -*- """Tests for distutils.command.upload.""" -# -*- encoding: utf8 -*- -import sys import os import unittest +from test.test_support import run_unittest from distutils.command import upload as upload_mod from distutils.command.upload import upload from distutils.core import Distribution -from distutils.tests import support from distutils.tests.test_config import PYPIRC, PyPIRCCommandTestCase PYPIRC_LONG_PASSWORD = """\ @@ -129,4 +128,4 @@ return unittest.makeSuite(uploadTestCase) if __name__ == "__main__": - unittest.main(defaultTest="test_suite") + run_unittest(test_suite()) diff --git a/lib-python/2.7/distutils/tests/test_util.py b/lib-python/2.7/distutils/tests/test_util.py --- a/lib-python/2.7/distutils/tests/test_util.py +++ b/lib-python/2.7/distutils/tests/test_util.py @@ -1,6 +1,7 @@ """Tests for distutils.util.""" import sys import unittest +from test.test_support import run_unittest from distutils.errors import DistutilsPlatformError, DistutilsByteCompileError from distutils.util import byte_compile @@ -21,4 +22,4 @@ return unittest.makeSuite(UtilTestCase) if __name__ == "__main__": - unittest.main(defaultTest="test_suite") + run_unittest(test_suite()) diff --git 
a/lib-python/2.7/distutils/tests/test_version.py b/lib-python/2.7/distutils/tests/test_version.py --- a/lib-python/2.7/distutils/tests/test_version.py +++ b/lib-python/2.7/distutils/tests/test_version.py @@ -2,6 +2,7 @@ import unittest from distutils.version import LooseVersion from distutils.version import StrictVersion +from test.test_support import run_unittest class VersionTestCase(unittest.TestCase): @@ -67,4 +68,4 @@ return unittest.makeSuite(VersionTestCase) if __name__ == "__main__": - unittest.main(defaultTest="test_suite") + run_unittest(test_suite()) diff --git a/lib-python/2.7/distutils/tests/test_versionpredicate.py b/lib-python/2.7/distutils/tests/test_versionpredicate.py --- a/lib-python/2.7/distutils/tests/test_versionpredicate.py +++ b/lib-python/2.7/distutils/tests/test_versionpredicate.py @@ -4,6 +4,10 @@ import distutils.versionpredicate import doctest +from test.test_support import run_unittest def test_suite(): return doctest.DocTestSuite(distutils.versionpredicate) + +if __name__ == '__main__': + run_unittest(test_suite()) diff --git a/lib-python/2.7/distutils/util.py b/lib-python/2.7/distutils/util.py --- a/lib-python/2.7/distutils/util.py +++ b/lib-python/2.7/distutils/util.py @@ -97,9 +97,7 @@ from distutils.sysconfig import get_config_vars cfgvars = get_config_vars() - macver = os.environ.get('MACOSX_DEPLOYMENT_TARGET') - if not macver: - macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET') + macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET') if 1: # Always calculate the release of the running machine, diff --git a/lib-python/2.7/doctest.py b/lib-python/2.7/doctest.py --- a/lib-python/2.7/doctest.py +++ b/lib-python/2.7/doctest.py @@ -1217,7 +1217,7 @@ # Process each example. for examplenum, example in enumerate(test.examples): - # If REPORT_ONLY_FIRST_FAILURE is set, then supress + # If REPORT_ONLY_FIRST_FAILURE is set, then suppress # reporting after the first failure. 
quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and failures > 0) @@ -2186,7 +2186,7 @@ caller can catch the errors and initiate post-mortem debugging. The DocTestCase provides a debug method that raises - UnexpectedException errors if there is an unexepcted + UnexpectedException errors if there is an unexpected exception: >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42', diff --git a/lib-python/2.7/email/charset.py b/lib-python/2.7/email/charset.py --- a/lib-python/2.7/email/charset.py +++ b/lib-python/2.7/email/charset.py @@ -209,7 +209,7 @@ input_charset = unicode(input_charset, 'ascii') except UnicodeError: raise errors.CharsetError(input_charset) - input_charset = input_charset.lower() + input_charset = input_charset.lower().encode('ascii') # Set the input charset after filtering through the aliases and/or codecs if not (input_charset in ALIASES or input_charset in CHARSETS): try: diff --git a/lib-python/2.7/email/generator.py b/lib-python/2.7/email/generator.py --- a/lib-python/2.7/email/generator.py +++ b/lib-python/2.7/email/generator.py @@ -202,18 +202,13 @@ g = self.clone(s) g.flatten(part, unixfrom=False) msgtexts.append(s.getvalue()) - # Now make sure the boundary we've selected doesn't appear in any of - # the message texts. - alltext = NL.join(msgtexts) # BAW: What about boundaries that are wrapped in double-quotes? - boundary = msg.get_boundary(failobj=_make_boundary(alltext)) - # If we had to calculate a new boundary because the body text - # contained that string, set the new boundary. We don't do it - # unconditionally because, while set_boundary() preserves order, it - # doesn't preserve newlines/continuations in headers. This is no big - # deal in practice, but turns out to be inconvenient for the unittest - # suite. - if msg.get_boundary() != boundary: + boundary = msg.get_boundary() + if not boundary: + # Create a boundary that doesn't appear in any of the + # message texts. 
+ alltext = NL.join(msgtexts) + boundary = _make_boundary(alltext) msg.set_boundary(boundary) # If there's a preamble, write it out, with a trailing CRLF if msg.preamble is not None: @@ -292,7 +287,7 @@ _FMT = '[Non-text (%(type)s) part of message omitted, filename %(filename)s]' class DecodedGenerator(Generator): - """Generator a text representation of a message. + """Generates a text representation of a message. Like the Generator base class, except that non-text parts are substituted with a format string representing the part. diff --git a/lib-python/2.7/email/header.py b/lib-python/2.7/email/header.py --- a/lib-python/2.7/email/header.py +++ b/lib-python/2.7/email/header.py @@ -47,6 +47,10 @@ # For use with .match() fcre = re.compile(r'[\041-\176]+:$') +# Find a header embedded in a putative header value. Used to check for +# header injection attack. +_embeded_header = re.compile(r'\n[^ \t]+:') + # Helpers @@ -403,7 +407,11 @@ newchunks += self._split(s, charset, targetlen, splitchars) lastchunk, lastcharset = newchunks[-1] lastlen = lastcharset.encoded_header_len(lastchunk) - return self._encode_chunks(newchunks, maxlinelen) + value = self._encode_chunks(newchunks, maxlinelen) + if _embeded_header.search(value): + raise HeaderParseError("header value appears to contain " + "an embedded header: {!r}".format(value)) + return value diff --git a/lib-python/2.7/email/message.py b/lib-python/2.7/email/message.py --- a/lib-python/2.7/email/message.py +++ b/lib-python/2.7/email/message.py @@ -38,7 +38,9 @@ def _formatparam(param, value=None, quote=True): """Convenience function to format and return a key=value pair. - This will quote the value if needed or if quote is true. + This will quote the value if needed or if quote is true. If value is a + three tuple (charset, language, value), it will be encoded according + to RFC2231 rules. 
""" if value is not None and len(value) > 0: # A tuple is used for RFC 2231 encoded parameter values where items @@ -97,7 +99,7 @@ objects, otherwise it is a string. Message objects implement part of the `mapping' interface, which assumes - there is exactly one occurrance of the header per message. Some headers + there is exactly one occurrence of the header per message. Some headers do in fact appear multiple times (e.g. Received) and for those headers, you must use the explicit API to set or get all the headers. Not all of the mapping methods are implemented. @@ -286,7 +288,7 @@ Return None if the header is missing instead of raising an exception. Note that if the header appeared multiple times, exactly which - occurrance gets returned is undefined. Use get_all() to get all + occurrence gets returned is undefined. Use get_all() to get all the values matching a header field name. """ return self.get(name) @@ -389,7 +391,10 @@ name is the header field to add. keyword arguments can be used to set additional parameters for the header field, with underscores converted to dashes. Normally the parameter will be added as key="value" unless - value is None, in which case only the key will be added. + value is None, in which case only the key will be added. If a + parameter value contains non-ASCII characters it must be specified as a + three-tuple of (charset, language, value), in which case it will be + encoded according to RFC2231 rules. Example: diff --git a/lib-python/2.7/email/mime/application.py b/lib-python/2.7/email/mime/application.py --- a/lib-python/2.7/email/mime/application.py +++ b/lib-python/2.7/email/mime/application.py @@ -17,7 +17,7 @@ _encoder=encoders.encode_base64, **_params): """Create an application/* type MIME document. - _data is a string containing the raw applicatoin data. + _data is a string containing the raw application data. _subtype is the MIME content type subtype, defaulting to 'octet-stream'. 
diff --git a/lib-python/2.7/email/test/data/msg_26.txt b/lib-python/2.7/email/test/data/msg_26.txt --- a/lib-python/2.7/email/test/data/msg_26.txt +++ b/lib-python/2.7/email/test/data/msg_26.txt @@ -42,4 +42,4 @@ MzMAAAAACH97tzAAAAALu3c3gAAAAAAL+7tzDABAu7f7cAAAAAAACA+3MA7EQAv/sIAA AAAAAAAIAAAAAAAAAIAAAAAA ---1618492860--2051301190--113853680-- +--1618492860--2051301190--113853680-- \ No newline at end of file diff --git a/lib-python/2.7/email/test/test_email.py b/lib-python/2.7/email/test/test_email.py --- a/lib-python/2.7/email/test/test_email.py +++ b/lib-python/2.7/email/test/test_email.py @@ -179,6 +179,17 @@ self.assertRaises(Errors.HeaderParseError, msg.set_boundary, 'BOUNDARY') + def test_make_boundary(self): + msg = MIMEMultipart('form-data') + # Note that when the boundary gets created is an implementation + # detail and might change. + self.assertEqual(msg.items()[0][1], 'multipart/form-data') + # Trigger creation of boundary + msg.as_string() + self.assertEqual(msg.items()[0][1][:33], + 'multipart/form-data; boundary="==') + # XXX: there ought to be tests of the uniqueness of the boundary, too. + def test_message_rfc822_only(self): # Issue 7970: message/rfc822 not in multipart parsed by # HeaderParser caused an exception when flattened. @@ -542,6 +553,17 @@ msg.set_charset(u'us-ascii') self.assertEqual('us-ascii', msg.get_content_charset()) + # Issue 5871: reject an attempt to embed a header inside a header value + # (header injection attack). 
+ def test_embeded_header_via_Header_rejected(self): + msg = Message() + msg['Dummy'] = Header('dummy\nX-Injected-Header: test') + self.assertRaises(Errors.HeaderParseError, msg.as_string) + + def test_embeded_header_via_string_rejected(self): + msg = Message() + msg['Dummy'] = 'dummy\nX-Injected-Header: test' + self.assertRaises(Errors.HeaderParseError, msg.as_string) # Test the email.Encoders module @@ -3113,6 +3135,28 @@ s = 'Subject: =?EUC-KR?B?CSixpLDtKSC/7Liuvsax4iC6uLmwMcijIKHaILzSwd/H0SC8+LCjwLsgv7W/+Mj3I ?=' raises(Errors.HeaderParseError, decode_header, s) + # Issue 1078919 + def test_ascii_add_header(self): + msg = Message() + msg.add_header('Content-Disposition', 'attachment', + filename='bud.gif') + self.assertEqual('attachment; filename="bud.gif"', + msg['Content-Disposition']) + + def test_nonascii_add_header_via_triple(self): + msg = Message() + msg.add_header('Content-Disposition', 'attachment', + filename=('iso-8859-1', '', 'Fu\xdfballer.ppt')) + self.assertEqual( + 'attachment; filename*="iso-8859-1\'\'Fu%DFballer.ppt"', + msg['Content-Disposition']) + + def test_encode_unaliased_charset(self): + # Issue 1379416: when the charset has no output conversion, + # output was accidentally getting coerced to unicode. + res = Header('abc','iso-8859-2').encode() + self.assertEqual(res, '=?iso-8859-2?q?abc?=') + self.assertIsInstance(res, str) # Test RFC 2231 header parameters (en/de)coding diff --git a/lib-python/2.7/ftplib.py b/lib-python/2.7/ftplib.py --- a/lib-python/2.7/ftplib.py +++ b/lib-python/2.7/ftplib.py @@ -599,7 +599,7 @@ Usage example: >>> from ftplib import FTP_TLS >>> ftps = FTP_TLS('ftp.python.org') - >>> ftps.login() # login anonimously previously securing control channel + >>> ftps.login() # login anonymously previously securing control channel '230 Guest login ok, access restrictions apply.' 
>>> ftps.prot_p() # switch to secure data connection '200 Protection level set to P' diff --git a/lib-python/2.7/functools.py b/lib-python/2.7/functools.py --- a/lib-python/2.7/functools.py +++ b/lib-python/2.7/functools.py @@ -53,17 +53,17 @@ def total_ordering(cls): """Class decorator that fills in missing ordering methods""" convert = { - '__lt__': [('__gt__', lambda self, other: other < self), - ('__le__', lambda self, other: not other < self), + '__lt__': [('__gt__', lambda self, other: not (self < other or self == other)), + ('__le__', lambda self, other: self < other or self == other), ('__ge__', lambda self, other: not self < other)], - '__le__': [('__ge__', lambda self, other: other <= self), - ('__lt__', lambda self, other: not other <= self), + '__le__': [('__ge__', lambda self, other: not self <= other or self == other), + ('__lt__', lambda self, other: self <= other and not self == other), ('__gt__', lambda self, other: not self <= other)], - '__gt__': [('__lt__', lambda self, other: other > self), - ('__ge__', lambda self, other: not other > self), + '__gt__': [('__lt__', lambda self, other: not (self > other or self == other)), + ('__ge__', lambda self, other: self > other or self == other), ('__le__', lambda self, other: not self > other)], - '__ge__': [('__le__', lambda self, other: other >= self), - ('__gt__', lambda self, other: not other >= self), + '__ge__': [('__le__', lambda self, other: (not self >= other) or self == other), + ('__gt__', lambda self, other: self >= other and not self == other), ('__lt__', lambda self, other: not self >= other)] } roots = set(dir(cls)) & set(convert) @@ -80,6 +80,7 @@ def cmp_to_key(mycmp): """Convert a cmp= function into a key= function""" class K(object): + __slots__ = ['obj'] def __init__(self, obj, *args): self.obj = obj def __lt__(self, other): diff --git a/lib-python/2.7/getpass.py b/lib-python/2.7/getpass.py --- a/lib-python/2.7/getpass.py +++ b/lib-python/2.7/getpass.py @@ -62,7 +62,7 @@ try: old = 
termios.tcgetattr(fd) # a copy to save new = old[:] - new[3] &= ~(termios.ECHO|termios.ISIG) # 3 == 'lflags' + new[3] &= ~termios.ECHO # 3 == 'lflags' tcsetattr_flags = termios.TCSAFLUSH if hasattr(termios, 'TCSASOFT'): tcsetattr_flags |= termios.TCSASOFT diff --git a/lib-python/2.7/gettext.py b/lib-python/2.7/gettext.py --- a/lib-python/2.7/gettext.py +++ b/lib-python/2.7/gettext.py @@ -316,7 +316,7 @@ # Note: we unconditionally convert both msgids and msgstrs to # Unicode using the character encoding specified in the charset # parameter of the Content-Type header. The gettext documentation - # strongly encourages msgids to be us-ascii, but some appliations + # strongly encourages msgids to be us-ascii, but some applications # require alternative encodings (e.g. Zope's ZCML and ZPT). For # traditional gettext applications, the msgid conversion will # cause no problems since us-ascii should always be a subset of diff --git a/lib-python/2.7/hashlib.py b/lib-python/2.7/hashlib.py --- a/lib-python/2.7/hashlib.py +++ b/lib-python/2.7/hashlib.py @@ -64,26 +64,29 @@ def __get_builtin_constructor(name): - if name in ('SHA1', 'sha1'): - import _sha - return _sha.new - elif name in ('MD5', 'md5'): - import _md5 - return _md5.new - elif name in ('SHA256', 'sha256', 'SHA224', 'sha224'): - import _sha256 - bs = name[3:] - if bs == '256': - return _sha256.sha256 - elif bs == '224': - return _sha256.sha224 - elif name in ('SHA512', 'sha512', 'SHA384', 'sha384'): - import _sha512 - bs = name[3:] - if bs == '512': - return _sha512.sha512 - elif bs == '384': - return _sha512.sha384 + try: + if name in ('SHA1', 'sha1'): + import _sha + return _sha.new + elif name in ('MD5', 'md5'): + import _md5 + return _md5.new + elif name in ('SHA256', 'sha256', 'SHA224', 'sha224'): + import _sha256 + bs = name[3:] + if bs == '256': + return _sha256.sha256 + elif bs == '224': + return _sha256.sha224 + elif name in ('SHA512', 'sha512', 'SHA384', 'sha384'): + import _sha512 + bs = name[3:] + if bs 
== '512': + return _sha512.sha512 + elif bs == '384': + return _sha512.sha384 + except ImportError: + pass # no extension module, this hash is unsupported. raise ValueError('unsupported hash type %s' % name) diff --git a/lib-python/2.7/heapq.py b/lib-python/2.7/heapq.py --- a/lib-python/2.7/heapq.py +++ b/lib-python/2.7/heapq.py @@ -133,6 +133,11 @@ from operator import itemgetter import bisect +def cmp_lt(x, y): + # Use __lt__ if available; otherwise, try __le__. + # In Py3.x, only __lt__ will be called. + return (x < y) if hasattr(x, '__lt__') else (not y <= x) + def heappush(heap, item): """Push item onto heap, maintaining the heap invariant.""" heap.append(item) @@ -167,13 +172,13 @@ def heappushpop(heap, item): """Fast version of a heappush followed by a heappop.""" - if heap and heap[0] < item: + if heap and cmp_lt(heap[0], item): item, heap[0] = heap[0], item _siftup(heap, 0) return item def heapify(x): - """Transform list into a heap, in-place, in O(len(heap)) time.""" + """Transform list into a heap, in-place, in O(len(x)) time.""" n = len(x) # Transform bottom-up. The largest index there's any point to looking at # is the largest with a child index in-range, so must have 2*i + 1 < n, @@ -215,11 +220,10 @@ pop = result.pop los = result[-1] # los --> Largest of the nsmallest for elem in it: - if los <= elem: - continue - insort(result, elem) - pop() - los = result[-1] + if cmp_lt(elem, los): + insort(result, elem) + pop() + los = result[-1] return result # An alternative approach manifests the whole iterable in memory but # saves comparisons by heapifying all at once. Also, saves time @@ -240,7 +244,7 @@ while pos > startpos: parentpos = (pos - 1) >> 1 parent = heap[parentpos] - if newitem < parent: + if cmp_lt(newitem, parent): heap[pos] = parent pos = parentpos continue @@ -295,7 +299,7 @@ while childpos < endpos: # Set childpos to index of smaller child. 
rightpos = childpos + 1 - if rightpos < endpos and not heap[childpos] < heap[rightpos]: + if rightpos < endpos and not cmp_lt(heap[childpos], heap[rightpos]): childpos = rightpos # Move the smaller child up. heap[pos] = heap[childpos] @@ -364,7 +368,7 @@ return [min(chain(head, it))] return [min(chain(head, it), key=key)] - # When n>=size, it's faster to use sort() + # When n>=size, it's faster to use sorted() try: size = len(iterable) except (TypeError, AttributeError): @@ -402,7 +406,7 @@ return [max(chain(head, it))] return [max(chain(head, it), key=key)] - # When n>=size, it's faster to use sort() + # When n>=size, it's faster to use sorted() try: size = len(iterable) except (TypeError, AttributeError): diff --git a/lib-python/2.7/httplib.py b/lib-python/2.7/httplib.py --- a/lib-python/2.7/httplib.py +++ b/lib-python/2.7/httplib.py @@ -212,6 +212,9 @@ # maximal amount of data to read at one time in _safe_read MAXAMOUNT = 1048576 +# maximal line length when calling readline(). +_MAXLINE = 65536 + class HTTPMessage(mimetools.Message): def addheader(self, key, value): @@ -274,7 +277,9 @@ except IOError: startofline = tell = None self.seekable = 0 - line = self.fp.readline() + line = self.fp.readline(_MAXLINE + 1) + if len(line) > _MAXLINE: + raise LineTooLong("header line") if not line: self.status = 'EOF in headers' break @@ -404,7 +409,10 @@ break # skip the header from the 100 response while True: - skip = self.fp.readline().strip() + skip = self.fp.readline(_MAXLINE + 1) + if len(skip) > _MAXLINE: + raise LineTooLong("header line") + skip = skip.strip() if not skip: break if self.debuglevel > 0: @@ -563,7 +571,9 @@ value = [] while True: if chunk_left is None: - line = self.fp.readline() + line = self.fp.readline(_MAXLINE + 1) + if len(line) > _MAXLINE: + raise LineTooLong("chunk size") i = line.find(';') if i >= 0: line = line[:i] # strip chunk-extensions @@ -598,7 +608,9 @@ # read and discard trailer up to the CRLF terminator ### note: we shouldn't have any 
trailers! while True: - line = self.fp.readline() + line = self.fp.readline(_MAXLINE + 1) + if len(line) > _MAXLINE: + raise LineTooLong("trailer line") if not line: # a vanishingly small number of sites EOF without # sending the trailer @@ -730,7 +742,9 @@ raise socket.error("Tunnel connection failed: %d %s" % (code, message.strip())) while True: - line = response.fp.readline() + line = response.fp.readline(_MAXLINE + 1) + if len(line) > _MAXLINE: + raise LineTooLong("header line") if line == '\r\n': break @@ -790,7 +804,7 @@ del self._buffer[:] # If msg and message_body are sent in a single send() call, # it will avoid performance problems caused by the interaction - # between delayed ack and the Nagle algorithim. + # between delayed ack and the Nagle algorithm. if isinstance(message_body, str): msg += message_body message_body = None @@ -1233,6 +1247,11 @@ self.args = line, self.line = line +class LineTooLong(HTTPException): + def __init__(self, line_type): + HTTPException.__init__(self, "got more than %d bytes when reading %s" + % (_MAXLINE, line_type)) + # for backwards compatibility error = HTTPException diff --git a/lib-python/2.7/idlelib/Bindings.py b/lib-python/2.7/idlelib/Bindings.py --- a/lib-python/2.7/idlelib/Bindings.py +++ b/lib-python/2.7/idlelib/Bindings.py @@ -98,14 +98,6 @@ # menu del menudefs[-1][1][0:2] - menudefs.insert(0, - ('application', [ - ('About IDLE', '<>'), - None, - ('_Preferences....', '<>'), - ])) - - default_keydefs = idleConf.GetCurrentKeySet() del sys diff --git a/lib-python/2.7/idlelib/EditorWindow.py b/lib-python/2.7/idlelib/EditorWindow.py --- a/lib-python/2.7/idlelib/EditorWindow.py +++ b/lib-python/2.7/idlelib/EditorWindow.py @@ -48,6 +48,21 @@ path = module.__path__ except AttributeError: raise ImportError, 'No source for module ' + module.__name__ + if descr[2] != imp.PY_SOURCE: + # If all of the above fails and didn't raise an exception,fallback + # to a straight import which can find __init__.py in a package. 
+ m = __import__(fullname) + try: + filename = m.__file__ + except AttributeError: + pass + else: + file = None + base, ext = os.path.splitext(filename) + if ext == '.pyc': + ext = '.py' + filename = base + ext + descr = filename, None, imp.PY_SOURCE return file, filename, descr class EditorWindow(object): @@ -102,8 +117,8 @@ self.top = top = WindowList.ListedToplevel(root, menu=self.menubar) if flist: self.tkinter_vars = flist.vars - #self.top.instance_dict makes flist.inversedict avalable to - #configDialog.py so it can access all EditorWindow instaces + #self.top.instance_dict makes flist.inversedict available to + #configDialog.py so it can access all EditorWindow instances self.top.instance_dict = flist.inversedict else: self.tkinter_vars = {} # keys: Tkinter event names @@ -136,6 +151,14 @@ if macosxSupport.runningAsOSXApp(): # Command-W on editorwindows doesn't work without this. text.bind('<>', self.close_event) + # Some OS X systems have only one mouse button, + # so use control-click for pulldown menus there. + # (Note, AquaTk defines <2> as the right button if + # present and the Tk Text widget already binds <2>.) + text.bind("",self.right_menu_event) + else: + # Elsewhere, use right-click for pulldown menus. + text.bind("<3>",self.right_menu_event) text.bind("<>", self.cut) text.bind("<>", self.copy) text.bind("<>", self.paste) @@ -154,7 +177,6 @@ text.bind("<>", self.find_selection_event) text.bind("<>", self.replace_event) text.bind("<>", self.goto_line_event) - text.bind("<3>", self.right_menu_event) text.bind("<>",self.smart_backspace_event) text.bind("<>",self.newline_and_indent_event) text.bind("<>",self.smart_indent_event) @@ -300,13 +322,13 @@ return "break" def home_callback(self, event): - if (event.state & 12) != 0 and event.keysym == "Home": - # state&1==shift, state&4==control, state&8==alt - return # ; fall back to class binding - + if (event.state & 4) != 0 and event.keysym == "Home": + # state&4==Control. If , use the Tk binding. 
+ return if self.text.index("iomark") and \ self.text.compare("iomark", "<=", "insert lineend") and \ self.text.compare("insert linestart", "<=", "iomark"): + # In Shell on input line, go to just after prompt insertpt = int(self.text.index("iomark").split(".")[1]) else: line = self.text.get("insert linestart", "insert lineend") @@ -315,30 +337,27 @@ break else: insertpt=len(line) - lineat = int(self.text.index("insert").split('.')[1]) - if insertpt == lineat: insertpt = 0 - dest = "insert linestart+"+str(insertpt)+"c" - if (event.state&1) == 0: - # shift not pressed + # shift was not pressed self.text.tag_remove("sel", "1.0", "end") else: if not self.text.index("sel.first"): - self.text.mark_set("anchor","insert") - + self.text.mark_set("my_anchor", "insert") # there was no previous selection + else: + if self.text.compare(self.text.index("sel.first"), "<", self.text.index("insert")): + self.text.mark_set("my_anchor", "sel.first") # extend back + else: + self.text.mark_set("my_anchor", "sel.last") # extend forward first = self.text.index(dest) - last = self.text.index("anchor") - + last = self.text.index("my_anchor") if self.text.compare(first,">",last): first,last = last,first - self.text.tag_remove("sel", "1.0", "end") self.text.tag_add("sel", first, last) - self.text.mark_set("insert", dest) self.text.see("insert") return "break" @@ -385,7 +404,7 @@ menudict[name] = menu = Menu(mbar, name=name) mbar.add_cascade(label=label, menu=menu, underline=underline) - if macosxSupport.runningAsOSXApp(): + if macosxSupport.isCarbonAquaTk(self.root): # Insert the application menu menudict['application'] = menu = Menu(mbar, name='apple') mbar.add_cascade(label='IDLE', menu=menu) @@ -445,7 +464,11 @@ def python_docs(self, event=None): if sys.platform[:3] == 'win': - os.startfile(self.help_url) + try: + os.startfile(self.help_url) + except WindowsError as why: + tkMessageBox.showerror(title='Document Start Failure', + message=str(why), parent=self.text) else: 
webbrowser.open(self.help_url) return "break" @@ -740,9 +763,13 @@ "Create a callback with the helpfile value frozen at definition time" def display_extra_help(helpfile=helpfile): if not helpfile.startswith(('www', 'http')): - url = os.path.normpath(helpfile) + helpfile = os.path.normpath(helpfile) if sys.platform[:3] == 'win': - os.startfile(helpfile) + try: + os.startfile(helpfile) + except WindowsError as why: + tkMessageBox.showerror(title='Document Start Failure', + message=str(why), parent=self.text) else: webbrowser.open(helpfile) return display_extra_help @@ -1526,7 +1553,12 @@ def get_accelerator(keydefs, eventname): keylist = keydefs.get(eventname) - if not keylist: + # issue10940: temporary workaround to prevent hang with OS X Cocoa Tk 8.5 + # if not keylist: + if (not keylist) or (macosxSupport.runningAsOSXApp() and eventname in { + "<>", + "<>", + "<>"}): return "" s = keylist[0] s = re.sub(r"-[a-z]\b", lambda m: m.group().upper(), s) diff --git a/lib-python/2.7/idlelib/FileList.py b/lib-python/2.7/idlelib/FileList.py --- a/lib-python/2.7/idlelib/FileList.py +++ b/lib-python/2.7/idlelib/FileList.py @@ -43,7 +43,7 @@ def new(self, filename=None): return self.EditorWindow(self, filename) - def close_all_callback(self, event): + def close_all_callback(self, *args, **kwds): for edit in self.inversedict.keys(): reply = edit.close() if reply == "cancel": diff --git a/lib-python/2.7/idlelib/FormatParagraph.py b/lib-python/2.7/idlelib/FormatParagraph.py --- a/lib-python/2.7/idlelib/FormatParagraph.py +++ b/lib-python/2.7/idlelib/FormatParagraph.py @@ -54,7 +54,7 @@ # If the block ends in a \n, we dont want the comment # prefix inserted after it. (Im not sure it makes sense to # reformat a comment block that isnt made of complete - # lines, but whatever!) Can't think of a clean soltution, + # lines, but whatever!) 
Can't think of a clean solution, # so we hack away block_suffix = "" if not newdata[-1]: diff --git a/lib-python/2.7/idlelib/HISTORY.txt b/lib-python/2.7/idlelib/HISTORY.txt --- a/lib-python/2.7/idlelib/HISTORY.txt +++ b/lib-python/2.7/idlelib/HISTORY.txt @@ -13,7 +13,7 @@ - New tarball released as a result of the 'revitalisation' of the IDLEfork project. -- This release requires python 2.1 or better. Compatability with earlier +- This release requires python 2.1 or better. Compatibility with earlier versions of python (especially ancient ones like 1.5x) is no longer a priority in IDLEfork development. diff --git a/lib-python/2.7/idlelib/IOBinding.py b/lib-python/2.7/idlelib/IOBinding.py --- a/lib-python/2.7/idlelib/IOBinding.py +++ b/lib-python/2.7/idlelib/IOBinding.py @@ -320,17 +320,20 @@ return "yes" message = "Do you want to save %s before closing?" % ( self.filename or "this untitled document") - m = tkMessageBox.Message( - title="Save On Close", - message=message, - icon=tkMessageBox.QUESTION, - type=tkMessageBox.YESNOCANCEL, - master=self.text) - reply = m.show() - if reply == "yes": + confirm = tkMessageBox.askyesnocancel( + title="Save On Close", + message=message, + default=tkMessageBox.YES, + master=self.text) + if confirm: + reply = "yes" self.save(None) if not self.get_saved(): reply = "cancel" + elif confirm is None: + reply = "cancel" + else: + reply = "no" self.text.focus_set() return reply @@ -339,7 +342,7 @@ self.save_as(event) else: if self.writefile(self.filename): - self.set_saved(1) + self.set_saved(True) try: self.editwin.store_file_breaks() except AttributeError: # may be a PyShell @@ -465,15 +468,12 @@ self.text.insert("end-1c", "\n") def print_window(self, event): - m = tkMessageBox.Message( - title="Print", - message="Print to Default Printer", - icon=tkMessageBox.QUESTION, - type=tkMessageBox.OKCANCEL, - default=tkMessageBox.OK, - master=self.text) - reply = m.show() - if reply != tkMessageBox.OK: + confirm = tkMessageBox.askokcancel( + 
title="Print", + message="Print to Default Printer", + default=tkMessageBox.OK, + master=self.text) + if not confirm: self.text.focus_set() return "break" tempfilename = None @@ -488,8 +488,8 @@ if not self.writefile(tempfilename): os.unlink(tempfilename) return "break" - platform=os.name - printPlatform=1 + platform = os.name + printPlatform = True if platform == 'posix': #posix platform command = idleConf.GetOption('main','General', 'print-command-posix') @@ -497,7 +497,7 @@ elif platform == 'nt': #win32 platform command = idleConf.GetOption('main','General','print-command-win') else: #no printing for this platform - printPlatform=0 + printPlatform = False if printPlatform: #we can try to print for this platform command = command % filename pipe = os.popen(command, "r") @@ -511,7 +511,7 @@ output = "Printing command: %s\n" % repr(command) + output tkMessageBox.showerror("Print status", output, master=self.text) else: #no printing for this platform - message="Printing is not enabled for this platform: %s" % platform + message = "Printing is not enabled for this platform: %s" % platform tkMessageBox.showinfo("Print status", message, master=self.text) if tempfilename: os.unlink(tempfilename) diff --git a/lib-python/2.7/idlelib/NEWS.txt b/lib-python/2.7/idlelib/NEWS.txt --- a/lib-python/2.7/idlelib/NEWS.txt +++ b/lib-python/2.7/idlelib/NEWS.txt @@ -1,3 +1,18 @@ +What's New in IDLE 2.7.2? +======================= + +*Release date: 29-May-2011* + +- Issue #6378: Further adjust idle.bat to start associated Python + +- Issue #11896: Save on Close failed despite selecting "Yes" in dialog. + +- toggle failing on Tk 8.5, causing IDLE exits and strange selection + behavior. Issue 4676. Improve selection extension behaviour. + +- toggle non-functional when NumLock set on Windows. Issue 3851. + + What's New in IDLE 2.7? ======================= @@ -21,7 +36,7 @@ - Tk 8.5 Text widget requires 'wordprocessor' tabstyle attr to handle mixed space/tab properly. 
Issue 5129, patch by Guilherme Polo. - + - Issue #3549: On MacOS the preferences menu was not present diff --git a/lib-python/2.7/idlelib/PyShell.py b/lib-python/2.7/idlelib/PyShell.py --- a/lib-python/2.7/idlelib/PyShell.py +++ b/lib-python/2.7/idlelib/PyShell.py @@ -1432,6 +1432,13 @@ shell.interp.prepend_syspath(script) shell.interp.execfile(script) + # Check for problematic OS X Tk versions and print a warning message + # in the IDLE shell window; this is less intrusive than always opening + # a separate window. + tkversionwarning = macosxSupport.tkVersionWarning(root) + if tkversionwarning: + shell.interp.runcommand(''.join(("print('", tkversionwarning, "')"))) + root.mainloop() root.destroy() diff --git a/lib-python/2.7/idlelib/ScriptBinding.py b/lib-python/2.7/idlelib/ScriptBinding.py --- a/lib-python/2.7/idlelib/ScriptBinding.py +++ b/lib-python/2.7/idlelib/ScriptBinding.py @@ -26,6 +26,7 @@ from idlelib import PyShell from idlelib.configHandler import idleConf +from idlelib import macosxSupport IDENTCHARS = string.ascii_letters + string.digits + "_" @@ -53,6 +54,9 @@ self.flist = self.editwin.flist self.root = self.editwin.root + if macosxSupport.runningAsOSXApp(): + self.editwin.text_frame.bind('<>', self._run_module_event) + def check_module_event(self, event): filename = self.getfilename() if not filename: @@ -166,6 +170,19 @@ interp.runcode(code) return 'break' + if macosxSupport.runningAsOSXApp(): + # Tk-Cocoa in MacOSX is broken until at least + # Tk 8.5.9, and without this rather + # crude workaround IDLE would hang when a user + # tries to run a module using the keyboard shortcut + # (the menu item works fine). + _run_module_event = run_module_event + + def run_module_event(self, event): + self.editwin.text_frame.after(200, + lambda: self.editwin.text_frame.event_generate('<>')) + return 'break' + def getfilename(self): """Get source filename. 
If not saved, offer to save (or create) file @@ -184,9 +201,9 @@ if autosave and filename: self.editwin.io.save(None) else: - reply = self.ask_save_dialog() + confirm = self.ask_save_dialog() self.editwin.text.focus_set() - if reply == "ok": + if confirm: self.editwin.io.save(None) filename = self.editwin.io.filename else: @@ -195,13 +212,11 @@ def ask_save_dialog(self): msg = "Source Must Be Saved\n" + 5*' ' + "OK to Save?" - mb = tkMessageBox.Message(title="Save Before Run or Check", - message=msg, - icon=tkMessageBox.QUESTION, - type=tkMessageBox.OKCANCEL, - default=tkMessageBox.OK, - master=self.editwin.text) - return mb.show() + confirm = tkMessageBox.askokcancel(title="Save Before Run or Check", + message=msg, + default=tkMessageBox.OK, + master=self.editwin.text) + return confirm def errorbox(self, title, message): # XXX This should really be a function of EditorWindow... diff --git a/lib-python/2.7/idlelib/config-keys.def b/lib-python/2.7/idlelib/config-keys.def --- a/lib-python/2.7/idlelib/config-keys.def +++ b/lib-python/2.7/idlelib/config-keys.def @@ -176,7 +176,7 @@ redo = close-window = restart-shell = -save-window-as-file = +save-window-as-file = close-all-windows = view-restart = tabify-region = @@ -208,7 +208,7 @@ open-module = find-selection = python-context-help = -save-copy-of-window-as-file = +save-copy-of-window-as-file = open-window-from-file = python-docs = diff --git a/lib-python/2.7/idlelib/extend.txt b/lib-python/2.7/idlelib/extend.txt --- a/lib-python/2.7/idlelib/extend.txt +++ b/lib-python/2.7/idlelib/extend.txt @@ -18,7 +18,7 @@ An IDLE extension class is instantiated with a single argument, `editwin', an EditorWindow instance. 
The extension cannot assume much -about this argument, but it is guarateed to have the following instance +about this argument, but it is guaranteed to have the following instance variables: text a Text instance (a widget) diff --git a/lib-python/2.7/idlelib/idle.bat b/lib-python/2.7/idlelib/idle.bat --- a/lib-python/2.7/idlelib/idle.bat +++ b/lib-python/2.7/idlelib/idle.bat @@ -1,4 +1,4 @@ @echo off rem Start IDLE using the appropriate Python interpreter set CURRDIR=%~dp0 -start "%CURRDIR%..\..\pythonw.exe" "%CURRDIR%idle.pyw" %1 %2 %3 %4 %5 %6 %7 %8 %9 +start "IDLE" "%CURRDIR%..\..\pythonw.exe" "%CURRDIR%idle.pyw" %1 %2 %3 %4 %5 %6 %7 %8 %9 diff --git a/lib-python/2.7/idlelib/idlever.py b/lib-python/2.7/idlelib/idlever.py --- a/lib-python/2.7/idlelib/idlever.py +++ b/lib-python/2.7/idlelib/idlever.py @@ -1,1 +1,1 @@ -IDLE_VERSION = "2.7.1" +IDLE_VERSION = "2.7.2" diff --git a/lib-python/2.7/idlelib/macosxSupport.py b/lib-python/2.7/idlelib/macosxSupport.py --- a/lib-python/2.7/idlelib/macosxSupport.py +++ b/lib-python/2.7/idlelib/macosxSupport.py @@ -4,6 +4,7 @@ """ import sys import Tkinter +from os import path _appbundle = None @@ -19,10 +20,41 @@ _appbundle = (sys.platform == 'darwin' and '.app' in sys.executable) return _appbundle +_carbonaquatk = None + +def isCarbonAquaTk(root): + """ + Returns True if IDLE is using a Carbon Aqua Tk (instead of the + newer Cocoa Aqua Tk). + """ + global _carbonaquatk + if _carbonaquatk is None: + _carbonaquatk = (runningAsOSXApp() and + 'aqua' in root.tk.call('tk', 'windowingsystem') and + 'AppKit' not in root.tk.call('winfo', 'server', '.')) + return _carbonaquatk + +def tkVersionWarning(root): + """ + Returns a string warning message if the Tk version in use appears to + be one known to cause problems with IDLE. The Apple Cocoa-based Tk 8.5 + that was shipped with Mac OS X 10.6. 
+ """ + + if (runningAsOSXApp() and + ('AppKit' in root.tk.call('winfo', 'server', '.')) and + (root.tk.call('info', 'patchlevel') == '8.5.7') ): + return (r"WARNING: The version of Tcl/Tk (8.5.7) in use may" + r" be unstable.\n" + r"Visit http://www.python.org/download/mac/tcltk/" + r" for current information.") + else: + return False + def addOpenEventSupport(root, flist): """ - This ensures that the application will respont to open AppleEvents, which - makes is feaseable to use IDLE as the default application for python files. + This ensures that the application will respond to open AppleEvents, which + makes is feasible to use IDLE as the default application for python files. """ def doOpenFile(*args): for fn in args: @@ -79,9 +111,6 @@ WindowList.add_windows_to_menu(menu) WindowList.register_callback(postwindowsmenu) - menudict['application'] = menu = Menu(menubar, name='apple') - menubar.add_cascade(label='IDLE', menu=menu) - def about_dialog(event=None): from idlelib import aboutDialog aboutDialog.AboutDialog(root, 'About IDLE') @@ -91,41 +120,45 @@ root.instance_dict = flist.inversedict configDialog.ConfigDialog(root, 'Settings') + def help_dialog(event=None): + from idlelib import textView + fn = path.join(path.abspath(path.dirname(__file__)), 'help.txt') + textView.view_file(root, 'Help', fn) root.bind('<>', about_dialog) root.bind('<>', config_dialog) + root.createcommand('::tk::mac::ShowPreferences', config_dialog) if flist: root.bind('<>', flist.close_all_callback) + # The binding above doesn't reliably work on all versions of Tk + # on MacOSX. Adding command definition below does seem to do the + # right thing for now. + root.createcommand('exit', flist.close_all_callback) - ###check if Tk version >= 8.4.14; if so, use hard-coded showprefs binding - tkversion = root.tk.eval('info patchlevel') - # Note: we cannot check if the string tkversion >= '8.4.14', because - # the string '8.4.7' is greater than the string '8.4.14'. 
- if tuple(map(int, tkversion.split('.'))) >= (8, 4, 14): - Bindings.menudefs[0] = ('application', [ + if isCarbonAquaTk(root): + # for Carbon AquaTk, replace the default Tk apple menu + menudict['application'] = menu = Menu(menubar, name='apple') + menubar.add_cascade(label='IDLE', menu=menu) + Bindings.menudefs.insert(0, + ('application', [ ('About IDLE', '<>'), - None, - ]) - root.createcommand('::tk::mac::ShowPreferences', config_dialog) + None, + ])) + tkversion = root.tk.eval('info patchlevel') + if tuple(map(int, tkversion.split('.'))) < (8, 4, 14): + # for earlier AquaTk versions, supply a Preferences menu item + Bindings.menudefs[0][1].append( + ('_Preferences....', '<>'), + ) else: - for mname, entrylist in Bindings.menudefs: - menu = menudict.get(mname) - if not menu: - continue - else: - for entry in entrylist: - if not entry: - menu.add_separator() - else: - label, eventname = entry - underline, label = prepstr(label) - accelerator = get_accelerator(Bindings.default_keydefs, - eventname) - def command(text=root, eventname=eventname): - text.event_generate(eventname) - menu.add_command(label=label, underline=underline, - command=command, accelerator=accelerator) + # assume Cocoa AquaTk + # replace default About dialog with About IDLE one + root.createcommand('tkAboutDialog', about_dialog) + # replace default "Help" item in Help menu + root.createcommand('::tk::mac::ShowHelp', help_dialog) + # remove redundant "IDLE Help" from menu + del Bindings.menudefs[-1][1][0] def setupApp(root, flist): """ diff --git a/lib-python/2.7/imaplib.py b/lib-python/2.7/imaplib.py --- a/lib-python/2.7/imaplib.py +++ b/lib-python/2.7/imaplib.py @@ -1158,28 +1158,17 @@ self.port = port self.sock = socket.create_connection((host, port)) self.sslobj = ssl.wrap_socket(self.sock, self.keyfile, self.certfile) + self.file = self.sslobj.makefile('rb') def read(self, size): """Read 'size' bytes from remote.""" - # sslobj.read() sometimes returns < size bytes - chunks = [] - read = 0 
- while read < size: - data = self.sslobj.read(min(size-read, 16384)) - read += len(data) - chunks.append(data) - - return ''.join(chunks) + return self.file.read(size) def readline(self): """Read line from remote.""" - line = [] - while 1: - char = self.sslobj.read(1) - line.append(char) - if char in ("\n", ""): return ''.join(line) + return self.file.readline() def send(self, data): @@ -1195,6 +1184,7 @@ def shutdown(self): """Close I/O established in "open".""" + self.file.close() self.sock.close() @@ -1321,9 +1311,10 @@ 'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12} def Internaldate2tuple(resp): - """Convert IMAP4 INTERNALDATE to UT. + """Parse an IMAP4 INTERNALDATE string. - Returns Python time module tuple. + Return corresponding local time. The return value is a + time.struct_time instance or None if the string has wrong format. """ mo = InternalDate.match(resp) @@ -1390,9 +1381,14 @@ def Time2Internaldate(date_time): - """Convert 'date_time' to IMAP4 INTERNALDATE representation. + """Convert date_time to IMAP4 INTERNALDATE representation. - Return string in form: '"DD-Mmm-YYYY HH:MM:SS +HHMM"' + Return string in form: '"DD-Mmm-YYYY HH:MM:SS +HHMM"'. The + date_time argument can be a number (int or float) representing + seconds since epoch (as returned by time.time()), a 9-tuple + representing local time (as returned by time.localtime()), or a + double-quoted string. In the last case, it is assumed to already + be in the correct format. 
""" if isinstance(date_time, (int, float)): diff --git a/lib-python/2.7/inspect.py b/lib-python/2.7/inspect.py --- a/lib-python/2.7/inspect.py +++ b/lib-python/2.7/inspect.py @@ -943,8 +943,14 @@ f_name, 'at most' if defaults else 'exactly', num_args, 'arguments' if num_args > 1 else 'argument', num_total)) elif num_args == 0 and num_total: - raise TypeError('%s() takes no arguments (%d given)' % - (f_name, num_total)) + if varkw: + if num_pos: + # XXX: We should use num_pos, but Python also uses num_total: + raise TypeError('%s() takes exactly 0 arguments ' + '(%d given)' % (f_name, num_total)) + else: + raise TypeError('%s() takes no arguments (%d given)' % + (f_name, num_total)) for arg in args: if isinstance(arg, str) and arg in named: if is_assigned(arg): diff --git a/lib-python/2.7/json/decoder.py b/lib-python/2.7/json/decoder.py --- a/lib-python/2.7/json/decoder.py +++ b/lib-python/2.7/json/decoder.py @@ -4,7 +4,7 @@ import sys import struct -from json.scanner import make_scanner +from json import scanner try: from _json import scanstring as c_scanstring except ImportError: @@ -161,6 +161,12 @@ nextchar = s[end:end + 1] # Trivial empty object if nextchar == '}': + if object_pairs_hook is not None: + result = object_pairs_hook(pairs) + return result, end + pairs = {} + if object_hook is not None: + pairs = object_hook(pairs) return pairs, end + 1 elif nextchar != '"': raise ValueError(errmsg("Expecting property name", s, end)) @@ -350,7 +356,7 @@ self.parse_object = JSONObject self.parse_array = JSONArray self.parse_string = scanstring - self.scan_once = make_scanner(self) + self.scan_once = scanner.make_scanner(self) def decode(self, s, _w=WHITESPACE.match): """Return the Python representation of ``s`` (a ``str`` or ``unicode`` diff --git a/lib-python/2.7/json/encoder.py b/lib-python/2.7/json/encoder.py --- a/lib-python/2.7/json/encoder.py +++ b/lib-python/2.7/json/encoder.py @@ -251,7 +251,7 @@ if (_one_shot and c_make_encoder is not None - and not 
self.indent and not self.sort_keys): + and self.indent is None and not self.sort_keys): _iterencode = c_make_encoder( markers, self.default, _encoder, self.indent, self.key_separator, self.item_separator, self.sort_keys, diff --git a/lib-python/2.7/json/tests/__init__.py b/lib-python/2.7/json/tests/__init__.py --- a/lib-python/2.7/json/tests/__init__.py +++ b/lib-python/2.7/json/tests/__init__.py @@ -1,7 +1,46 @@ import os import sys +import json +import doctest import unittest -import doctest + +from test import test_support + +# import json with and without accelerations +cjson = test_support.import_fresh_module('json', fresh=['_json']) +pyjson = test_support.import_fresh_module('json', blocked=['_json']) + +# create two base classes that will be used by the other tests +class PyTest(unittest.TestCase): + json = pyjson + loads = staticmethod(pyjson.loads) + dumps = staticmethod(pyjson.dumps) + + at unittest.skipUnless(cjson, 'requires _json') +class CTest(unittest.TestCase): + if cjson is not None: + json = cjson + loads = staticmethod(cjson.loads) + dumps = staticmethod(cjson.dumps) + +# test PyTest and CTest checking if the functions come from the right module +class TestPyTest(PyTest): + def test_pyjson(self): + self.assertEqual(self.json.scanner.make_scanner.__module__, + 'json.scanner') + self.assertEqual(self.json.decoder.scanstring.__module__, + 'json.decoder') + self.assertEqual(self.json.encoder.encode_basestring_ascii.__module__, + 'json.encoder') + +class TestCTest(CTest): + def test_cjson(self): + self.assertEqual(self.json.scanner.make_scanner.__module__, '_json') + self.assertEqual(self.json.decoder.scanstring.__module__, '_json') + self.assertEqual(self.json.encoder.c_make_encoder.__module__, '_json') + self.assertEqual(self.json.encoder.encode_basestring_ascii.__module__, + '_json') + here = os.path.dirname(__file__) @@ -17,12 +56,11 @@ return suite def additional_tests(): - import json - import json.encoder - import json.decoder suite = 
unittest.TestSuite() for mod in (json, json.encoder, json.decoder): suite.addTest(doctest.DocTestSuite(mod)) + suite.addTest(TestPyTest('test_pyjson')) + suite.addTest(TestCTest('test_cjson')) return suite def main(): diff --git a/lib-python/2.7/json/tests/test_check_circular.py b/lib-python/2.7/json/tests/test_check_circular.py --- a/lib-python/2.7/json/tests/test_check_circular.py +++ b/lib-python/2.7/json/tests/test_check_circular.py @@ -1,30 +1,34 @@ -from unittest import TestCase -import json +from json.tests import PyTest, CTest + def default_iterable(obj): return list(obj) -class TestCheckCircular(TestCase): +class TestCheckCircular(object): def test_circular_dict(self): dct = {} dct['a'] = dct - self.assertRaises(ValueError, json.dumps, dct) + self.assertRaises(ValueError, self.dumps, dct) def test_circular_list(self): lst = [] lst.append(lst) - self.assertRaises(ValueError, json.dumps, lst) + self.assertRaises(ValueError, self.dumps, lst) def test_circular_composite(self): dct2 = {} dct2['a'] = [] dct2['a'].append(dct2) - self.assertRaises(ValueError, json.dumps, dct2) + self.assertRaises(ValueError, self.dumps, dct2) def test_circular_default(self): - json.dumps([set()], default=default_iterable) - self.assertRaises(TypeError, json.dumps, [set()]) + self.dumps([set()], default=default_iterable) + self.assertRaises(TypeError, self.dumps, [set()]) def test_circular_off_default(self): - json.dumps([set()], default=default_iterable, check_circular=False) - self.assertRaises(TypeError, json.dumps, [set()], check_circular=False) + self.dumps([set()], default=default_iterable, check_circular=False) + self.assertRaises(TypeError, self.dumps, [set()], check_circular=False) + + +class TestPyCheckCircular(TestCheckCircular, PyTest): pass +class TestCCheckCircular(TestCheckCircular, CTest): pass diff --git a/lib-python/2.7/json/tests/test_decode.py b/lib-python/2.7/json/tests/test_decode.py --- a/lib-python/2.7/json/tests/test_decode.py +++ 
b/lib-python/2.7/json/tests/test_decode.py @@ -1,18 +1,17 @@ import decimal -from unittest import TestCase from StringIO import StringIO +from collections import OrderedDict +from json.tests import PyTest, CTest -import json -from collections import OrderedDict -class TestDecode(TestCase): +class TestDecode(object): def test_decimal(self): - rval = json.loads('1.1', parse_float=decimal.Decimal) + rval = self.loads('1.1', parse_float=decimal.Decimal) self.assertTrue(isinstance(rval, decimal.Decimal)) self.assertEqual(rval, decimal.Decimal('1.1')) def test_float(self): - rval = json.loads('1', parse_int=float) + rval = self.loads('1', parse_int=float) self.assertTrue(isinstance(rval, float)) self.assertEqual(rval, 1.0) @@ -20,22 +19,32 @@ # Several optimizations were made that skip over calls to # the whitespace regex, so this test is designed to try and # exercise the uncommon cases. The array cases are already covered. - rval = json.loads('{ "key" : "value" , "k":"v" }') + rval = self.loads('{ "key" : "value" , "k":"v" }') self.assertEqual(rval, {"key":"value", "k":"v"}) + def test_empty_objects(self): + self.assertEqual(self.loads('{}'), {}) + self.assertEqual(self.loads('[]'), []) + self.assertEqual(self.loads('""'), u"") + self.assertIsInstance(self.loads('""'), unicode) + def test_object_pairs_hook(self): s = '{"xkd":1, "kcw":2, "art":3, "hxm":4, "qrt":5, "pad":6, "hoy":7}' p = [("xkd", 1), ("kcw", 2), ("art", 3), ("hxm", 4), ("qrt", 5), ("pad", 6), ("hoy", 7)] - self.assertEqual(json.loads(s), eval(s)) - self.assertEqual(json.loads(s, object_pairs_hook=lambda x: x), p) - self.assertEqual(json.load(StringIO(s), - object_pairs_hook=lambda x: x), p) - od = json.loads(s, object_pairs_hook=OrderedDict) + self.assertEqual(self.loads(s), eval(s)) + self.assertEqual(self.loads(s, object_pairs_hook=lambda x: x), p) + self.assertEqual(self.json.load(StringIO(s), + object_pairs_hook=lambda x: x), p) + od = self.loads(s, object_pairs_hook=OrderedDict) self.assertEqual(od, 
OrderedDict(p)) self.assertEqual(type(od), OrderedDict) # the object_pairs_hook takes priority over the object_hook - self.assertEqual(json.loads(s, + self.assertEqual(self.loads(s, object_pairs_hook=OrderedDict, object_hook=lambda x: None), OrderedDict(p)) + + +class TestPyDecode(TestDecode, PyTest): pass +class TestCDecode(TestDecode, CTest): pass diff --git a/lib-python/2.7/json/tests/test_default.py b/lib-python/2.7/json/tests/test_default.py --- a/lib-python/2.7/json/tests/test_default.py +++ b/lib-python/2.7/json/tests/test_default.py @@ -1,9 +1,12 @@ -from unittest import TestCase +from json.tests import PyTest, CTest -import json -class TestDefault(TestCase): +class TestDefault(object): def test_default(self): self.assertEqual( - json.dumps(type, default=repr), - json.dumps(repr(type))) + self.dumps(type, default=repr), + self.dumps(repr(type))) + + +class TestPyDefault(TestDefault, PyTest): pass +class TestCDefault(TestDefault, CTest): pass diff --git a/lib-python/2.7/json/tests/test_dump.py b/lib-python/2.7/json/tests/test_dump.py --- a/lib-python/2.7/json/tests/test_dump.py +++ b/lib-python/2.7/json/tests/test_dump.py @@ -1,21 +1,23 @@ -from unittest import TestCase from cStringIO import StringIO +from json.tests import PyTest, CTest -import json -class TestDump(TestCase): +class TestDump(object): def test_dump(self): sio = StringIO() - json.dump({}, sio) + self.json.dump({}, sio) self.assertEqual(sio.getvalue(), '{}') def test_dumps(self): - self.assertEqual(json.dumps({}), '{}') + self.assertEqual(self.dumps({}), '{}') def test_encode_truefalse(self): - self.assertEqual(json.dumps( + self.assertEqual(self.dumps( {True: False, False: True}, sort_keys=True), '{"false": true, "true": false}') - self.assertEqual(json.dumps( + self.assertEqual(self.dumps( {2: 3.0, 4.0: 5L, False: 1, 6L: True}, sort_keys=True), '{"false": 1, "2": 3.0, "4.0": 5, "6": true}') + +class TestPyDump(TestDump, PyTest): pass +class TestCDump(TestDump, CTest): pass diff --git 
a/lib-python/2.7/json/tests/test_encode_basestring_ascii.py b/lib-python/2.7/json/tests/test_encode_basestring_ascii.py --- a/lib-python/2.7/json/tests/test_encode_basestring_ascii.py +++ b/lib-python/2.7/json/tests/test_encode_basestring_ascii.py @@ -1,8 +1,6 @@ -from unittest import TestCase +from collections import OrderedDict +from json.tests import PyTest, CTest -import json.encoder -from json import dumps -from collections import OrderedDict CASES = [ (u'/\\"\ucafe\ubabe\uab98\ufcde\ubcda\uef4a\x08\x0c\n\r\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?', '"/\\\\\\"\\ucafe\\ubabe\\uab98\\ufcde\\ubcda\\uef4a\\b\\f\\n\\r\\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?"'), @@ -23,19 +21,11 @@ (u'\u0123\u4567\u89ab\ucdef\uabcd\uef4a', '"\\u0123\\u4567\\u89ab\\ucdef\\uabcd\\uef4a"'), ] -class TestEncodeBaseStringAscii(TestCase): - def test_py_encode_basestring_ascii(self): - self._test_encode_basestring_ascii(json.encoder.py_encode_basestring_ascii) - - def test_c_encode_basestring_ascii(self): - if not json.encoder.c_encode_basestring_ascii: - return - self._test_encode_basestring_ascii(json.encoder.c_encode_basestring_ascii) - - def _test_encode_basestring_ascii(self, encode_basestring_ascii): - fname = encode_basestring_ascii.__name__ +class TestEncodeBasestringAscii(object): + def test_encode_basestring_ascii(self): + fname = self.json.encoder.encode_basestring_ascii.__name__ for input_string, expect in CASES: - result = encode_basestring_ascii(input_string) + result = self.json.encoder.encode_basestring_ascii(input_string) self.assertEqual(result, expect, '{0!r} != {1!r} for {2}({3!r})'.format( result, expect, fname, input_string)) @@ -43,5 +33,9 @@ def test_ordered_dict(self): # See issue 6105 items = [('one', 1), ('two', 2), ('three', 3), ('four', 4), ('five', 5)] - s = json.dumps(OrderedDict(items)) + s = self.dumps(OrderedDict(items)) self.assertEqual(s, '{"one": 1, "two": 2, "three": 3, "four": 4, "five": 5}') + + +class TestPyEncodeBasestringAscii(TestEncodeBasestringAscii, 
PyTest): pass +class TestCEncodeBasestringAscii(TestEncodeBasestringAscii, CTest): pass diff --git a/lib-python/2.7/json/tests/test_fail.py b/lib-python/2.7/json/tests/test_fail.py --- a/lib-python/2.7/json/tests/test_fail.py +++ b/lib-python/2.7/json/tests/test_fail.py @@ -1,6 +1,4 @@ -from unittest import TestCase - -import json +from json.tests import PyTest, CTest # Fri Dec 30 18:57:26 2005 JSONDOCS = [ @@ -61,15 +59,15 @@ 18: "spec doesn't specify any nesting limitations", } -class TestFail(TestCase): +class TestFail(object): def test_failures(self): for idx, doc in enumerate(JSONDOCS): idx = idx + 1 if idx in SKIPS: - json.loads(doc) + self.loads(doc) continue try: - json.loads(doc) + self.loads(doc) except ValueError: pass else: @@ -79,7 +77,11 @@ data = {'a' : 1, (1, 2) : 2} #This is for c encoder - self.assertRaises(TypeError, json.dumps, data) + self.assertRaises(TypeError, self.dumps, data) #This is for python encoder - self.assertRaises(TypeError, json.dumps, data, indent=True) + self.assertRaises(TypeError, self.dumps, data, indent=True) + + +class TestPyFail(TestFail, PyTest): pass +class TestCFail(TestFail, CTest): pass diff --git a/lib-python/2.7/json/tests/test_float.py b/lib-python/2.7/json/tests/test_float.py --- a/lib-python/2.7/json/tests/test_float.py +++ b/lib-python/2.7/json/tests/test_float.py @@ -1,19 +1,22 @@ import math -from unittest import TestCase +from json.tests import PyTest, CTest -import json -class TestFloat(TestCase): +class TestFloat(object): def test_floats(self): for num in [1617161771.7650001, math.pi, math.pi**100, math.pi**-100, 3.1]: - self.assertEqual(float(json.dumps(num)), num) - self.assertEqual(json.loads(json.dumps(num)), num) - self.assertEqual(json.loads(unicode(json.dumps(num))), num) + self.assertEqual(float(self.dumps(num)), num) + self.assertEqual(self.loads(self.dumps(num)), num) + self.assertEqual(self.loads(unicode(self.dumps(num))), num) def test_ints(self): for num in [1, 1L, 1<<32, 1<<64]: - 
self.assertEqual(json.dumps(num), str(num)) - self.assertEqual(int(json.dumps(num)), num) - self.assertEqual(json.loads(json.dumps(num)), num) - self.assertEqual(json.loads(unicode(json.dumps(num))), num) + self.assertEqual(self.dumps(num), str(num)) + self.assertEqual(int(self.dumps(num)), num) + self.assertEqual(self.loads(self.dumps(num)), num) + self.assertEqual(self.loads(unicode(self.dumps(num))), num) + + +class TestPyFloat(TestFloat, PyTest): pass +class TestCFloat(TestFloat, CTest): pass diff --git a/lib-python/2.7/json/tests/test_indent.py b/lib-python/2.7/json/tests/test_indent.py --- a/lib-python/2.7/json/tests/test_indent.py +++ b/lib-python/2.7/json/tests/test_indent.py @@ -1,9 +1,9 @@ -from unittest import TestCase +import textwrap +from StringIO import StringIO +from json.tests import PyTest, CTest -import json -import textwrap -class TestIndent(TestCase): +class TestIndent(object): def test_indent(self): h = [['blorpie'], ['whoops'], [], 'd-shtaeou', 'd-nthiouh', 'i-vhbjkhnth', {'nifty': 87}, {'field': 'yes', 'morefield': False} ] @@ -30,12 +30,31 @@ ]""") - d1 = json.dumps(h) - d2 = json.dumps(h, indent=2, sort_keys=True, separators=(',', ': ')) + d1 = self.dumps(h) + d2 = self.dumps(h, indent=2, sort_keys=True, separators=(',', ': ')) - h1 = json.loads(d1) - h2 = json.loads(d2) + h1 = self.loads(d1) + h2 = self.loads(d2) self.assertEqual(h1, h) self.assertEqual(h2, h) self.assertEqual(d2, expect) + + def test_indent0(self): + h = {3: 1} + def check(indent, expected): + d1 = self.dumps(h, indent=indent) + self.assertEqual(d1, expected) + + sio = StringIO() + self.json.dump(h, sio, indent=indent) + self.assertEqual(sio.getvalue(), expected) + + # indent=0 should emit newlines + check(0, '{\n"3": 1\n}') + # indent=None is more compact + check(None, '{"3": 1}') + + +class TestPyIndent(TestIndent, PyTest): pass +class TestCIndent(TestIndent, CTest): pass diff --git a/lib-python/2.7/json/tests/test_pass1.py b/lib-python/2.7/json/tests/test_pass1.py --- 
a/lib-python/2.7/json/tests/test_pass1.py +++ b/lib-python/2.7/json/tests/test_pass1.py @@ -1,6 +1,5 @@ -from unittest import TestCase +from json.tests import PyTest, CTest -import json # from http://json.org/JSON_checker/test/pass1.json JSON = r''' @@ -62,15 +61,19 @@ ,"rosebud"] ''' -class TestPass1(TestCase): +class TestPass1(object): def test_parse(self): # test in/out equivalence and parsing - res = json.loads(JSON) - out = json.dumps(res) - self.assertEqual(res, json.loads(out)) + res = self.loads(JSON) + out = self.dumps(res) + self.assertEqual(res, self.loads(out)) try: - json.dumps(res, allow_nan=False) + self.dumps(res, allow_nan=False) except ValueError: pass else: self.fail("23456789012E666 should be out of range") + + +class TestPyPass1(TestPass1, PyTest): pass +class TestCPass1(TestPass1, CTest): pass diff --git a/lib-python/2.7/json/tests/test_pass2.py b/lib-python/2.7/json/tests/test_pass2.py --- a/lib-python/2.7/json/tests/test_pass2.py +++ b/lib-python/2.7/json/tests/test_pass2.py @@ -1,14 +1,18 @@ -from unittest import TestCase -import json +from json.tests import PyTest, CTest + # from http://json.org/JSON_checker/test/pass2.json JSON = r''' [[[[[[[[[[[[[[[[[[["Not too deep"]]]]]]]]]]]]]]]]]]] ''' -class TestPass2(TestCase): +class TestPass2(object): def test_parse(self): # test in/out equivalence and parsing - res = json.loads(JSON) - out = json.dumps(res) - self.assertEqual(res, json.loads(out)) + res = self.loads(JSON) + out = self.dumps(res) + self.assertEqual(res, self.loads(out)) + + +class TestPyPass2(TestPass2, PyTest): pass +class TestCPass2(TestPass2, CTest): pass diff --git a/lib-python/2.7/json/tests/test_pass3.py b/lib-python/2.7/json/tests/test_pass3.py --- a/lib-python/2.7/json/tests/test_pass3.py +++ b/lib-python/2.7/json/tests/test_pass3.py @@ -1,6 +1,5 @@ -from unittest import TestCase +from json.tests import PyTest, CTest -import json # from http://json.org/JSON_checker/test/pass3.json JSON = r''' @@ -12,9 +11,14 @@ } ''' 
-class TestPass3(TestCase): + +class TestPass3(object): def test_parse(self): # test in/out equivalence and parsing - res = json.loads(JSON) - out = json.dumps(res) - self.assertEqual(res, json.loads(out)) + res = self.loads(JSON) + out = self.dumps(res) + self.assertEqual(res, self.loads(out)) + + +class TestPyPass3(TestPass3, PyTest): pass +class TestCPass3(TestPass3, CTest): pass diff --git a/lib-python/2.7/json/tests/test_recursion.py b/lib-python/2.7/json/tests/test_recursion.py --- a/lib-python/2.7/json/tests/test_recursion.py +++ b/lib-python/2.7/json/tests/test_recursion.py @@ -1,28 +1,16 @@ -from unittest import TestCase +from json.tests import PyTest, CTest -import json class JSONTestObject: pass -class RecursiveJSONEncoder(json.JSONEncoder): - recurse = False - def default(self, o): - if o is JSONTestObject: - if self.recurse: - return [JSONTestObject] - else: - return 'JSONTestObject' - return json.JSONEncoder.default(o) - - -class TestRecursion(TestCase): +class TestRecursion(object): def test_listrecursion(self): x = [] x.append(x) try: - json.dumps(x) + self.dumps(x) except ValueError: pass else: @@ -31,7 +19,7 @@ y = [x] x.append(y) try: - json.dumps(x) + self.dumps(x) except ValueError: pass else: @@ -39,13 +27,13 @@ y = [] x = [y, y] # ensure that the marker is cleared - json.dumps(x) + self.dumps(x) def test_dictrecursion(self): x = {} x["test"] = x try: - json.dumps(x) + self.dumps(x) except ValueError: pass else: @@ -53,9 +41,19 @@ x = {} y = {"a": x, "b": x} # ensure that the marker is cleared - json.dumps(x) + self.dumps(x) def test_defaultrecursion(self): + class RecursiveJSONEncoder(self.json.JSONEncoder): + recurse = False + def default(self, o): + if o is JSONTestObject: + if self.recurse: + return [JSONTestObject] + else: + return 'JSONTestObject' + return pyjson.JSONEncoder.default(o) + enc = RecursiveJSONEncoder() self.assertEqual(enc.encode(JSONTestObject), '"JSONTestObject"') enc.recurse = True @@ -65,3 +63,46 @@ pass else: 
self.fail("didn't raise ValueError on default recursion") + + + def test_highly_nested_objects_decoding(self): + # test that loading highly-nested objects doesn't segfault when C + # accelerations are used. See #12017 + # str + with self.assertRaises(RuntimeError): + self.loads('{"a":' * 100000 + '1' + '}' * 100000) + with self.assertRaises(RuntimeError): + self.loads('{"a":' * 100000 + '[1]' + '}' * 100000) + with self.assertRaises(RuntimeError): + self.loads('[' * 100000 + '1' + ']' * 100000) + # unicode + with self.assertRaises(RuntimeError): + self.loads(u'{"a":' * 100000 + u'1' + u'}' * 100000) + with self.assertRaises(RuntimeError): + self.loads(u'{"a":' * 100000 + u'[1]' + u'}' * 100000) + with self.assertRaises(RuntimeError): + self.loads(u'[' * 100000 + u'1' + u']' * 100000) + + def test_highly_nested_objects_encoding(self): + # See #12051 + l, d = [], {} + for x in xrange(100000): + l, d = [l], {'k':d} + with self.assertRaises(RuntimeError): + self.dumps(l) + with self.assertRaises(RuntimeError): + self.dumps(d) + + def test_endless_recursion(self): + # See #12051 + class EndlessJSONEncoder(self.json.JSONEncoder): + def default(self, o): + """If check_circular is False, this will keep adding another list.""" + return [o] + + with self.assertRaises(RuntimeError): + EndlessJSONEncoder(check_circular=False).encode(5j) + + +class TestPyRecursion(TestRecursion, PyTest): pass +class TestCRecursion(TestRecursion, CTest): pass diff --git a/lib-python/2.7/json/tests/test_scanstring.py b/lib-python/2.7/json/tests/test_scanstring.py --- a/lib-python/2.7/json/tests/test_scanstring.py +++ b/lib-python/2.7/json/tests/test_scanstring.py @@ -1,18 +1,10 @@ import sys -import decimal -from unittest import TestCase +from json.tests import PyTest, CTest -import json -import json.decoder -class TestScanString(TestCase): - def test_py_scanstring(self): - self._test_scanstring(json.decoder.py_scanstring) - - def test_c_scanstring(self): - 
self._test_scanstring(json.decoder.c_scanstring) - - def _test_scanstring(self, scanstring): +class TestScanstring(object): + def test_scanstring(self): + scanstring = self.json.decoder.scanstring self.assertEqual( scanstring('"z\\ud834\\udd20x"', 1, None, True), (u'z\U0001d120x', 16)) @@ -103,10 +95,15 @@ (u'Bad value', 12)) def test_issue3623(self): - self.assertRaises(ValueError, json.decoder.scanstring, b"xxx", 1, + self.assertRaises(ValueError, self.json.decoder.scanstring, b"xxx", 1, "xxx") self.assertRaises(UnicodeDecodeError, - json.encoder.encode_basestring_ascii, b"xx\xff") + self.json.encoder.encode_basestring_ascii, b"xx\xff") def test_overflow(self): - self.assertRaises(OverflowError, json.decoder.scanstring, b"xxx", sys.maxsize+1) + with self.assertRaises(OverflowError): + self.json.decoder.scanstring(b"xxx", sys.maxsize+1) + + +class TestPyScanstring(TestScanstring, PyTest): pass +class TestCScanstring(TestScanstring, CTest): pass diff --git a/lib-python/2.7/json/tests/test_separators.py b/lib-python/2.7/json/tests/test_separators.py --- a/lib-python/2.7/json/tests/test_separators.py +++ b/lib-python/2.7/json/tests/test_separators.py @@ -1,10 +1,8 @@ import textwrap -from unittest import TestCase +from json.tests import PyTest, CTest -import json - -class TestSeparators(TestCase): +class TestSeparators(object): def test_separators(self): h = [['blorpie'], ['whoops'], [], 'd-shtaeou', 'd-nthiouh', 'i-vhbjkhnth', {'nifty': 87}, {'field': 'yes', 'morefield': False} ] @@ -31,12 +29,16 @@ ]""") - d1 = json.dumps(h) - d2 = json.dumps(h, indent=2, sort_keys=True, separators=(' ,', ' : ')) + d1 = self.dumps(h) + d2 = self.dumps(h, indent=2, sort_keys=True, separators=(' ,', ' : ')) - h1 = json.loads(d1) - h2 = json.loads(d2) + h1 = self.loads(d1) + h2 = self.loads(d2) self.assertEqual(h1, h) self.assertEqual(h2, h) self.assertEqual(d2, expect) + + +class TestPySeparators(TestSeparators, PyTest): pass +class TestCSeparators(TestSeparators, CTest): pass diff 
--git a/lib-python/2.7/json/tests/test_speedups.py b/lib-python/2.7/json/tests/test_speedups.py --- a/lib-python/2.7/json/tests/test_speedups.py +++ b/lib-python/2.7/json/tests/test_speedups.py @@ -1,24 +1,23 @@ -import decimal -from unittest import TestCase +from json.tests import CTest -from json import decoder, encoder, scanner -class TestSpeedups(TestCase): +class TestSpeedups(CTest): def test_scanstring(self): - self.assertEqual(decoder.scanstring.__module__, "_json") - self.assertTrue(decoder.scanstring is decoder.c_scanstring) + self.assertEqual(self.json.decoder.scanstring.__module__, "_json") + self.assertIs(self.json.decoder.scanstring, self.json.decoder.c_scanstring) def test_encode_basestring_ascii(self): - self.assertEqual(encoder.encode_basestring_ascii.__module__, "_json") - self.assertTrue(encoder.encode_basestring_ascii is - encoder.c_encode_basestring_ascii) + self.assertEqual(self.json.encoder.encode_basestring_ascii.__module__, + "_json") + self.assertIs(self.json.encoder.encode_basestring_ascii, + self.json.encoder.c_encode_basestring_ascii) -class TestDecode(TestCase): +class TestDecode(CTest): def test_make_scanner(self): - self.assertRaises(AttributeError, scanner.c_make_scanner, 1) + self.assertRaises(AttributeError, self.json.scanner.c_make_scanner, 1) def test_make_encoder(self): - self.assertRaises(TypeError, encoder.c_make_encoder, + self.assertRaises(TypeError, self.json.encoder.c_make_encoder, None, "\xCD\x7D\x3D\x4E\x12\x4C\xF9\x79\xD7\x52\xBA\x82\xF2\x27\x4A\x7D\xA0\xCA\x75", None) diff --git a/lib-python/2.7/json/tests/test_unicode.py b/lib-python/2.7/json/tests/test_unicode.py --- a/lib-python/2.7/json/tests/test_unicode.py +++ b/lib-python/2.7/json/tests/test_unicode.py @@ -1,11 +1,10 @@ -from unittest import TestCase +from collections import OrderedDict +from json.tests import PyTest, CTest -import json -from collections import OrderedDict -class TestUnicode(TestCase): +class TestUnicode(object): def test_encoding1(self): - 
encoder = json.JSONEncoder(encoding='utf-8') + encoder = self.json.JSONEncoder(encoding='utf-8') u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' s = u.encode('utf-8') ju = encoder.encode(u) @@ -15,68 +14,72 @@ def test_encoding2(self): u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' s = u.encode('utf-8') - ju = json.dumps(u, encoding='utf-8') - js = json.dumps(s, encoding='utf-8') + ju = self.dumps(u, encoding='utf-8') + js = self.dumps(s, encoding='utf-8') self.assertEqual(ju, js) def test_encoding3(self): u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' - j = json.dumps(u) + j = self.dumps(u) self.assertEqual(j, '"\\u03b1\\u03a9"') def test_encoding4(self): u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' - j = json.dumps([u]) + j = self.dumps([u]) self.assertEqual(j, '["\\u03b1\\u03a9"]') def test_encoding5(self): u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' - j = json.dumps(u, ensure_ascii=False) + j = self.dumps(u, ensure_ascii=False) self.assertEqual(j, u'"{0}"'.format(u)) def test_encoding6(self): u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' - j = json.dumps([u], ensure_ascii=False) + j = self.dumps([u], ensure_ascii=False) self.assertEqual(j, u'["{0}"]'.format(u)) def test_big_unicode_encode(self): u = u'\U0001d120' - self.assertEqual(json.dumps(u), '"\\ud834\\udd20"') - self.assertEqual(json.dumps(u, ensure_ascii=False), u'"\U0001d120"') + self.assertEqual(self.dumps(u), '"\\ud834\\udd20"') + self.assertEqual(self.dumps(u, ensure_ascii=False), u'"\U0001d120"') def test_big_unicode_decode(self): u = u'z\U0001d120x' - self.assertEqual(json.loads('"' + u + '"'), u) - self.assertEqual(json.loads('"z\\ud834\\udd20x"'), u) + self.assertEqual(self.loads('"' + u + '"'), u) + self.assertEqual(self.loads('"z\\ud834\\udd20x"'), u) def test_unicode_decode(self): for i in range(0, 0xd7ff): u = unichr(i) s = '"\\u{0:04x}"'.format(i) - 
self.assertEqual(json.loads(s), u) + self.assertEqual(self.loads(s), u) def test_object_pairs_hook_with_unicode(self): s = u'{"xkd":1, "kcw":2, "art":3, "hxm":4, "qrt":5, "pad":6, "hoy":7}' p = [(u"xkd", 1), (u"kcw", 2), (u"art", 3), (u"hxm", 4), (u"qrt", 5), (u"pad", 6), (u"hoy", 7)] - self.assertEqual(json.loads(s), eval(s)) - self.assertEqual(json.loads(s, object_pairs_hook = lambda x: x), p) - od = json.loads(s, object_pairs_hook = OrderedDict) + self.assertEqual(self.loads(s), eval(s)) + self.assertEqual(self.loads(s, object_pairs_hook = lambda x: x), p) + od = self.loads(s, object_pairs_hook = OrderedDict) self.assertEqual(od, OrderedDict(p)) self.assertEqual(type(od), OrderedDict) # the object_pairs_hook takes priority over the object_hook - self.assertEqual(json.loads(s, + self.assertEqual(self.loads(s, object_pairs_hook = OrderedDict, object_hook = lambda x: None), OrderedDict(p)) def test_default_encoding(self): - self.assertEqual(json.loads(u'{"a": "\xe9"}'.encode('utf-8')), + self.assertEqual(self.loads(u'{"a": "\xe9"}'.encode('utf-8')), {'a': u'\xe9'}) def test_unicode_preservation(self): - self.assertEqual(type(json.loads(u'""')), unicode) - self.assertEqual(type(json.loads(u'"a"')), unicode) - self.assertEqual(type(json.loads(u'["a"]')[0]), unicode) + self.assertEqual(type(self.loads(u'""')), unicode) + self.assertEqual(type(self.loads(u'"a"')), unicode) + self.assertEqual(type(self.loads(u'["a"]')[0]), unicode) # Issue 10038. 
- self.assertEqual(type(json.loads('"foo"')), unicode) + self.assertEqual(type(self.loads('"foo"')), unicode) + + +class TestPyUnicode(TestUnicode, PyTest): pass +class TestCUnicode(TestUnicode, CTest): pass diff --git a/lib-python/2.7/lib-tk/Tix.py b/lib-python/2.7/lib-tk/Tix.py --- a/lib-python/2.7/lib-tk/Tix.py +++ b/lib-python/2.7/lib-tk/Tix.py @@ -163,7 +163,7 @@ extensions) exist, then the image type is chosen according to the depth of the X display: xbm images are chosen on monochrome displays and color images are chosen on color displays. By using - tix_ getimage, you can advoid hard coding the pathnames of the + tix_ getimage, you can avoid hard coding the pathnames of the image files in your application. When successful, this command returns the name of the newly created image, which can be used to configure the -image option of the Tk and Tix widgets. @@ -171,7 +171,7 @@ return self.tk.call('tix', 'getimage', name) def tix_option_get(self, name): - """Gets the options manitained by the Tix + """Gets the options maintained by the Tix scheme mechanism. Available options include: active_bg active_fg bg @@ -576,7 +576,7 @@ class ComboBox(TixWidget): """ComboBox - an Entry field with a dropdown menu. The user can select a - choice by either typing in the entry subwdget or selecting from the + choice by either typing in the entry subwidget or selecting from the listbox subwidget. Subwidget Class @@ -869,7 +869,7 @@ """HList - Hierarchy display widget can be used to display any data that have a hierarchical structure, for example, file system directory trees. The list entries are indented and connected by branch lines - according to their places in the hierachy. + according to their places in the hierarchy. 
Subwidgets - None""" @@ -1520,7 +1520,7 @@ self.tk.call(self._w, 'selection', 'set', first, last) class Tree(TixWidget): - """Tree - The tixTree widget can be used to display hierachical + """Tree - The tixTree widget can be used to display hierarchical data in a tree form. The user can adjust the view of the tree by opening or closing parts of the tree.""" diff --git a/lib-python/2.7/lib-tk/Tkinter.py b/lib-python/2.7/lib-tk/Tkinter.py --- a/lib-python/2.7/lib-tk/Tkinter.py +++ b/lib-python/2.7/lib-tk/Tkinter.py @@ -1660,7 +1660,7 @@ class Tk(Misc, Wm): """Toplevel widget of Tk which represents mostly the main window - of an appliation. It has an associated Tcl interpreter.""" + of an application. It has an associated Tcl interpreter.""" _w = '.' def __init__(self, screenName=None, baseName=None, className='Tk', useTk=1, sync=0, use=None): diff --git a/lib-python/2.7/lib-tk/test/test_ttk/test_functions.py b/lib-python/2.7/lib-tk/test/test_ttk/test_functions.py --- a/lib-python/2.7/lib-tk/test/test_ttk/test_functions.py +++ b/lib-python/2.7/lib-tk/test/test_ttk/test_functions.py @@ -136,7 +136,7 @@ # minimum acceptable for image type self.assertEqual(ttk._format_elemcreate('image', False, 'test'), ("test ", ())) - # specifiyng a state spec + # specifying a state spec self.assertEqual(ttk._format_elemcreate('image', False, 'test', ('', 'a')), ("test {} a", ())) # state spec with multiple states diff --git a/lib-python/2.7/lib-tk/ttk.py b/lib-python/2.7/lib-tk/ttk.py --- a/lib-python/2.7/lib-tk/ttk.py +++ b/lib-python/2.7/lib-tk/ttk.py @@ -707,7 +707,7 @@ textvariable, values, width """ # The "values" option may need special formatting, so leave to - # _format_optdict the responsability to format it + # _format_optdict the responsibility to format it if "values" in kw: kw["values"] = _format_optdict({'v': kw["values"]})[1] @@ -993,7 +993,7 @@ pane is either an integer index or the name of a managed subwindow. 
If kw is not given, returns a dict of the pane option values. If option is specified then the value for that option is returned. - Otherwise, sets the options to the correspoding values.""" + Otherwise, sets the options to the corresponding values.""" if option is not None: kw[option] = None return _val_or_dict(kw, self.tk.call, self._w, "pane", pane) diff --git a/lib-python/2.7/lib-tk/turtle.py b/lib-python/2.7/lib-tk/turtle.py --- a/lib-python/2.7/lib-tk/turtle.py +++ b/lib-python/2.7/lib-tk/turtle.py @@ -1385,7 +1385,7 @@ Optional argument: picname -- a string, name of a gif-file or "nopic". - If picname is a filename, set the corresponing image as background. + If picname is a filename, set the corresponding image as background. If picname is "nopic", delete backgroundimage, if present. If picname is None, return the filename of the current backgroundimage. @@ -1409,7 +1409,7 @@ Optional arguments: canvwidth -- positive integer, new width of canvas in pixels canvheight -- positive integer, new height of canvas in pixels - bg -- colorstring or color-tupel, new backgroundcolor + bg -- colorstring or color-tuple, new backgroundcolor If no arguments are given, return current (canvaswidth, canvasheight) Do not alter the drawing window. To observe hidden parts of @@ -3079,9 +3079,9 @@ fill="", width=ps) # Turtle now at position old, self._position = old - ## if undo is done during crating a polygon, the last vertex - ## will be deleted. if the polygon is entirel deleted, - ## creatigPoly will be set to False. + ## if undo is done during creating a polygon, the last vertex + ## will be deleted. if the polygon is entirely deleted, + ## creatingPoly will be set to False. ## Polygons created before the last one will not be affected by undo() if self._creatingPoly: if len(self._poly) > 0: @@ -3221,7 +3221,7 @@ def dot(self, size=None, *color): """Draw a dot with diameter size, using color. 
- Optional argumentS: + Optional arguments: size -- an integer >= 1 (if given) color -- a colorstring or a numeric color tuple @@ -3691,7 +3691,7 @@ class Turtle(RawTurtle): - """RawTurtle auto-crating (scrolled) canvas. + """RawTurtle auto-creating (scrolled) canvas. When a Turtle object is created or a function derived from some Turtle method is called a TurtleScreen object is automatically created. @@ -3731,7 +3731,7 @@ filename -- a string, used as filename default value is turtle_docstringdict - Has to be called explicitely, (not used by the turtle-graphics classes) + Has to be called explicitly, (not used by the turtle-graphics classes) The docstring dictionary will be written to the Python script .py It is intended to serve as a template for translation of the docstrings into different languages. diff --git a/lib-python/2.7/lib2to3/__main__.py b/lib-python/2.7/lib2to3/__main__.py new file mode 100644 --- /dev/null +++ b/lib-python/2.7/lib2to3/__main__.py @@ -0,0 +1,4 @@ +import sys +from .main import main + +sys.exit(main("lib2to3.fixes")) diff --git a/lib-python/2.7/lib2to3/fixes/fix_itertools.py b/lib-python/2.7/lib2to3/fixes/fix_itertools.py --- a/lib-python/2.7/lib2to3/fixes/fix_itertools.py +++ b/lib-python/2.7/lib2to3/fixes/fix_itertools.py @@ -13,7 +13,7 @@ class FixItertools(fixer_base.BaseFix): BM_compatible = True - it_funcs = "('imap'|'ifilter'|'izip'|'ifilterfalse')" + it_funcs = "('imap'|'ifilter'|'izip'|'izip_longest'|'ifilterfalse')" PATTERN = """ power< it='itertools' trailer< @@ -28,7 +28,8 @@ def transform(self, node, results): prefix = None func = results['func'][0] - if 'it' in results and func.value != u'ifilterfalse': + if ('it' in results and + func.value not in (u'ifilterfalse', u'izip_longest')): dot, it = (results['dot'], results['it']) # Remove the 'itertools' prefix = it.prefix diff --git a/lib-python/2.7/lib2to3/fixes/fix_itertools_imports.py b/lib-python/2.7/lib2to3/fixes/fix_itertools_imports.py --- 
a/lib-python/2.7/lib2to3/fixes/fix_itertools_imports.py +++ b/lib-python/2.7/lib2to3/fixes/fix_itertools_imports.py @@ -31,9 +31,10 @@ if member_name in (u'imap', u'izip', u'ifilter'): child.value = None child.remove() - elif member_name == u'ifilterfalse': + elif member_name in (u'ifilterfalse', u'izip_longest'): node.changed() - name_node.value = u'filterfalse' + name_node.value = (u'filterfalse' if member_name[1] == u'f' + else u'zip_longest') # Make sure the import statement is still sane children = imports.children[:] or [imports] diff --git a/lib-python/2.7/lib2to3/fixes/fix_metaclass.py b/lib-python/2.7/lib2to3/fixes/fix_metaclass.py --- a/lib-python/2.7/lib2to3/fixes/fix_metaclass.py +++ b/lib-python/2.7/lib2to3/fixes/fix_metaclass.py @@ -48,7 +48,7 @@ """ for node in cls_node.children: if node.type == syms.suite: - # already in the prefered format, do nothing + # already in the preferred format, do nothing return # !%@#! oneliners have no suite node, we have to fake one up diff --git a/lib-python/2.7/lib2to3/fixes/fix_urllib.py b/lib-python/2.7/lib2to3/fixes/fix_urllib.py --- a/lib-python/2.7/lib2to3/fixes/fix_urllib.py +++ b/lib-python/2.7/lib2to3/fixes/fix_urllib.py @@ -12,7 +12,7 @@ MAPPING = {"urllib": [ ("urllib.request", - ["URLOpener", "FancyURLOpener", "urlretrieve", + ["URLopener", "FancyURLopener", "urlretrieve", "_urlopener", "urlopen", "urlcleanup", "pathname2url", "url2pathname"]), ("urllib.parse", diff --git a/lib-python/2.7/lib2to3/main.py b/lib-python/2.7/lib2to3/main.py --- a/lib-python/2.7/lib2to3/main.py +++ b/lib-python/2.7/lib2to3/main.py @@ -101,7 +101,7 @@ parser.add_option("-j", "--processes", action="store", default=1, type="int", help="Run 2to3 concurrently") parser.add_option("-x", "--nofix", action="append", default=[], - help="Prevent a fixer from being run.") + help="Prevent a transformation from being run") parser.add_option("-l", "--list-fixes", action="store_true", help="List available transformations") 
parser.add_option("-p", "--print-function", action="store_true", @@ -113,7 +113,7 @@ parser.add_option("-w", "--write", action="store_true", help="Write back modified files") parser.add_option("-n", "--nobackups", action="store_true", default=False, - help="Don't write backups for modified files.") + help="Don't write backups for modified files") # Parse command line arguments refactor_stdin = False diff --git a/lib-python/2.7/lib2to3/patcomp.py b/lib-python/2.7/lib2to3/patcomp.py --- a/lib-python/2.7/lib2to3/patcomp.py +++ b/lib-python/2.7/lib2to3/patcomp.py @@ -12,6 +12,7 @@ # Python imports import os +import StringIO # Fairly local imports from .pgen2 import driver, literals, token, tokenize, parse, grammar @@ -32,7 +33,7 @@ def tokenize_wrapper(input): """Tokenizes a string suppressing significant whitespace.""" skip = set((token.NEWLINE, token.INDENT, token.DEDENT)) - tokens = tokenize.generate_tokens(driver.generate_lines(input).next) + tokens = tokenize.generate_tokens(StringIO.StringIO(input).readline) for quintuple in tokens: type, value, start, end, line_text = quintuple if type not in skip: diff --git a/lib-python/2.7/lib2to3/pgen2/conv.py b/lib-python/2.7/lib2to3/pgen2/conv.py --- a/lib-python/2.7/lib2to3/pgen2/conv.py +++ b/lib-python/2.7/lib2to3/pgen2/conv.py @@ -51,7 +51,7 @@ self.finish_off() def parse_graminit_h(self, filename): - """Parse the .h file writen by pgen. (Internal) + """Parse the .h file written by pgen. (Internal) This file is a sequence of #define statements defining the nonterminals of the grammar as numbers. We build two tables @@ -82,7 +82,7 @@ return True def parse_graminit_c(self, filename): - """Parse the .c file writen by pgen. (Internal) + """Parse the .c file written by pgen. (Internal) The file looks as follows. 
The first two lines are always this: diff --git a/lib-python/2.7/lib2to3/pgen2/driver.py b/lib-python/2.7/lib2to3/pgen2/driver.py --- a/lib-python/2.7/lib2to3/pgen2/driver.py +++ b/lib-python/2.7/lib2to3/pgen2/driver.py @@ -19,6 +19,7 @@ import codecs import os import logging +import StringIO import sys # Pgen imports @@ -101,18 +102,10 @@ def parse_string(self, text, debug=False): """Parse a string and return the syntax tree.""" - tokens = tokenize.generate_tokens(generate_lines(text).next) + tokens = tokenize.generate_tokens(StringIO.StringIO(text).readline) return self.parse_tokens(tokens, debug) -def generate_lines(text): - """Generator that behaves like readline without using StringIO.""" - for line in text.splitlines(True): - yield line - while True: - yield "" - - def load_grammar(gt="Grammar.txt", gp=None, save=True, force=False, logger=None): """Load the grammar (maybe from a pickle).""" diff --git a/lib-python/2.7/lib2to3/pytree.py b/lib-python/2.7/lib2to3/pytree.py --- a/lib-python/2.7/lib2to3/pytree.py +++ b/lib-python/2.7/lib2to3/pytree.py @@ -658,8 +658,8 @@ content: optional sequence of subsequences of patterns; if absent, matches one node; if present, each subsequence is an alternative [*] - min: optinal minumum number of times to match, default 0 - max: optional maximum number of times tro match, default HUGE + min: optional minimum number of times to match, default 0 + max: optional maximum number of times to match, default HUGE name: optional name assigned to this match [*] Thus, if content is [[a, b, c], [d, e], [f, g, h]] this is @@ -743,9 +743,11 @@ else: # The reason for this is that hitting the recursion limit usually # results in some ugly messages about how RuntimeErrors are being - # ignored. - save_stderr = sys.stderr - sys.stderr = StringIO() + # ignored. We don't do this on non-CPython implementation because + # they don't have this problem. 
+ if hasattr(sys, "getrefcount"): + save_stderr = sys.stderr + sys.stderr = StringIO() try: for count, r in self._recursive_matches(nodes, 0): if self.name: @@ -759,7 +761,8 @@ r[self.name] = nodes[:count] yield count, r finally: - sys.stderr = save_stderr + if hasattr(sys, "getrefcount"): + sys.stderr = save_stderr def _iterative_matches(self, nodes): """Helper to iteratively yield the matches.""" diff --git a/lib-python/2.7/lib2to3/refactor.py b/lib-python/2.7/lib2to3/refactor.py --- a/lib-python/2.7/lib2to3/refactor.py +++ b/lib-python/2.7/lib2to3/refactor.py @@ -302,13 +302,14 @@ Files and subdirectories starting with '.' are skipped. """ + py_ext = os.extsep + "py" for dirpath, dirnames, filenames in os.walk(dir_name): self.log_debug("Descending into %s", dirpath) dirnames.sort() filenames.sort() for name in filenames: - if not name.startswith(".") and \ - os.path.splitext(name)[1].endswith("py"): + if (not name.startswith(".") and + os.path.splitext(name)[1] == py_ext): fullname = os.path.join(dirpath, name) self.refactor_file(fullname, write, doctests_only) # Modify dirnames in-place to remove subdirs with leading dots diff --git a/lib-python/2.7/lib2to3/tests/data/py2_test_grammar.py b/lib-python/2.7/lib2to3/tests/data/py2_test_grammar.py --- a/lib-python/2.7/lib2to3/tests/data/py2_test_grammar.py +++ b/lib-python/2.7/lib2to3/tests/data/py2_test_grammar.py @@ -316,7 +316,7 @@ ### simple_stmt: small_stmt (';' small_stmt)* [';'] x = 1; pass; del x def foo(): - # verify statments that end with semi-colons + # verify statements that end with semi-colons x = 1; pass; del x; foo() diff --git a/lib-python/2.7/lib2to3/tests/data/py3_test_grammar.py b/lib-python/2.7/lib2to3/tests/data/py3_test_grammar.py --- a/lib-python/2.7/lib2to3/tests/data/py3_test_grammar.py +++ b/lib-python/2.7/lib2to3/tests/data/py3_test_grammar.py @@ -356,7 +356,7 @@ ### simple_stmt: small_stmt (';' small_stmt)* [';'] x = 1; pass; del x def foo(): - # verify statments that end with 
semi-colons + # verify statements that end with semi-colons x = 1; pass; del x; foo() diff --git a/lib-python/2.7/lib2to3/tests/test_fixers.py b/lib-python/2.7/lib2to3/tests/test_fixers.py --- a/lib-python/2.7/lib2to3/tests/test_fixers.py +++ b/lib-python/2.7/lib2to3/tests/test_fixers.py @@ -3623,16 +3623,24 @@ a = """%s(f, a)""" self.checkall(b, a) - def test_2(self): + def test_qualified(self): b = """itertools.ifilterfalse(a, b)""" a = """itertools.filterfalse(a, b)""" self.check(b, a) - def test_4(self): + b = """itertools.izip_longest(a, b)""" + a = """itertools.zip_longest(a, b)""" + self.check(b, a) + + def test_2(self): b = """ifilterfalse(a, b)""" a = """filterfalse(a, b)""" self.check(b, a) + b = """izip_longest(a, b)""" + a = """zip_longest(a, b)""" + self.check(b, a) + def test_space_1(self): b = """ %s(f, a)""" a = """ %s(f, a)""" @@ -3643,9 +3651,14 @@ a = """ itertools.filterfalse(a, b)""" self.check(b, a) + b = """ itertools.izip_longest(a, b)""" + a = """ itertools.zip_longest(a, b)""" + self.check(b, a) + def test_run_order(self): self.assert_runs_after('map', 'zip', 'filter') + class Test_itertools_imports(FixerTestCase): fixer = 'itertools_imports' @@ -3696,18 +3709,19 @@ s = "from itertools import bar as bang" self.unchanged(s) - def test_ifilter(self): - b = "from itertools import ifilterfalse" - a = "from itertools import filterfalse" - self.check(b, a) - - b = "from itertools import imap, ifilterfalse, foo" - a = "from itertools import filterfalse, foo" - self.check(b, a) - - b = "from itertools import bar, ifilterfalse, foo" - a = "from itertools import bar, filterfalse, foo" - self.check(b, a) + def test_ifilter_and_zip_longest(self): + for name in "filterfalse", "zip_longest": + b = "from itertools import i%s" % (name,) + a = "from itertools import %s" % (name,) + self.check(b, a) + + b = "from itertools import imap, i%s, foo" % (name,) + a = "from itertools import %s, foo" % (name,) + self.check(b, a) + + b = "from itertools import bar, 
i%s, foo" % (name,) + a = "from itertools import bar, %s, foo" % (name,) + self.check(b, a) def test_import_star(self): s = "from itertools import *" diff --git a/lib-python/2.7/lib2to3/tests/test_parser.py b/lib-python/2.7/lib2to3/tests/test_parser.py --- a/lib-python/2.7/lib2to3/tests/test_parser.py +++ b/lib-python/2.7/lib2to3/tests/test_parser.py @@ -19,6 +19,16 @@ # Local imports from lib2to3.pgen2 import tokenize from ..pgen2.parse import ParseError +from lib2to3.pygram import python_symbols as syms + + +class TestDriver(support.TestCase): + + def test_formfeed(self): + s = """print 1\n\x0Cprint 2\n""" + t = driver.parse_string(s) + self.assertEqual(t.children[0].children[0].type, syms.print_stmt) + self.assertEqual(t.children[1].children[0].type, syms.print_stmt) class GrammarTest(support.TestCase): diff --git a/lib-python/2.7/lib2to3/tests/test_refactor.py b/lib-python/2.7/lib2to3/tests/test_refactor.py --- a/lib-python/2.7/lib2to3/tests/test_refactor.py +++ b/lib-python/2.7/lib2to3/tests/test_refactor.py @@ -223,6 +223,7 @@ "hi.py", ".dumb", ".after.py", + "notpy.npy", "sappy"] expected = ["hi.py"] check(tree, expected) diff --git a/lib-python/2.7/lib2to3/tests/test_util.py b/lib-python/2.7/lib2to3/tests/test_util.py --- a/lib-python/2.7/lib2to3/tests/test_util.py +++ b/lib-python/2.7/lib2to3/tests/test_util.py @@ -568,8 +568,8 @@ def test_from_import(self): node = parse('bar()') - fixer_util.touch_import("cgi", "escape", node) - self.assertEqual(str(node), 'from cgi import escape\nbar()\n\n') + fixer_util.touch_import("html", "escape", node) + self.assertEqual(str(node), 'from html import escape\nbar()\n\n') def test_name_import(self): node = parse('bar()') diff --git a/lib-python/2.7/locale.py b/lib-python/2.7/locale.py --- a/lib-python/2.7/locale.py +++ b/lib-python/2.7/locale.py @@ -621,7 +621,7 @@ 'tactis': 'TACTIS', 'euc_jp': 'eucJP', 'euc_kr': 'eucKR', - 'utf_8': 'UTF8', + 'utf_8': 'UTF-8', 'koi8_r': 'KOI8-R', 'koi8_u': 'KOI8-U', # XXX This list is 
still incomplete. If you know more diff --git a/lib-python/2.7/logging/__init__.py b/lib-python/2.7/logging/__init__.py --- a/lib-python/2.7/logging/__init__.py +++ b/lib-python/2.7/logging/__init__.py @@ -1627,6 +1627,7 @@ h = wr() if h: try: + h.acquire() h.flush() h.close() except (IOError, ValueError): @@ -1635,6 +1636,8 @@ # references to them are still around at # application exit. pass + finally: + h.release() except: if raiseExceptions: raise diff --git a/lib-python/2.7/logging/config.py b/lib-python/2.7/logging/config.py --- a/lib-python/2.7/logging/config.py +++ b/lib-python/2.7/logging/config.py @@ -226,14 +226,14 @@ propagate = 1 logger = logging.getLogger(qn) if qn in existing: - i = existing.index(qn) + i = existing.index(qn) + 1 # start with the entry after qn prefixed = qn + "." pflen = len(prefixed) num_existing = len(existing) - i = i + 1 # look at the entry after qn - while (i < num_existing) and (existing[i][:pflen] == prefixed): - child_loggers.append(existing[i]) - i = i + 1 + while i < num_existing: + if existing[i][:pflen] == prefixed: + child_loggers.append(existing[i]) + i += 1 existing.remove(qn) if "level" in opts: level = cp.get(sectname, "level") diff --git a/lib-python/2.7/logging/handlers.py b/lib-python/2.7/logging/handlers.py --- a/lib-python/2.7/logging/handlers.py +++ b/lib-python/2.7/logging/handlers.py @@ -125,6 +125,7 @@ """ if self.stream: self.stream.close() + self.stream = None if self.backupCount > 0: for i in range(self.backupCount - 1, 0, -1): sfn = "%s.%d" % (self.baseFilename, i) @@ -324,6 +325,7 @@ """ if self.stream: self.stream.close() + self.stream = None # get the time that this sequence started at and make it a TimeTuple t = self.rolloverAt - self.interval if self.utc: diff --git a/lib-python/2.7/mailbox.py b/lib-python/2.7/mailbox.py --- a/lib-python/2.7/mailbox.py +++ b/lib-python/2.7/mailbox.py @@ -234,27 +234,35 @@ def __init__(self, dirname, factory=rfc822.Message, create=True): """Initialize a Maildir 
instance.""" Mailbox.__init__(self, dirname, factory, create) + self._paths = { + 'tmp': os.path.join(self._path, 'tmp'), + 'new': os.path.join(self._path, 'new'), + 'cur': os.path.join(self._path, 'cur'), + } if not os.path.exists(self._path): if create: os.mkdir(self._path, 0700) - os.mkdir(os.path.join(self._path, 'tmp'), 0700) - os.mkdir(os.path.join(self._path, 'new'), 0700) - os.mkdir(os.path.join(self._path, 'cur'), 0700) + for path in self._paths.values(): + os.mkdir(path, 0o700) else: raise NoSuchMailboxError(self._path) self._toc = {} - self._last_read = None # Records last time we read cur/new - # NOTE: we manually invalidate _last_read each time we do any - # modifications ourselves, otherwise we might get tripped up by - # bogus mtime behaviour on some systems (see issue #6896). + self._toc_mtimes = {} + for subdir in ('cur', 'new'): + self._toc_mtimes[subdir] = os.path.getmtime(self._paths[subdir]) + self._last_read = time.time() # Records last time we read cur/new + self._skewfactor = 0.1 # Adjust if os/fs clocks are skewing def add(self, message): """Add message and return assigned key.""" tmp_file = self._create_tmp() try: self._dump_message(message, tmp_file) - finally: - _sync_close(tmp_file) + except BaseException: + tmp_file.close() + os.remove(tmp_file.name) + raise + _sync_close(tmp_file) if isinstance(message, MaildirMessage): subdir = message.get_subdir() suffix = self.colon + message.get_info() @@ -280,15 +288,11 @@ raise if isinstance(message, MaildirMessage): os.utime(dest, (os.path.getatime(dest), message.get_date())) - # Invalidate cached toc - self._last_read = None return uniq def remove(self, key): """Remove the keyed message; raise KeyError if it doesn't exist.""" os.remove(os.path.join(self._path, self._lookup(key))) - # Invalidate cached toc (only on success) - self._last_read = None def discard(self, key): """If the keyed message exists, remove it.""" @@ -323,8 +327,6 @@ if isinstance(message, MaildirMessage): os.utime(new_path, 
(os.path.getatime(new_path), message.get_date())) - # Invalidate cached toc - self._last_read = None def get_message(self, key): """Return a Message representation or raise a KeyError.""" @@ -380,8 +382,8 @@ def flush(self): """Write any pending changes to disk.""" # Maildir changes are always written immediately, so there's nothing - # to do except invalidate our cached toc. - self._last_read = None + # to do. + pass def lock(self): """Lock the mailbox.""" @@ -479,36 +481,39 @@ def _refresh(self): """Update table of contents mapping.""" - if self._last_read is not None: - for subdir in ('new', 'cur'): - mtime = os.path.getmtime(os.path.join(self._path, subdir)) - if mtime > self._last_read: - break - else: + # If it has been less than two seconds since the last _refresh() call, + # we have to unconditionally re-read the mailbox just in case it has + # been modified, because os.path.mtime() has a 2 sec resolution in the + # most common worst case (FAT) and a 1 sec resolution typically. This + # results in a few unnecessary re-reads when _refresh() is called + # multiple times in that interval, but once the clock ticks over, we + # will only re-read as needed. Because the filesystem might be being + # served by an independent system with its own clock, we record and + # compare with the mtimes from the filesystem. Because the other + # system's clock might be skewing relative to our clock, we add an + # extra delta to our wait. The default is one tenth second, but is an + # instance variable and so can be adjusted if dealing with a + # particularly skewed or irregular system. 
+ if time.time() - self._last_read > 2 + self._skewfactor: + refresh = False + for subdir in self._toc_mtimes: + mtime = os.path.getmtime(self._paths[subdir]) + if mtime > self._toc_mtimes[subdir]: + refresh = True + self._toc_mtimes[subdir] = mtime + if not refresh: return - - # We record the current time - 1sec so that, if _refresh() is called - # again in the same second, we will always re-read the mailbox - # just in case it's been modified. (os.path.mtime() only has - # 1sec resolution.) This results in a few unnecessary re-reads - # when _refresh() is called multiple times in the same second, - # but once the clock ticks over, we will only re-read as needed. - now = time.time() - 1 - + # Refresh toc self._toc = {} - def update_dir (subdir): - path = os.path.join(self._path, subdir) + for subdir in self._toc_mtimes: + path = self._paths[subdir] for entry in os.listdir(path): p = os.path.join(path, entry) if os.path.isdir(p): continue uniq = entry.split(self.colon)[0] self._toc[uniq] = os.path.join(subdir, entry) - - update_dir('new') - update_dir('cur') - - self._last_read = now + self._last_read = time.time() def _lookup(self, key): """Use TOC to return subpath for given key, or raise a KeyError.""" @@ -551,7 +556,7 @@ f = open(self._path, 'wb+') else: raise NoSuchMailboxError(self._path) - elif e.errno == errno.EACCES: + elif e.errno in (errno.EACCES, errno.EROFS): f = open(self._path, 'rb') else: raise @@ -700,9 +705,14 @@ def _append_message(self, message): """Append message to mailbox and return (start, stop) offsets.""" self._file.seek(0, 2) - self._pre_message_hook(self._file) - offsets = self._install_message(message) - self._post_message_hook(self._file) + before = self._file.tell() + try: + self._pre_message_hook(self._file) + offsets = self._install_message(message) + self._post_message_hook(self._file) + except BaseException: + self._file.truncate(before) + raise self._file.flush() self._file_length = self._file.tell() # Record current length of 
mailbox return offsets @@ -868,18 +878,29 @@ new_key = max(keys) + 1 new_path = os.path.join(self._path, str(new_key)) f = _create_carefully(new_path) + closed = False try: if self._locked: _lock_file(f) try: - self._dump_message(message, f) + try: + self._dump_message(message, f) + except BaseException: + # Unlock and close so it can be deleted on Windows + if self._locked: + _unlock_file(f) + _sync_close(f) + closed = True + os.remove(new_path) + raise if isinstance(message, MHMessage): self._dump_sequences(message, new_key) finally: if self._locked: _unlock_file(f) finally: - _sync_close(f) + if not closed: + _sync_close(f) return new_key def remove(self, key): @@ -1886,7 +1907,7 @@ try: fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB) except IOError, e: - if e.errno in (errno.EAGAIN, errno.EACCES): + if e.errno in (errno.EAGAIN, errno.EACCES, errno.EROFS): raise ExternalClashError('lockf: lock unavailable: %s' % f.name) else: @@ -1896,7 +1917,7 @@ pre_lock = _create_temporary(f.name + '.lock') pre_lock.close() except IOError, e: - if e.errno == errno.EACCES: + if e.errno in (errno.EACCES, errno.EROFS): return # Without write access, just skip dotlocking. 
else: raise diff --git a/lib-python/2.7/msilib/__init__.py b/lib-python/2.7/msilib/__init__.py --- a/lib-python/2.7/msilib/__init__.py +++ b/lib-python/2.7/msilib/__init__.py @@ -173,11 +173,10 @@ add_data(db, table, getattr(module, table)) def make_id(str): - #str = str.replace(".", "_") # colons are allowed - str = str.replace(" ", "_") - str = str.replace("-", "_") - if str[0] in string.digits: - str = "_"+str + identifier_chars = string.ascii_letters + string.digits + "._" + str = "".join([c if c in identifier_chars else "_" for c in str]) + if str[0] in (string.digits + "."): + str = "_" + str assert re.match("^[A-Za-z_][A-Za-z0-9_.]*$", str), "FILE"+str return str @@ -285,19 +284,28 @@ [(feature.id, component)]) def make_short(self, file): + oldfile = file + file = file.replace('+', '_') + file = ''.join(c for c in file if not c in ' "/\[]:;=,') parts = file.split(".") - if len(parts)>1: + if len(parts) > 1: + prefix = "".join(parts[:-1]).upper() suffix = parts[-1].upper() + if not prefix: + prefix = suffix + suffix = None else: + prefix = file.upper() suffix = None - prefix = parts[0].upper() - if len(prefix) <= 8 and (not suffix or len(suffix)<=3): + if len(parts) < 3 and len(prefix) <= 8 and file == oldfile and ( + not suffix or len(suffix) <= 3): if suffix: file = prefix+"."+suffix else: file = prefix - assert file not in self.short_names else: + file = None + if file is None or file in self.short_names: prefix = prefix[:6] if suffix: suffix = suffix[:3] diff --git a/lib-python/2.7/multiprocessing/__init__.py b/lib-python/2.7/multiprocessing/__init__.py --- a/lib-python/2.7/multiprocessing/__init__.py +++ b/lib-python/2.7/multiprocessing/__init__.py @@ -38,6 +38,7 @@ # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +# SUCH DAMAGE. 
# __version__ = '0.70a1' @@ -115,8 +116,11 @@ except (ValueError, KeyError): num = 0 elif 'bsd' in sys.platform or sys.platform == 'darwin': + comm = '/sbin/sysctl -n hw.ncpu' + if sys.platform == 'darwin': + comm = '/usr' + comm try: - with os.popen('sysctl -n hw.ncpu') as p: + with os.popen(comm) as p: num = int(p.read()) except ValueError: num = 0 diff --git a/lib-python/2.7/multiprocessing/connection.py b/lib-python/2.7/multiprocessing/connection.py --- a/lib-python/2.7/multiprocessing/connection.py +++ b/lib-python/2.7/multiprocessing/connection.py @@ -3,7 +3,33 @@ # # multiprocessing/connection.py # -# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt +# Copyright (c) 2006-2008, R Oudkerk +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of author nor the names of any contributors may be +# used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +# SUCH DAMAGE. # __all__ = [ 'Client', 'Listener', 'Pipe' ] diff --git a/lib-python/2.7/multiprocessing/dummy/__init__.py b/lib-python/2.7/multiprocessing/dummy/__init__.py --- a/lib-python/2.7/multiprocessing/dummy/__init__.py +++ b/lib-python/2.7/multiprocessing/dummy/__init__.py @@ -3,7 +3,33 @@ # # multiprocessing/dummy/__init__.py # -# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt +# Copyright (c) 2006-2008, R Oudkerk +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of author nor the names of any contributors may be +# used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +# SUCH DAMAGE. # __all__ = [ diff --git a/lib-python/2.7/multiprocessing/dummy/connection.py b/lib-python/2.7/multiprocessing/dummy/connection.py --- a/lib-python/2.7/multiprocessing/dummy/connection.py +++ b/lib-python/2.7/multiprocessing/dummy/connection.py @@ -3,7 +3,33 @@ # # multiprocessing/dummy/connection.py # -# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt +# Copyright (c) 2006-2008, R Oudkerk +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of author nor the names of any contributors may be +# used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +# SUCH DAMAGE. # __all__ = [ 'Client', 'Listener', 'Pipe' ] diff --git a/lib-python/2.7/multiprocessing/forking.py b/lib-python/2.7/multiprocessing/forking.py --- a/lib-python/2.7/multiprocessing/forking.py +++ b/lib-python/2.7/multiprocessing/forking.py @@ -3,7 +3,33 @@ # # multiprocessing/forking.py # -# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt +# Copyright (c) 2006-2008, R Oudkerk +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of author nor the names of any contributors may be +# used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +# SUCH DAMAGE. # import os @@ -172,6 +198,7 @@ TERMINATE = 0x10000 WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False)) + WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") exit = win32.ExitProcess close = win32.CloseHandle @@ -181,7 +208,7 @@ # People embedding Python want to modify it. # - if sys.executable.lower().endswith('pythonservice.exe'): + if WINSERVICE: _python_exe = os.path.join(sys.exec_prefix, 'python.exe') else: _python_exe = sys.executable @@ -371,7 +398,7 @@ if _logger is not None: d['log_level'] = _logger.getEffectiveLevel() - if not WINEXE: + if not WINEXE and not WINSERVICE: main_path = getattr(sys.modules['__main__'], '__file__', None) if not main_path and sys.argv[0] not in ('', '-c'): main_path = sys.argv[0] diff --git a/lib-python/2.7/multiprocessing/heap.py b/lib-python/2.7/multiprocessing/heap.py --- a/lib-python/2.7/multiprocessing/heap.py +++ b/lib-python/2.7/multiprocessing/heap.py @@ -3,7 +3,33 @@ # # multiprocessing/heap.py # -# Copyright (c) 2007-2008, R Oudkerk --- see COPYING.txt +# Copyright (c) 2006-2008, R Oudkerk +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. 
Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of author nor the names of any contributors may be +# used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +# SUCH DAMAGE. # import bisect diff --git a/lib-python/2.7/multiprocessing/managers.py b/lib-python/2.7/multiprocessing/managers.py --- a/lib-python/2.7/multiprocessing/managers.py +++ b/lib-python/2.7/multiprocessing/managers.py @@ -4,7 +4,33 @@ # # multiprocessing/managers.py # -# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt +# Copyright (c) 2006-2008, R Oudkerk +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. 
Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of author nor the names of any contributors may be +# used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +# SUCH DAMAGE. # __all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ] diff --git a/lib-python/2.7/multiprocessing/pool.py b/lib-python/2.7/multiprocessing/pool.py --- a/lib-python/2.7/multiprocessing/pool.py +++ b/lib-python/2.7/multiprocessing/pool.py @@ -3,7 +3,33 @@ # # multiprocessing/pool.py # -# Copyright (c) 2007-2008, R Oudkerk --- see COPYING.txt +# Copyright (c) 2006-2008, R Oudkerk +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. 
Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of author nor the names of any contributors may be +# used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +# SUCH DAMAGE. 
# __all__ = ['Pool'] @@ -269,6 +295,8 @@ while pool._worker_handler._state == RUN and pool._state == RUN: pool._maintain_pool() time.sleep(0.1) + # send sentinel to stop workers + pool._taskqueue.put(None) debug('worker handler exiting') @staticmethod @@ -387,7 +415,6 @@ if self._state == RUN: self._state = CLOSE self._worker_handler._state = CLOSE - self._taskqueue.put(None) def terminate(self): debug('terminating pool') @@ -421,7 +448,6 @@ worker_handler._state = TERMINATE task_handler._state = TERMINATE - taskqueue.put(None) # sentinel debug('helping task handler/workers to finish') cls._help_stuff_finish(inqueue, task_handler, len(pool)) @@ -431,6 +457,11 @@ result_handler._state = TERMINATE outqueue.put(None) # sentinel + # We must wait for the worker handler to exit before terminating + # workers because we don't want workers to be restarted behind our back. + debug('joining worker handler') + worker_handler.join() + # Terminate workers which haven't already finished. if pool and hasattr(pool[0], 'terminate'): debug('terminating workers') diff --git a/lib-python/2.7/multiprocessing/process.py b/lib-python/2.7/multiprocessing/process.py --- a/lib-python/2.7/multiprocessing/process.py +++ b/lib-python/2.7/multiprocessing/process.py @@ -3,7 +3,33 @@ # # multiprocessing/process.py # -# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt +# Copyright (c) 2006-2008, R Oudkerk +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. 
Neither the name of author nor the names of any contributors may be +# used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +# SUCH DAMAGE. # __all__ = ['Process', 'current_process', 'active_children'] diff --git a/lib-python/2.7/multiprocessing/queues.py b/lib-python/2.7/multiprocessing/queues.py --- a/lib-python/2.7/multiprocessing/queues.py +++ b/lib-python/2.7/multiprocessing/queues.py @@ -3,7 +3,33 @@ # # multiprocessing/queues.py # -# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt +# Copyright (c) 2006-2008, R Oudkerk +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. 
Neither the name of author nor the names of any contributors may be +# used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +# SUCH DAMAGE. # __all__ = ['Queue', 'SimpleQueue', 'JoinableQueue'] diff --git a/lib-python/2.7/multiprocessing/reduction.py b/lib-python/2.7/multiprocessing/reduction.py --- a/lib-python/2.7/multiprocessing/reduction.py +++ b/lib-python/2.7/multiprocessing/reduction.py @@ -4,7 +4,33 @@ # # multiprocessing/reduction.py # -# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt +# Copyright (c) 2006-2008, R Oudkerk +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. 
Neither the name of author nor the names of any contributors may be +# used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +# SUCH DAMAGE. # __all__ = [] diff --git a/lib-python/2.7/multiprocessing/sharedctypes.py b/lib-python/2.7/multiprocessing/sharedctypes.py --- a/lib-python/2.7/multiprocessing/sharedctypes.py +++ b/lib-python/2.7/multiprocessing/sharedctypes.py @@ -3,7 +3,33 @@ # # multiprocessing/sharedctypes.py # -# Copyright (c) 2007-2008, R Oudkerk --- see COPYING.txt +# Copyright (c) 2006-2008, R Oudkerk +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. 
Neither the name of author nor the names of any contributors may be +# used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +# SUCH DAMAGE. # import sys @@ -52,9 +78,11 @@ Returns a ctypes array allocated from shared memory ''' type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) - if isinstance(size_or_initializer, int): + if isinstance(size_or_initializer, (int, long)): type_ = type_ * size_or_initializer - return _new_value(type_) + obj = _new_value(type_) + ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) + return obj else: type_ = type_ * len(size_or_initializer) result = _new_value(type_) diff --git a/lib-python/2.7/multiprocessing/synchronize.py b/lib-python/2.7/multiprocessing/synchronize.py --- a/lib-python/2.7/multiprocessing/synchronize.py +++ b/lib-python/2.7/multiprocessing/synchronize.py @@ -3,7 +3,33 @@ # # multiprocessing/synchronize.py # -# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt +# Copyright (c) 2006-2008, R Oudkerk +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. 
Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of author nor the names of any contributors may be +# used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +# SUCH DAMAGE. # __all__ = [ diff --git a/lib-python/2.7/multiprocessing/util.py b/lib-python/2.7/multiprocessing/util.py --- a/lib-python/2.7/multiprocessing/util.py +++ b/lib-python/2.7/multiprocessing/util.py @@ -3,7 +3,33 @@ # # multiprocessing/util.py # -# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt +# Copyright (c) 2006-2008, R Oudkerk +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. 
Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of author nor the names of any contributors may be +# used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +# SUCH DAMAGE. # import itertools diff --git a/lib-python/2.7/netrc.py b/lib-python/2.7/netrc.py --- a/lib-python/2.7/netrc.py +++ b/lib-python/2.7/netrc.py @@ -34,11 +34,19 @@ def _parse(self, file, fp): lexer = shlex.shlex(fp) lexer.wordchars += r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~""" + lexer.commenters = lexer.commenters.replace('#', '') while 1: # Look for a machine, default, or macdef top-level keyword toplevel = tt = lexer.get_token() if not tt: break + elif tt[0] == '#': + # seek to beginning of comment, in case reading the token put + # us on a new line, and then skip the rest of the line. 
+ pos = len(tt) + 1 + lexer.instream.seek(-pos, 1) + lexer.instream.readline() + continue elif tt == 'machine': entryname = lexer.get_token() elif tt == 'default': @@ -64,8 +72,8 @@ self.hosts[entryname] = {} while 1: tt = lexer.get_token() - if (tt=='' or tt == 'machine' or - tt == 'default' or tt =='macdef'): + if (tt.startswith('#') or + tt in {'', 'machine', 'default', 'macdef'}): if password: self.hosts[entryname] = (login, account, password) lexer.push_token(tt) diff --git a/lib-python/2.7/nntplib.py b/lib-python/2.7/nntplib.py --- a/lib-python/2.7/nntplib.py +++ b/lib-python/2.7/nntplib.py @@ -103,7 +103,7 @@ readermode is sometimes necessary if you are connecting to an NNTP server on the local machine and intend to call - reader-specific comamnds, such as `group'. If you get + reader-specific commands, such as `group'. If you get unexpected NNTPPermanentErrors, you might need to set readermode. """ diff --git a/lib-python/2.7/ntpath.py b/lib-python/2.7/ntpath.py --- a/lib-python/2.7/ntpath.py +++ b/lib-python/2.7/ntpath.py @@ -310,7 +310,7 @@ # - $varname is accepted. # - %varname% is accepted. # - varnames can be made out of letters, digits and the characters '_-' -# (though is not verifed in the ${varname} and %varname% cases) +# (though is not verified in the ${varname} and %varname% cases) # XXX With COMMAND.COM you can use any characters in a variable name, # XXX except '^|<>='. 
diff --git a/lib-python/2.7/nturl2path.py b/lib-python/2.7/nturl2path.py --- a/lib-python/2.7/nturl2path.py +++ b/lib-python/2.7/nturl2path.py @@ -25,11 +25,14 @@ error = 'Bad URL: ' + url raise IOError, error drive = comp[0][-1].upper() + path = drive + ':' components = comp[1].split('/') - path = drive + ':' - for comp in components: + for comp in components: if comp: path = path + '\\' + urllib.unquote(comp) + # Issue #11474: url like '/C|/' should convert into 'C:\\' + if path.endswith(':') and url.endswith('/'): + path += '\\' return path def pathname2url(p): diff --git a/lib-python/2.7/numbers.py b/lib-python/2.7/numbers.py --- a/lib-python/2.7/numbers.py +++ b/lib-python/2.7/numbers.py @@ -63,7 +63,7 @@ @abstractproperty def imag(self): - """Retrieve the real component of this number. + """Retrieve the imaginary component of this number. This should subclass Real. """ diff --git a/lib-python/2.7/optparse.py b/lib-python/2.7/optparse.py --- a/lib-python/2.7/optparse.py +++ b/lib-python/2.7/optparse.py @@ -1131,6 +1131,11 @@ prog : string the name of the current program (to override os.path.basename(sys.argv[0])). + description : string + A paragraph of text giving a brief overview of your program. + optparse reformats this paragraph to fit the current terminal + width and prints it when the user requests help (after usage, + but before the list of options). epilog : string paragraph of help text to print after option help diff --git a/lib-python/2.7/pickletools.py b/lib-python/2.7/pickletools.py --- a/lib-python/2.7/pickletools.py +++ b/lib-python/2.7/pickletools.py @@ -1370,7 +1370,7 @@ proto=0, doc="""Read an object from the memo and push it on the stack. - The index of the memo object to push is given by the newline-teriminated + The index of the memo object to push is given by the newline-terminated decimal string following. BINGET and LONG_BINGET are space-optimized versions. 
"""), diff --git a/lib-python/2.7/pkgutil.py b/lib-python/2.7/pkgutil.py --- a/lib-python/2.7/pkgutil.py +++ b/lib-python/2.7/pkgutil.py @@ -11,7 +11,7 @@ __all__ = [ 'get_importer', 'iter_importers', 'get_loader', 'find_loader', - 'walk_packages', 'iter_modules', + 'walk_packages', 'iter_modules', 'get_data', 'ImpImporter', 'ImpLoader', 'read_code', 'extend_path', ] diff --git a/lib-python/2.7/platform.py b/lib-python/2.7/platform.py --- a/lib-python/2.7/platform.py +++ b/lib-python/2.7/platform.py @@ -503,7 +503,7 @@ info = pipe.read() if pipe.close(): raise os.error,'command failed' - # XXX How can I supress shell errors from being written + # XXX How can I suppress shell errors from being written # to stderr ? except os.error,why: #print 'Command %s failed: %s' % (cmd,why) @@ -1448,9 +1448,10 @@ """ Returns a string identifying the Python implementation. Currently, the following implementations are identified: - 'CPython' (C implementation of Python), - 'IronPython' (.NET implementation of Python), - 'Jython' (Java implementation of Python). + 'CPython' (C implementation of Python), + 'IronPython' (.NET implementation of Python), + 'Jython' (Java implementation of Python), + 'PyPy' (Python implementation of Python). """ return _sys_version()[0] diff --git a/lib-python/2.7/pydoc.py b/lib-python/2.7/pydoc.py --- a/lib-python/2.7/pydoc.py +++ b/lib-python/2.7/pydoc.py @@ -156,7 +156,7 @@ no.append(x) return yes, no -def visiblename(name, all=None): +def visiblename(name, all=None, obj=None): """Decide whether to show documentation on a variable.""" # Certain special names are redundant. _hidden_names = ('__builtins__', '__doc__', '__file__', '__path__', @@ -164,6 +164,9 @@ if name in _hidden_names: return 0 # Private names are hidden, but special names are displayed. 
if name.startswith('__') and name.endswith('__'): return 1 + # Namedtuples have public fields and methods with a single leading underscore + if name.startswith('_') and hasattr(obj, '_fields'): + return 1 if all is not None: # only document that which the programmer exported in __all__ return name in all @@ -475,9 +478,9 @@ def multicolumn(self, list, format, cols=4): """Format a list of items into a multi-column list.""" result = '' - rows = (len(list)+cols-1)/cols + rows = (len(list)+cols-1)//cols for col in range(cols): - result = result + '' % (100/cols) + result = result + '' % (100//cols) for i in range(rows*col, rows*col+rows): if i < len(list): result = result + format(list[i]) + '
    \n' @@ -627,7 +630,7 @@ # if __all__ exists, believe it. Otherwise use old heuristic. if (all is not None or (inspect.getmodule(value) or object) is object): - if visiblename(key, all): + if visiblename(key, all, object): classes.append((key, value)) cdict[key] = cdict[value] = '#' + key for key, value in classes: @@ -643,13 +646,13 @@ # if __all__ exists, believe it. Otherwise use old heuristic. if (all is not None or inspect.isbuiltin(value) or inspect.getmodule(value) is object): - if visiblename(key, all): + if visiblename(key, all, object): funcs.append((key, value)) fdict[key] = '#-' + key if inspect.isfunction(value): fdict[value] = fdict[key] data = [] for key, value in inspect.getmembers(object, isdata): - if visiblename(key, all): + if visiblename(key, all, object): data.append((key, value)) doc = self.markup(getdoc(object), self.preformat, fdict, cdict) @@ -773,7 +776,7 @@ push('\n') return attrs - attrs = filter(lambda data: visiblename(data[0]), + attrs = filter(lambda data: visiblename(data[0], obj=object), classify_class_attrs(object)) mdict = {} for key, kind, homecls, value in attrs: @@ -1042,18 +1045,18 @@ # if __all__ exists, believe it. Otherwise use old heuristic. if (all is not None or (inspect.getmodule(value) or object) is object): - if visiblename(key, all): + if visiblename(key, all, object): classes.append((key, value)) funcs = [] for key, value in inspect.getmembers(object, inspect.isroutine): # if __all__ exists, believe it. Otherwise use old heuristic. 
if (all is not None or inspect.isbuiltin(value) or inspect.getmodule(value) is object): - if visiblename(key, all): + if visiblename(key, all, object): funcs.append((key, value)) data = [] for key, value in inspect.getmembers(object, isdata): - if visiblename(key, all): + if visiblename(key, all, object): data.append((key, value)) modpkgs = [] @@ -1113,7 +1116,7 @@ result = result + self.section('CREDITS', str(object.__credits__)) return result - def docclass(self, object, name=None, mod=None): + def docclass(self, object, name=None, mod=None, *ignored): """Produce text documentation for a given class object.""" realname = object.__name__ name = name or realname @@ -1186,7 +1189,7 @@ name, mod, maxlen=70, doc=doc) + '\n') return attrs - attrs = filter(lambda data: visiblename(data[0]), + attrs = filter(lambda data: visiblename(data[0], obj=object), classify_class_attrs(object)) while attrs: if mro: @@ -1718,8 +1721,9 @@ return '' return '' - def __call__(self, request=None): - if request is not None: + _GoInteractive = object() + def __call__(self, request=_GoInteractive): + if request is not self._GoInteractive: self.help(request) else: self.intro() diff --git a/lib-python/2.7/pydoc_data/topics.py b/lib-python/2.7/pydoc_data/topics.py --- a/lib-python/2.7/pydoc_data/topics.py +++ b/lib-python/2.7/pydoc_data/topics.py @@ -1,16 +1,16 @@ -# Autogenerated by Sphinx on Sat Jul 3 08:52:04 2010 +# Autogenerated by Sphinx on Sat Jun 11 09:49:30 2011 topics = {'assert': u'\nThe ``assert`` statement\n************************\n\nAssert statements are a convenient way to insert debugging assertions\ninto a program:\n\n assert_stmt ::= "assert" expression ["," expression]\n\nThe simple form, ``assert expression``, is equivalent to\n\n if __debug__:\n if not expression: raise AssertionError\n\nThe extended form, ``assert expression1, expression2``, is equivalent\nto\n\n if __debug__:\n if not expression1: raise AssertionError(expression2)\n\nThese equivalences assume that 
``__debug__`` and ``AssertionError``\nrefer to the built-in variables with those names. In the current\nimplementation, the built-in variable ``__debug__`` is ``True`` under\nnormal circumstances, ``False`` when optimization is requested\n(command line option -O). The current code generator emits no code\nfor an assert statement when optimization is requested at compile\ntime. Note that it is unnecessary to include the source code for the\nexpression that failed in the error message; it will be displayed as\npart of the stack trace.\n\nAssignments to ``__debug__`` are illegal. The value for the built-in\nvariable is determined when the interpreter starts.\n', - 'assignment': u'\nAssignment statements\n*********************\n\nAssignment statements are used to (re)bind names to values and to\nmodify attributes or items of mutable objects:\n\n assignment_stmt ::= (target_list "=")+ (expression_list | yield_expression)\n target_list ::= target ("," target)* [","]\n target ::= identifier\n | "(" target_list ")"\n | "[" target_list "]"\n | attributeref\n | subscription\n | slicing\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn assignment statement evaluates the expression list (remember that\nthis can be a single expression or a comma-separated list, the latter\nyielding a tuple) and assigns the single resulting object to each of\nthe target lists, from left to right.\n\nAssignment is defined recursively depending on the form of the target\n(list). When a target is part of a mutable object (an attribute\nreference, subscription or slicing), the mutable object must\nultimately perform the assignment and decide about its validity, and\nmay raise an exception if the assignment is unacceptable. 
The rules\nobserved by various types and the exceptions raised are given with the\ndefinition of the object types (see section *The standard type\nhierarchy*).\n\nAssignment of an object to a target list is recursively defined as\nfollows.\n\n* If the target list is a single target: The object is assigned to\n that target.\n\n* If the target list is a comma-separated list of targets: The object\n must be an iterable with the same number of items as there are\n targets in the target list, and the items are assigned, from left to\n right, to the corresponding targets. (This rule is relaxed as of\n Python 1.5; in earlier versions, the object had to be a tuple.\n Since strings are sequences, an assignment like ``a, b = "xy"`` is\n now legal as long as the string has the right length.)\n\nAssignment of an object to a single target is recursively defined as\nfollows.\n\n* If the target is an identifier (name):\n\n * If the name does not occur in a ``global`` statement in the\n current code block: the name is bound to the object in the current\n local namespace.\n\n * Otherwise: the name is bound to the object in the current global\n namespace.\n\n The name is rebound if it was already bound. This may cause the\n reference count for the object previously bound to the name to reach\n zero, causing the object to be deallocated and its destructor (if it\n has one) to be called.\n\n* If the target is a target list enclosed in parentheses or in square\n brackets: The object must be an iterable with the same number of\n items as there are targets in the target list, and its items are\n assigned, from left to right, to the corresponding targets.\n\n* If the target is an attribute reference: The primary expression in\n the reference is evaluated. It should yield an object with\n assignable attributes; if this is not the case, ``TypeError`` is\n raised. 
That object is then asked to assign the assigned object to\n the given attribute; if it cannot perform the assignment, it raises\n an exception (usually but not necessarily ``AttributeError``).\n\n Note: If the object is a class instance and the attribute reference\n occurs on both sides of the assignment operator, the RHS expression,\n ``a.x`` can access either an instance attribute or (if no instance\n attribute exists) a class attribute. The LHS target ``a.x`` is\n always set as an instance attribute, creating it if necessary.\n Thus, the two occurrences of ``a.x`` do not necessarily refer to the\n same attribute: if the RHS expression refers to a class attribute,\n the LHS creates a new instance attribute as the target of the\n assignment:\n\n class Cls:\n x = 3 # class variable\n inst = Cls()\n inst.x = inst.x + 1 # writes inst.x as 4 leaving Cls.x as 3\n\n This description does not necessarily apply to descriptor\n attributes, such as properties created with ``property()``.\n\n* If the target is a subscription: The primary expression in the\n reference is evaluated. It should yield either a mutable sequence\n object (such as a list) or a mapping object (such as a dictionary).\n Next, the subscript expression is evaluated.\n\n If the primary is a mutable sequence object (such as a list), the\n subscript must yield a plain integer. If it is negative, the\n sequence\'s length is added to it. The resulting value must be a\n nonnegative integer less than the sequence\'s length, and the\n sequence is asked to assign the assigned object to its item with\n that index. If the index is out of range, ``IndexError`` is raised\n (assignment to a subscripted sequence cannot add new items to a\n list).\n\n If the primary is a mapping object (such as a dictionary), the\n subscript must have a type compatible with the mapping\'s key type,\n and the mapping is then asked to create a key/datum pair which maps\n the subscript to the assigned object. 
This can either replace an\n existing key/value pair with the same key value, or insert a new\n key/value pair (if no key with the same value existed).\n\n* If the target is a slicing: The primary expression in the reference\n is evaluated. It should yield a mutable sequence object (such as a\n list). The assigned object should be a sequence object of the same\n type. Next, the lower and upper bound expressions are evaluated,\n insofar they are present; defaults are zero and the sequence\'s\n length. The bounds should evaluate to (small) integers. If either\n bound is negative, the sequence\'s length is added to it. The\n resulting bounds are clipped to lie between zero and the sequence\'s\n length, inclusive. Finally, the sequence object is asked to replace\n the slice with the items of the assigned sequence. The length of\n the slice may be different from the length of the assigned sequence,\n thus changing the length of the target sequence, if the object\n allows it.\n\n**CPython implementation detail:** In the current implementation, the\nsyntax for targets is taken to be the same as for expressions, and\ninvalid syntax is rejected during the code generation phase, causing\nless detailed error messages.\n\nWARNING: Although the definition of assignment implies that overlaps\nbetween the left-hand side and the right-hand side are \'safe\' (for\nexample ``a, b = b, a`` swaps two variables), overlaps *within* the\ncollection of assigned-to variables are not safe! 
For instance, the\nfollowing program prints ``[0, 2]``:\n\n x = [0, 1]\n i = 0\n i, x[i] = 1, 2\n print x\n\n\nAugmented assignment statements\n===============================\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like ``x += 1`` can be rewritten as\n``x = x + 1`` to achieve a similar, but not exactly equal effect. In\nthe augmented version, ``x`` is only evaluated once. Also, when\npossible, the actual operation is performed *in-place*, meaning that\nrather than creating a new object and assigning that to the target,\nthe old object is modified instead.\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. 
Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same *caveat about\nclass and instance attributes* applies as for regular assignments.\n', + 'assignment': u'\nAssignment statements\n*********************\n\nAssignment statements are used to (re)bind names to values and to\nmodify attributes or items of mutable objects:\n\n assignment_stmt ::= (target_list "=")+ (expression_list | yield_expression)\n target_list ::= target ("," target)* [","]\n target ::= identifier\n | "(" target_list ")"\n | "[" target_list "]"\n | attributeref\n | subscription\n | slicing\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn assignment statement evaluates the expression list (remember that\nthis can be a single expression or a comma-separated list, the latter\nyielding a tuple) and assigns the single resulting object to each of\nthe target lists, from left to right.\n\nAssignment is defined recursively depending on the form of the target\n(list). When a target is part of a mutable object (an attribute\nreference, subscription or slicing), the mutable object must\nultimately perform the assignment and decide about its validity, and\nmay raise an exception if the assignment is unacceptable. 
The rules\nobserved by various types and the exceptions raised are given with the\ndefinition of the object types (see section *The standard type\nhierarchy*).\n\nAssignment of an object to a target list is recursively defined as\nfollows.\n\n* If the target list is a single target: The object is assigned to\n that target.\n\n* If the target list is a comma-separated list of targets: The object\n must be an iterable with the same number of items as there are\n targets in the target list, and the items are assigned, from left to\n right, to the corresponding targets.\n\nAssignment of an object to a single target is recursively defined as\nfollows.\n\n* If the target is an identifier (name):\n\n * If the name does not occur in a ``global`` statement in the\n current code block: the name is bound to the object in the current\n local namespace.\n\n * Otherwise: the name is bound to the object in the current global\n namespace.\n\n The name is rebound if it was already bound. This may cause the\n reference count for the object previously bound to the name to reach\n zero, causing the object to be deallocated and its destructor (if it\n has one) to be called.\n\n* If the target is a target list enclosed in parentheses or in square\n brackets: The object must be an iterable with the same number of\n items as there are targets in the target list, and its items are\n assigned, from left to right, to the corresponding targets.\n\n* If the target is an attribute reference: The primary expression in\n the reference is evaluated. It should yield an object with\n assignable attributes; if this is not the case, ``TypeError`` is\n raised. 
That object is then asked to assign the assigned object to\n the given attribute; if it cannot perform the assignment, it raises\n an exception (usually but not necessarily ``AttributeError``).\n\n Note: If the object is a class instance and the attribute reference\n occurs on both sides of the assignment operator, the RHS expression,\n ``a.x`` can access either an instance attribute or (if no instance\n attribute exists) a class attribute. The LHS target ``a.x`` is\n always set as an instance attribute, creating it if necessary.\n Thus, the two occurrences of ``a.x`` do not necessarily refer to the\n same attribute: if the RHS expression refers to a class attribute,\n the LHS creates a new instance attribute as the target of the\n assignment:\n\n class Cls:\n x = 3 # class variable\n inst = Cls()\n inst.x = inst.x + 1 # writes inst.x as 4 leaving Cls.x as 3\n\n This description does not necessarily apply to descriptor\n attributes, such as properties created with ``property()``.\n\n* If the target is a subscription: The primary expression in the\n reference is evaluated. It should yield either a mutable sequence\n object (such as a list) or a mapping object (such as a dictionary).\n Next, the subscript expression is evaluated.\n\n If the primary is a mutable sequence object (such as a list), the\n subscript must yield a plain integer. If it is negative, the\n sequence\'s length is added to it. The resulting value must be a\n nonnegative integer less than the sequence\'s length, and the\n sequence is asked to assign the assigned object to its item with\n that index. If the index is out of range, ``IndexError`` is raised\n (assignment to a subscripted sequence cannot add new items to a\n list).\n\n If the primary is a mapping object (such as a dictionary), the\n subscript must have a type compatible with the mapping\'s key type,\n and the mapping is then asked to create a key/datum pair which maps\n the subscript to the assigned object. 
This can either replace an\n existing key/value pair with the same key value, or insert a new\n key/value pair (if no key with the same value existed).\n\n* If the target is a slicing: The primary expression in the reference\n is evaluated. It should yield a mutable sequence object (such as a\n list). The assigned object should be a sequence object of the same\n type. Next, the lower and upper bound expressions are evaluated,\n insofar they are present; defaults are zero and the sequence\'s\n length. The bounds should evaluate to (small) integers. If either\n bound is negative, the sequence\'s length is added to it. The\n resulting bounds are clipped to lie between zero and the sequence\'s\n length, inclusive. Finally, the sequence object is asked to replace\n the slice with the items of the assigned sequence. The length of\n the slice may be different from the length of the assigned sequence,\n thus changing the length of the target sequence, if the object\n allows it.\n\n**CPython implementation detail:** In the current implementation, the\nsyntax for targets is taken to be the same as for expressions, and\ninvalid syntax is rejected during the code generation phase, causing\nless detailed error messages.\n\nWARNING: Although the definition of assignment implies that overlaps\nbetween the left-hand side and the right-hand side are \'safe\' (for\nexample ``a, b = b, a`` swaps two variables), overlaps *within* the\ncollection of assigned-to variables are not safe! 
For instance, the\nfollowing program prints ``[0, 2]``:\n\n x = [0, 1]\n i = 0\n i, x[i] = 1, 2\n print x\n\n\nAugmented assignment statements\n===============================\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like ``x += 1`` can be rewritten as\n``x = x + 1`` to achieve a similar, but not exactly equal effect. In\nthe augmented version, ``x`` is only evaluated once. Also, when\npossible, the actual operation is performed *in-place*, meaning that\nrather than creating a new object and assigning that to the target,\nthe old object is modified instead.\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same *caveat about\nclass and instance attributes* applies as for regular assignments.\n', 'atom-identifiers': u'\nIdentifiers (Names)\n*******************\n\nAn identifier occurring as an atom is a name. 
See section\n*Identifiers and keywords* for lexical definition and section *Naming\nand binding* for documentation of naming and binding.\n\nWhen the name is bound to an object, evaluation of the atom yields\nthat object. When a name is not bound, an attempt to evaluate it\nraises a ``NameError`` exception.\n\n**Private name mangling:** When an identifier that textually occurs in\na class definition begins with two or more underscore characters and\ndoes not end in two or more underscores, it is considered a *private\nname* of that class. Private names are transformed to a longer form\nbefore code is generated for them. The transformation inserts the\nclass name in front of the name, with leading underscores removed, and\na single underscore inserted in front of the class name. For example,\nthe identifier ``__spam`` occurring in a class named ``Ham`` will be\ntransformed to ``_Ham__spam``. This transformation is independent of\nthe syntactical context in which the identifier is used. If the\ntransformed name is extremely long (longer than 255 characters),\nimplementation defined truncation may happen. If the class name\nconsists only of underscores, no transformation is done.\n', 'atom-literals': u"\nLiterals\n********\n\nPython supports string literals and various numeric literals:\n\n literal ::= stringliteral | integer | longinteger\n | floatnumber | imagnumber\n\nEvaluation of a literal yields an object of the given type (string,\ninteger, long integer, floating point number, complex number) with the\ngiven value. The value may be approximated in the case of floating\npoint and imaginary (complex) literals. See section *Literals* for\ndetails.\n\nAll literals correspond to immutable data types, and hence the\nobject's identity is less important than its value. 
Multiple\nevaluations of literals with the same value (either the same\noccurrence in the program text or a different occurrence) may obtain\nthe same object or a different object with the same value.\n", - 'attribute-access': u'\nCustomizing attribute access\n****************************\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of ``x.name``)\nfor class instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. it is not an instance attribute nor is it found\n in the class tree for ``self``). ``name`` is the attribute name.\n This method should return the (computed) attribute value or raise\n an ``AttributeError`` exception.\n\n Note that if the attribute is found through the normal mechanism,\n ``__getattr__()`` is not called. (This is an intentional asymmetry\n between ``__getattr__()`` and ``__setattr__()``.) This is done both\n for efficiency reasons and because otherwise ``__getattr__()``\n would have no way to access other attributes of the instance. Note\n that at least for instance variables, you can fake total control by\n not inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). See the\n ``__getattribute__()`` method below for a way to actually get total\n control in new-style classes.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). *name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If ``__setattr__()`` wants to assign to an instance attribute, it\n should not simply execute ``self.name = value`` --- this would\n cause a recursive call to itself. Instead, it should insert the\n value in the dictionary of instance attributes, e.g.,\n ``self.__dict__[name] = value``. 
For new-style classes, rather\n than accessing the instance dictionary, it should call the base\n class method with the same name, for example,\n ``object.__setattr__(self, name, value)``.\n\nobject.__delattr__(self, name)\n\n Like ``__setattr__()`` but for attribute deletion instead of\n assignment. This should only be implemented if ``del obj.name`` is\n meaningful for the object.\n\n\nMore attribute access for new-style classes\n===========================================\n\nThe following methods only apply to new-style classes.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines\n ``__getattr__()``, the latter will not be called unless\n ``__getattribute__()`` either calls it explicitly or raises an\n ``AttributeError``. This method should return the (computed)\n attribute value or raise an ``AttributeError`` exception. In order\n to avoid infinite recursion in this method, its implementation\n should always call the base class method with the same name to\n access any attributes it needs, for example,\n ``object.__getattribute__(self, name)``.\n\n Note: This method may still be bypassed when looking up special methods\n as the result of implicit invocation via language syntax or\n built-in functions. See *Special method lookup for new-style\n classes*.\n\n\nImplementing Descriptors\n========================\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in the\nclass dictionary of another new-style class, known as the *owner*\nclass. In the examples below, "the attribute" refers to the attribute\nwhose name is the key of the property in the owner class\'\n``__dict__``. 
Descriptors can only be implemented as new-style\nclasses themselves.\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). *owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or ``None`` when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an\n ``AttributeError`` exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\n\nInvoking Descriptors\n====================\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: ``__get__()``, ``__set__()``, and\n``__delete__()``. If any of those methods are defined for an object,\nit is said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. For instance, ``a.x`` has a\nlookup chain starting with ``a.__dict__[\'x\']``, then\n``type(a).__dict__[\'x\']``, and continuing through the base classes of\n``type(a)`` excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called. 
Note that descriptors are only invoked for new\nstyle objects or classes (ones that subclass ``object()`` or\n``type()``).\n\nThe starting point for descriptor invocation is a binding, ``a.x``.\nHow the arguments are assembled depends on ``a``:\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: ``x.__get__(a)``.\n\nInstance Binding\n If binding to a new-style object instance, ``a.x`` is transformed\n into the call: ``type(a).__dict__[\'x\'].__get__(a, type(a))``.\n\nClass Binding\n If binding to a new-style class, ``A.x`` is transformed into the\n call: ``A.__dict__[\'x\'].__get__(None, A)``.\n\nSuper Binding\n If ``a`` is an instance of ``super``, then the binding ``super(B,\n obj).m()`` searches ``obj.__class__.__mro__`` for the base class\n ``A`` immediately preceding ``B`` and then invokes the descriptor\n with the call: ``A.__dict__[\'m\'].__get__(obj, A)``.\n\nFor instance bindings, the precedence of descriptor invocation depends\non the which descriptor methods are defined. A descriptor can define\nany combination of ``__get__()``, ``__set__()`` and ``__delete__()``.\nIf it does not define ``__get__()``, then accessing the attribute will\nreturn the descriptor object itself unless there is a value in the\nobject\'s instance dictionary. If the descriptor defines ``__set__()``\nand/or ``__delete__()``, it is a data descriptor; if it defines\nneither, it is a non-data descriptor. Normally, data descriptors\ndefine both ``__get__()`` and ``__set__()``, while non-data\ndescriptors have just the ``__get__()`` method. Data descriptors with\n``__set__()`` and ``__get__()`` defined always override a redefinition\nin an instance dictionary. In contrast, non-data descriptors can be\noverridden by instances.\n\nPython methods (including ``staticmethod()`` and ``classmethod()``)\nare implemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. 
This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe ``property()`` function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n=========\n\nBy default, instances of both old and new-style classes have a\ndictionary for attribute storage. This wastes space for objects\nhaving very few instance variables. The space consumption can become\nacute when creating large numbers of instances.\n\nThe default can be overridden by defining *__slots__* in a new-style\nclass definition. The *__slots__* declaration takes a sequence of\ninstance variables and reserves just enough space in each instance to\nhold a value for each variable. Space is saved because *__dict__* is\nnot created for each instance.\n\n__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. If defined in a\n new-style class, *__slots__* reserves space for the declared\n variables and prevents the automatic creation of *__dict__* and\n *__weakref__* for each instance.\n\n New in version 2.2.\n\nNotes on using *__slots__*\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raises ``AttributeError``. 
If\n dynamic assignment of new variables is desired, then add\n ``\'__dict__\'`` to the sequence of strings in the *__slots__*\n declaration.\n\n Changed in version 2.3: Previously, adding ``\'__dict__\'`` to the\n *__slots__* declaration would not enable the assignment of new\n attributes not specifically listed in the sequence of instance\n variable names.\n\n* Without a *__weakref__* variable for each instance, classes defining\n *__slots__* do not support weak references to its instances. If weak\n reference support is needed, then add ``\'__weakref__\'`` to the\n sequence of strings in the *__slots__* declaration.\n\n Changed in version 2.3: Previously, adding ``\'__weakref__\'`` to the\n *__slots__* declaration would not enable support for weak\n references.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (*Implementing Descriptors*) for each variable name. As\n a result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the instance\n variable defined by the base class slot is inaccessible (except by\n retrieving its descriptor directly from the base class). This\n renders the meaning of the program undefined. In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as ``long``, ``str`` and\n ``tuple``.\n\n* Any non-string iterable may be assigned to *__slots__*. 
Mappings may\n also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n\n Changed in version 2.6: Previously, *__class__* assignment raised an\n error if either new or old class had *__slots__*.\n', + 'attribute-access': u'\nCustomizing attribute access\n****************************\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of ``x.name``)\nfor class instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. it is not an instance attribute nor is it found\n in the class tree for ``self``). ``name`` is the attribute name.\n This method should return the (computed) attribute value or raise\n an ``AttributeError`` exception.\n\n Note that if the attribute is found through the normal mechanism,\n ``__getattr__()`` is not called. (This is an intentional asymmetry\n between ``__getattr__()`` and ``__setattr__()``.) This is done both\n for efficiency reasons and because otherwise ``__getattr__()``\n would have no way to access other attributes of the instance. Note\n that at least for instance variables, you can fake total control by\n not inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). See the\n ``__getattribute__()`` method below for a way to actually get total\n control in new-style classes.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). 
*name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If ``__setattr__()`` wants to assign to an instance attribute, it\n should not simply execute ``self.name = value`` --- this would\n cause a recursive call to itself. Instead, it should insert the\n value in the dictionary of instance attributes, e.g.,\n ``self.__dict__[name] = value``. For new-style classes, rather\n than accessing the instance dictionary, it should call the base\n class method with the same name, for example,\n ``object.__setattr__(self, name, value)``.\n\nobject.__delattr__(self, name)\n\n Like ``__setattr__()`` but for attribute deletion instead of\n assignment. This should only be implemented if ``del obj.name`` is\n meaningful for the object.\n\n\nMore attribute access for new-style classes\n===========================================\n\nThe following methods only apply to new-style classes.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines\n ``__getattr__()``, the latter will not be called unless\n ``__getattribute__()`` either calls it explicitly or raises an\n ``AttributeError``. This method should return the (computed)\n attribute value or raise an ``AttributeError`` exception. In order\n to avoid infinite recursion in this method, its implementation\n should always call the base class method with the same name to\n access any attributes it needs, for example,\n ``object.__getattribute__(self, name)``.\n\n Note: This method may still be bypassed when looking up special methods\n as the result of implicit invocation via language syntax or\n built-in functions. 
See *Special method lookup for new-style\n classes*.\n\n\nImplementing Descriptors\n========================\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents). In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' ``__dict__``.\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). *owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or ``None`` when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an\n ``AttributeError`` exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\n\nInvoking Descriptors\n====================\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: ``__get__()``, ``__set__()``, and\n``__delete__()``. If any of those methods are defined for an object,\nit is said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. 
For instance, ``a.x`` has a\nlookup chain starting with ``a.__dict__[\'x\']``, then\n``type(a).__dict__[\'x\']``, and continuing through the base classes of\n``type(a)`` excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called. Note that descriptors are only invoked for new\nstyle objects or classes (ones that subclass ``object()`` or\n``type()``).\n\nThe starting point for descriptor invocation is a binding, ``a.x``.\nHow the arguments are assembled depends on ``a``:\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: ``x.__get__(a)``.\n\nInstance Binding\n If binding to a new-style object instance, ``a.x`` is transformed\n into the call: ``type(a).__dict__[\'x\'].__get__(a, type(a))``.\n\nClass Binding\n If binding to a new-style class, ``A.x`` is transformed into the\n call: ``A.__dict__[\'x\'].__get__(None, A)``.\n\nSuper Binding\n If ``a`` is an instance of ``super``, then the binding ``super(B,\n obj).m()`` searches ``obj.__class__.__mro__`` for the base class\n ``A`` immediately preceding ``B`` and then invokes the descriptor\n with the call: ``A.__dict__[\'m\'].__get__(obj, obj.__class__)``.\n\nFor instance bindings, the precedence of descriptor invocation depends\non the which descriptor methods are defined. A descriptor can define\nany combination of ``__get__()``, ``__set__()`` and ``__delete__()``.\nIf it does not define ``__get__()``, then accessing the attribute will\nreturn the descriptor object itself unless there is a value in the\nobject\'s instance dictionary. If the descriptor defines ``__set__()``\nand/or ``__delete__()``, it is a data descriptor; if it defines\nneither, it is a non-data descriptor. 
Normally, data descriptors\ndefine both ``__get__()`` and ``__set__()``, while non-data\ndescriptors have just the ``__get__()`` method. Data descriptors with\n``__set__()`` and ``__get__()`` defined always override a redefinition\nin an instance dictionary. In contrast, non-data descriptors can be\noverridden by instances.\n\nPython methods (including ``staticmethod()`` and ``classmethod()``)\nare implemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe ``property()`` function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n=========\n\nBy default, instances of both old and new-style classes have a\ndictionary for attribute storage. This wastes space for objects\nhaving very few instance variables. The space consumption can become\nacute when creating large numbers of instances.\n\nThe default can be overridden by defining *__slots__* in a new-style\nclass definition. The *__slots__* declaration takes a sequence of\ninstance variables and reserves just enough space in each instance to\nhold a value for each variable. Space is saved because *__dict__* is\nnot created for each instance.\n\n__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. 
If defined in a\n new-style class, *__slots__* reserves space for the declared\n variables and prevents the automatic creation of *__dict__* and\n *__weakref__* for each instance.\n\n New in version 2.2.\n\nNotes on using *__slots__*\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raises ``AttributeError``. If\n dynamic assignment of new variables is desired, then add\n ``\'__dict__\'`` to the sequence of strings in the *__slots__*\n declaration.\n\n Changed in version 2.3: Previously, adding ``\'__dict__\'`` to the\n *__slots__* declaration would not enable the assignment of new\n attributes not specifically listed in the sequence of instance\n variable names.\n\n* Without a *__weakref__* variable for each instance, classes defining\n *__slots__* do not support weak references to its instances. If weak\n reference support is needed, then add ``\'__weakref__\'`` to the\n sequence of strings in the *__slots__* declaration.\n\n Changed in version 2.3: Previously, adding ``\'__weakref__\'`` to the\n *__slots__* declaration would not enable support for weak\n references.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (*Implementing Descriptors*) for each variable name. As\n a result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. 
As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the instance\n variable defined by the base class slot is inaccessible (except by\n retrieving its descriptor directly from the base class). This\n renders the meaning of the program undefined. In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as ``long``, ``str`` and\n ``tuple``.\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings may\n also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n\n Changed in version 2.6: Previously, *__class__* assignment raised an\n error if either new or old class had *__slots__*.\n', 'attribute-references': u'\nAttribute references\n********************\n\nAn attribute reference is a primary followed by a period and a name:\n\n attributeref ::= primary "." identifier\n\nThe primary must evaluate to an object of a type that supports\nattribute references, e.g., a module, list, or an instance. This\nobject is then asked to produce the attribute whose name is the\nidentifier. If this attribute is not available, the exception\n``AttributeError`` is raised. Otherwise, the type and value of the\nobject produced is determined by the object. 
Multiple evaluations of\nthe same attribute reference may yield different objects.\n', 'augassign': u'\nAugmented assignment statements\n*******************************\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like ``x += 1`` can be rewritten as\n``x = x + 1`` to achieve a similar, but not exactly equal effect. In\nthe augmented version, ``x`` is only evaluated once. Also, when\npossible, the actual operation is performed *in-place*, meaning that\nrather than creating a new object and assigning that to the target,\nthe old object is modified instead.\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same *caveat about\nclass and instance attributes* applies as for regular assignments.\n', 'binary': u'\nBinary arithmetic operations\n****************************\n\nThe binary arithmetic operations have the conventional priority\nlevels. 
Note that some of these operations also apply to certain non-\nnumeric types. Apart from the power operator, there are only two\nlevels, one for multiplicative operators and one for additive\noperators:\n\n m_expr ::= u_expr | m_expr "*" u_expr | m_expr "//" u_expr | m_expr "/" u_expr\n | m_expr "%" u_expr\n a_expr ::= m_expr | a_expr "+" m_expr | a_expr "-" m_expr\n\nThe ``*`` (multiplication) operator yields the product of its\narguments. The arguments must either both be numbers, or one argument\nmust be an integer (plain or long) and the other must be a sequence.\nIn the former case, the numbers are converted to a common type and\nthen multiplied together. In the latter case, sequence repetition is\nperformed; a negative repetition factor yields an empty sequence.\n\nThe ``/`` (division) and ``//`` (floor division) operators yield the\nquotient of their arguments. The numeric arguments are first\nconverted to a common type. Plain or long integer division yields an\ninteger of the same type; the result is that of mathematical division\nwith the \'floor\' function applied to the result. Division by zero\nraises the ``ZeroDivisionError`` exception.\n\nThe ``%`` (modulo) operator yields the remainder from the division of\nthe first argument by the second. The numeric arguments are first\nconverted to a common type. A zero right argument raises the\n``ZeroDivisionError`` exception. The arguments may be floating point\nnumbers, e.g., ``3.14%0.7`` equals ``0.34`` (since ``3.14`` equals\n``4*0.7 + 0.34``.) The modulo operator always yields a result with\nthe same sign as its second operand (or zero); the absolute value of\nthe result is strictly smaller than the absolute value of the second\noperand [2].\n\nThe integer division and modulo operators are connected by the\nfollowing identity: ``x == (x/y)*y + (x%y)``. Integer division and\nmodulo are also connected with the built-in function ``divmod()``:\n``divmod(x, y) == (x/y, x%y)``. 
These identities don\'t hold for\nfloating point numbers; there similar identities hold approximately\nwhere ``x/y`` is replaced by ``floor(x/y)`` or ``floor(x/y) - 1`` [3].\n\nIn addition to performing the modulo operation on numbers, the ``%``\noperator is also overloaded by string and unicode objects to perform\nstring formatting (also known as interpolation). The syntax for string\nformatting is described in the Python Library Reference, section\n*String Formatting Operations*.\n\nDeprecated since version 2.3: The floor division operator, the modulo\noperator, and the ``divmod()`` function are no longer defined for\ncomplex numbers. Instead, convert to a floating point number using\nthe ``abs()`` function if appropriate.\n\nThe ``+`` (addition) operator yields the sum of its arguments. The\narguments must either both be numbers or both sequences of the same\ntype. In the former case, the numbers are converted to a common type\nand then added together. In the latter case, the sequences are\nconcatenated.\n\nThe ``-`` (subtraction) operator yields the difference of its\narguments. The numeric arguments are first converted to a common\ntype.\n', 'bitwise': u'\nBinary bitwise operations\n*************************\n\nEach of the three bitwise operations has a different priority level:\n\n and_expr ::= shift_expr | and_expr "&" shift_expr\n xor_expr ::= and_expr | xor_expr "^" and_expr\n or_expr ::= xor_expr | or_expr "|" xor_expr\n\nThe ``&`` operator yields the bitwise AND of its arguments, which must\nbe plain or long integers. The arguments are converted to a common\ntype.\n\nThe ``^`` operator yields the bitwise XOR (exclusive OR) of its\narguments, which must be plain or long integers. The arguments are\nconverted to a common type.\n\nThe ``|`` operator yields the bitwise (inclusive) OR of its arguments,\nwhich must be plain or long integers. 
The arguments are converted to\na common type.\n', 'bltin-code-objects': u'\nCode Objects\n************\n\nCode objects are used by the implementation to represent "pseudo-\ncompiled" executable Python code such as a function body. They differ\nfrom function objects because they don\'t contain a reference to their\nglobal execution environment. Code objects are returned by the built-\nin ``compile()`` function and can be extracted from function objects\nthrough their ``func_code`` attribute. See also the ``code`` module.\n\nA code object can be executed or evaluated by passing it (instead of a\nsource string) to the ``exec`` statement or the built-in ``eval()``\nfunction.\n\nSee *The standard type hierarchy* for more information.\n', 'bltin-ellipsis-object': u'\nThe Ellipsis Object\n*******************\n\nThis object is used by extended slice notation (see *Slicings*). It\nsupports no special operations. There is exactly one ellipsis object,\nnamed ``Ellipsis`` (a built-in name).\n\nIt is written as ``Ellipsis``.\n', - 'bltin-file-objects': u'\nFile Objects\n************\n\nFile objects are implemented using C\'s ``stdio`` package and can be\ncreated with the built-in ``open()`` function. File objects are also\nreturned by some other built-in functions and methods, such as\n``os.popen()`` and ``os.fdopen()`` and the ``makefile()`` method of\nsocket objects. Temporary files can be created using the ``tempfile``\nmodule, and high-level file operations such as copying, moving, and\ndeleting files and directories can be achieved with the ``shutil``\nmodule.\n\nWhen a file operation fails for an I/O-related reason, the exception\n``IOError`` is raised. This includes situations where the operation\nis not defined for some reason, like ``seek()`` on a tty device or\nwriting a file opened for reading.\n\nFiles have the following methods:\n\nfile.close()\n\n Close the file. 
A closed file cannot be read or written any more.\n Any operation which requires that the file be open will raise a\n ``ValueError`` after the file has been closed. Calling ``close()``\n more than once is allowed.\n\n As of Python 2.5, you can avoid having to call this method\n explicitly if you use the ``with`` statement. For example, the\n following code will automatically close *f* when the ``with`` block\n is exited:\n\n from __future__ import with_statement # This isn\'t required in Python 2.6\n\n with open("hello.txt") as f:\n for line in f:\n print line\n\n In older versions of Python, you would have needed to do this to\n get the same effect:\n\n f = open("hello.txt")\n try:\n for line in f:\n print line\n finally:\n f.close()\n\n Note: Not all "file-like" types in Python support use as a context\n manager for the ``with`` statement. If your code is intended to\n work with any file-like object, you can use the function\n ``contextlib.closing()`` instead of using the object directly.\n\nfile.flush()\n\n Flush the internal buffer, like ``stdio``\'s ``fflush()``. 
This may\n be a no-op on some file-like objects.\n\n Note: ``flush()`` does not necessarily write the file\'s data to disk.\n Use ``flush()`` followed by ``os.fsync()`` to ensure this\n behavior.\n\nfile.fileno()\n\n Return the integer "file descriptor" that is used by the underlying\n implementation to request I/O operations from the operating system.\n This can be useful for other, lower level interfaces that use file\n descriptors, such as the ``fcntl`` module or ``os.read()`` and\n friends.\n\n Note: File-like objects which do not have a real file descriptor should\n *not* provide this method!\n\nfile.isatty()\n\n Return ``True`` if the file is connected to a tty(-like) device,\n else ``False``.\n\n Note: If a file-like object is not associated with a real file, this\n method should *not* be implemented.\n\nfile.next()\n\n A file object is its own iterator, for example ``iter(f)`` returns\n *f* (unless *f* is closed). When a file is used as an iterator,\n typically in a ``for`` loop (for example, ``for line in f: print\n line``), the ``next()`` method is called repeatedly. This method\n returns the next input line, or raises ``StopIteration`` when EOF\n is hit when the file is open for reading (behavior is undefined\n when the file is open for writing). In order to make a ``for``\n loop the most efficient way of looping over the lines of a file (a\n very common operation), the ``next()`` method uses a hidden read-\n ahead buffer. As a consequence of using a read-ahead buffer,\n combining ``next()`` with other file methods (like ``readline()``)\n does not work right. However, using ``seek()`` to reposition the\n file to an absolute position will flush the read-ahead buffer.\n\n New in version 2.3.\n\nfile.read([size])\n\n Read at most *size* bytes from the file (less if the read hits EOF\n before obtaining *size* bytes). If the *size* argument is negative\n or omitted, read all data until EOF is reached. The bytes are\n returned as a string object. 
An empty string is returned when EOF\n is encountered immediately. (For certain files, like ttys, it\n makes sense to continue reading after an EOF is hit.) Note that\n this method may call the underlying C function ``fread()`` more\n than once in an effort to acquire as close to *size* bytes as\n possible. Also note that when in non-blocking mode, less data than\n was requested may be returned, even if no *size* parameter was\n given.\n\n Note: This function is simply a wrapper for the underlying ``fread()``\n C function, and will behave the same in corner cases, such as\n whether the EOF value is cached.\n\nfile.readline([size])\n\n Read one entire line from the file. A trailing newline character\n is kept in the string (but may be absent when a file ends with an\n incomplete line). [5] If the *size* argument is present and non-\n negative, it is a maximum byte count (including the trailing\n newline) and an incomplete line may be returned. An empty string is\n returned *only* when EOF is encountered immediately.\n\n Note: Unlike ``stdio``\'s ``fgets()``, the returned string contains null\n characters (``\'\\0\'``) if they occurred in the input.\n\nfile.readlines([sizehint])\n\n Read until EOF using ``readline()`` and return a list containing\n the lines thus read. If the optional *sizehint* argument is\n present, instead of reading up to EOF, whole lines totalling\n approximately *sizehint* bytes (possibly after rounding up to an\n internal buffer size) are read. Objects implementing a file-like\n interface may choose to ignore *sizehint* if it cannot be\n implemented, or cannot be implemented efficiently.\n\nfile.xreadlines()\n\n This method returns the same thing as ``iter(f)``.\n\n New in version 2.1.\n\n Deprecated since version 2.3: Use ``for line in file`` instead.\n\nfile.seek(offset[, whence])\n\n Set the file\'s current position, like ``stdio``\'s ``fseek()``. 
The\n *whence* argument is optional and defaults to ``os.SEEK_SET`` or\n ``0`` (absolute file positioning); other values are ``os.SEEK_CUR``\n or ``1`` (seek relative to the current position) and\n ``os.SEEK_END`` or ``2`` (seek relative to the file\'s end). There\n is no return value.\n\n For example, ``f.seek(2, os.SEEK_CUR)`` advances the position by\n two and ``f.seek(-3, os.SEEK_END)`` sets the position to the third\n to last.\n\n Note that if the file is opened for appending (mode ``\'a\'`` or\n ``\'a+\'``), any ``seek()`` operations will be undone at the next\n write. If the file is only opened for writing in append mode (mode\n ``\'a\'``), this method is essentially a no-op, but it remains useful\n for files opened in append mode with reading enabled (mode\n ``\'a+\'``). If the file is opened in text mode (without ``\'b\'``),\n only offsets returned by ``tell()`` are legal. Use of other\n offsets causes undefined behavior.\n\n Note that not all file objects are seekable.\n\n Changed in version 2.6: Passing float values as offset has been\n deprecated.\n\nfile.tell()\n\n Return the file\'s current position, like ``stdio``\'s ``ftell()``.\n\n Note: On Windows, ``tell()`` can return illegal values (after an\n ``fgets()``) when reading files with Unix-style line-endings. Use\n binary mode (``\'rb\'``) to circumvent this problem.\n\nfile.truncate([size])\n\n Truncate the file\'s size. If the optional *size* argument is\n present, the file is truncated to (at most) that size. The size\n defaults to the current position. The current file position is not\n changed. Note that if a specified size exceeds the file\'s current\n size, the result is platform-dependent: possibilities include that\n the file may remain unchanged, increase to the specified size as if\n zero-filled, or increase to the specified size with undefined new\n content. Availability: Windows, many Unix variants.\n\nfile.write(str)\n\n Write a string to the file. There is no return value. 
Due to\n buffering, the string may not actually show up in the file until\n the ``flush()`` or ``close()`` method is called.\n\nfile.writelines(sequence)\n\n Write a sequence of strings to the file. The sequence can be any\n iterable object producing strings, typically a list of strings.\n There is no return value. (The name is intended to match\n ``readlines()``; ``writelines()`` does not add line separators.)\n\nFiles support the iterator protocol. Each iteration returns the same\nresult as ``file.readline()``, and iteration ends when the\n``readline()`` method returns an empty string.\n\nFile objects also offer a number of other interesting attributes.\nThese are not required for file-like objects, but should be\nimplemented if they make sense for the particular object.\n\nfile.closed\n\n bool indicating the current state of the file object. This is a\n read-only attribute; the ``close()`` method changes the value. It\n may not be available on all file-like objects.\n\nfile.encoding\n\n The encoding that this file uses. When Unicode strings are written\n to a file, they will be converted to byte strings using this\n encoding. In addition, when the file is connected to a terminal,\n the attribute gives the encoding that the terminal is likely to use\n (that information might be incorrect if the user has misconfigured\n the terminal). The attribute is read-only and may not be present\n on all file-like objects. It may also be ``None``, in which case\n the file uses the system default encoding for converting Unicode\n strings.\n\n New in version 2.3.\n\nfile.errors\n\n The Unicode error handler used along with the encoding.\n\n New in version 2.6.\n\nfile.mode\n\n The I/O mode for the file. If the file was created using the\n ``open()`` built-in function, this will be the value of the *mode*\n parameter. 
This is a read-only attribute and may not be present on\n all file-like objects.\n\nfile.name\n\n If the file object was created using ``open()``, the name of the\n file. Otherwise, some string that indicates the source of the file\n object, of the form ``<...>``. This is a read-only attribute and\n may not be present on all file-like objects.\n\nfile.newlines\n\n If Python was built with the *--with-universal-newlines* option to\n **configure** (the default) this read-only attribute exists, and\n for files opened in universal newline read mode it keeps track of\n the types of newlines encountered while reading the file. The\n values it can take are ``\'\\r\'``, ``\'\\n\'``, ``\'\\r\\n\'``, ``None``\n (unknown, no newlines read yet) or a tuple containing all the\n newline types seen, to indicate that multiple newline conventions\n were encountered. For files not opened in universal newline read\n mode the value of this attribute will be ``None``.\n\nfile.softspace\n\n Boolean that indicates whether a space character needs to be\n printed before another value when using the ``print`` statement.\n Classes that are trying to simulate a file object should also have\n a writable ``softspace`` attribute, which should be initialized to\n zero. This will be automatic for most classes implemented in\n Python (care may be needed for objects that override attribute\n access); types implemented in C will have to provide a writable\n ``softspace`` attribute.\n\n Note: This attribute is not used to control the ``print`` statement,\n but to allow the implementation of ``print`` to keep track of its\n internal state.\n', + 'bltin-file-objects': u'\nFile Objects\n************\n\nFile objects are implemented using C\'s ``stdio`` package and can be\ncreated with the built-in ``open()`` function. File objects are also\nreturned by some other built-in functions and methods, such as\n``os.popen()`` and ``os.fdopen()`` and the ``makefile()`` method of\nsocket objects. 
Temporary files can be created using the ``tempfile``\nmodule, and high-level file operations such as copying, moving, and\ndeleting files and directories can be achieved with the ``shutil``\nmodule.\n\nWhen a file operation fails for an I/O-related reason, the exception\n``IOError`` is raised. This includes situations where the operation\nis not defined for some reason, like ``seek()`` on a tty device or\nwriting a file opened for reading.\n\nFiles have the following methods:\n\nfile.close()\n\n Close the file. A closed file cannot be read or written any more.\n Any operation which requires that the file be open will raise a\n ``ValueError`` after the file has been closed. Calling ``close()``\n more than once is allowed.\n\n As of Python 2.5, you can avoid having to call this method\n explicitly if you use the ``with`` statement. For example, the\n following code will automatically close *f* when the ``with`` block\n is exited:\n\n from __future__ import with_statement # This isn\'t required in Python 2.6\n\n with open("hello.txt") as f:\n for line in f:\n print line\n\n In older versions of Python, you would have needed to do this to\n get the same effect:\n\n f = open("hello.txt")\n try:\n for line in f:\n print line\n finally:\n f.close()\n\n Note: Not all "file-like" types in Python support use as a context\n manager for the ``with`` statement. If your code is intended to\n work with any file-like object, you can use the function\n ``contextlib.closing()`` instead of using the object directly.\n\nfile.flush()\n\n Flush the internal buffer, like ``stdio``\'s ``fflush()``. 
This may\n be a no-op on some file-like objects.\n\n Note: ``flush()`` does not necessarily write the file\'s data to disk.\n Use ``flush()`` followed by ``os.fsync()`` to ensure this\n behavior.\n\nfile.fileno()\n\n Return the integer "file descriptor" that is used by the underlying\n implementation to request I/O operations from the operating system.\n This can be useful for other, lower level interfaces that use file\n descriptors, such as the ``fcntl`` module or ``os.read()`` and\n friends.\n\n Note: File-like objects which do not have a real file descriptor should\n *not* provide this method!\n\nfile.isatty()\n\n Return ``True`` if the file is connected to a tty(-like) device,\n else ``False``.\n\n Note: If a file-like object is not associated with a real file, this\n method should *not* be implemented.\n\nfile.next()\n\n A file object is its own iterator, for example ``iter(f)`` returns\n *f* (unless *f* is closed). When a file is used as an iterator,\n typically in a ``for`` loop (for example, ``for line in f: print\n line``), the ``next()`` method is called repeatedly. This method\n returns the next input line, or raises ``StopIteration`` when EOF\n is hit when the file is open for reading (behavior is undefined\n when the file is open for writing). In order to make a ``for``\n loop the most efficient way of looping over the lines of a file (a\n very common operation), the ``next()`` method uses a hidden read-\n ahead buffer. As a consequence of using a read-ahead buffer,\n combining ``next()`` with other file methods (like ``readline()``)\n does not work right. However, using ``seek()`` to reposition the\n file to an absolute position will flush the read-ahead buffer.\n\n New in version 2.3.\n\nfile.read([size])\n\n Read at most *size* bytes from the file (less if the read hits EOF\n before obtaining *size* bytes). If the *size* argument is negative\n or omitted, read all data until EOF is reached. The bytes are\n returned as a string object. 
An empty string is returned when EOF\n is encountered immediately. (For certain files, like ttys, it\n makes sense to continue reading after an EOF is hit.) Note that\n this method may call the underlying C function ``fread()`` more\n than once in an effort to acquire as close to *size* bytes as\n possible. Also note that when in non-blocking mode, less data than\n was requested may be returned, even if no *size* parameter was\n given.\n\n Note: This function is simply a wrapper for the underlying ``fread()``\n C function, and will behave the same in corner cases, such as\n whether the EOF value is cached.\n\nfile.readline([size])\n\n Read one entire line from the file. A trailing newline character\n is kept in the string (but may be absent when a file ends with an\n incomplete line). [5] If the *size* argument is present and non-\n negative, it is a maximum byte count (including the trailing\n newline) and an incomplete line may be returned. When *size* is not\n 0, an empty string is returned *only* when EOF is encountered\n immediately.\n\n Note: Unlike ``stdio``\'s ``fgets()``, the returned string contains null\n characters (``\'\\0\'``) if they occurred in the input.\n\nfile.readlines([sizehint])\n\n Read until EOF using ``readline()`` and return a list containing\n the lines thus read. If the optional *sizehint* argument is\n present, instead of reading up to EOF, whole lines totalling\n approximately *sizehint* bytes (possibly after rounding up to an\n internal buffer size) are read. Objects implementing a file-like\n interface may choose to ignore *sizehint* if it cannot be\n implemented, or cannot be implemented efficiently.\n\nfile.xreadlines()\n\n This method returns the same thing as ``iter(f)``.\n\n New in version 2.1.\n\n Deprecated since version 2.3: Use ``for line in file`` instead.\n\nfile.seek(offset[, whence])\n\n Set the file\'s current position, like ``stdio``\'s ``fseek()``. 
The\n *whence* argument is optional and defaults to ``os.SEEK_SET`` or\n ``0`` (absolute file positioning); other values are ``os.SEEK_CUR``\n or ``1`` (seek relative to the current position) and\n ``os.SEEK_END`` or ``2`` (seek relative to the file\'s end). There\n is no return value.\n\n For example, ``f.seek(2, os.SEEK_CUR)`` advances the position by\n two and ``f.seek(-3, os.SEEK_END)`` sets the position to the third\n to last.\n\n Note that if the file is opened for appending (mode ``\'a\'`` or\n ``\'a+\'``), any ``seek()`` operations will be undone at the next\n write. If the file is only opened for writing in append mode (mode\n ``\'a\'``), this method is essentially a no-op, but it remains useful\n for files opened in append mode with reading enabled (mode\n ``\'a+\'``). If the file is opened in text mode (without ``\'b\'``),\n only offsets returned by ``tell()`` are legal. Use of other\n offsets causes undefined behavior.\n\n Note that not all file objects are seekable.\n\n Changed in version 2.6: Passing float values as offset has been\n deprecated.\n\nfile.tell()\n\n Return the file\'s current position, like ``stdio``\'s ``ftell()``.\n\n Note: On Windows, ``tell()`` can return illegal values (after an\n ``fgets()``) when reading files with Unix-style line-endings. Use\n binary mode (``\'rb\'``) to circumvent this problem.\n\nfile.truncate([size])\n\n Truncate the file\'s size. If the optional *size* argument is\n present, the file is truncated to (at most) that size. The size\n defaults to the current position. The current file position is not\n changed. Note that if a specified size exceeds the file\'s current\n size, the result is platform-dependent: possibilities include that\n the file may remain unchanged, increase to the specified size as if\n zero-filled, or increase to the specified size with undefined new\n content. Availability: Windows, many Unix variants.\n\nfile.write(str)\n\n Write a string to the file. There is no return value. 
Due to\n buffering, the string may not actually show up in the file until\n the ``flush()`` or ``close()`` method is called.\n\nfile.writelines(sequence)\n\n Write a sequence of strings to the file. The sequence can be any\n iterable object producing strings, typically a list of strings.\n There is no return value. (The name is intended to match\n ``readlines()``; ``writelines()`` does not add line separators.)\n\nFiles support the iterator protocol. Each iteration returns the same\nresult as ``file.readline()``, and iteration ends when the\n``readline()`` method returns an empty string.\n\nFile objects also offer a number of other interesting attributes.\nThese are not required for file-like objects, but should be\nimplemented if they make sense for the particular object.\n\nfile.closed\n\n bool indicating the current state of the file object. This is a\n read-only attribute; the ``close()`` method changes the value. It\n may not be available on all file-like objects.\n\nfile.encoding\n\n The encoding that this file uses. When Unicode strings are written\n to a file, they will be converted to byte strings using this\n encoding. In addition, when the file is connected to a terminal,\n the attribute gives the encoding that the terminal is likely to use\n (that information might be incorrect if the user has misconfigured\n the terminal). The attribute is read-only and may not be present\n on all file-like objects. It may also be ``None``, in which case\n the file uses the system default encoding for converting Unicode\n strings.\n\n New in version 2.3.\n\nfile.errors\n\n The Unicode error handler used along with the encoding.\n\n New in version 2.6.\n\nfile.mode\n\n The I/O mode for the file. If the file was created using the\n ``open()`` built-in function, this will be the value of the *mode*\n parameter. 
This is a read-only attribute and may not be present on\n all file-like objects.\n\nfile.name\n\n If the file object was created using ``open()``, the name of the\n file. Otherwise, some string that indicates the source of the file\n object, of the form ``<...>``. This is a read-only attribute and\n may not be present on all file-like objects.\n\nfile.newlines\n\n If Python was built with universal newlines enabled (the default)\n this read-only attribute exists, and for files opened in universal\n newline read mode it keeps track of the types of newlines\n encountered while reading the file. The values it can take are\n ``\'\\r\'``, ``\'\\n\'``, ``\'\\r\\n\'``, ``None`` (unknown, no newlines read\n yet) or a tuple containing all the newline types seen, to indicate\n that multiple newline conventions were encountered. For files not\n opened in universal newline read mode the value of this attribute\n will be ``None``.\n\nfile.softspace\n\n Boolean that indicates whether a space character needs to be\n printed before another value when using the ``print`` statement.\n Classes that are trying to simulate a file object should also have\n a writable ``softspace`` attribute, which should be initialized to\n zero. This will be automatic for most classes implemented in\n Python (care may be needed for objects that override attribute\n access); types implemented in C will have to provide a writable\n ``softspace`` attribute.\n\n Note: This attribute is not used to control the ``print`` statement,\n but to allow the implementation of ``print`` to keep track of its\n internal state.\n', 'bltin-null-object': u"\nThe Null Object\n***************\n\nThis object is returned by functions that don't explicitly return a\nvalue. It supports no special operations. There is exactly one null\nobject, named ``None`` (a built-in name).\n\nIt is written as ``None``.\n", 'bltin-type-objects': u"\nType Objects\n************\n\nType objects represent the various object types. 
An object's type is\naccessed by the built-in function ``type()``. There are no special\noperations on types. The standard module ``types`` defines names for\nall standard built-in types.\n\nTypes are written like this: ````.\n", 'booleans': u'\nBoolean operations\n******************\n\n or_test ::= and_test | or_test "or" and_test\n and_test ::= not_test | and_test "and" not_test\n not_test ::= comparison | "not" not_test\n\nIn the context of Boolean operations, and also when expressions are\nused by control flow statements, the following values are interpreted\nas false: ``False``, ``None``, numeric zero of all types, and empty\nstrings and containers (including strings, tuples, lists,\ndictionaries, sets and frozensets). All other values are interpreted\nas true. (See the ``__nonzero__()`` special method for a way to\nchange this.)\n\nThe operator ``not`` yields ``True`` if its argument is false,\n``False`` otherwise.\n\nThe expression ``x and y`` first evaluates *x*; if *x* is false, its\nvalue is returned; otherwise, *y* is evaluated and the resulting value\nis returned.\n\nThe expression ``x or y`` first evaluates *x*; if *x* is true, its\nvalue is returned; otherwise, *y* is evaluated and the resulting value\nis returned.\n\n(Note that neither ``and`` nor ``or`` restrict the value and type they\nreturn to ``False`` and ``True``, but rather return the last evaluated\nargument. This is sometimes useful, e.g., if ``s`` is a string that\nshould be replaced by a default value if it is empty, the expression\n``s or \'foo\'`` yields the desired value. 
Because ``not`` has to\ninvent a value anyway, it does not bother to return a value of the\nsame type as its argument, so e.g., ``not \'foo\'`` yields ``False``,\nnot ``\'\'``.)\n', @@ -20,39 +20,39 @@ 'class': u'\nClass definitions\n*****************\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n classdef ::= "class" classname [inheritance] ":" suite\n inheritance ::= "(" [expression_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. It first evaluates the\ninheritance list, if present. Each item in the inheritance list\nshould evaluate to a class object or class type which allows\nsubclassing. The class\'s suite is then executed in a new execution\nframe (see section *Naming and binding*), using a newly created local\nnamespace and the original global namespace. (Usually, the suite\ncontains only function definitions.) When the class\'s suite finishes\nexecution, its execution frame is discarded but its local namespace is\nsaved. [4] A class object is then created using the inheritance list\nfor the base classes and the saved local namespace for the attribute\ndictionary. The class name is bound to this class object in the\noriginal local namespace.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass variables; they are shared by all instances. To create instance\nvariables, they can be set in a method with ``self.name = value``.\nBoth class and instance variables are accessible through the notation\n"``self.name``", and an instance variable hides a class variable with\nthe same name when accessed in this way. Class variables can be used\nas defaults for instance variables, but using mutable values there can\nlead to unexpected results. 
For *new-style class*es, descriptors can\nbe used to create instance variables with different implementation\ndetails.\n\nClass definitions, like function definitions, may be wrapped by one or\nmore *decorator* expressions. The evaluation rules for the decorator\nexpressions are the same as for functions. The result must be a class\nobject, which is then bound to the class name.\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack only if there\n is no ``finally`` clause that negates the exception.\n\n[2] Currently, control "flows off the end" except in the case of an\n exception or the execution of a ``return``, ``continue``, or\n ``break`` statement.\n\n[3] A string literal appearing as the first statement in the function\n body is transformed into the function\'s ``__doc__`` attribute and\n therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s ``__doc__`` item and\n therefore the class\'s *docstring*.\n', 'coercion-rules': u"\nCoercion rules\n**************\n\nThis section used to document the rules for coercion. As the language\nhas evolved, the coercion rules have become hard to document\nprecisely; documenting what one version of one particular\nimplementation does is undesirable. Instead, here are some informal\nguidelines regarding coercion. In Python 3.0, coercion will not be\nsupported.\n\n* If the left operand of a % operator is a string or Unicode object,\n no coercion takes place and the string formatting operation is\n invoked instead.\n\n* It is no longer recommended to define a coercion operation. 
Mixed-\n mode operations on types that don't define coercion pass the\n original arguments to the operation.\n\n* New-style classes (those derived from ``object``) never invoke the\n ``__coerce__()`` method in response to a binary operator; the only\n time ``__coerce__()`` is invoked is when the built-in function\n ``coerce()`` is called.\n\n* For most intents and purposes, an operator that returns\n ``NotImplemented`` is treated the same as one that is not\n implemented at all.\n\n* Below, ``__op__()`` and ``__rop__()`` are used to signify the\n generic method names corresponding to an operator; ``__iop__()`` is\n used for the corresponding in-place operator. For example, for the\n operator '``+``', ``__add__()`` and ``__radd__()`` are used for the\n left and right variant of the binary operator, and ``__iadd__()``\n for the in-place variant.\n\n* For objects *x* and *y*, first ``x.__op__(y)`` is tried. If this is\n not implemented or returns ``NotImplemented``, ``y.__rop__(x)`` is\n tried. If this is also not implemented or returns\n ``NotImplemented``, a ``TypeError`` exception is raised. But see\n the following exception:\n\n* Exception to the previous item: if the left operand is an instance\n of a built-in type or a new-style class, and the right operand is an\n instance of a proper subclass of that type or class and overrides\n the base's ``__rop__()`` method, the right operand's ``__rop__()``\n method is tried *before* the left operand's ``__op__()`` method.\n\n This is done so that a subclass can completely override binary\n operators. Otherwise, the left operand's ``__op__()`` method would\n always accept the right operand: when an instance of a given class\n is expected, an instance of a subclass of that class is always\n acceptable.\n\n* When either operand type defines a coercion, this coercion is called\n before that type's ``__op__()`` or ``__rop__()`` method is called,\n but no sooner. 
If the coercion returns an object of a different\n type for the operand whose coercion is invoked, part of the process\n is redone using the new object.\n\n* When an in-place operator (like '``+=``') is used, if the left\n operand implements ``__iop__()``, it is invoked without any\n coercion. When the operation falls back to ``__op__()`` and/or\n ``__rop__()``, the normal coercion rules apply.\n\n* In ``x + y``, if *x* is a sequence that implements sequence\n concatenation, sequence concatenation is invoked.\n\n* In ``x * y``, if one operator is a sequence that implements sequence\n repetition, and the other is an integer (``int`` or ``long``),\n sequence repetition is invoked.\n\n* Rich comparisons (implemented by methods ``__eq__()`` and so on)\n never use coercion. Three-way comparison (implemented by\n ``__cmp__()``) does use coercion under the same conditions as other\n binary operations use it.\n\n* In the current implementation, the built-in numeric types ``int``,\n ``long``, ``float``, and ``complex`` do not use coercion. All these\n types implement a ``__coerce__()`` method, for use by the built-in\n ``coerce()`` function.\n\n Changed in version 2.7.\n", 'comparisons': u'\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. 
Also unlike C, expressions like ``a < b < c`` have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "<>" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: ``True`` or ``False``.\n\nComparisons can be chained arbitrarily, e.g., ``x < y <= z`` is\nequivalent to ``x < y and y <= z``, except that ``y`` is evaluated\nonly once (but in both cases ``z`` is not evaluated at all when ``x <\ny`` is found to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then ``a op1 b op2 c ... y\nopN z`` is equivalent to ``a op1 b and b op2 c and ... y opN z``,\nexcept that each expression is evaluated at most once.\n\nNote that ``a op1 b op2 c`` doesn\'t imply any kind of comparison\nbetween *a* and *c*, so that, e.g., ``x < y > z`` is perfectly legal\n(though perhaps not pretty).\n\nThe forms ``<>`` and ``!=`` are equivalent; for consistency with C,\n``!=`` is preferred; where ``!=`` is mentioned below ``<>`` is also\naccepted. The ``<>`` spelling is considered obsolescent.\n\nThe operators ``<``, ``>``, ``==``, ``>=``, ``<=``, and ``!=`` compare\nthe values of two objects. The objects need not have the same type.\nIf both are numbers, they are converted to a common type. Otherwise,\nobjects of different types *always* compare unequal, and are ordered\nconsistently but arbitrarily. You can control comparison behavior of\nobjects of non-built-in types by defining a ``__cmp__`` method or rich\ncomparison methods like ``__gt__``, described in section *Special\nmethod names*.\n\n(This unusual definition of comparison was used to simplify the\ndefinition of operations like sorting and the ``in`` and ``not in``\noperators. 
In the future, the comparison rules for objects of\ndifferent types are likely to change.)\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* Strings are compared lexicographically using the numeric equivalents\n (the result of the built-in function ``ord()``) of their characters.\n Unicode and 8-bit strings are fully interoperable in this behavior.\n [4]\n\n* Tuples and lists are compared lexicographically using comparison of\n corresponding elements. This means that to compare equal, each\n element must compare equal and the two sequences must be of the same\n type and have the same length.\n\n If not equal, the sequences are ordered the same as their first\n differing elements. For example, ``cmp([1,2,x], [1,2,y])`` returns\n the same as ``cmp(x,y)``. If the corresponding element does not\n exist, the shorter sequence is ordered first (for example, ``[1,2] <\n [1,2,3]``).\n\n* Mappings (dictionaries) compare equal if and only if their sorted\n (key, value) lists compare equal. [5] Outcomes other than equality\n are resolved consistently, but are not otherwise defined. [6]\n\n* Most other objects of built-in types compare unequal unless they are\n the same object; the choice whether one object is considered smaller\n or larger than another one is made arbitrarily but consistently\n within one execution of a program.\n\nThe operators ``in`` and ``not in`` test for collection membership.\n``x in s`` evaluates to true if *x* is a member of the collection *s*,\nand false otherwise. ``x not in s`` returns the negation of ``x in\ns``. The collection membership test has traditionally been bound to\nsequences; an object is a member of a collection if the collection is\na sequence and contains an element equal to that object. However, it\nmake sense for many other object types to support membership tests\nwithout being a sequence. 
In particular, dictionaries (for keys) and\nsets support membership testing.\n\nFor the list and tuple types, ``x in y`` is true if and only if there\nexists an index *i* such that ``x == y[i]`` is true.\n\nFor the Unicode and string types, ``x in y`` is true if and only if\n*x* is a substring of *y*. An equivalent test is ``y.find(x) != -1``.\nNote, *x* and *y* need not be the same type; consequently, ``u\'ab\' in\n\'abc\'`` will return ``True``. Empty strings are always considered to\nbe a substring of any other string, so ``"" in "abc"`` will return\n``True``.\n\nChanged in version 2.3: Previously, *x* was required to be a string of\nlength ``1``.\n\nFor user-defined classes which define the ``__contains__()`` method,\n``x in y`` is true if and only if ``y.__contains__(x)`` is true.\n\nFor user-defined classes which do not define ``__contains__()`` but do\ndefine ``__iter__()``, ``x in y`` is true if some value ``z`` with ``x\n== z`` is produced while iterating over ``y``. If an exception is\nraised during the iteration, it is as if ``in`` raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n``__getitem__()``, ``x in y`` is true if and only if there is a non-\nnegative integer index *i* such that ``x == y[i]``, and all lower\ninteger indices do not raise ``IndexError`` exception. (If any other\nexception is raised, it is as if ``in`` raised that exception).\n\nThe operator ``not in`` is defined to have the inverse true value of\n``in``.\n\nThe operators ``is`` and ``is not`` test for object identity: ``x is\ny`` is true if and only if *x* and *y* are the same object. ``x is\nnot y`` yields the inverse truth value. [7]\n', - 'compound': u'\nCompound statements\n*******************\n\nCompound statements contain (groups of) other statements; they affect\nor control the execution of those other statements in some way. 
In\ngeneral, compound statements span multiple lines, although in simple\nincarnations a whole compound statement may be contained in one line.\n\nThe ``if``, ``while`` and ``for`` statements implement traditional\ncontrol flow constructs. ``try`` specifies exception handlers and/or\ncleanup code for a group of statements. Function and class\ndefinitions are also syntactically compound statements.\n\nCompound statements consist of one or more \'clauses.\' A clause\nconsists of a header and a \'suite.\' The clause headers of a\nparticular compound statement are all at the same indentation level.\nEach clause header begins with a uniquely identifying keyword and ends\nwith a colon. A suite is a group of statements controlled by a\nclause. A suite can be one or more semicolon-separated simple\nstatements on the same line as the header, following the header\'s\ncolon, or it can be one or more indented statements on subsequent\nlines. Only the latter form of suite can contain nested compound\nstatements; the following is illegal, mostly because it wouldn\'t be\nclear to which ``if`` clause a following ``else`` clause would belong:\n\n if test1: if test2: print x\n\nAlso note that the semicolon binds tighter than the colon in this\ncontext, so that in the following example, either all or none of the\n``print`` statements are executed:\n\n if x < y < z: print x; print y; print z\n\nSummarizing:\n\n compound_stmt ::= if_stmt\n | while_stmt\n | for_stmt\n | try_stmt\n | with_stmt\n | funcdef\n | classdef\n | decorated\n suite ::= stmt_list NEWLINE | NEWLINE INDENT statement+ DEDENT\n statement ::= stmt_list NEWLINE | compound_stmt\n stmt_list ::= simple_stmt (";" simple_stmt)* [";"]\n\nNote that statements always end in a ``NEWLINE`` possibly followed by\na ``DEDENT``. 
Also note that optional continuation clauses always\nbegin with a keyword that cannot start a statement, thus there are no\nambiguities (the \'dangling ``else``\' problem is solved in Python by\nrequiring nested ``if`` statements to be indented).\n\nThe formatting of the grammar rules in the following sections places\neach clause on a separate line for clarity.\n\n\nThe ``if`` statement\n====================\n\nThe ``if`` statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the ``if`` statement is executed or evaluated).\nIf all expressions are false, the suite of the ``else`` clause, if\npresent, is executed.\n\n\nThe ``while`` statement\n=======================\n\nThe ``while`` statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the ``else`` clause, if present, is\nexecuted and the loop terminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. 
A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ngoes back to testing the expression.\n\n\nThe ``for`` statement\n=====================\n\nThe ``for`` statement is used to iterate over the elements of a\nsequence (such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n``expression_list``. The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments, and then the suite is executed. When the items are\nexhausted (which is immediately when the sequence is empty), the suite\nin the ``else`` clause, if present, is executed, and the loop\nterminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ncontinues with the next item, or with the ``else`` clause if there was\nno next item.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nThe target list is not deleted when the loop is finished, but if the\nsequence is empty, it will not have been assigned to at all by the\nloop. Hint: the built-in function ``range()`` returns a sequence of\nintegers suitable to emulate the effect of Pascal\'s ``for i := a to b\ndo``; e.g., ``range(3)`` returns the list ``[0, 1, 2]``.\n\nNote: There is a subtlety when the sequence is being modified by the loop\n (this can only occur for mutable sequences, i.e. lists). An internal\n counter is used to keep track of which item is used next, and this\n is incremented on each iteration. 
When this counter has reached the\n length of the sequence the loop terminates. This means that if the\n suite deletes the current (or a previous) item from the sequence,\n the next item will be skipped (since it gets the index of the\n current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n\n\nThe ``try`` statement\n=====================\n\nThe ``try`` statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression [("as" | ",") target]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nChanged in version 2.5: In previous versions of Python,\n``try``...``except``...``finally`` did not work. ``try``...``except``\nhad to be nested in ``try``...``finally``.\n\nThe ``except`` clause(s) specify one or more exception handlers. When\nno exception occurs in the ``try`` clause, no exception handler is\nexecuted. When an exception occurs in the ``try`` suite, a search for\nan exception handler is started. This search inspects the except\nclauses in turn until one is found that matches the exception. An\nexpression-less except clause, if present, must be last; it matches\nany exception. For an except clause with an expression, that\nexpression is evaluated, and the clause matches the exception if the\nresulting object is "compatible" with the exception. 
An object is\ncompatible with an exception if it is the class or a base class of the\nexception object, a tuple containing an item compatible with the\nexception, or, in the (deprecated) case of string exceptions, is the\nraised string itself (note that the object identities must match, i.e.\nit must be the same string object, not just a string with the same\nvalue).\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire ``try`` statement\nraised the exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified in that except clause, if present, and the except\nclause\'s suite is executed. All except clauses must have an\nexecutable block. When the end of this block is reached, execution\ncontinues normally after the entire try statement. (This means that\nif two nested handlers exist for the same exception, and the exception\noccurs in the try clause of the inner handler, the outer handler will\nnot handle the exception.)\n\nBefore an except clause\'s suite is executed, details about the\nexception are assigned to three variables in the ``sys`` module:\n``sys.exc_type`` receives the object identifying the exception;\n``sys.exc_value`` receives the exception\'s parameter;\n``sys.exc_traceback`` receives a traceback object (see section *The\nstandard type hierarchy*) identifying the point in the program where\nthe exception occurred. These details are also available through the\n``sys.exc_info()`` function, which returns a tuple ``(exc_type,\nexc_value, exc_traceback)``. 
Use of the corresponding variables is\ndeprecated in favor of this function, since their use is unsafe in a\nthreaded program. As of Python 1.5, the variables are restored to\ntheir previous values (before the call) when returning from a function\nthat handled an exception.\n\nThe optional ``else`` clause is executed if and when control flows off\nthe end of the ``try`` clause. [2] Exceptions in the ``else`` clause\nare not handled by the preceding ``except`` clauses.\n\nIf ``finally`` is present, it specifies a \'cleanup\' handler. The\n``try`` clause is executed, including any ``except`` and ``else``\nclauses. If an exception occurs in any of the clauses and is not\nhandled, the exception is temporarily saved. The ``finally`` clause is\nexecuted. If there is a saved exception, it is re-raised at the end\nof the ``finally`` clause. If the ``finally`` clause raises another\nexception or executes a ``return`` or ``break`` statement, the saved\nexception is lost. The exception information is not available to the\nprogram during execution of the ``finally`` clause.\n\nWhen a ``return``, ``break`` or ``continue`` statement is executed in\nthe ``try`` suite of a ``try``...``finally`` statement, the\n``finally`` clause is also executed \'on the way out.\' A ``continue``\nstatement is illegal in the ``finally`` clause. (The reason is a\nproblem with the current implementation --- this restriction may be\nlifted in the future).\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the ``raise`` statement to\ngenerate exceptions may be found in section *The raise statement*.\n\n\nThe ``with`` statement\n======================\n\nNew in version 2.5.\n\nThe ``with`` statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). 
This allows common\n``try``...``except``...``finally`` usage patterns to be encapsulated\nfor convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the ``with`` statement with one "item" proceeds as\nfollows:\n\n1. The context expression is evaluated to obtain a context manager.\n\n2. The context manager\'s ``__exit__()`` is loaded for later use.\n\n3. The context manager\'s ``__enter__()`` method is invoked.\n\n4. If a target was included in the ``with`` statement, the return\n value from ``__enter__()`` is assigned to it.\n\n Note: The ``with`` statement guarantees that if the ``__enter__()``\n method returns without an error, then ``__exit__()`` will always\n be called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s ``__exit__()`` method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to ``__exit__()``. Otherwise,\n three ``None`` arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the ``__exit__()`` method was false, the exception is\n reraised. 
If the return value was true, the exception is\n suppressed, and execution continues with the statement following\n the ``with`` statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from ``__exit__()`` is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple ``with`` statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nNote: In Python 2.5, the ``with`` statement is only allowed when the\n ``with_statement`` feature has been enabled. It is always enabled\n in Python 2.6.\n\nChanged in version 2.7: Support for multiple context expressions.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n\n\nFunction definitions\n====================\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n decorated ::= decorators (classdef | funcdef)\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [argument_list [","]] ")"] NEWLINE\n funcdef ::= "def" funcname "(" [parameter_list] ")" ":" suite\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n ( "*" identifier [, "**" identifier]\n | "**" identifier\n | defparameter [","] )\n defparameter ::= parameter ["=" expression]\n sublist ::= parameter ("," parameter)* [","]\n parameter ::= identifier | "(" sublist ")"\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). 
This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. For example, the following code:\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to:\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more top-level parameters have the form *parameter* ``=``\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding argument may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters must also have a default value --- this is a syntactic\nrestriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated when the function definition\nis executed.** This means that the expression is evaluated once, when\nthe function is defined, and that that same "pre-computed" value is\nused for each call. This is especially important to understand when a\ndefault parameter is a mutable object, such as a list or a dictionary:\nif the function modifies the object (e.g. by appending an item to a\nlist), the default value is in effect modified. This is generally not\nwhat was intended. 
A way around this is to use ``None`` as the\ndefault, and explicitly test for it in the body of the function, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from position arguments, from\nkeyword arguments, or from default values. If the form\n"``*identifier``" is present, it is initialized to a tuple receiving\nany excess positional parameters, defaulting to the empty tuple. If\nthe form "``**identifier``" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda forms,\ndescribed in section *Lambdas*. Note that the lambda form is merely a\nshorthand for a simplified function definition; a function defined in\na "``def``" statement can be passed around or assigned to another name\njust like a function defined by a lambda form. The "``def``" form is\nactually more powerful since it allows the execution of multiple\nstatements.\n\n**Programmer\'s note:** Functions are first-class objects. A "``def``"\nform executed inside a function definition defines a local function\nthat can be returned or passed around. Free variables used in the\nnested function can access the local variables of the function\ncontaining the def. See section *Naming and binding* for details.\n\n\nClass definitions\n=================\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n classdef ::= "class" classname [inheritance] ":" suite\n inheritance ::= "(" [expression_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. 
It first evaluates the\ninheritance list, if present. Each item in the inheritance list\nshould evaluate to a class object or class type which allows\nsubclassing. The class\'s suite is then executed in a new execution\nframe (see section *Naming and binding*), using a newly created local\nnamespace and the original global namespace. (Usually, the suite\ncontains only function definitions.) When the class\'s suite finishes\nexecution, its execution frame is discarded but its local namespace is\nsaved. [4] A class object is then created using the inheritance list\nfor the base classes and the saved local namespace for the attribute\ndictionary. The class name is bound to this class object in the\noriginal local namespace.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass variables; they are shared by all instances. To create instance\nvariables, they can be set in a method with ``self.name = value``.\nBoth class and instance variables are accessible through the notation\n"``self.name``", and an instance variable hides a class variable with\nthe same name when accessed in this way. Class variables can be used\nas defaults for instance variables, but using mutable values there can\nlead to unexpected results. For *new-style class*es, descriptors can\nbe used to create instance variables with different implementation\ndetails.\n\nClass definitions, like function definitions, may be wrapped by one or\nmore *decorator* expressions. The evaluation rules for the decorator\nexpressions are the same as for functions. 
The result must be a class\nobject, which is then bound to the class name.\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack only if there\n is no ``finally`` clause that negates the exception.\n\n[2] Currently, control "flows off the end" except in the case of an\n exception or the execution of a ``return``, ``continue``, or\n ``break`` statement.\n\n[3] A string literal appearing as the first statement in the function\n body is transformed into the function\'s ``__doc__`` attribute and\n therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s ``__doc__`` item and\n therefore the class\'s *docstring*.\n', + 'compound': u'\nCompound statements\n*******************\n\nCompound statements contain (groups of) other statements; they affect\nor control the execution of those other statements in some way. In\ngeneral, compound statements span multiple lines, although in simple\nincarnations a whole compound statement may be contained in one line.\n\nThe ``if``, ``while`` and ``for`` statements implement traditional\ncontrol flow constructs. ``try`` specifies exception handlers and/or\ncleanup code for a group of statements. Function and class\ndefinitions are also syntactically compound statements.\n\nCompound statements consist of one or more \'clauses.\' A clause\nconsists of a header and a \'suite.\' The clause headers of a\nparticular compound statement are all at the same indentation level.\nEach clause header begins with a uniquely identifying keyword and ends\nwith a colon. A suite is a group of statements controlled by a\nclause. A suite can be one or more semicolon-separated simple\nstatements on the same line as the header, following the header\'s\ncolon, or it can be one or more indented statements on subsequent\nlines. 
Only the latter form of suite can contain nested compound\nstatements; the following is illegal, mostly because it wouldn\'t be\nclear to which ``if`` clause a following ``else`` clause would belong:\n\n if test1: if test2: print x\n\nAlso note that the semicolon binds tighter than the colon in this\ncontext, so that in the following example, either all or none of the\n``print`` statements are executed:\n\n if x < y < z: print x; print y; print z\n\nSummarizing:\n\n compound_stmt ::= if_stmt\n | while_stmt\n | for_stmt\n | try_stmt\n | with_stmt\n | funcdef\n | classdef\n | decorated\n suite ::= stmt_list NEWLINE | NEWLINE INDENT statement+ DEDENT\n statement ::= stmt_list NEWLINE | compound_stmt\n stmt_list ::= simple_stmt (";" simple_stmt)* [";"]\n\nNote that statements always end in a ``NEWLINE`` possibly followed by\na ``DEDENT``. Also note that optional continuation clauses always\nbegin with a keyword that cannot start a statement, thus there are no\nambiguities (the \'dangling ``else``\' problem is solved in Python by\nrequiring nested ``if`` statements to be indented).\n\nThe formatting of the grammar rules in the following sections places\neach clause on a separate line for clarity.\n\n\nThe ``if`` statement\n====================\n\nThe ``if`` statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the ``if`` statement is executed or evaluated).\nIf all expressions are false, the suite of the ``else`` clause, if\npresent, is executed.\n\n\nThe ``while`` statement\n=======================\n\nThe ``while`` statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" 
":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the ``else`` clause, if present, is\nexecuted and the loop terminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ngoes back to testing the expression.\n\n\nThe ``for`` statement\n=====================\n\nThe ``for`` statement is used to iterate over the elements of a\nsequence (such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n``expression_list``. The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments, and then the suite is executed. When the items are\nexhausted (which is immediately when the sequence is empty), the suite\nin the ``else`` clause, if present, is executed, and the loop\nterminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ncontinues with the next item, or with the ``else`` clause if there was\nno next item.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nThe target list is not deleted when the loop is finished, but if the\nsequence is empty, it will not have been assigned to at all by the\nloop. 
Hint: the built-in function ``range()`` returns a sequence of\nintegers suitable to emulate the effect of Pascal\'s ``for i := a to b\ndo``; e.g., ``range(3)`` returns the list ``[0, 1, 2]``.\n\nNote: There is a subtlety when the sequence is being modified by the loop\n (this can only occur for mutable sequences, i.e. lists). An internal\n counter is used to keep track of which item is used next, and this\n is incremented on each iteration. When this counter has reached the\n length of the sequence the loop terminates. This means that if the\n suite deletes the current (or a previous) item from the sequence,\n the next item will be skipped (since it gets the index of the\n current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n\n\nThe ``try`` statement\n=====================\n\nThe ``try`` statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression [("as" | ",") target]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nChanged in version 2.5: In previous versions of Python,\n``try``...``except``...``finally`` did not work. ``try``...``except``\nhad to be nested in ``try``...``finally``.\n\nThe ``except`` clause(s) specify one or more exception handlers. When\nno exception occurs in the ``try`` clause, no exception handler is\nexecuted. When an exception occurs in the ``try`` suite, a search for\nan exception handler is started. This search inspects the except\nclauses in turn until one is found that matches the exception. 
An\nexpression-less except clause, if present, must be last; it matches\nany exception. For an except clause with an expression, that\nexpression is evaluated, and the clause matches the exception if the\nresulting object is "compatible" with the exception. An object is\ncompatible with an exception if it is the class or a base class of the\nexception object, a tuple containing an item compatible with the\nexception, or, in the (deprecated) case of string exceptions, is the\nraised string itself (note that the object identities must match, i.e.\nit must be the same string object, not just a string with the same\nvalue).\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire ``try`` statement\nraised the exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified in that except clause, if present, and the except\nclause\'s suite is executed. All except clauses must have an\nexecutable block. When the end of this block is reached, execution\ncontinues normally after the entire try statement. 
(This means that\nif two nested handlers exist for the same exception, and the exception\noccurs in the try clause of the inner handler, the outer handler will\nnot handle the exception.)\n\nBefore an except clause\'s suite is executed, details about the\nexception are assigned to three variables in the ``sys`` module:\n``sys.exc_type`` receives the object identifying the exception;\n``sys.exc_value`` receives the exception\'s parameter;\n``sys.exc_traceback`` receives a traceback object (see section *The\nstandard type hierarchy*) identifying the point in the program where\nthe exception occurred. These details are also available through the\n``sys.exc_info()`` function, which returns a tuple ``(exc_type,\nexc_value, exc_traceback)``. Use of the corresponding variables is\ndeprecated in favor of this function, since their use is unsafe in a\nthreaded program. As of Python 1.5, the variables are restored to\ntheir previous values (before the call) when returning from a function\nthat handled an exception.\n\nThe optional ``else`` clause is executed if and when control flows off\nthe end of the ``try`` clause. [2] Exceptions in the ``else`` clause\nare not handled by the preceding ``except`` clauses.\n\nIf ``finally`` is present, it specifies a \'cleanup\' handler. The\n``try`` clause is executed, including any ``except`` and ``else``\nclauses. If an exception occurs in any of the clauses and is not\nhandled, the exception is temporarily saved. The ``finally`` clause is\nexecuted. If there is a saved exception, it is re-raised at the end\nof the ``finally`` clause. If the ``finally`` clause raises another\nexception or executes a ``return`` or ``break`` statement, the saved\nexception is lost. 
The exception information is not available to the\nprogram during execution of the ``finally`` clause.\n\nWhen a ``return``, ``break`` or ``continue`` statement is executed in\nthe ``try`` suite of a ``try``...``finally`` statement, the\n``finally`` clause is also executed \'on the way out.\' A ``continue``\nstatement is illegal in the ``finally`` clause. (The reason is a\nproblem with the current implementation --- this restriction may be\nlifted in the future).\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the ``raise`` statement to\ngenerate exceptions may be found in section *The raise statement*.\n\n\nThe ``with`` statement\n======================\n\nNew in version 2.5.\n\nThe ``with`` statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). This allows common\n``try``...``except``...``finally`` usage patterns to be encapsulated\nfor convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the ``with`` statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the **with_item**)\n is evaluated to obtain a context manager.\n\n2. The context manager\'s ``__exit__()`` is loaded for later use.\n\n3. The context manager\'s ``__enter__()`` method is invoked.\n\n4. If a target was included in the ``with`` statement, the return\n value from ``__enter__()`` is assigned to it.\n\n Note: The ``with`` statement guarantees that if the ``__enter__()``\n method returns without an error, then ``__exit__()`` will always\n be called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s ``__exit__()`` method is invoked. 
If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to ``__exit__()``. Otherwise,\n three ``None`` arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the ``__exit__()`` method was false, the exception is\n reraised. If the return value was true, the exception is\n suppressed, and execution continues with the statement following\n the ``with`` statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from ``__exit__()`` is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple ``with`` statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nNote: In Python 2.5, the ``with`` statement is only allowed when the\n ``with_statement`` feature has been enabled. It is always enabled\n in Python 2.6.\n\nChanged in version 2.7: Support for multiple context expressions.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n\n\nFunction definitions\n====================\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n decorated ::= decorators (classdef | funcdef)\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [argument_list [","]] ")"] NEWLINE\n funcdef ::= "def" funcname "(" [parameter_list] ")" ":" suite\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n ( "*" identifier [, "**" identifier]\n | "**" identifier\n | defparameter [","] )\n defparameter ::= parameter ["=" expression]\n sublist ::= parameter ("," parameter)* [","]\n parameter ::= identifier | "(" sublist ")"\n funcname ::= identifier\n\nA function definition is an executable statement. 
Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. For example, the following code:\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to:\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more top-level parameters have the form *parameter* ``=``\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding argument may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters must also have a default value --- this is a syntactic\nrestriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated when the function definition\nis executed.** This means that the expression is evaluated once, when\nthe function is defined, and that that same "pre-computed" value is\nused for each call. This is especially important to understand when a\ndefault parameter is a mutable object, such as a list or a dictionary:\nif the function modifies the object (e.g. by appending an item to a\nlist), the default value is in effect modified. This is generally not\nwhat was intended. 
A way around this is to use ``None`` as the\ndefault, and explicitly test for it in the body of the function, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from position arguments, from\nkeyword arguments, or from default values. If the form\n"``*identifier``" is present, it is initialized to a tuple receiving\nany excess positional parameters, defaulting to the empty tuple. If\nthe form "``**identifier``" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda forms,\ndescribed in section *Lambdas*. Note that the lambda form is merely a\nshorthand for a simplified function definition; a function defined in\na "``def``" statement can be passed around or assigned to another name\njust like a function defined by a lambda form. The "``def``" form is\nactually more powerful since it allows the execution of multiple\nstatements.\n\n**Programmer\'s note:** Functions are first-class objects. A "``def``"\nform executed inside a function definition defines a local function\nthat can be returned or passed around. Free variables used in the\nnested function can access the local variables of the function\ncontaining the def. See section *Naming and binding* for details.\n\n\nClass definitions\n=================\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n classdef ::= "class" classname [inheritance] ":" suite\n inheritance ::= "(" [expression_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. 
It first evaluates the\ninheritance list, if present. Each item in the inheritance list\nshould evaluate to a class object or class type which allows\nsubclassing. The class\'s suite is then executed in a new execution\nframe (see section *Naming and binding*), using a newly created local\nnamespace and the original global namespace. (Usually, the suite\ncontains only function definitions.) When the class\'s suite finishes\nexecution, its execution frame is discarded but its local namespace is\nsaved. [4] A class object is then created using the inheritance list\nfor the base classes and the saved local namespace for the attribute\ndictionary. The class name is bound to this class object in the\noriginal local namespace.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass variables; they are shared by all instances. To create instance\nvariables, they can be set in a method with ``self.name = value``.\nBoth class and instance variables are accessible through the notation\n"``self.name``", and an instance variable hides a class variable with\nthe same name when accessed in this way. Class variables can be used\nas defaults for instance variables, but using mutable values there can\nlead to unexpected results. For *new-style class*es, descriptors can\nbe used to create instance variables with different implementation\ndetails.\n\nClass definitions, like function definitions, may be wrapped by one or\nmore *decorator* expressions. The evaluation rules for the decorator\nexpressions are the same as for functions. 
The result must be a class\nobject, which is then bound to the class name.\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack only if there\n is no ``finally`` clause that negates the exception.\n\n[2] Currently, control "flows off the end" except in the case of an\n exception or the execution of a ``return``, ``continue``, or\n ``break`` statement.\n\n[3] A string literal appearing as the first statement in the function\n body is transformed into the function\'s ``__doc__`` attribute and\n therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s ``__doc__`` item and\n therefore the class\'s *docstring*.\n', 'context-managers': u'\nWith Statement Context Managers\n*******************************\n\nNew in version 2.5.\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a ``with`` statement. The context\nmanager handles the entry into, and the exit from, the desired runtime\ncontext for the execution of the block of code. Context managers are\nnormally invoked using the ``with`` statement (described in section\n*The with statement*), but can also be used by directly invoking their\nmethods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The ``with``\n statement will bind this method\'s return value to the target(s)\n specified in the ``as`` clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. 
If the\n context was exited without an exception, all three arguments will\n be ``None``.\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that ``__exit__()`` methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n', 'continue': u'\nThe ``continue`` statement\n**************************\n\n continue_stmt ::= "continue"\n\n``continue`` may only occur syntactically nested in a ``for`` or\n``while`` loop, but not nested in a function or class definition or\n``finally`` clause within that loop. It continues with the next cycle\nof the nearest enclosing loop.\n\nWhen ``continue`` passes control out of a ``try`` statement with a\n``finally`` clause, that ``finally`` clause is executed before really\nstarting the next loop cycle.\n', 'conversions': u'\nArithmetic conversions\n**********************\n\nWhen a description of an arithmetic operator below uses the phrase\n"the numeric arguments are converted to a common type," the arguments\nare coerced using the coercion rules listed at *Coercion rules*. If\nboth arguments are standard numeric types, the following coercions are\napplied:\n\n* If either argument is a complex number, the other is converted to\n complex;\n\n* otherwise, if either argument is a floating point number, the other\n is converted to floating point;\n\n* otherwise, if either argument is a long integer, the other is\n converted to long integer;\n\n* otherwise, both must be plain integers and no conversion is\n necessary.\n\nSome additional rules apply for certain operators (e.g., a string left\nargument to the \'%\' operator). 
Extensions can define their own\ncoercions.\n', 'customization': u'\nBasic customization\n*******************\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. ``__new__()`` is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). The return\n value of ``__new__()`` should be the new object instance (usually\n an instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s ``__new__()`` method using\n ``super(currentclass, cls).__new__(cls[, ...])`` with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If ``__new__()`` returns an instance of *cls*, then the new\n instance\'s ``__init__()`` method will be invoked like\n ``__init__(self[, ...])``, where *self* is the new instance and the\n remaining arguments are the same as were passed to ``__new__()``.\n\n If ``__new__()`` does not return an instance of *cls*, then the new\n instance\'s ``__init__()`` method will not be invoked.\n\n ``__new__()`` is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called when the instance is created. The arguments are those\n passed to the class constructor expression. If a base class has an\n ``__init__()`` method, the derived class\'s ``__init__()`` method,\n if any, must explicitly call it to ensure proper initialization of\n the base class part of the instance; for example:\n ``BaseClass.__init__(self, [args...])``. 
As a special constraint\n on constructors, no value may be returned; doing so will cause a\n ``TypeError`` to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a ``__del__()`` method,\n the derived class\'s ``__del__()`` method, if any, must explicitly\n call it to ensure proper deletion of the base class part of the\n instance. Note that it is possible (though not recommended!) for\n the ``__del__()`` method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n ``__del__()`` methods are called for objects that still exist when\n the interpreter exits.\n\n Note: ``del x`` doesn\'t directly call ``x.__del__()`` --- the former\n decrements the reference count for ``x`` by one, and the latter\n is only called when ``x``\'s reference count reaches zero. Some\n common situations that may prevent the reference count of an\n object from going to zero include: circular references between\n objects (e.g., a doubly-linked list or a tree data structure with\n parent and child pointers); a reference to the object on the\n stack frame of a function that caught an exception (the traceback\n stored in ``sys.exc_traceback`` keeps the stack frame alive); or\n a reference to the object on the stack frame that raised an\n unhandled exception in interactive mode (the traceback stored in\n ``sys.last_traceback`` keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing ``None`` in\n ``sys.exc_traceback`` or ``sys.last_traceback``. 
Circular\n references which are garbage are detected when the option cycle\n detector is enabled (it\'s on by default), but can only be cleaned\n up if there are no Python-level ``__del__()`` methods involved.\n Refer to the documentation for the ``gc`` module for more\n information about how ``__del__()`` methods are handled by the\n cycle detector, particularly the description of the ``garbage``\n value.\n\n Warning: Due to the precarious circumstances under which ``__del__()``\n methods are invoked, exceptions that occur during their execution\n are ignored, and a warning is printed to ``sys.stderr`` instead.\n Also, when ``__del__()`` is invoked in response to a module being\n deleted (e.g., when execution of the program is done), other\n globals referenced by the ``__del__()`` method may already have\n been deleted or in the process of being torn down (e.g. the\n import machinery shutting down). For this reason, ``__del__()``\n methods should do the absolute minimum needed to maintain\n external invariants. Starting with version 1.5, Python\n guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the ``__del__()`` method is called.\n\nobject.__repr__(self)\n\n Called by the ``repr()`` built-in function and by string\n conversions (reverse quotes) to compute the "official" string\n representation of an object. If at all possible, this should look\n like a valid Python expression that could be used to recreate an\n object with the same value (given an appropriate environment). If\n this is not possible, a string of the form ``<...some useful\n description...>`` should be returned. The return value must be a\n string object. 
If a class defines ``__repr__()`` but not\n ``__str__()``, then ``__repr__()`` is also used when an "informal"\n string representation of instances of that class is required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by the ``str()`` built-in function and by the ``print``\n statement to compute the "informal" string representation of an\n object. This differs from ``__repr__()`` in that it does not have\n to be a valid Python expression: a more convenient or concise\n representation may be used instead. The return value must be a\n string object.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n New in version 2.1.\n\n These are the so-called "rich comparison" methods, and are called\n for comparison operators in preference to ``__cmp__()`` below. The\n correspondence between operator symbols and method names is as\n follows: ``x<y`` calls ``x.__lt__(y)``, ``x<=y`` calls\n ``x.__le__(y)``, ``x==y`` calls ``x.__eq__(y)``, ``x!=y`` and\n ``x<>y`` call ``x.__ne__(y)``, ``x>y`` calls ``x.__gt__(y)``, and\n ``x>=y`` calls ``x.__ge__(y)``.\n\n A rich comparison method may return the singleton\n ``NotImplemented`` if it does not implement the operation for a\n given pair of arguments. By convention, ``False`` and ``True`` are\n returned for a successful comparison. However, these methods can\n return any value, so if the comparison operator is used in a\n Boolean context (e.g., in the condition of an ``if`` statement),\n Python will call ``bool()`` on the value to determine if the result\n is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of ``x==y`` does not imply that ``x!=y`` is false.\n Accordingly, when defining ``__eq__()``, one should also define\n ``__ne__()`` so that the operators will behave as expected. 
See\n the paragraph on ``__hash__()`` for some important notes on\n creating *hashable* objects which support custom comparison\n operations and are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, ``__lt__()`` and ``__gt__()`` are each\n other\'s reflection, ``__le__()`` and ``__ge__()`` are each other\'s\n reflection, and ``__eq__()`` and ``__ne__()`` are their own\n reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see ``functools.total_ordering()``.\n\nobject.__cmp__(self, other)\n\n Called by comparison operations if rich comparison (see above) is\n not defined. Should return a negative integer if ``self < other``,\n zero if ``self == other``, a positive integer if ``self > other``.\n If no ``__cmp__()``, ``__eq__()`` or ``__ne__()`` operation is\n defined, class instances are compared by object identity\n ("address"). See also the description of ``__hash__()`` for some\n important notes on creating *hashable* objects which support custom\n comparison operations and are usable as dictionary keys. (Note: the\n restriction that exceptions are not propagated by ``__cmp__()`` has\n been removed since Python 1.5.)\n\nobject.__rcmp__(self, other)\n\n Changed in version 2.1: No longer supported.\n\nobject.__hash__(self)\n\n Called by built-in function ``hash()`` and for operations on\n members of hashed collections including ``set``, ``frozenset``, and\n ``dict``. ``__hash__()`` should return an integer. The only\n required property is that objects which compare equal have the same\n hash value; it is advised to somehow mix together (e.g. 
using\n exclusive or) the hash values for the components of the object that\n also play a part in comparison of objects.\n\n If a class does not define a ``__cmp__()`` or ``__eq__()`` method\n it should not define a ``__hash__()`` operation either; if it\n defines ``__cmp__()`` or ``__eq__()`` but not ``__hash__()``, its\n instances will not be usable in hashed collections. If a class\n defines mutable objects and implements a ``__cmp__()`` or\n ``__eq__()`` method, it should not implement ``__hash__()``, since\n hashable collection implementations require that a object\'s hash\n value is immutable (if the object\'s hash value changes, it will be\n in the wrong hash bucket).\n\n User-defined classes have ``__cmp__()`` and ``__hash__()`` methods\n by default; with them, all objects compare unequal (except with\n themselves) and ``x.__hash__()`` returns ``id(x)``.\n\n Classes which inherit a ``__hash__()`` method from a parent class\n but change the meaning of ``__cmp__()`` or ``__eq__()`` such that\n the hash value returned is no longer appropriate (e.g. 
by switching\n to a value-based concept of equality instead of the default\n identity based equality) can explicitly flag themselves as being\n unhashable by setting ``__hash__ = None`` in the class definition.\n Doing so means that not only will instances of the class raise an\n appropriate ``TypeError`` when a program attempts to retrieve their\n hash value, but they will also be correctly identified as\n unhashable when checking ``isinstance(obj, collections.Hashable)``\n (unlike classes which define their own ``__hash__()`` to explicitly\n raise ``TypeError``).\n\n Changed in version 2.5: ``__hash__()`` may now also return a long\n integer object; the 32-bit integer is then derived from the hash of\n that object.\n\n Changed in version 2.6: ``__hash__`` may now be set to ``None`` to\n explicitly flag instances of a class as unhashable.\n\nobject.__nonzero__(self)\n\n Called to implement truth value testing and the built-in operation\n ``bool()``; should return ``False`` or ``True``, or their integer\n equivalents ``0`` or ``1``. When this method is not defined,\n ``__len__()`` is called, if it is defined, and the object is\n considered true if its result is nonzero. If a class defines\n neither ``__len__()`` nor ``__nonzero__()``, all its instances are\n considered true.\n\nobject.__unicode__(self)\n\n Called to implement ``unicode()`` built-in; should return a Unicode\n object. When this method is not defined, string conversion is\n attempted, and the result of string conversion is converted to\n Unicode using the system default encoding.\n', - 'debugger': u'\n``pdb`` --- The Python Debugger\n*******************************\n\nThe module ``pdb`` defines an interactive source code debugger for\nPython programs. It supports setting (conditional) breakpoints and\nsingle stepping at the source line level, inspection of stack frames,\nsource code listing, and evaluation of arbitrary Python code in the\ncontext of any stack frame. 
It also supports post-mortem debugging\nand can be called under program control.\n\nThe debugger is extensible --- it is actually defined as the class\n``Pdb``. This is currently undocumented but easily understood by\nreading the source. The extension interface uses the modules ``bdb``\nand ``cmd``.\n\nThe debugger\'s prompt is ``(Pdb)``. Typical usage to run a program\nunder control of the debugger is:\n\n >>> import pdb\n >>> import mymodule\n >>> pdb.run(\'mymodule.test()\')\n > <string>(0)?()\n (Pdb) continue\n > <string>(1)?()\n (Pdb) continue\n NameError: \'spam\'\n > <string>(1)?()\n (Pdb)\n\n``pdb.py`` can also be invoked as a script to debug other scripts.\nFor example:\n\n python -m pdb myscript.py\n\nWhen invoked as a script, pdb will automatically enter post-mortem\ndebugging if the program being debugged exits abnormally. After post-\nmortem debugging (or after normal exit of the program), pdb will\nrestart the program. Automatic restarting preserves pdb\'s state (such\nas breakpoints) and in most cases is more useful than quitting the\ndebugger upon program\'s exit.\n\nNew in version 2.4: Restarting post-mortem behavior added.\n\nThe typical usage to break into the debugger from a running program is\nto insert\n\n import pdb; pdb.set_trace()\n\nat the location you want to break into the debugger. 
You can then\nstep through the code following this statement, and continue running\nwithout the debugger using the ``c`` command.\n\nThe typical usage to inspect a crashed program is:\n\n >>> import pdb\n >>> import mymodule\n >>> mymodule.test()\n Traceback (most recent call last):\n File "", line 1, in ?\n File "./mymodule.py", line 4, in test\n test2()\n File "./mymodule.py", line 3, in test2\n print spam\n NameError: spam\n >>> pdb.pm()\n > ./mymodule.py(3)test2()\n -> print spam\n (Pdb)\n\nThe module defines the following functions; each enters the debugger\nin a slightly different way:\n\npdb.run(statement[, globals[, locals]])\n\n Execute the *statement* (given as a string) under debugger control.\n The debugger prompt appears before any code is executed; you can\n set breakpoints and type ``continue``, or you can step through the\n statement using ``step`` or ``next`` (all these commands are\n explained below). The optional *globals* and *locals* arguments\n specify the environment in which the code is executed; by default\n the dictionary of the module ``__main__`` is used. (See the\n explanation of the ``exec`` statement or the ``eval()`` built-in\n function.)\n\npdb.runeval(expression[, globals[, locals]])\n\n Evaluate the *expression* (given as a string) under debugger\n control. When ``runeval()`` returns, it returns the value of the\n expression. Otherwise this function is similar to ``run()``.\n\npdb.runcall(function[, argument, ...])\n\n Call the *function* (a function or method object, not a string)\n with the given arguments. When ``runcall()`` returns, it returns\n whatever the function call returned. The debugger prompt appears\n as soon as the function is entered.\n\npdb.set_trace()\n\n Enter the debugger at the calling stack frame. This is useful to\n hard-code a breakpoint at a given point in a program, even if the\n code is not otherwise being debugged (e.g. 
when an assertion\n fails).\n\npdb.post_mortem([traceback])\n\n Enter post-mortem debugging of the given *traceback* object. If no\n *traceback* is given, it uses the one of the exception that is\n currently being handled (an exception must be being handled if the\n default is to be used).\n\npdb.pm()\n\n Enter post-mortem debugging of the traceback found in\n ``sys.last_traceback``.\n\nThe ``run_*`` functions and ``set_trace()`` are aliases for\ninstantiating the ``Pdb`` class and calling the method of the same\nname. If you want to access further features, you have to do this\nyourself:\n\nclass class pdb.Pdb(completekey=\'tab\', stdin=None, stdout=None, skip=None)\n\n ``Pdb`` is the debugger class.\n\n The *completekey*, *stdin* and *stdout* arguments are passed to the\n underlying ``cmd.Cmd`` class; see the description there.\n\n The *skip* argument, if given, must be an iterable of glob-style\n module name patterns. The debugger will not step into frames that\n originate in a module that matches one of these patterns. [1]\n\n Example call to enable tracing with *skip*:\n\n import pdb; pdb.Pdb(skip=[\'django.*\']).set_trace()\n\n New in version 2.7: The *skip* argument.\n\n run(statement[, globals[, locals]])\n runeval(expression[, globals[, locals]])\n runcall(function[, argument, ...])\n set_trace()\n\n See the documentation for the functions explained above.\n', + 'debugger': u'\n``pdb`` --- The Python Debugger\n*******************************\n\nThe module ``pdb`` defines an interactive source code debugger for\nPython programs. It supports setting (conditional) breakpoints and\nsingle stepping at the source line level, inspection of stack frames,\nsource code listing, and evaluation of arbitrary Python code in the\ncontext of any stack frame. It also supports post-mortem debugging\nand can be called under program control.\n\nThe debugger is extensible --- it is actually defined as the class\n``Pdb``. 
This is currently undocumented but easily understood by\nreading the source. The extension interface uses the modules ``bdb``\nand ``cmd``.\n\nThe debugger\'s prompt is ``(Pdb)``. Typical usage to run a program\nunder control of the debugger is:\n\n >>> import pdb\n >>> import mymodule\n >>> pdb.run(\'mymodule.test()\')\n > (0)?()\n (Pdb) continue\n > (1)?()\n (Pdb) continue\n NameError: \'spam\'\n > (1)?()\n (Pdb)\n\n``pdb.py`` can also be invoked as a script to debug other scripts.\nFor example:\n\n python -m pdb myscript.py\n\nWhen invoked as a script, pdb will automatically enter post-mortem\ndebugging if the program being debugged exits abnormally. After post-\nmortem debugging (or after normal exit of the program), pdb will\nrestart the program. Automatic restarting preserves pdb\'s state (such\nas breakpoints) and in most cases is more useful than quitting the\ndebugger upon program\'s exit.\n\nNew in version 2.4: Restarting post-mortem behavior added.\n\nThe typical usage to break into the debugger from a running program is\nto insert\n\n import pdb; pdb.set_trace()\n\nat the location you want to break into the debugger. 
You can then\nstep through the code following this statement, and continue running\nwithout the debugger using the ``c`` command.\n\nThe typical usage to inspect a crashed program is:\n\n >>> import pdb\n >>> import mymodule\n >>> mymodule.test()\n Traceback (most recent call last):\n File "", line 1, in ?\n File "./mymodule.py", line 4, in test\n test2()\n File "./mymodule.py", line 3, in test2\n print spam\n NameError: spam\n >>> pdb.pm()\n > ./mymodule.py(3)test2()\n -> print spam\n (Pdb)\n\nThe module defines the following functions; each enters the debugger\nin a slightly different way:\n\npdb.run(statement[, globals[, locals]])\n\n Execute the *statement* (given as a string) under debugger control.\n The debugger prompt appears before any code is executed; you can\n set breakpoints and type ``continue``, or you can step through the\n statement using ``step`` or ``next`` (all these commands are\n explained below). The optional *globals* and *locals* arguments\n specify the environment in which the code is executed; by default\n the dictionary of the module ``__main__`` is used. (See the\n explanation of the ``exec`` statement or the ``eval()`` built-in\n function.)\n\npdb.runeval(expression[, globals[, locals]])\n\n Evaluate the *expression* (given as a string) under debugger\n control. When ``runeval()`` returns, it returns the value of the\n expression. Otherwise this function is similar to ``run()``.\n\npdb.runcall(function[, argument, ...])\n\n Call the *function* (a function or method object, not a string)\n with the given arguments. When ``runcall()`` returns, it returns\n whatever the function call returned. The debugger prompt appears\n as soon as the function is entered.\n\npdb.set_trace()\n\n Enter the debugger at the calling stack frame. This is useful to\n hard-code a breakpoint at a given point in a program, even if the\n code is not otherwise being debugged (e.g. 
when an assertion\n fails).\n\npdb.post_mortem([traceback])\n\n Enter post-mortem debugging of the given *traceback* object. If no\n *traceback* is given, it uses the one of the exception that is\n currently being handled (an exception must be being handled if the\n default is to be used).\n\npdb.pm()\n\n Enter post-mortem debugging of the traceback found in\n ``sys.last_traceback``.\n\nThe ``run*`` functions and ``set_trace()`` are aliases for\ninstantiating the ``Pdb`` class and calling the method of the same\nname. If you want to access further features, you have to do this\nyourself:\n\nclass class pdb.Pdb(completekey=\'tab\', stdin=None, stdout=None, skip=None)\n\n ``Pdb`` is the debugger class.\n\n The *completekey*, *stdin* and *stdout* arguments are passed to the\n underlying ``cmd.Cmd`` class; see the description there.\n\n The *skip* argument, if given, must be an iterable of glob-style\n module name patterns. The debugger will not step into frames that\n originate in a module that matches one of these patterns. [1]\n\n Example call to enable tracing with *skip*:\n\n import pdb; pdb.Pdb(skip=[\'django.*\']).set_trace()\n\n New in version 2.7: The *skip* argument.\n\n run(statement[, globals[, locals]])\n runeval(expression[, globals[, locals]])\n runcall(function[, argument, ...])\n set_trace()\n\n See the documentation for the functions explained above.\n', 'del': u'\nThe ``del`` statement\n*********************\n\n del_stmt ::= "del" target_list\n\nDeletion is recursively defined very similar to the way assignment is\ndefined. Rather that spelling it out in full details, here are some\nhints.\n\nDeletion of a target list recursively deletes each target, from left\nto right.\n\nDeletion of a name removes the binding of that name from the local or\nglobal namespace, depending on whether the name occurs in a ``global``\nstatement in the same code block. 
If the name is unbound, a\n``NameError`` exception will be raised.\n\nIt is illegal to delete a name from the local namespace if it occurs\nas a free variable in a nested block.\n\nDeletion of attribute references, subscriptions and slicings is passed\nto the primary object involved; deletion of a slicing is in general\nequivalent to assignment of an empty slice of the right type (but even\nthis is determined by the sliced object).\n', 'dict': u'\nDictionary displays\n*******************\n\nA dictionary display is a possibly empty series of key/datum pairs\nenclosed in curly braces:\n\n dict_display ::= "{" [key_datum_list | dict_comprehension] "}"\n key_datum_list ::= key_datum ("," key_datum)* [","]\n key_datum ::= expression ":" expression\n dict_comprehension ::= expression ":" expression comp_for\n\nA dictionary display yields a new dictionary object.\n\nIf a comma-separated sequence of key/datum pairs is given, they are\nevaluated from left to right to define the entries of the dictionary:\neach key object is used as a key into the dictionary to store the\ncorresponding datum. This means that you can specify the same key\nmultiple times in the key/datum list, and the final dictionary\'s value\nfor that key will be the last one given.\n\nA dict comprehension, in contrast to list and set comprehensions,\nneeds two expressions separated with a colon followed by the usual\n"for" and "if" clauses. When the comprehension is run, the resulting\nkey and value elements are inserted in the new dictionary in the order\nthey are produced.\n\nRestrictions on the types of the key values are listed earlier in\nsection *The standard type hierarchy*. (To summarize, the key type\nshould be *hashable*, which excludes all mutable objects.) 
Clashes\nbetween duplicate keys are not detected; the last datum (textually\nrightmost in the display) stored for a given key value prevails.\n', 'dynamic-features': u'\nInteraction with dynamic features\n*********************************\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- ``import *`` --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a ``SyntaxError``.\n\nIf ``exec`` is used in a function and the function contains or is a\nnested block with free variables, the compiler will raise a\n``SyntaxError`` unless the exec explicitly specifies the local\nnamespace for the ``exec``. (In other words, ``exec obj`` would be\nillegal, but ``exec obj in ns`` would be legal.)\n\nThe ``eval()``, ``execfile()``, and ``input()`` functions and the\n``exec`` statement do not have access to the full environment for\nresolving names. Names may be resolved in the local and global\nnamespaces of the caller. Free variables are not resolved in the\nnearest enclosing namespace, but in the global namespace. 
[1] The\n``exec`` statement and the ``eval()`` and ``execfile()`` functions\nhave optional arguments to override the global and local namespace.\nIf only one namespace is specified, it is used for both.\n', 'else': u'\nThe ``if`` statement\n********************\n\nThe ``if`` statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the ``if`` statement is executed or evaluated).\nIf all expressions are false, the suite of the ``else`` clause, if\npresent, is executed.\n', 'exceptions': u'\nExceptions\n**********\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). A Python program can also\nexplicitly raise an exception with the ``raise`` statement. Exception\nhandlers are specified with the ``try`` ... ``except`` statement. 
The\n``finally`` clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. In\neither case, it prints a stack backtrace, except when the exception is\n``SystemExit``.\n\nExceptions are identified by class instances. The ``except`` clause\nis selected depending on the class of the instance: it must reference\nthe class of the instance or a base class thereof. The instance can\nbe received by the handler and can carry additional information about\nthe exceptional condition.\n\nExceptions can also be identified by strings, in which case the\n``except`` clause is selected by object identity. An arbitrary value\ncan be raised along with the identifying string which can be passed to\nthe handler.\n\nNote: Messages to exceptions are not part of the Python API. Their\n contents may change from one version of Python to the next without\n warning and should not be relied on by code which will run under\n multiple versions of the interpreter.\n\nSee also the description of the ``try`` statement in section *The try\nstatement* and ``raise`` statement in section *The raise statement*.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by these\n operations is not available at the time the module is compiled.\n', 'exec': u'\nThe ``exec`` statement\n**********************\n\n exec_stmt ::= "exec" or_expr ["in" expression ["," expression]]\n\nThis statement supports dynamic execution of Python code. 
The first\nexpression should evaluate to either a string, an open file object, or\na code object. If it is a string, the string is parsed as a suite of\nPython statements which is then executed (unless a syntax error\noccurs). [1] If it is an open file, the file is parsed until EOF and\nexecuted. If it is a code object, it is simply executed. In all\ncases, the code that\'s executed is expected to be valid as file input\n(see section *File input*). Be aware that the ``return`` and\n``yield`` statements may not be used outside of function definitions\neven within the context of code passed to the ``exec`` statement.\n\nIn all cases, if the optional parts are omitted, the code is executed\nin the current scope. If only the first expression after ``in`` is\nspecified, it should be a dictionary, which will be used for both the\nglobal and the local variables. If two expressions are given, they\nare used for the global and local variables, respectively. If\nprovided, *locals* can be any mapping object.\n\nChanged in version 2.4: Formerly, *locals* was required to be a\ndictionary.\n\nAs a side effect, an implementation may insert additional keys into\nthe dictionaries given besides those corresponding to variable names\nset by the executed code. For example, the current implementation may\nadd a reference to the dictionary of the built-in module\n``__builtin__`` under the key ``__builtins__`` (!).\n\n**Programmer\'s hints:** dynamic evaluation of expressions is supported\nby the built-in function ``eval()``. The built-in functions\n``globals()`` and ``locals()`` return the current global and local\ndictionary, respectively, which may be useful to pass around for use\nby ``exec``.\n\n-[ Footnotes ]-\n\n[1] Note that the parser only accepts the Unix-style end of line\n convention. 
If you are reading the code from a file, make sure to\n use universal newline mode to convert Windows or Mac-style\n newlines.\n', - 'execmodel': u'\nExecution model\n***************\n\n\nNaming and binding\n==================\n\n*Names* refer to objects. Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block. A script\nfile (a file given as standard input to the interpreter or specified\non the interpreter command line the first argument) is a code block.\nA script command (a command specified on the interpreter command line\nwith the \'**-c**\' option) is a code block. The file read by the\nbuilt-in function ``execfile()`` is a code block. The string argument\npassed to the built-in function ``eval()`` and to the ``exec``\nstatement is a code block. The expression read and evaluated by the\nbuilt-in function ``input()`` is a code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block\'s execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name. The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes generator expressions since\nthey are implemented using a function scope. 
This means that the\nfollowing will fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. The set of all such scopes visible to a code block\nis called the block\'s *environment*.\n\nIf a name is bound in a block, it is a local variable of that block.\nIf a name is bound at the module level, it is a global variable. (The\nvariables of the module code block are local and global.) If a\nvariable is used in a code block but not defined there, it is a *free\nvariable*.\n\nWhen a name is not found at all, a ``NameError`` exception is raised.\nIf the name refers to a local variable that has not been bound, a\n``UnboundLocalError`` exception is raised. ``UnboundLocalError`` is a\nsubclass of ``NameError``.\n\nThe following constructs bind names: formal parameters to functions,\n``import`` statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, ``for`` loop header, in the\nsecond position of an ``except`` clause header or after ``as`` in a\n``with`` statement. The ``import`` statement of the form ``from ...\nimport *`` binds all names defined in the imported module, except\nthose beginning with an underscore. This form may only be used at the\nmodule level.\n\nA target occurring in a ``del`` statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name). It\nis illegal to unbind a name that is referenced by an enclosing scope;\nthe compiler will report a ``SyntaxError``.\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. 
This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the global statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace. Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. the namespace of the module\ncontaining the code block, and the builtins namespace, the namespace\nof the module ``__builtin__``. The global namespace is searched\nfirst. If the name is not found there, the builtins namespace is\nsearched. The global statement must precede all uses of the name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name ``__builtins__`` in its\nglobal namespace; this should be a dictionary or a module (in the\nlatter case the module\'s dictionary is used). By default, when in the\n``__main__`` module, ``__builtins__`` is the built-in module\n``__builtin__`` (note: no \'s\'); when in any other module,\n``__builtins__`` is an alias for the dictionary of the ``__builtin__``\nmodule itself. ``__builtins__`` can be set to a user-created\ndictionary to create a weak form of restricted execution.\n\n**CPython implementation detail:** Users should not touch\n``__builtins__``; it is strictly an implementation detail. Users\nwanting to override values in the builtins namespace should ``import``\nthe ``__builtin__`` (no \'s\') module and modify its attributes\nappropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n``__main__``.\n\nThe global statement has the same scope as a name binding operation in\nthe same block. 
If the nearest enclosing scope for a free variable\ncontains a global statement, the free variable is treated as a global.\n\nA class definition is an executable statement that may use and define\nnames. These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class. Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n---------------------------------\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- ``import *`` --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a ``SyntaxError``.\n\nIf ``exec`` is used in a function and the function contains or is a\nnested block with free variables, the compiler will raise a\n``SyntaxError`` unless the exec explicitly specifies the local\nnamespace for the ``exec``. (In other words, ``exec obj`` would be\nillegal, but ``exec obj in ns`` would be legal.)\n\nThe ``eval()``, ``execfile()``, and ``input()`` functions and the\n``exec`` statement do not have access to the full environment for\nresolving names. Names may be resolved in the local and global\nnamespaces of the caller. Free variables are not resolved in the\nnearest enclosing namespace, but in the global namespace. [1] The\n``exec`` statement and the ``eval()`` and ``execfile()`` functions\nhave optional arguments to override the global and local namespace.\nIf only one namespace is specified, it is used for both.\n\n\nExceptions\n==========\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. 
An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). A Python program can also\nexplicitly raise an exception with the ``raise`` statement. Exception\nhandlers are specified with the ``try`` ... ``except`` statement. The\n``finally`` clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. In\neither case, it prints a stack backtrace, except when the exception is\n``SystemExit``.\n\nExceptions are identified by class instances. The ``except`` clause\nis selected depending on the class of the instance: it must reference\nthe class of the instance or a base class thereof. The instance can\nbe received by the handler and can carry additional information about\nthe exceptional condition.\n\nExceptions can also be identified by strings, in which case the\n``except`` clause is selected by object identity. An arbitrary value\ncan be raised along with the identifying string which can be passed to\nthe handler.\n\nNote: Messages to exceptions are not part of the Python API. 
Their\n contents may change from one version of Python to the next without\n warning and should not be relied on by code which will run under\n multiple versions of the interpreter.\n\nSee also the description of the ``try`` statement in section *The try\nstatement* and ``raise`` statement in section *The raise statement*.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by these\n operations is not available at the time the module is compiled.\n', + 'execmodel': u'\nExecution model\n***************\n\n\nNaming and binding\n==================\n\n*Names* refer to objects. Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block. A script\nfile (a file given as standard input to the interpreter or specified\non the interpreter command line the first argument) is a code block.\nA script command (a command specified on the interpreter command line\nwith the \'**-c**\' option) is a code block. The file read by the\nbuilt-in function ``execfile()`` is a code block. The string argument\npassed to the built-in function ``eval()`` and to the ``exec``\nstatement is a code block. The expression read and evaluated by the\nbuilt-in function ``input()`` is a code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block\'s execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. 
If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name. The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes generator expressions since\nthey are implemented using a function scope. This means that the\nfollowing will fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. The set of all such scopes visible to a code block\nis called the block\'s *environment*.\n\nIf a name is bound in a block, it is a local variable of that block.\nIf a name is bound at the module level, it is a global variable. (The\nvariables of the module code block are local and global.) If a\nvariable is used in a code block but not defined there, it is a *free\nvariable*.\n\nWhen a name is not found at all, a ``NameError`` exception is raised.\nIf the name refers to a local variable that has not been bound, a\n``UnboundLocalError`` exception is raised. ``UnboundLocalError`` is a\nsubclass of ``NameError``.\n\nThe following constructs bind names: formal parameters to functions,\n``import`` statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, ``for`` loop header, in the\nsecond position of an ``except`` clause header or after ``as`` in a\n``with`` statement. The ``import`` statement of the form ``from ...\nimport *`` binds all names defined in the imported module, except\nthose beginning with an underscore. This form may only be used at the\nmodule level.\n\nA target occurring in a ``del`` statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name). 
It\nis illegal to unbind a name that is referenced by an enclosing scope;\nthe compiler will report a ``SyntaxError``.\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the global statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace. Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. the namespace of the module\ncontaining the code block, and the builtins namespace, the namespace\nof the module ``__builtin__``. The global namespace is searched\nfirst. If the name is not found there, the builtins namespace is\nsearched. The global statement must precede all uses of the name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name ``__builtins__`` in its\nglobal namespace; this should be a dictionary or a module (in the\nlatter case the module\'s dictionary is used). By default, when in the\n``__main__`` module, ``__builtins__`` is the built-in module\n``__builtin__`` (note: no \'s\'); when in any other module,\n``__builtins__`` is an alias for the dictionary of the ``__builtin__``\nmodule itself. 
``__builtins__`` can be set to a user-created\ndictionary to create a weak form of restricted execution.\n\n**CPython implementation detail:** Users should not touch\n``__builtins__``; it is strictly an implementation detail. Users\nwanting to override values in the builtins namespace should ``import``\nthe ``__builtin__`` (no \'s\') module and modify its attributes\nappropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n``__main__``.\n\nThe ``global`` statement has the same scope as a name binding\noperation in the same block. If the nearest enclosing scope for a\nfree variable contains a global statement, the free variable is\ntreated as a global.\n\nA class definition is an executable statement that may use and define\nnames. These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class. Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n---------------------------------\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- ``import *`` --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a ``SyntaxError``.\n\nIf ``exec`` is used in a function and the function contains or is a\nnested block with free variables, the compiler will raise a\n``SyntaxError`` unless the exec explicitly specifies the local\nnamespace for the ``exec``. 
(In other words, ``exec obj`` would be\nillegal, but ``exec obj in ns`` would be legal.)\n\nThe ``eval()``, ``execfile()``, and ``input()`` functions and the\n``exec`` statement do not have access to the full environment for\nresolving names. Names may be resolved in the local and global\nnamespaces of the caller. Free variables are not resolved in the\nnearest enclosing namespace, but in the global namespace. [1] The\n``exec`` statement and the ``eval()`` and ``execfile()`` functions\nhave optional arguments to override the global and local namespace.\nIf only one namespace is specified, it is used for both.\n\n\nExceptions\n==========\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). A Python program can also\nexplicitly raise an exception with the ``raise`` statement. Exception\nhandlers are specified with the ``try`` ... ``except`` statement. The\n``finally`` clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. 
In\neither case, it prints a stack backtrace, except when the exception is\n``SystemExit``.\n\nExceptions are identified by class instances. The ``except`` clause\nis selected depending on the class of the instance: it must reference\nthe class of the instance or a base class thereof. The instance can\nbe received by the handler and can carry additional information about\nthe exceptional condition.\n\nExceptions can also be identified by strings, in which case the\n``except`` clause is selected by object identity. An arbitrary value\ncan be raised along with the identifying string which can be passed to\nthe handler.\n\nNote: Messages to exceptions are not part of the Python API. Their\n contents may change from one version of Python to the next without\n warning and should not be relied on by code which will run under\n multiple versions of the interpreter.\n\nSee also the description of the ``try`` statement in section *The try\nstatement* and ``raise`` statement in section *The raise statement*.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by these\n operations is not available at the time the module is compiled.\n', 'exprlists': u'\nExpression lists\n****************\n\n expression_list ::= expression ( "," expression )* [","]\n\nAn expression list containing at least one comma yields a tuple. The\nlength of the tuple is the number of expressions in the list. The\nexpressions are evaluated from left to right.\n\nThe trailing comma is required only to create a single tuple (a.k.a. a\n*singleton*); it is optional in all other cases. A single expression\nwithout a trailing comma doesn\'t create a tuple, but rather yields the\nvalue of that expression. 
(To create an empty tuple, use an empty pair\nof parentheses: ``()``.)\n', 'floating': u'\nFloating point literals\n***********************\n\nFloating point literals are described by the following lexical\ndefinitions:\n\n floatnumber ::= pointfloat | exponentfloat\n pointfloat ::= [intpart] fraction | intpart "."\n exponentfloat ::= (intpart | pointfloat) exponent\n intpart ::= digit+\n fraction ::= "." digit+\n exponent ::= ("e" | "E") ["+" | "-"] digit+\n\nNote that the integer and exponent parts of floating point numbers can\nlook like octal integers, but are interpreted using radix 10. For\nexample, ``077e010`` is legal, and denotes the same number as\n``77e10``. The allowed range of floating point literals is\nimplementation-dependent. Some examples of floating point literals:\n\n 3.14 10. .001 1e100 3.14e-10 0e0\n\nNote that numeric literals do not include a sign; a phrase like ``-1``\nis actually an expression composed of the unary operator ``-`` and the\nliteral ``1``.\n', 'for': u'\nThe ``for`` statement\n*********************\n\nThe ``for`` statement is used to iterate over the elements of a\nsequence (such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n``expression_list``. The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments, and then the suite is executed. When the items are\nexhausted (which is immediately when the sequence is empty), the suite\nin the ``else`` clause, if present, is executed, and the loop\nterminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. 
A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ncontinues with the next item, or with the ``else`` clause if there was\nno next item.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nThe target list is not deleted when the loop is finished, but if the\nsequence is empty, it will not have been assigned to at all by the\nloop. Hint: the built-in function ``range()`` returns a sequence of\nintegers suitable to emulate the effect of Pascal\'s ``for i := a to b\ndo``; e.g., ``range(3)`` returns the list ``[0, 1, 2]``.\n\nNote: There is a subtlety when the sequence is being modified by the loop\n (this can only occur for mutable sequences, i.e. lists). An internal\n counter is used to keep track of which item is used next, and this\n is incremented on each iteration. When this counter has reached the\n length of the sequence the loop terminates. This means that if the\n suite deletes the current (or a previous) item from the sequence,\n the next item will be skipped (since it gets the index of the\n current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n', - 'formatstrings': u'\nFormat String Syntax\n********************\n\nThe ``str.format()`` method and the ``Formatter`` class share the same\nsyntax for format strings (although in the case of ``Formatter``,\nsubclasses can define their own format string syntax).\n\nFormat strings contain "replacement fields" surrounded by curly braces\n``{}``. Anything that is not contained in braces is considered literal\ntext, which is copied unchanged to the output. 
If you need to include\na brace character in the literal text, it can be escaped by doubling:\n``{{`` and ``}}``.\n\nThe grammar for a replacement field is as follows:\n\n replacement_field ::= "{" [field_name] ["!" conversion] [":" format_spec] "}"\n field_name ::= arg_name ("." attribute_name | "[" element_index "]")*\n arg_name ::= [identifier | integer]\n attribute_name ::= identifier\n element_index ::= integer | index_string\n index_string ::= +\n conversion ::= "r" | "s"\n format_spec ::= \n\nIn less formal terms, the replacement field can start with a\n*field_name* that specifies the object whose value is to be formatted\nand inserted into the output instead of the replacement field. The\n*field_name* is optionally followed by a *conversion* field, which is\npreceded by an exclamation point ``\'!\'``, and a *format_spec*, which\nis preceded by a colon ``\':\'``. These specify a non-default format\nfor the replacement value.\n\nSee also the *Format Specification Mini-Language* section.\n\nThe *field_name* itself begins with an *arg_name* that is either\neither a number or a keyword. If it\'s a number, it refers to a\npositional argument, and if it\'s a keyword, it refers to a named\nkeyword argument. If the numerical arg_names in a format string are\n0, 1, 2, ... in sequence, they can all be omitted (not just some) and\nthe numbers 0, 1, 2, ... will be automatically inserted in that order.\nThe *arg_name* can be followed by any number of index or attribute\nexpressions. 
An expression of the form ``\'.name\'`` selects the named\nattribute using ``getattr()``, while an expression of the form\n``\'[index]\'`` does an index lookup using ``__getitem__()``.\n\nChanged in version 2.7: The positional argument specifiers can be\nomitted, so ``\'{} {}\'`` is equivalent to ``\'{0} {1}\'``.\n\nSome simple format string examples:\n\n "First, thou shalt count to {0}" # References first positional argument\n "Bring me a {}" # Implicitly references the first positional argument\n "From {} to {}" # Same as "From {0} to {1}"\n "My quest is {name}" # References keyword argument \'name\'\n "Weight in tons {0.weight}" # \'weight\' attribute of first positional arg\n "Units destroyed: {players[0]}" # First element of keyword argument \'players\'.\n\nThe *conversion* field causes a type coercion before formatting.\nNormally, the job of formatting a value is done by the\n``__format__()`` method of the value itself. However, in some cases\nit is desirable to force a type to be formatted as a string,\noverriding its own definition of formatting. By converting the value\nto a string before calling ``__format__()``, the normal formatting\nlogic is bypassed.\n\nTwo conversion flags are currently supported: ``\'!s\'`` which calls\n``str()`` on the value, and ``\'!r\'`` which calls ``repr()``.\n\nSome examples:\n\n "Harold\'s a clever {0!s}" # Calls str() on the argument first\n "Bring out the holy {name!r}" # Calls repr() on the argument first\n\nThe *format_spec* field contains a specification of how the value\nshould be presented, including such details as field width, alignment,\npadding, decimal precision and so on. Each value type can define its\nown "formatting mini-language" or interpretation of the *format_spec*.\n\nMost built-in types support a common formatting mini-language, which\nis described in the next section.\n\nA *format_spec* field can also include nested replacement fields\nwithin it. 
These nested replacement fields can contain only a field\nname; conversion flags and format specifications are not allowed. The\nreplacement fields within the format_spec are substituted before the\n*format_spec* string is interpreted. This allows the formatting of a\nvalue to be dynamically specified.\n\nSee the *Format examples* section for some examples.\n\n\nFormat Specification Mini-Language\n==================================\n\n"Format specifications" are used within replacement fields contained\nwithin a format string to define how individual values are presented\n(see *Format String Syntax*). They can also be passed directly to the\nbuilt-in ``format()`` function. Each formattable type may define how\nthe format specification is to be interpreted.\n\nMost built-in types implement the following options for format\nspecifications, although some of the formatting options are only\nsupported by the numeric types.\n\nA general convention is that an empty format string (``""``) produces\nthe same result as if you had called ``str()`` on the value. A non-\nempty format string typically modifies the result.\n\nThe general form of a *standard format specifier* is:\n\n format_spec ::= [[fill]align][sign][#][0][width][,][.precision][type]\n fill ::= \n align ::= "<" | ">" | "=" | "^"\n sign ::= "+" | "-" | " "\n width ::= integer\n precision ::= integer\n type ::= "b" | "c" | "d" | "e" | "E" | "f" | "F" | "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"\n\nThe *fill* character can be any character other than \'}\' (which\nsignifies the end of the field). The presence of a fill character is\nsignaled by the *next* character, which must be one of the alignment\noptions. 
If the second character of *format_spec* is not a valid\nalignment option, then it is assumed that both the fill character and\nthe alignment option are absent.\n\nThe meaning of the various alignment options is as follows:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | ``\'<\'`` | Forces the field to be left-aligned within the available |\n | | space (this is the default). |\n +-----------+------------------------------------------------------------+\n | ``\'>\'`` | Forces the field to be right-aligned within the available |\n | | space. |\n +-----------+------------------------------------------------------------+\n | ``\'=\'`` | Forces the padding to be placed after the sign (if any) |\n | | but before the digits. This is used for printing fields |\n | | in the form \'+000000120\'. This alignment option is only |\n | | valid for numeric types. |\n +-----------+------------------------------------------------------------+\n | ``\'^\'`` | Forces the field to be centered within the available |\n | | space. |\n +-----------+------------------------------------------------------------+\n\nNote that unless a minimum field width is defined, the field width\nwill always be the same size as the data to fill it, so that the\nalignment option has no meaning in this case.\n\nThe *sign* option is only valid for number types, and can be one of\nthe following:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | ``\'+\'`` | indicates that a sign should be used for both positive as |\n | | well as negative numbers. |\n +-----------+------------------------------------------------------------+\n | ``\'-\'`` | indicates that a sign should be used only for negative |\n | | numbers (this is the default behavior). 
|\n +-----------+------------------------------------------------------------+\n | space | indicates that a leading space should be used on positive |\n | | numbers, and a minus sign on negative numbers. |\n +-----------+------------------------------------------------------------+\n\nThe ``\'#\'`` option is only valid for integers, and only for binary,\noctal, or hexadecimal output. If present, it specifies that the\noutput will be prefixed by ``\'0b\'``, ``\'0o\'``, or ``\'0x\'``,\nrespectively.\n\nThe ``\',\'`` option signals the use of a comma for a thousands\nseparator. For a locale aware separator, use the ``\'n\'`` integer\npresentation type instead.\n\nChanged in version 2.7: Added the ``\',\'`` option (see also **PEP\n378**).\n\n*width* is a decimal integer defining the minimum field width. If not\nspecified, then the field width will be determined by the content.\n\nIf the *width* field is preceded by a zero (``\'0\'``) character, this\nenables zero-padding. This is equivalent to an *alignment* type of\n``\'=\'`` and a *fill* character of ``\'0\'``.\n\nThe *precision* is a decimal number indicating how many digits should\nbe displayed after the decimal point for a floating point value\nformatted with ``\'f\'`` and ``\'F\'``, or before and after the decimal\npoint for a floating point value formatted with ``\'g\'`` or ``\'G\'``.\nFor non-number types the field indicates the maximum field size - in\nother words, how many characters will be used from the field content.\nThe *precision* is not allowed for integer values.\n\nFinally, the *type* determines how the data should be presented.\n\nThe available string presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | ``\'s\'`` | String format. This is the default type for strings and |\n | | may be omitted. 
|\n +-----------+------------------------------------------------------------+\n | None | The same as ``\'s\'``. |\n +-----------+------------------------------------------------------------+\n\nThe available integer presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | ``\'b\'`` | Binary format. Outputs the number in base 2. |\n +-----------+------------------------------------------------------------+\n | ``\'c\'`` | Character. Converts the integer to the corresponding |\n | | unicode character before printing. |\n +-----------+------------------------------------------------------------+\n | ``\'d\'`` | Decimal Integer. Outputs the number in base 10. |\n +-----------+------------------------------------------------------------+\n | ``\'o\'`` | Octal format. Outputs the number in base 8. |\n +-----------+------------------------------------------------------------+\n | ``\'x\'`` | Hex format. Outputs the number in base 16, using lower- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | ``\'X\'`` | Hex format. Outputs the number in base 16, using upper- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | ``\'n\'`` | Number. This is the same as ``\'d\'``, except that it uses |\n | | the current locale setting to insert the appropriate |\n | | number separator characters. |\n +-----------+------------------------------------------------------------+\n | None | The same as ``\'d\'``. |\n +-----------+------------------------------------------------------------+\n\nIn addition to the above presentation types, integers can be formatted\nwith the floating point presentation types listed below (except\n``\'n\'`` and None). 
When doing so, ``float()`` is used to convert the\ninteger to a floating point number before formatting.\n\nThe available presentation types for floating point and decimal values\nare:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | ``\'e\'`` | Exponent notation. Prints the number in scientific |\n | | notation using the letter \'e\' to indicate the exponent. |\n +-----------+------------------------------------------------------------+\n | ``\'E\'`` | Exponent notation. Same as ``\'e\'`` except it uses an upper |\n | | case \'E\' as the separator character. |\n +-----------+------------------------------------------------------------+\n | ``\'f\'`` | Fixed point. Displays the number as a fixed-point number. |\n +-----------+------------------------------------------------------------+\n | ``\'F\'`` | Fixed point. Same as ``\'f\'``. |\n +-----------+------------------------------------------------------------+\n | ``\'g\'`` | General format. For a given precision ``p >= 1``, this |\n | | rounds the number to ``p`` significant digits and then |\n | | formats the result in either fixed-point format or in |\n | | scientific notation, depending on its magnitude. The |\n | | precise rules are as follows: suppose that the result |\n | | formatted with presentation type ``\'e\'`` and precision |\n | | ``p-1`` would have exponent ``exp``. Then if ``-4 <= exp |\n | | < p``, the number is formatted with presentation type |\n | | ``\'f\'`` and precision ``p-1-exp``. Otherwise, the number |\n | | is formatted with presentation type ``\'e\'`` and precision |\n | | ``p-1``. In both cases insignificant trailing zeros are |\n | | removed from the significand, and the decimal point is |\n | | also removed if there are no remaining digits following |\n | | it. 
Postive and negative infinity, positive and negative |\n | | zero, and nans, are formatted as ``inf``, ``-inf``, ``0``, |\n | | ``-0`` and ``nan`` respectively, regardless of the |\n | | precision. A precision of ``0`` is treated as equivalent |\n | | to a precision of ``1``. |\n +-----------+------------------------------------------------------------+\n | ``\'G\'`` | General format. Same as ``\'g\'`` except switches to ``\'E\'`` |\n | | if the number gets too large. The representations of |\n | | infinity and NaN are uppercased, too. |\n +-----------+------------------------------------------------------------+\n | ``\'n\'`` | Number. This is the same as ``\'g\'``, except that it uses |\n | | the current locale setting to insert the appropriate |\n | | number separator characters. |\n +-----------+------------------------------------------------------------+\n | ``\'%\'`` | Percentage. Multiplies the number by 100 and displays in |\n | | fixed (``\'f\'``) format, followed by a percent sign. |\n +-----------+------------------------------------------------------------+\n | None | The same as ``\'g\'``. |\n +-----------+------------------------------------------------------------+\n\n\nFormat examples\n===============\n\nThis section contains examples of the new format syntax and comparison\nwith the old ``%``-formatting.\n\nIn most of the cases the syntax is similar to the old\n``%``-formatting, with the addition of the ``{}`` and with ``:`` used\ninstead of ``%``. 
For example, ``\'%03.2f\'`` can be translated to\n``\'{:03.2f}\'``.\n\nThe new format syntax also supports new and different options, shown\nin the follow examples.\n\nAccessing arguments by position:\n\n >>> \'{0}, {1}, {2}\'.format(\'a\', \'b\', \'c\')\n \'a, b, c\'\n >>> \'{}, {}, {}\'.format(\'a\', \'b\', \'c\') # 2.7+ only\n \'a, b, c\'\n >>> \'{2}, {1}, {0}\'.format(\'a\', \'b\', \'c\')\n \'c, b, a\'\n >>> \'{2}, {1}, {0}\'.format(*\'abc\') # unpacking argument sequence\n \'c, b, a\'\n >>> \'{0}{1}{0}\'.format(\'abra\', \'cad\') # arguments\' indices can be repeated\n \'abracadabra\'\n\nAccessing arguments by name:\n\n >>> \'Coordinates: {latitude}, {longitude}\'.format(latitude=\'37.24N\', longitude=\'-115.81W\')\n \'Coordinates: 37.24N, -115.81W\'\n >>> coord = {\'latitude\': \'37.24N\', \'longitude\': \'-115.81W\'}\n >>> \'Coordinates: {latitude}, {longitude}\'.format(**coord)\n \'Coordinates: 37.24N, -115.81W\'\n\nAccessing arguments\' attributes:\n\n >>> c = 3-5j\n >>> (\'The complex number {0} is formed from the real part {0.real} \'\n ... \'and the imaginary part {0.imag}.\').format(c)\n \'The complex number (3-5j) is formed from the real part 3.0 and the imaginary part -5.0.\'\n >>> class Point(object):\n ... def __init__(self, x, y):\n ... self.x, self.y = x, y\n ... def __str__(self):\n ... 
return \'Point({self.x}, {self.y})\'.format(self=self)\n ...\n >>> str(Point(4, 2))\n \'Point(4, 2)\'\n\nAccessing arguments\' items:\n\n >>> coord = (3, 5)\n >>> \'X: {0[0]}; Y: {0[1]}\'.format(coord)\n \'X: 3; Y: 5\'\n\nReplacing ``%s`` and ``%r``:\n\n >>> "repr() shows quotes: {!r}; str() doesn\'t: {!s}".format(\'test1\', \'test2\')\n "repr() shows quotes: \'test1\'; str() doesn\'t: test2"\n\nAligning the text and specifying a width:\n\n >>> \'{:<30}\'.format(\'left aligned\')\n \'left aligned \'\n >>> \'{:>30}\'.format(\'right aligned\')\n \' right aligned\'\n >>> \'{:^30}\'.format(\'centered\')\n \' centered \'\n >>> \'{:*^30}\'.format(\'centered\') # use \'*\' as a fill char\n \'***********centered***********\'\n\nReplacing ``%+f``, ``%-f``, and ``% f`` and specifying a sign:\n\n >>> \'{:+f}; {:+f}\'.format(3.14, -3.14) # show it always\n \'+3.140000; -3.140000\'\n >>> \'{: f}; {: f}\'.format(3.14, -3.14) # show a space for positive numbers\n \' 3.140000; -3.140000\'\n >>> \'{:-f}; {:-f}\'.format(3.14, -3.14) # show only the minus -- same as \'{:f}; {:f}\'\n \'3.140000; -3.140000\'\n\nReplacing ``%x`` and ``%o`` and converting the value to different\nbases:\n\n >>> # format also supports binary numbers\n >>> "int: {0:d}; hex: {0:x}; oct: {0:o}; bin: {0:b}".format(42)\n \'int: 42; hex: 2a; oct: 52; bin: 101010\'\n >>> # with 0x, 0o, or 0b as prefix:\n >>> "int: {0:d}; hex: {0:#x}; oct: {0:#o}; bin: {0:#b}".format(42)\n \'int: 42; hex: 0x2a; oct: 0o52; bin: 0b101010\'\n\nUsing the comma as a thousands separator:\n\n >>> \'{:,}\'.format(1234567890)\n \'1,234,567,890\'\n\nExpressing a percentage:\n\n >>> points = 19.5\n >>> total = 22\n >>> \'Correct answers: {:.2%}.\'.format(points/total)\n \'Correct answers: 88.64%\'\n\nUsing type-specific formatting:\n\n >>> import datetime\n >>> d = datetime.datetime(2010, 7, 4, 12, 15, 58)\n >>> \'{:%Y-%m-%d %H:%M:%S}\'.format(d)\n \'2010-07-04 12:15:58\'\n\nNesting arguments and more complex examples:\n\n >>> for align, 
text in zip(\'<^>\', [\'left\', \'center\', \'right\']):\n ... \'{0:{align}{fill}16}\'.format(text, fill=align, align=align)\n ...\n \'left<<<<<<<<<<<<\'\n \'^^^^^center^^^^^\'\n \'>>>>>>>>>>>right\'\n >>>\n >>> octets = [192, 168, 0, 1]\n >>> \'{:02X}{:02X}{:02X}{:02X}\'.format(*octets)\n \'C0A80001\'\n >>> int(_, 16)\n 3232235521\n >>>\n >>> width = 5\n >>> for num in range(5,12):\n ... for base in \'dXob\':\n ... print \'{0:{width}{base}}\'.format(num, base=base, width=width),\n ... print\n ...\n 5 5 5 101\n 6 6 6 110\n 7 7 7 111\n 8 8 10 1000\n 9 9 11 1001\n 10 A 12 1010\n 11 B 13 1011\n', + 'formatstrings': u'\nFormat String Syntax\n********************\n\nThe ``str.format()`` method and the ``Formatter`` class share the same\nsyntax for format strings (although in the case of ``Formatter``,\nsubclasses can define their own format string syntax).\n\nFormat strings contain "replacement fields" surrounded by curly braces\n``{}``. Anything that is not contained in braces is considered literal\ntext, which is copied unchanged to the output. If you need to include\na brace character in the literal text, it can be escaped by doubling:\n``{{`` and ``}}``.\n\nThe grammar for a replacement field is as follows:\n\n replacement_field ::= "{" [field_name] ["!" conversion] [":" format_spec] "}"\n field_name ::= arg_name ("." attribute_name | "[" element_index "]")*\n arg_name ::= [identifier | integer]\n attribute_name ::= identifier\n element_index ::= integer | index_string\n index_string ::= +\n conversion ::= "r" | "s"\n format_spec ::= \n\nIn less formal terms, the replacement field can start with a\n*field_name* that specifies the object whose value is to be formatted\nand inserted into the output instead of the replacement field. The\n*field_name* is optionally followed by a *conversion* field, which is\npreceded by an exclamation point ``\'!\'``, and a *format_spec*, which\nis preceded by a colon ``\':\'``. 
These specify a non-default format\nfor the replacement value.\n\nSee also the *Format Specification Mini-Language* section.\n\nThe *field_name* itself begins with an *arg_name* that is either\neither a number or a keyword. If it\'s a number, it refers to a\npositional argument, and if it\'s a keyword, it refers to a named\nkeyword argument. If the numerical arg_names in a format string are\n0, 1, 2, ... in sequence, they can all be omitted (not just some) and\nthe numbers 0, 1, 2, ... will be automatically inserted in that order.\nThe *arg_name* can be followed by any number of index or attribute\nexpressions. An expression of the form ``\'.name\'`` selects the named\nattribute using ``getattr()``, while an expression of the form\n``\'[index]\'`` does an index lookup using ``__getitem__()``.\n\nChanged in version 2.7: The positional argument specifiers can be\nomitted, so ``\'{} {}\'`` is equivalent to ``\'{0} {1}\'``.\n\nSome simple format string examples:\n\n "First, thou shalt count to {0}" # References first positional argument\n "Bring me a {}" # Implicitly references the first positional argument\n "From {} to {}" # Same as "From {0} to {1}"\n "My quest is {name}" # References keyword argument \'name\'\n "Weight in tons {0.weight}" # \'weight\' attribute of first positional arg\n "Units destroyed: {players[0]}" # First element of keyword argument \'players\'.\n\nThe *conversion* field causes a type coercion before formatting.\nNormally, the job of formatting a value is done by the\n``__format__()`` method of the value itself. However, in some cases\nit is desirable to force a type to be formatted as a string,\noverriding its own definition of formatting. 
By converting the value\nto a string before calling ``__format__()``, the normal formatting\nlogic is bypassed.\n\nTwo conversion flags are currently supported: ``\'!s\'`` which calls\n``str()`` on the value, and ``\'!r\'`` which calls ``repr()``.\n\nSome examples:\n\n "Harold\'s a clever {0!s}" # Calls str() on the argument first\n "Bring out the holy {name!r}" # Calls repr() on the argument first\n\nThe *format_spec* field contains a specification of how the value\nshould be presented, including such details as field width, alignment,\npadding, decimal precision and so on. Each value type can define its\nown "formatting mini-language" or interpretation of the *format_spec*.\n\nMost built-in types support a common formatting mini-language, which\nis described in the next section.\n\nA *format_spec* field can also include nested replacement fields\nwithin it. These nested replacement fields can contain only a field\nname; conversion flags and format specifications are not allowed. The\nreplacement fields within the format_spec are substituted before the\n*format_spec* string is interpreted. This allows the formatting of a\nvalue to be dynamically specified.\n\nSee the *Format examples* section for some examples.\n\n\nFormat Specification Mini-Language\n==================================\n\n"Format specifications" are used within replacement fields contained\nwithin a format string to define how individual values are presented\n(see *Format String Syntax*). They can also be passed directly to the\nbuilt-in ``format()`` function. Each formattable type may define how\nthe format specification is to be interpreted.\n\nMost built-in types implement the following options for format\nspecifications, although some of the formatting options are only\nsupported by the numeric types.\n\nA general convention is that an empty format string (``""``) produces\nthe same result as if you had called ``str()`` on the value. 
A non-\nempty format string typically modifies the result.\n\nThe general form of a *standard format specifier* is:\n\n format_spec ::= [[fill]align][sign][#][0][width][,][.precision][type]\n fill ::= \n align ::= "<" | ">" | "=" | "^"\n sign ::= "+" | "-" | " "\n width ::= integer\n precision ::= integer\n type ::= "b" | "c" | "d" | "e" | "E" | "f" | "F" | "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"\n\nThe *fill* character can be any character other than \'{\' or \'}\'. The\npresence of a fill character is signaled by the character following\nit, which must be one of the alignment options. If the second\ncharacter of *format_spec* is not a valid alignment option, then it is\nassumed that both the fill character and the alignment option are\nabsent.\n\nThe meaning of the various alignment options is as follows:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | ``\'<\'`` | Forces the field to be left-aligned within the available |\n | | space (this is the default for most objects). |\n +-----------+------------------------------------------------------------+\n | ``\'>\'`` | Forces the field to be right-aligned within the available |\n | | space (this is the default for numbers). |\n +-----------+------------------------------------------------------------+\n | ``\'=\'`` | Forces the padding to be placed after the sign (if any) |\n | | but before the digits. This is used for printing fields |\n | | in the form \'+000000120\'. This alignment option is only |\n | | valid for numeric types. |\n +-----------+------------------------------------------------------------+\n | ``\'^\'`` | Forces the field to be centered within the available |\n | | space. 
|\n +-----------+------------------------------------------------------------+\n\nNote that unless a minimum field width is defined, the field width\nwill always be the same size as the data to fill it, so that the\nalignment option has no meaning in this case.\n\nThe *sign* option is only valid for number types, and can be one of\nthe following:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | ``\'+\'`` | indicates that a sign should be used for both positive as |\n | | well as negative numbers. |\n +-----------+------------------------------------------------------------+\n | ``\'-\'`` | indicates that a sign should be used only for negative |\n | | numbers (this is the default behavior). |\n +-----------+------------------------------------------------------------+\n | space | indicates that a leading space should be used on positive |\n | | numbers, and a minus sign on negative numbers. |\n +-----------+------------------------------------------------------------+\n\nThe ``\'#\'`` option is only valid for integers, and only for binary,\noctal, or hexadecimal output. If present, it specifies that the\noutput will be prefixed by ``\'0b\'``, ``\'0o\'``, or ``\'0x\'``,\nrespectively.\n\nThe ``\',\'`` option signals the use of a comma for a thousands\nseparator. For a locale aware separator, use the ``\'n\'`` integer\npresentation type instead.\n\nChanged in version 2.7: Added the ``\',\'`` option (see also **PEP\n378**).\n\n*width* is a decimal integer defining the minimum field width. If not\nspecified, then the field width will be determined by the content.\n\nIf the *width* field is preceded by a zero (``\'0\'``) character, this\nenables zero-padding. 
This is equivalent to an *alignment* type of\n``\'=\'`` and a *fill* character of ``\'0\'``.\n\nThe *precision* is a decimal number indicating how many digits should\nbe displayed after the decimal point for a floating point value\nformatted with ``\'f\'`` and ``\'F\'``, or before and after the decimal\npoint for a floating point value formatted with ``\'g\'`` or ``\'G\'``.\nFor non-number types the field indicates the maximum field size - in\nother words, how many characters will be used from the field content.\nThe *precision* is not allowed for integer values.\n\nFinally, the *type* determines how the data should be presented.\n\nThe available string presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | ``\'s\'`` | String format. This is the default type for strings and |\n | | may be omitted. |\n +-----------+------------------------------------------------------------+\n | None | The same as ``\'s\'``. |\n +-----------+------------------------------------------------------------+\n\nThe available integer presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | ``\'b\'`` | Binary format. Outputs the number in base 2. |\n +-----------+------------------------------------------------------------+\n | ``\'c\'`` | Character. Converts the integer to the corresponding |\n | | unicode character before printing. |\n +-----------+------------------------------------------------------------+\n | ``\'d\'`` | Decimal Integer. Outputs the number in base 10. |\n +-----------+------------------------------------------------------------+\n | ``\'o\'`` | Octal format. Outputs the number in base 8. 
|\n +-----------+------------------------------------------------------------+\n | ``\'x\'`` | Hex format. Outputs the number in base 16, using lower- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | ``\'X\'`` | Hex format. Outputs the number in base 16, using upper- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | ``\'n\'`` | Number. This is the same as ``\'d\'``, except that it uses |\n | | the current locale setting to insert the appropriate |\n | | number separator characters. |\n +-----------+------------------------------------------------------------+\n | None | The same as ``\'d\'``. |\n +-----------+------------------------------------------------------------+\n\nIn addition to the above presentation types, integers can be formatted\nwith the floating point presentation types listed below (except\n``\'n\'`` and None). When doing so, ``float()`` is used to convert the\ninteger to a floating point number before formatting.\n\nThe available presentation types for floating point and decimal values\nare:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | ``\'e\'`` | Exponent notation. Prints the number in scientific |\n | | notation using the letter \'e\' to indicate the exponent. |\n +-----------+------------------------------------------------------------+\n | ``\'E\'`` | Exponent notation. Same as ``\'e\'`` except it uses an upper |\n | | case \'E\' as the separator character. |\n +-----------+------------------------------------------------------------+\n | ``\'f\'`` | Fixed point. Displays the number as a fixed-point number. |\n +-----------+------------------------------------------------------------+\n | ``\'F\'`` | Fixed point. Same as ``\'f\'``. 
|\n +-----------+------------------------------------------------------------+\n | ``\'g\'`` | General format. For a given precision ``p >= 1``, this |\n | | rounds the number to ``p`` significant digits and then |\n | | formats the result in either fixed-point format or in |\n | | scientific notation, depending on its magnitude. The |\n | | precise rules are as follows: suppose that the result |\n | | formatted with presentation type ``\'e\'`` and precision |\n | | ``p-1`` would have exponent ``exp``. Then if ``-4 <= exp |\n | | < p``, the number is formatted with presentation type |\n | | ``\'f\'`` and precision ``p-1-exp``. Otherwise, the number |\n | | is formatted with presentation type ``\'e\'`` and precision |\n | | ``p-1``. In both cases insignificant trailing zeros are |\n | | removed from the significand, and the decimal point is |\n | | also removed if there are no remaining digits following |\n | | it. Positive and negative infinity, positive and negative |\n | | zero, and nans, are formatted as ``inf``, ``-inf``, ``0``, |\n | | ``-0`` and ``nan`` respectively, regardless of the |\n | | precision. A precision of ``0`` is treated as equivalent |\n | | to a precision of ``1``. |\n +-----------+------------------------------------------------------------+\n | ``\'G\'`` | General format. Same as ``\'g\'`` except switches to ``\'E\'`` |\n | | if the number gets too large. The representations of |\n | | infinity and NaN are uppercased, too. |\n +-----------+------------------------------------------------------------+\n | ``\'n\'`` | Number. This is the same as ``\'g\'``, except that it uses |\n | | the current locale setting to insert the appropriate |\n | | number separator characters. |\n +-----------+------------------------------------------------------------+\n | ``\'%\'`` | Percentage. Multiplies the number by 100 and displays in |\n | | fixed (``\'f\'``) format, followed by a percent sign. 
|\n +-----------+------------------------------------------------------------+\n | None | The same as ``\'g\'``. |\n +-----------+------------------------------------------------------------+\n\n\nFormat examples\n===============\n\nThis section contains examples of the new format syntax and comparison\nwith the old ``%``-formatting.\n\nIn most of the cases the syntax is similar to the old\n``%``-formatting, with the addition of the ``{}`` and with ``:`` used\ninstead of ``%``. For example, ``\'%03.2f\'`` can be translated to\n``\'{:03.2f}\'``.\n\nThe new format syntax also supports new and different options, shown\nin the follow examples.\n\nAccessing arguments by position:\n\n >>> \'{0}, {1}, {2}\'.format(\'a\', \'b\', \'c\')\n \'a, b, c\'\n >>> \'{}, {}, {}\'.format(\'a\', \'b\', \'c\') # 2.7+ only\n \'a, b, c\'\n >>> \'{2}, {1}, {0}\'.format(\'a\', \'b\', \'c\')\n \'c, b, a\'\n >>> \'{2}, {1}, {0}\'.format(*\'abc\') # unpacking argument sequence\n \'c, b, a\'\n >>> \'{0}{1}{0}\'.format(\'abra\', \'cad\') # arguments\' indices can be repeated\n \'abracadabra\'\n\nAccessing arguments by name:\n\n >>> \'Coordinates: {latitude}, {longitude}\'.format(latitude=\'37.24N\', longitude=\'-115.81W\')\n \'Coordinates: 37.24N, -115.81W\'\n >>> coord = {\'latitude\': \'37.24N\', \'longitude\': \'-115.81W\'}\n >>> \'Coordinates: {latitude}, {longitude}\'.format(**coord)\n \'Coordinates: 37.24N, -115.81W\'\n\nAccessing arguments\' attributes:\n\n >>> c = 3-5j\n >>> (\'The complex number {0} is formed from the real part {0.real} \'\n ... \'and the imaginary part {0.imag}.\').format(c)\n \'The complex number (3-5j) is formed from the real part 3.0 and the imaginary part -5.0.\'\n >>> class Point(object):\n ... def __init__(self, x, y):\n ... self.x, self.y = x, y\n ... def __str__(self):\n ... 
return \'Point({self.x}, {self.y})\'.format(self=self)\n ...\n >>> str(Point(4, 2))\n \'Point(4, 2)\'\n\nAccessing arguments\' items:\n\n >>> coord = (3, 5)\n >>> \'X: {0[0]}; Y: {0[1]}\'.format(coord)\n \'X: 3; Y: 5\'\n\nReplacing ``%s`` and ``%r``:\n\n >>> "repr() shows quotes: {!r}; str() doesn\'t: {!s}".format(\'test1\', \'test2\')\n "repr() shows quotes: \'test1\'; str() doesn\'t: test2"\n\nAligning the text and specifying a width:\n\n >>> \'{:<30}\'.format(\'left aligned\')\n \'left aligned \'\n >>> \'{:>30}\'.format(\'right aligned\')\n \' right aligned\'\n >>> \'{:^30}\'.format(\'centered\')\n \' centered \'\n >>> \'{:*^30}\'.format(\'centered\') # use \'*\' as a fill char\n \'***********centered***********\'\n\nReplacing ``%+f``, ``%-f``, and ``% f`` and specifying a sign:\n\n >>> \'{:+f}; {:+f}\'.format(3.14, -3.14) # show it always\n \'+3.140000; -3.140000\'\n >>> \'{: f}; {: f}\'.format(3.14, -3.14) # show a space for positive numbers\n \' 3.140000; -3.140000\'\n >>> \'{:-f}; {:-f}\'.format(3.14, -3.14) # show only the minus -- same as \'{:f}; {:f}\'\n \'3.140000; -3.140000\'\n\nReplacing ``%x`` and ``%o`` and converting the value to different\nbases:\n\n >>> # format also supports binary numbers\n >>> "int: {0:d}; hex: {0:x}; oct: {0:o}; bin: {0:b}".format(42)\n \'int: 42; hex: 2a; oct: 52; bin: 101010\'\n >>> # with 0x, 0o, or 0b as prefix:\n >>> "int: {0:d}; hex: {0:#x}; oct: {0:#o}; bin: {0:#b}".format(42)\n \'int: 42; hex: 0x2a; oct: 0o52; bin: 0b101010\'\n\nUsing the comma as a thousands separator:\n\n >>> \'{:,}\'.format(1234567890)\n \'1,234,567,890\'\n\nExpressing a percentage:\n\n >>> points = 19.5\n >>> total = 22\n >>> \'Correct answers: {:.2%}.\'.format(points/total)\n \'Correct answers: 88.64%\'\n\nUsing type-specific formatting:\n\n >>> import datetime\n >>> d = datetime.datetime(2010, 7, 4, 12, 15, 58)\n >>> \'{:%Y-%m-%d %H:%M:%S}\'.format(d)\n \'2010-07-04 12:15:58\'\n\nNesting arguments and more complex examples:\n\n >>> for align, 
text in zip(\'<^>\', [\'left\', \'center\', \'right\']):\n ... \'{0:{fill}{align}16}\'.format(text, fill=align, align=align)\n ...\n \'left<<<<<<<<<<<<\'\n \'^^^^^center^^^^^\'\n \'>>>>>>>>>>>right\'\n >>>\n >>> octets = [192, 168, 0, 1]\n >>> \'{:02X}{:02X}{:02X}{:02X}\'.format(*octets)\n \'C0A80001\'\n >>> int(_, 16)\n 3232235521\n >>>\n >>> width = 5\n >>> for num in range(5,12):\n ... for base in \'dXob\':\n ... print \'{0:{width}{base}}\'.format(num, base=base, width=width),\n ... print\n ...\n 5 5 5 101\n 6 6 6 110\n 7 7 7 111\n 8 8 10 1000\n 9 9 11 1001\n 10 A 12 1010\n 11 B 13 1011\n', 'function': u'\nFunction definitions\n********************\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n decorated ::= decorators (classdef | funcdef)\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [argument_list [","]] ")"] NEWLINE\n funcdef ::= "def" funcname "(" [parameter_list] ")" ":" suite\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n ( "*" identifier [, "**" identifier]\n | "**" identifier\n | defparameter [","] )\n defparameter ::= parameter ["=" expression]\n sublist ::= parameter ("," parameter)* [","]\n parameter ::= identifier | "(" sublist ")"\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. 
The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. For example, the following code:\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to:\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more top-level parameters have the form *parameter* ``=``\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding argument may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters must also have a default value --- this is a syntactic\nrestriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated when the function definition\nis executed.** This means that the expression is evaluated once, when\nthe function is defined, and that that same "pre-computed" value is\nused for each call. This is especially important to understand when a\ndefault parameter is a mutable object, such as a list or a dictionary:\nif the function modifies the object (e.g. by appending an item to a\nlist), the default value is in effect modified. This is generally not\nwhat was intended. A way around this is to use ``None`` as the\ndefault, and explicitly test for it in the body of the function, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from position arguments, from\nkeyword arguments, or from default values. 
If the form\n"``*identifier``" is present, it is initialized to a tuple receiving\nany excess positional parameters, defaulting to the empty tuple. If\nthe form "``**identifier``" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda forms,\ndescribed in section *Lambdas*. Note that the lambda form is merely a\nshorthand for a simplified function definition; a function defined in\na "``def``" statement can be passed around or assigned to another name\njust like a function defined by a lambda form. The "``def``" form is\nactually more powerful since it allows the execution of multiple\nstatements.\n\n**Programmer\'s note:** Functions are first-class objects. A "``def``"\nform executed inside a function definition defines a local function\nthat can be returned or passed around. Free variables used in the\nnested function can access the local variables of the function\ncontaining the def. See section *Naming and binding* for details.\n', 'global': u'\nThe ``global`` statement\n************************\n\n global_stmt ::= "global" identifier ("," identifier)*\n\nThe ``global`` statement is a declaration which holds for the entire\ncurrent code block. It means that the listed identifiers are to be\ninterpreted as globals. 
It would be impossible to assign to a global\nvariable without ``global``, although free variables may refer to\nglobals without being declared global.\n\nNames listed in a ``global`` statement must not be used in the same\ncode block textually preceding that ``global`` statement.\n\nNames listed in a ``global`` statement must not be defined as formal\nparameters or in a ``for`` loop control target, ``class`` definition,\nfunction definition, or ``import`` statement.\n\n**CPython implementation detail:** The current implementation does not\nenforce the latter two restrictions, but programs should not abuse\nthis freedom, as future implementations may enforce them or silently\nchange the meaning of the program.\n\n**Programmer\'s note:** the ``global`` is a directive to the parser.\nIt applies only to code parsed at the same time as the ``global``\nstatement. In particular, a ``global`` statement contained in an\n``exec`` statement does not affect the code block *containing* the\n``exec`` statement, and code contained in an ``exec`` statement is\nunaffected by ``global`` statements in the code containing the\n``exec`` statement. The same applies to the ``eval()``,\n``execfile()`` and ``compile()`` functions.\n', - 'id-classes': u'\nReserved classes of identifiers\n*******************************\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n``_*``\n Not imported by ``from module import *``. The special identifier\n ``_`` is used in the interactive interpreter to store the result of\n the last evaluation; it is stored in the ``__builtin__`` module.\n When not in interactive mode, ``_`` has no special meaning and is\n not defined. 
See section *The import statement*.\n\n Note: The name ``_`` is often used in conjunction with\n internationalization; refer to the documentation for the\n ``gettext`` module for more information on this convention.\n\n``__*__``\n System-defined names. These names are defined by the interpreter\n and its implementation (including the standard library);\n applications should not expect to define additional names using\n this convention. The set of names of this class defined by Python\n may be extended in future versions. See section *Special method\n names*.\n\n``__*``\n Class-private names. Names in this category, when used within the\n context of a class definition, are re-written to use a mangled form\n to help avoid name clashes between "private" attributes of base and\n derived classes. See section *Identifiers (Names)*.\n', - 'identifiers': u'\nIdentifiers and keywords\n************************\n\nIdentifiers (also referred to as *names*) are described by the\nfollowing lexical definitions:\n\n identifier ::= (letter|"_") (letter | digit | "_")*\n letter ::= lowercase | uppercase\n lowercase ::= "a"..."z"\n uppercase ::= "A"..."Z"\n digit ::= "0"..."9"\n\nIdentifiers are unlimited in length. Case is significant.\n\n\nKeywords\n========\n\nThe following identifiers are used as reserved words, or *keywords* of\nthe language, and cannot be used as ordinary identifiers. They must\nbe spelled exactly as written here:\n\n and del from not while\n as elif global or with\n assert else if pass yield\n break except import print\n class exec in raise\n continue finally is return\n def for lambda try\n\nChanged in version 2.4: ``None`` became a constant and is now\nrecognized by the compiler as a name for the built-in object ``None``.\nAlthough it is not a keyword, you cannot assign a different object to\nit.\n\nChanged in version 2.5: Both ``as`` and ``with`` are only recognized\nwhen the ``with_statement`` future feature has been enabled. 
It will\nalways be enabled in Python 2.6. See section *The with statement* for\ndetails. Note that using ``as`` and ``with`` as identifiers will\nalways issue a warning, even when the ``with_statement`` future\ndirective is not in effect.\n\n\nReserved classes of identifiers\n===============================\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n``_*``\n Not imported by ``from module import *``. The special identifier\n ``_`` is used in the interactive interpreter to store the result of\n the last evaluation; it is stored in the ``__builtin__`` module.\n When not in interactive mode, ``_`` has no special meaning and is\n not defined. See section *The import statement*.\n\n Note: The name ``_`` is often used in conjunction with\n internationalization; refer to the documentation for the\n ``gettext`` module for more information on this convention.\n\n``__*__``\n System-defined names. These names are defined by the interpreter\n and its implementation (including the standard library);\n applications should not expect to define additional names using\n this convention. The set of names of this class defined by Python\n may be extended in future versions. See section *Special method\n names*.\n\n``__*``\n Class-private names. Names in this category, when used within the\n context of a class definition, are re-written to use a mangled form\n to help avoid name clashes between "private" attributes of base and\n derived classes. See section *Identifiers (Names)*.\n', + 'id-classes': u'\nReserved classes of identifiers\n*******************************\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n``_*``\n Not imported by ``from module import *``. 
The special identifier\n ``_`` is used in the interactive interpreter to store the result of\n the last evaluation; it is stored in the ``__builtin__`` module.\n When not in interactive mode, ``_`` has no special meaning and is\n not defined. See section *The import statement*.\n\n Note: The name ``_`` is often used in conjunction with\n internationalization; refer to the documentation for the\n ``gettext`` module for more information on this convention.\n\n``__*__``\n System-defined names. These names are defined by the interpreter\n and its implementation (including the standard library). Current\n system names are discussed in the *Special method names* section\n and elsewhere. More will likely be defined in future versions of\n Python. *Any* use of ``__*__`` names, in any context, that does\n not follow explicitly documented use, is subject to breakage\n without warning.\n\n``__*``\n Class-private names. Names in this category, when used within the\n context of a class definition, are re-written to use a mangled form\n to help avoid name clashes between "private" attributes of base and\n derived classes. See section *Identifiers (Names)*.\n', + 'identifiers': u'\nIdentifiers and keywords\n************************\n\nIdentifiers (also referred to as *names*) are described by the\nfollowing lexical definitions:\n\n identifier ::= (letter|"_") (letter | digit | "_")*\n letter ::= lowercase | uppercase\n lowercase ::= "a"..."z"\n uppercase ::= "A"..."Z"\n digit ::= "0"..."9"\n\nIdentifiers are unlimited in length. Case is significant.\n\n\nKeywords\n========\n\nThe following identifiers are used as reserved words, or *keywords* of\nthe language, and cannot be used as ordinary identifiers. 
They must\nbe spelled exactly as written here:\n\n and del from not while\n as elif global or with\n assert else if pass yield\n break except import print\n class exec in raise\n continue finally is return\n def for lambda try\n\nChanged in version 2.4: ``None`` became a constant and is now\nrecognized by the compiler as a name for the built-in object ``None``.\nAlthough it is not a keyword, you cannot assign a different object to\nit.\n\nChanged in version 2.5: Both ``as`` and ``with`` are only recognized\nwhen the ``with_statement`` future feature has been enabled. It will\nalways be enabled in Python 2.6. See section *The with statement* for\ndetails. Note that using ``as`` and ``with`` as identifiers will\nalways issue a warning, even when the ``with_statement`` future\ndirective is not in effect.\n\n\nReserved classes of identifiers\n===============================\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n``_*``\n Not imported by ``from module import *``. The special identifier\n ``_`` is used in the interactive interpreter to store the result of\n the last evaluation; it is stored in the ``__builtin__`` module.\n When not in interactive mode, ``_`` has no special meaning and is\n not defined. See section *The import statement*.\n\n Note: The name ``_`` is often used in conjunction with\n internationalization; refer to the documentation for the\n ``gettext`` module for more information on this convention.\n\n``__*__``\n System-defined names. These names are defined by the interpreter\n and its implementation (including the standard library). Current\n system names are discussed in the *Special method names* section\n and elsewhere. More will likely be defined in future versions of\n Python. 
*Any* use of ``__*__`` names, in any context, that does\n not follow explicitly documented use, is subject to breakage\n without warning.\n\n``__*``\n Class-private names. Names in this category, when used within the\n context of a class definition, are re-written to use a mangled form\n to help avoid name clashes between "private" attributes of base and\n derived classes. See section *Identifiers (Names)*.\n', 'if': u'\nThe ``if`` statement\n********************\n\nThe ``if`` statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the ``if`` statement is executed or evaluated).\nIf all expressions are false, the suite of the ``else`` clause, if\npresent, is executed.\n', 'imaginary': u'\nImaginary literals\n******************\n\nImaginary literals are described by the following lexical definitions:\n\n imagnumber ::= (floatnumber | intpart) ("j" | "J")\n\nAn imaginary literal yields a complex number with a real part of 0.0.\nComplex numbers are represented as a pair of floating point numbers\nand have the same restrictions on their range. To create a complex\nnumber with a nonzero real part, add a floating point number to it,\ne.g., ``(3+4j)``. 
Some examples of imaginary literals:\n\n 3.14j 10.j 10j .001j 1e100j 3.14e-10j\n', - 'import': u'\nThe ``import`` statement\n************************\n\n import_stmt ::= "import" module ["as" name] ( "," module ["as" name] )*\n | "from" relative_module "import" identifier ["as" name]\n ( "," identifier ["as" name] )*\n | "from" relative_module "import" "(" identifier ["as" name]\n ( "," identifier ["as" name] )* [","] ")"\n | "from" module "import" "*"\n module ::= (identifier ".")* identifier\n relative_module ::= "."* module | "."+\n name ::= identifier\n\nImport statements are executed in two steps: (1) find a module, and\ninitialize it if necessary; (2) define a name or names in the local\nnamespace (of the scope where the ``import`` statement occurs). The\nstatement comes in two forms differing on whether it uses the ``from``\nkeyword. The first form (without ``from``) repeats these steps for\neach identifier in the list. The form with ``from`` performs step (1)\nonce, and then performs step (2) repeatedly.\n\nTo understand how step (1) occurs, one must first understand how\nPython handles hierarchical naming of modules. To help organize\nmodules and provide a hierarchy in naming, Python has a concept of\npackages. A package can contain other packages and modules while\nmodules cannot contain other modules or packages. From a file system\nperspective, packages are directories and modules are files. The\noriginal specification for packages is still available to read,\nalthough minor details have changed since the writing of that\ndocument.\n\nOnce the name of the module is known (unless otherwise specified, the\nterm "module" will refer to both packages and modules), searching for\nthe module or package can begin. The first place checked is\n``sys.modules``, the cache of all modules that have been imported\npreviously. 
If the module is found there then it is used in step (2)\nof import.\n\nIf the module is not found in the cache, then ``sys.meta_path`` is\nsearched (the specification for ``sys.meta_path`` can be found in\n**PEP 302**). The object is a list of *finder* objects which are\nqueried in order as to whether they know how to load the module by\ncalling their ``find_module()`` method with the name of the module. If\nthe module happens to be contained within a package (as denoted by the\nexistence of a dot in the name), then a second argument to\n``find_module()`` is given as the value of the ``__path__`` attribute\nfrom the parent package (everything up to the last dot in the name of\nthe module being imported). If a finder can find the module it returns\na *loader* (discussed later) or returns ``None``.\n\nIf none of the finders on ``sys.meta_path`` are able to find the\nmodule then some implicitly defined finders are queried.\nImplementations of Python vary in what implicit meta path finders are\ndefined. The one they all do define, though, is one that handles\n``sys.path_hooks``, ``sys.path_importer_cache``, and ``sys.path``.\n\nThe implicit finder searches for the requested module in the "paths"\nspecified in one of two places ("paths" do not have to be file system\npaths). If the module being imported is supposed to be contained\nwithin a package then the second argument passed to ``find_module()``,\n``__path__`` on the parent package, is used as the source of paths. If\nthe module is not contained in a package then ``sys.path`` is used as\nthe source of paths.\n\nOnce the source of paths is chosen it is iterated over to find a\nfinder that can handle that path. The dict at\n``sys.path_importer_cache`` caches finders for paths and is checked\nfor a finder. If the path does not have a finder cached then\n``sys.path_hooks`` is searched by calling each object in the list with\na single argument of the path, returning a finder or raises\n``ImportError``. 
If a finder is returned then it is cached in\n``sys.path_importer_cache`` and then used for that path entry. If no\nfinder can be found but the path exists then a value of ``None`` is\nstored in ``sys.path_importer_cache`` to signify that an implicit,\nfile-based finder that handles modules stored as individual files\nshould be used for that path. If the path does not exist then a finder\nwhich always returns ``None`` is placed in the cache for the path.\n\nIf no finder can find the module then ``ImportError`` is raised.\nOtherwise some finder returned a loader whose ``load_module()`` method\nis called with the name of the module to load (see **PEP 302** for the\noriginal definition of loaders). A loader has several responsibilities\nto perform on a module it loads. First, if the module already exists\nin ``sys.modules`` (a possibility if the loader is called outside of\nthe import machinery) then it is to use that module for initialization\nand not a new module. But if the module does not exist in\n``sys.modules`` then it is to be added to that dict before\ninitialization begins. If an error occurs during loading of the module\nand it was added to ``sys.modules`` it is to be removed from the dict.\nIf an error occurs but the module was already in ``sys.modules`` it is\nleft in the dict.\n\nThe loader must set several attributes on the module. ``__name__`` is\nto be set to the name of the module. ``__file__`` is to be the "path"\nto the file unless the module is built-in (and thus listed in\n``sys.builtin_module_names``) in which case the attribute is not set.\nIf what is being imported is a package then ``__path__`` is to be set\nto a list of paths to be searched when looking for modules and\npackages contained within the package being imported. ``__package__``\nis optional but should be set to the name of package that contains the\nmodule or package (the empty string is used for module not contained\nin a package). 
``__loader__`` is also optional but should be set to\nthe loader object that is loading the module.\n\nIf an error occurs during loading then the loader raises\n``ImportError`` if some other exception is not already being\npropagated. Otherwise the loader returns the module that was loaded\nand initialized.\n\nWhen step (1) finishes without raising an exception, step (2) can\nbegin.\n\nThe first form of ``import`` statement binds the module name in the\nlocal namespace to the module object, and then goes on to import the\nnext identifier, if any. If the module name is followed by ``as``,\nthe name following ``as`` is used as the local name for the module.\n\nThe ``from`` form does not bind the module name: it goes through the\nlist of identifiers, looks each one of them up in the module found in\nstep (1), and binds the name in the local namespace to the object thus\nfound. As with the first form of ``import``, an alternate local name\ncan be supplied by specifying "``as`` localname". If a name is not\nfound, ``ImportError`` is raised. If the list of identifiers is\nreplaced by a star (``\'*\'``), all public names defined in the module\nare bound in the local namespace of the ``import`` statement..\n\nThe *public names* defined by a module are determined by checking the\nmodule\'s namespace for a variable named ``__all__``; if defined, it\nmust be a sequence of strings which are names defined or imported by\nthat module. The names given in ``__all__`` are all considered public\nand are required to exist. If ``__all__`` is not defined, the set of\npublic names includes all names found in the module\'s namespace which\ndo not begin with an underscore character (``\'_\'``). ``__all__``\nshould contain the entire public API. It is intended to avoid\naccidentally exporting items that are not part of the API (such as\nlibrary modules which were imported and used within the module).\n\nThe ``from`` form with ``*`` may only occur in a module scope. 
If the\nwild card form of import --- ``import *`` --- is used in a function\nand the function contains or is a nested block with free variables,\nthe compiler will raise a ``SyntaxError``.\n\nWhen specifying what module to import you do not have to specify the\nabsolute name of the module. When a module or package is contained\nwithin another package it is possible to make a relative import within\nthe same top package without having to mention the package name. By\nusing leading dots in the specified module or package after ``from``\nyou can specify how high to traverse up the current package hierarchy\nwithout specifying exact names. One leading dot means the current\npackage where the module making the import exists. Two dots means up\none package level. Three dots is up two levels, etc. So if you execute\n``from . import mod`` from a module in the ``pkg`` package then you\nwill end up importing ``pkg.mod``. If you execute ``from ..subpkg2\nimprt mod`` from within ``pkg.subpkg1`` you will import\n``pkg.subpkg2.mod``. The specification for relative imports is\ncontained within **PEP 328**.\n\n``importlib.import_module()`` is provided to support applications that\ndetermine which modules need to be loaded dynamically.\n\n\nFuture statements\n=================\n\nA *future statement* is a directive to the compiler that a particular\nmodule should be compiled using syntax or semantics that will be\navailable in a specified future release of Python. The future\nstatement is intended to ease migration to future versions of Python\nthat introduce incompatible changes to the language. 
It allows use of\nthe new features on a per-module basis before the release in which the\nfeature becomes standard.\n\n future_statement ::= "from" "__future__" "import" feature ["as" name]\n ("," feature ["as" name])*\n | "from" "__future__" "import" "(" feature ["as" name]\n ("," feature ["as" name])* [","] ")"\n feature ::= identifier\n name ::= identifier\n\nA future statement must appear near the top of the module. The only\nlines that can appear before a future statement are:\n\n* the module docstring (if any),\n\n* comments,\n\n* blank lines, and\n\n* other future statements.\n\nThe features recognized by Python 2.6 are ``unicode_literals``,\n``print_function``, ``absolute_import``, ``division``, ``generators``,\n``nested_scopes`` and ``with_statement``. ``generators``,\n``with_statement``, ``nested_scopes`` are redundant in Python version\n2.6 and above because they are always enabled.\n\nA future statement is recognized and treated specially at compile\ntime: Changes to the semantics of core constructs are often\nimplemented by generating different code. It may even be the case\nthat a new feature introduces new incompatible syntax (such as a new\nreserved word), in which case the compiler may need to parse the\nmodule differently. 
Such decisions cannot be pushed off until\nruntime.\n\nFor any given release, the compiler knows which feature names have\nbeen defined, and raises a compile-time error if a future statement\ncontains a feature not known to it.\n\nThe direct runtime semantics are the same as for any import statement:\nthere is a standard module ``__future__``, described later, and it\nwill be imported in the usual way at the time the future statement is\nexecuted.\n\nThe interesting runtime semantics depend on the specific feature\nenabled by the future statement.\n\nNote that there is nothing special about the statement:\n\n import __future__ [as name]\n\nThat is not a future statement; it\'s an ordinary import statement with\nno special semantics or syntax restrictions.\n\nCode compiled by an ``exec`` statement or calls to the built-in\nfunctions ``compile()`` and ``execfile()`` that occur in a module\n``M`` containing a future statement will, by default, use the new\nsyntax or semantics associated with the future statement. This can,\nstarting with Python 2.2 be controlled by optional arguments to\n``compile()`` --- see the documentation of that function for details.\n\nA future statement typed at an interactive interpreter prompt will\ntake effect for the rest of the interpreter session. 
If an\ninterpreter is started with the *-i* option, is passed a script name\nto execute, and the script includes a future statement, it will be in\neffect in the interactive session started after the script is\nexecuted.\n\nSee also:\n\n **PEP 236** - Back to the __future__\n The original proposal for the __future__ mechanism.\n', + 'import': u'\nThe ``import`` statement\n************************\n\n import_stmt ::= "import" module ["as" name] ( "," module ["as" name] )*\n | "from" relative_module "import" identifier ["as" name]\n ( "," identifier ["as" name] )*\n | "from" relative_module "import" "(" identifier ["as" name]\n ( "," identifier ["as" name] )* [","] ")"\n | "from" module "import" "*"\n module ::= (identifier ".")* identifier\n relative_module ::= "."* module | "."+\n name ::= identifier\n\nImport statements are executed in two steps: (1) find a module, and\ninitialize it if necessary; (2) define a name or names in the local\nnamespace (of the scope where the ``import`` statement occurs). The\nstatement comes in two forms differing on whether it uses the ``from``\nkeyword. The first form (without ``from``) repeats these steps for\neach identifier in the list. The form with ``from`` performs step (1)\nonce, and then performs step (2) repeatedly.\n\nTo understand how step (1) occurs, one must first understand how\nPython handles hierarchical naming of modules. To help organize\nmodules and provide a hierarchy in naming, Python has a concept of\npackages. A package can contain other packages and modules while\nmodules cannot contain other modules or packages. From a file system\nperspective, packages are directories and modules are files. The\noriginal specification for packages is still available to read,\nalthough minor details have changed since the writing of that\ndocument.\n\nOnce the name of the module is known (unless otherwise specified, the\nterm "module" will refer to both packages and modules), searching for\nthe module or package can begin. 
The first place checked is\n``sys.modules``, the cache of all modules that have been imported\npreviously. If the module is found there then it is used in step (2)\nof import.\n\nIf the module is not found in the cache, then ``sys.meta_path`` is\nsearched (the specification for ``sys.meta_path`` can be found in\n**PEP 302**). The object is a list of *finder* objects which are\nqueried in order as to whether they know how to load the module by\ncalling their ``find_module()`` method with the name of the module. If\nthe module happens to be contained within a package (as denoted by the\nexistence of a dot in the name), then a second argument to\n``find_module()`` is given as the value of the ``__path__`` attribute\nfrom the parent package (everything up to the last dot in the name of\nthe module being imported). If a finder can find the module it returns\na *loader* (discussed later) or returns ``None``.\n\nIf none of the finders on ``sys.meta_path`` are able to find the\nmodule then some implicitly defined finders are queried.\nImplementations of Python vary in what implicit meta path finders are\ndefined. The one they all do define, though, is one that handles\n``sys.path_hooks``, ``sys.path_importer_cache``, and ``sys.path``.\n\nThe implicit finder searches for the requested module in the "paths"\nspecified in one of two places ("paths" do not have to be file system\npaths). If the module being imported is supposed to be contained\nwithin a package then the second argument passed to ``find_module()``,\n``__path__`` on the parent package, is used as the source of paths. If\nthe module is not contained in a package then ``sys.path`` is used as\nthe source of paths.\n\nOnce the source of paths is chosen it is iterated over to find a\nfinder that can handle that path. The dict at\n``sys.path_importer_cache`` caches finders for paths and is checked\nfor a finder. 
If the path does not have a finder cached then\n``sys.path_hooks`` is searched by calling each object in the list with\na single argument of the path, returning a finder or raises\n``ImportError``. If a finder is returned then it is cached in\n``sys.path_importer_cache`` and then used for that path entry. If no\nfinder can be found but the path exists then a value of ``None`` is\nstored in ``sys.path_importer_cache`` to signify that an implicit,\nfile-based finder that handles modules stored as individual files\nshould be used for that path. If the path does not exist then a finder\nwhich always returns ``None`` is placed in the cache for the path.\n\nIf no finder can find the module then ``ImportError`` is raised.\nOtherwise some finder returned a loader whose ``load_module()`` method\nis called with the name of the module to load (see **PEP 302** for the\noriginal definition of loaders). A loader has several responsibilities\nto perform on a module it loads. First, if the module already exists\nin ``sys.modules`` (a possibility if the loader is called outside of\nthe import machinery) then it is to use that module for initialization\nand not a new module. But if the module does not exist in\n``sys.modules`` then it is to be added to that dict before\ninitialization begins. If an error occurs during loading of the module\nand it was added to ``sys.modules`` it is to be removed from the dict.\nIf an error occurs but the module was already in ``sys.modules`` it is\nleft in the dict.\n\nThe loader must set several attributes on the module. ``__name__`` is\nto be set to the name of the module. ``__file__`` is to be the "path"\nto the file unless the module is built-in (and thus listed in\n``sys.builtin_module_names``) in which case the attribute is not set.\nIf what is being imported is a package then ``__path__`` is to be set\nto a list of paths to be searched when looking for modules and\npackages contained within the package being imported. 
``__package__``\nis optional but should be set to the name of package that contains the\nmodule or package (the empty string is used for module not contained\nin a package). ``__loader__`` is also optional but should be set to\nthe loader object that is loading the module.\n\nIf an error occurs during loading then the loader raises\n``ImportError`` if some other exception is not already being\npropagated. Otherwise the loader returns the module that was loaded\nand initialized.\n\nWhen step (1) finishes without raising an exception, step (2) can\nbegin.\n\nThe first form of ``import`` statement binds the module name in the\nlocal namespace to the module object, and then goes on to import the\nnext identifier, if any. If the module name is followed by ``as``,\nthe name following ``as`` is used as the local name for the module.\n\nThe ``from`` form does not bind the module name: it goes through the\nlist of identifiers, looks each one of them up in the module found in\nstep (1), and binds the name in the local namespace to the object thus\nfound. As with the first form of ``import``, an alternate local name\ncan be supplied by specifying "``as`` localname". If a name is not\nfound, ``ImportError`` is raised. If the list of identifiers is\nreplaced by a star (``\'*\'``), all public names defined in the module\nare bound in the local namespace of the ``import`` statement..\n\nThe *public names* defined by a module are determined by checking the\nmodule\'s namespace for a variable named ``__all__``; if defined, it\nmust be a sequence of strings which are names defined or imported by\nthat module. The names given in ``__all__`` are all considered public\nand are required to exist. If ``__all__`` is not defined, the set of\npublic names includes all names found in the module\'s namespace which\ndo not begin with an underscore character (``\'_\'``). ``__all__``\nshould contain the entire public API. 
It is intended to avoid\naccidentally exporting items that are not part of the API (such as\nlibrary modules which were imported and used within the module).\n\nThe ``from`` form with ``*`` may only occur in a module scope. If the\nwild card form of import --- ``import *`` --- is used in a function\nand the function contains or is a nested block with free variables,\nthe compiler will raise a ``SyntaxError``.\n\nWhen specifying what module to import you do not have to specify the\nabsolute name of the module. When a module or package is contained\nwithin another package it is possible to make a relative import within\nthe same top package without having to mention the package name. By\nusing leading dots in the specified module or package after ``from``\nyou can specify how high to traverse up the current package hierarchy\nwithout specifying exact names. One leading dot means the current\npackage where the module making the import exists. Two dots means up\none package level. Three dots is up two levels, etc. So if you execute\n``from . import mod`` from a module in the ``pkg`` package then you\nwill end up importing ``pkg.mod``. If you execute ``from ..subpkg2\nimport mod`` from within ``pkg.subpkg1`` you will import\n``pkg.subpkg2.mod``. The specification for relative imports is\ncontained within **PEP 328**.\n\n``importlib.import_module()`` is provided to support applications that\ndetermine which modules need to be loaded dynamically.\n\n\nFuture statements\n=================\n\nA *future statement* is a directive to the compiler that a particular\nmodule should be compiled using syntax or semantics that will be\navailable in a specified future release of Python. The future\nstatement is intended to ease migration to future versions of Python\nthat introduce incompatible changes to the language. 
It allows use of\nthe new features on a per-module basis before the release in which the\nfeature becomes standard.\n\n future_statement ::= "from" "__future__" "import" feature ["as" name]\n ("," feature ["as" name])*\n | "from" "__future__" "import" "(" feature ["as" name]\n ("," feature ["as" name])* [","] ")"\n feature ::= identifier\n name ::= identifier\n\nA future statement must appear near the top of the module. The only\nlines that can appear before a future statement are:\n\n* the module docstring (if any),\n\n* comments,\n\n* blank lines, and\n\n* other future statements.\n\nThe features recognized by Python 2.6 are ``unicode_literals``,\n``print_function``, ``absolute_import``, ``division``, ``generators``,\n``nested_scopes`` and ``with_statement``. ``generators``,\n``with_statement``, ``nested_scopes`` are redundant in Python version\n2.6 and above because they are always enabled.\n\nA future statement is recognized and treated specially at compile\ntime: Changes to the semantics of core constructs are often\nimplemented by generating different code. It may even be the case\nthat a new feature introduces new incompatible syntax (such as a new\nreserved word), in which case the compiler may need to parse the\nmodule differently. 
Such decisions cannot be pushed off until\nruntime.\n\nFor any given release, the compiler knows which feature names have\nbeen defined, and raises a compile-time error if a future statement\ncontains a feature not known to it.\n\nThe direct runtime semantics are the same as for any import statement:\nthere is a standard module ``__future__``, described later, and it\nwill be imported in the usual way at the time the future statement is\nexecuted.\n\nThe interesting runtime semantics depend on the specific feature\nenabled by the future statement.\n\nNote that there is nothing special about the statement:\n\n import __future__ [as name]\n\nThat is not a future statement; it\'s an ordinary import statement with\nno special semantics or syntax restrictions.\n\nCode compiled by an ``exec`` statement or calls to the built-in\nfunctions ``compile()`` and ``execfile()`` that occur in a module\n``M`` containing a future statement will, by default, use the new\nsyntax or semantics associated with the future statement. This can,\nstarting with Python 2.2 be controlled by optional arguments to\n``compile()`` --- see the documentation of that function for details.\n\nA future statement typed at an interactive interpreter prompt will\ntake effect for the rest of the interpreter session. If an\ninterpreter is started with the *-i* option, is passed a script name\nto execute, and the script includes a future statement, it will be in\neffect in the interactive session started after the script is\nexecuted.\n\nSee also:\n\n **PEP 236** - Back to the __future__\n The original proposal for the __future__ mechanism.\n', 'in': u'\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. 
Also unlike C, expressions like ``a < b < c`` have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "<>" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: ``True`` or ``False``.\n\nComparisons can be chained arbitrarily, e.g., ``x < y <= z`` is\nequivalent to ``x < y and y <= z``, except that ``y`` is evaluated\nonly once (but in both cases ``z`` is not evaluated at all when ``x <\ny`` is found to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then ``a op1 b op2 c ... y\nopN z`` is equivalent to ``a op1 b and b op2 c and ... y opN z``,\nexcept that each expression is evaluated at most once.\n\nNote that ``a op1 b op2 c`` doesn\'t imply any kind of comparison\nbetween *a* and *c*, so that, e.g., ``x < y > z`` is perfectly legal\n(though perhaps not pretty).\n\nThe forms ``<>`` and ``!=`` are equivalent; for consistency with C,\n``!=`` is preferred; where ``!=`` is mentioned below ``<>`` is also\naccepted. The ``<>`` spelling is considered obsolescent.\n\nThe operators ``<``, ``>``, ``==``, ``>=``, ``<=``, and ``!=`` compare\nthe values of two objects. The objects need not have the same type.\nIf both are numbers, they are converted to a common type. Otherwise,\nobjects of different types *always* compare unequal, and are ordered\nconsistently but arbitrarily. You can control comparison behavior of\nobjects of non-built-in types by defining a ``__cmp__`` method or rich\ncomparison methods like ``__gt__``, described in section *Special\nmethod names*.\n\n(This unusual definition of comparison was used to simplify the\ndefinition of operations like sorting and the ``in`` and ``not in``\noperators. 
In the future, the comparison rules for objects of\ndifferent types are likely to change.)\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* Strings are compared lexicographically using the numeric equivalents\n (the result of the built-in function ``ord()``) of their characters.\n Unicode and 8-bit strings are fully interoperable in this behavior.\n [4]\n\n* Tuples and lists are compared lexicographically using comparison of\n corresponding elements. This means that to compare equal, each\n element must compare equal and the two sequences must be of the same\n type and have the same length.\n\n If not equal, the sequences are ordered the same as their first\n differing elements. For example, ``cmp([1,2,x], [1,2,y])`` returns\n the same as ``cmp(x,y)``. If the corresponding element does not\n exist, the shorter sequence is ordered first (for example, ``[1,2] <\n [1,2,3]``).\n\n* Mappings (dictionaries) compare equal if and only if their sorted\n (key, value) lists compare equal. [5] Outcomes other than equality\n are resolved consistently, but are not otherwise defined. [6]\n\n* Most other objects of built-in types compare unequal unless they are\n the same object; the choice whether one object is considered smaller\n or larger than another one is made arbitrarily but consistently\n within one execution of a program.\n\nThe operators ``in`` and ``not in`` test for collection membership.\n``x in s`` evaluates to true if *x* is a member of the collection *s*,\nand false otherwise. ``x not in s`` returns the negation of ``x in\ns``. The collection membership test has traditionally been bound to\nsequences; an object is a member of a collection if the collection is\na sequence and contains an element equal to that object. However, it\nmake sense for many other object types to support membership tests\nwithout being a sequence. 
In particular, dictionaries (for keys) and\nsets support membership testing.\n\nFor the list and tuple types, ``x in y`` is true if and only if there\nexists an index *i* such that ``x == y[i]`` is true.\n\nFor the Unicode and string types, ``x in y`` is true if and only if\n*x* is a substring of *y*. An equivalent test is ``y.find(x) != -1``.\nNote, *x* and *y* need not be the same type; consequently, ``u\'ab\' in\n\'abc\'`` will return ``True``. Empty strings are always considered to\nbe a substring of any other string, so ``"" in "abc"`` will return\n``True``.\n\nChanged in version 2.3: Previously, *x* was required to be a string of\nlength ``1``.\n\nFor user-defined classes which define the ``__contains__()`` method,\n``x in y`` is true if and only if ``y.__contains__(x)`` is true.\n\nFor user-defined classes which do not define ``__contains__()`` but do\ndefine ``__iter__()``, ``x in y`` is true if some value ``z`` with ``x\n== z`` is produced while iterating over ``y``. If an exception is\nraised during the iteration, it is as if ``in`` raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n``__getitem__()``, ``x in y`` is true if and only if there is a non-\nnegative integer index *i* such that ``x == y[i]``, and all lower\ninteger indices do not raise ``IndexError`` exception. (If any other\nexception is raised, it is as if ``in`` raised that exception).\n\nThe operator ``not in`` is defined to have the inverse true value of\n``in``.\n\nThe operators ``is`` and ``is not`` test for object identity: ``x is\ny`` is true if and only if *x* and *y* are the same object. ``x is\nnot y`` yields the inverse truth value. 
[7]\n', 'integers': u'\nInteger and long integer literals\n*********************************\n\nInteger and long integer literals are described by the following\nlexical definitions:\n\n longinteger ::= integer ("l" | "L")\n integer ::= decimalinteger | octinteger | hexinteger | bininteger\n decimalinteger ::= nonzerodigit digit* | "0"\n octinteger ::= "0" ("o" | "O") octdigit+ | "0" octdigit+\n hexinteger ::= "0" ("x" | "X") hexdigit+\n bininteger ::= "0" ("b" | "B") bindigit+\n nonzerodigit ::= "1"..."9"\n octdigit ::= "0"..."7"\n bindigit ::= "0" | "1"\n hexdigit ::= digit | "a"..."f" | "A"..."F"\n\nAlthough both lower case ``\'l\'`` and upper case ``\'L\'`` are allowed as\nsuffix for long integers, it is strongly recommended to always use\n``\'L\'``, since the letter ``\'l\'`` looks too much like the digit\n``\'1\'``.\n\nPlain integer literals that are above the largest representable plain\ninteger (e.g., 2147483647 when using 32-bit arithmetic) are accepted\nas if they were long integers instead. [1] There is no limit for long\ninteger literals apart from what can be stored in available memory.\n\nSome examples of plain integer literals (first row) and long integer\nliterals (second and third rows):\n\n 7 2147483647 0177\n 3L 79228162514264337593543950336L 0377L 0x100000000L\n 79228162514264337593543950336 0xdeadbeef\n', 'lambda': u'\nLambdas\n*******\n\n lambda_form ::= "lambda" [parameter_list]: expression\n old_lambda_form ::= "lambda" [parameter_list]: old_expression\n\nLambda forms (lambda expressions) have the same syntactic position as\nexpressions. 
They are a shorthand to create anonymous functions; the\nexpression ``lambda arguments: expression`` yields a function object.\nThe unnamed object behaves like a function object defined with\n\n def name(arguments):\n return expression\n\nSee section *Function definitions* for the syntax of parameter lists.\nNote that functions created with lambda forms cannot contain\nstatements.\n', 'lists': u'\nList displays\n*************\n\nA list display is a possibly empty series of expressions enclosed in\nsquare brackets:\n\n list_display ::= "[" [expression_list | list_comprehension] "]"\n list_comprehension ::= expression list_for\n list_for ::= "for" target_list "in" old_expression_list [list_iter]\n old_expression_list ::= old_expression [("," old_expression)+ [","]]\n old_expression ::= or_test | old_lambda_form\n list_iter ::= list_for | list_if\n list_if ::= "if" old_expression [list_iter]\n\nA list display yields a new list object. Its contents are specified\nby providing either a list of expressions or a list comprehension.\nWhen a comma-separated list of expressions is supplied, its elements\nare evaluated from left to right and placed into the list object in\nthat order. When a list comprehension is supplied, it consists of a\nsingle expression followed by at least one ``for`` clause and zero or\nmore ``for`` or ``if`` clauses. In this case, the elements of the new\nlist are those that would be produced by considering each of the\n``for`` or ``if`` clauses a block, nesting from left to right, and\nevaluating the expression to produce a list element each time the\ninnermost block is reached [1].\n', - 'naming': u"\nNaming and binding\n******************\n\n*Names* refer to objects. Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. 
The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block. A script\nfile (a file given as standard input to the interpreter or specified\non the interpreter command line the first argument) is a code block.\nA script command (a command specified on the interpreter command line\nwith the '**-c**' option) is a code block. The file read by the\nbuilt-in function ``execfile()`` is a code block. The string argument\npassed to the built-in function ``eval()`` and to the ``exec``\nstatement is a code block. The expression read and evaluated by the\nbuilt-in function ``input()`` is a code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block's execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name. The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes generator expressions since\nthey are implemented using a function scope. This means that the\nfollowing will fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. The set of all such scopes visible to a code block\nis called the block's *environment*.\n\nIf a name is bound in a block, it is a local variable of that block.\nIf a name is bound at the module level, it is a global variable. (The\nvariables of the module code block are local and global.) 
If a\nvariable is used in a code block but not defined there, it is a *free\nvariable*.\n\nWhen a name is not found at all, a ``NameError`` exception is raised.\nIf the name refers to a local variable that has not been bound, a\n``UnboundLocalError`` exception is raised. ``UnboundLocalError`` is a\nsubclass of ``NameError``.\n\nThe following constructs bind names: formal parameters to functions,\n``import`` statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, ``for`` loop header, in the\nsecond position of an ``except`` clause header or after ``as`` in a\n``with`` statement. The ``import`` statement of the form ``from ...\nimport *`` binds all names defined in the imported module, except\nthose beginning with an underscore. This form may only be used at the\nmodule level.\n\nA target occurring in a ``del`` statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name). It\nis illegal to unbind a name that is referenced by an enclosing scope;\nthe compiler will report a ``SyntaxError``.\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the global statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace. 
Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. the namespace of the module\ncontaining the code block, and the builtins namespace, the namespace\nof the module ``__builtin__``. The global namespace is searched\nfirst. If the name is not found there, the builtins namespace is\nsearched. The global statement must precede all uses of the name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name ``__builtins__`` in its\nglobal namespace; this should be a dictionary or a module (in the\nlatter case the module's dictionary is used). By default, when in the\n``__main__`` module, ``__builtins__`` is the built-in module\n``__builtin__`` (note: no 's'); when in any other module,\n``__builtins__`` is an alias for the dictionary of the ``__builtin__``\nmodule itself. ``__builtins__`` can be set to a user-created\ndictionary to create a weak form of restricted execution.\n\n**CPython implementation detail:** Users should not touch\n``__builtins__``; it is strictly an implementation detail. Users\nwanting to override values in the builtins namespace should ``import``\nthe ``__builtin__`` (no 's') module and modify its attributes\nappropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n``__main__``.\n\nThe global statement has the same scope as a name binding operation in\nthe same block. If the nearest enclosing scope for a free variable\ncontains a global statement, the free variable is treated as a global.\n\nA class definition is an executable statement that may use and define\nnames. These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class. 
Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n=================================\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- ``import *`` --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a ``SyntaxError``.\n\nIf ``exec`` is used in a function and the function contains or is a\nnested block with free variables, the compiler will raise a\n``SyntaxError`` unless the exec explicitly specifies the local\nnamespace for the ``exec``. (In other words, ``exec obj`` would be\nillegal, but ``exec obj in ns`` would be legal.)\n\nThe ``eval()``, ``execfile()``, and ``input()`` functions and the\n``exec`` statement do not have access to the full environment for\nresolving names. Names may be resolved in the local and global\nnamespaces of the caller. Free variables are not resolved in the\nnearest enclosing namespace, but in the global namespace. [1] The\n``exec`` statement and the ``eval()`` and ``execfile()`` functions\nhave optional arguments to override the global and local namespace.\nIf only one namespace is specified, it is used for both.\n", + 'naming': u"\nNaming and binding\n******************\n\n*Names* refer to objects. Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block. 
A script\nfile (a file given as standard input to the interpreter or specified\non the interpreter command line the first argument) is a code block.\nA script command (a command specified on the interpreter command line\nwith the '**-c**' option) is a code block. The file read by the\nbuilt-in function ``execfile()`` is a code block. The string argument\npassed to the built-in function ``eval()`` and to the ``exec``\nstatement is a code block. The expression read and evaluated by the\nbuilt-in function ``input()`` is a code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block's execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name. The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes generator expressions since\nthey are implemented using a function scope. This means that the\nfollowing will fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. The set of all such scopes visible to a code block\nis called the block's *environment*.\n\nIf a name is bound in a block, it is a local variable of that block.\nIf a name is bound at the module level, it is a global variable. (The\nvariables of the module code block are local and global.) 
If a\nvariable is used in a code block but not defined there, it is a *free\nvariable*.\n\nWhen a name is not found at all, a ``NameError`` exception is raised.\nIf the name refers to a local variable that has not been bound, a\n``UnboundLocalError`` exception is raised. ``UnboundLocalError`` is a\nsubclass of ``NameError``.\n\nThe following constructs bind names: formal parameters to functions,\n``import`` statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, ``for`` loop header, in the\nsecond position of an ``except`` clause header or after ``as`` in a\n``with`` statement. The ``import`` statement of the form ``from ...\nimport *`` binds all names defined in the imported module, except\nthose beginning with an underscore. This form may only be used at the\nmodule level.\n\nA target occurring in a ``del`` statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name). It\nis illegal to unbind a name that is referenced by an enclosing scope;\nthe compiler will report a ``SyntaxError``.\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the global statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace. 
Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. the namespace of the module\ncontaining the code block, and the builtins namespace, the namespace\nof the module ``__builtin__``. The global namespace is searched\nfirst. If the name is not found there, the builtins namespace is\nsearched. The global statement must precede all uses of the name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name ``__builtins__`` in its\nglobal namespace; this should be a dictionary or a module (in the\nlatter case the module's dictionary is used). By default, when in the\n``__main__`` module, ``__builtins__`` is the built-in module\n``__builtin__`` (note: no 's'); when in any other module,\n``__builtins__`` is an alias for the dictionary of the ``__builtin__``\nmodule itself. ``__builtins__`` can be set to a user-created\ndictionary to create a weak form of restricted execution.\n\n**CPython implementation detail:** Users should not touch\n``__builtins__``; it is strictly an implementation detail. Users\nwanting to override values in the builtins namespace should ``import``\nthe ``__builtin__`` (no 's') module and modify its attributes\nappropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n``__main__``.\n\nThe ``global`` statement has the same scope as a name binding\noperation in the same block. If the nearest enclosing scope for a\nfree variable contains a global statement, the free variable is\ntreated as a global.\n\nA class definition is an executable statement that may use and define\nnames. These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class. 
Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n=================================\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- ``import *`` --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a ``SyntaxError``.\n\nIf ``exec`` is used in a function and the function contains or is a\nnested block with free variables, the compiler will raise a\n``SyntaxError`` unless the exec explicitly specifies the local\nnamespace for the ``exec``. (In other words, ``exec obj`` would be\nillegal, but ``exec obj in ns`` would be legal.)\n\nThe ``eval()``, ``execfile()``, and ``input()`` functions and the\n``exec`` statement do not have access to the full environment for\nresolving names. Names may be resolved in the local and global\nnamespaces of the caller. Free variables are not resolved in the\nnearest enclosing namespace, but in the global namespace. [1] The\n``exec`` statement and the ``eval()`` and ``execfile()`` functions\nhave optional arguments to override the global and local namespace.\nIf only one namespace is specified, it is used for both.\n", 'numbers': u"\nNumeric literals\n****************\n\nThere are four types of numeric literals: plain integers, long\nintegers, floating point numbers, and imaginary numbers. 
There are no\ncomplex literals (complex numbers can be formed by adding a real\nnumber and an imaginary number).\n\nNote that numeric literals do not include a sign; a phrase like ``-1``\nis actually an expression composed of the unary operator '``-``' and\nthe literal ``1``.\n", 'numeric-types': u'\nEmulating numeric types\n***********************\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations (``+``, ``-``, ``*``, ``//``, ``%``, ``divmod()``,\n ``pow()``, ``**``, ``<<``, ``>>``, ``&``, ``^``, ``|``). For\n instance, to evaluate the expression ``x + y``, where *x* is an\n instance of a class that has an ``__add__()`` method,\n ``x.__add__(y)`` is called. The ``__divmod__()`` method should be\n the equivalent to using ``__floordiv__()`` and ``__mod__()``; it\n should not be related to ``__truediv__()`` (described below). Note\n that ``__pow__()`` should be defined to accept an optional third\n argument if the ternary version of the built-in ``pow()`` function\n is to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return ``NotImplemented``.\n\nobject.__div__(self, other)\nobject.__truediv__(self, other)\n\n The division operator (``/``) is implemented by these methods. 
The\n ``__truediv__()`` method is used when ``__future__.division`` is in\n effect, otherwise ``__div__()`` is used. If only one of these two\n methods is defined, the object will not support division in the\n alternate context; ``TypeError`` will be raised instead.\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rdiv__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations (``+``, ``-``, ``*``, ``/``, ``%``, ``divmod()``,\n ``pow()``, ``**``, ``<<``, ``>>``, ``&``, ``^``, ``|``) with\n reflected (swapped) operands. These functions are only called if\n the left operand does not support the corresponding operation and\n the operands are of different types. [2] For instance, to evaluate\n the expression ``x - y``, where *y* is an instance of a class that\n has an ``__rsub__()`` method, ``y.__rsub__(x)`` is called if\n ``x.__sub__(y)`` returns *NotImplemented*.\n\n Note that ternary ``pow()`` will not try calling ``__rpow__()``\n (the coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left operand\'s\n type and that subclass provides the reflected method for the\n operation, this method will be called before the left operand\'s\n non-reflected method. 
This behavior allows subclasses to\n override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__idiv__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments (``+=``, ``-=``, ``*=``, ``/=``, ``//=``, ``%=``,\n ``**=``, ``<<=``, ``>>=``, ``&=``, ``^=``, ``|=``). These methods\n should attempt to do the operation in-place (modifying *self*) and\n return the result (which could be, but does not have to be,\n *self*). If a specific method is not defined, the augmented\n assignment falls back to the normal methods. For instance, to\n execute the statement ``x += y``, where *x* is an instance of a\n class that has an ``__iadd__()`` method, ``x.__iadd__(y)`` is\n called. If *x* is an instance of a class that does not define a\n ``__iadd__()`` method, ``x.__add__(y)`` and ``y.__radd__(x)`` are\n considered, as with the evaluation of ``x + y``.\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations (``-``, ``+``,\n ``abs()`` and ``~``).\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__long__(self)\nobject.__float__(self)\n\n Called to implement the built-in functions ``complex()``,\n ``int()``, ``long()``, and ``float()``. Should return a value of\n the appropriate type.\n\nobject.__oct__(self)\nobject.__hex__(self)\n\n Called to implement the built-in functions ``oct()`` and ``hex()``.\n Should return a string value.\n\nobject.__index__(self)\n\n Called to implement ``operator.index()``. Also called whenever\n Python needs an integer object (such as in slicing). 
Must return\n an integer (int or long).\n\n New in version 2.5.\n\nobject.__coerce__(self, other)\n\n Called to implement "mixed-mode" numeric arithmetic. Should either\n return a 2-tuple containing *self* and *other* converted to a\n common numeric type, or ``None`` if conversion is impossible. When\n the common type would be the type of ``other``, it is sufficient to\n return ``None``, since the interpreter will also ask the other\n object to attempt a coercion (but sometimes, if the implementation\n of the other type cannot be changed, it is useful to do the\n conversion to the other type here). A return value of\n ``NotImplemented`` is equivalent to returning ``None``.\n', - 'objects': u'\nObjects, values and types\n*************************\n\n*Objects* are Python\'s abstraction for data. All data in a Python\nprogram is represented by objects or by relations between objects. (In\na sense, and in conformance to Von Neumann\'s model of a "stored\nprogram computer," code is also represented by objects.)\n\nEvery object has an identity, a type and a value. An object\'s\n*identity* never changes once it has been created; you may think of it\nas the object\'s address in memory. The \'``is``\' operator compares the\nidentity of two objects; the ``id()`` function returns an integer\nrepresenting its identity (currently implemented as its address). An\nobject\'s *type* is also unchangeable. [1] An object\'s type determines\nthe operations that the object supports (e.g., "does it have a\nlength?") and also defines the possible values for objects of that\ntype. The ``type()`` function returns an object\'s type (which is an\nobject itself). The *value* of some objects can change. Objects\nwhose value can change are said to be *mutable*; objects whose value\nis unchangeable once they are created are called *immutable*. 
(The\nvalue of an immutable container object that contains a reference to a\nmutable object can change when the latter\'s value is changed; however\nthe container is still considered immutable, because the collection of\nobjects it contains cannot be changed. So, immutability is not\nstrictly the same as having an unchangeable value, it is more subtle.)\nAn object\'s mutability is determined by its type; for instance,\nnumbers, strings and tuples are immutable, while dictionaries and\nlists are mutable.\n\nObjects are never explicitly destroyed; however, when they become\nunreachable they may be garbage-collected. An implementation is\nallowed to postpone garbage collection or omit it altogether --- it is\na matter of implementation quality how garbage collection is\nimplemented, as long as no objects are collected that are still\nreachable.\n\n**CPython implementation detail:** CPython currently uses a reference-\ncounting scheme with (optional) delayed detection of cyclically linked\ngarbage, which collects most objects as soon as they become\nunreachable, but is not guaranteed to collect garbage containing\ncircular references. See the documentation of the ``gc`` module for\ninformation on controlling the collection of cyclic garbage. Other\nimplementations act differently and CPython may change.\n\nNote that the use of the implementation\'s tracing or debugging\nfacilities may keep objects alive that would normally be collectable.\nAlso note that catching an exception with a \'``try``...``except``\'\nstatement may keep objects alive.\n\nSome objects contain references to "external" resources such as open\nfiles or windows. It is understood that these resources are freed\nwhen the object is garbage-collected, but since garbage collection is\nnot guaranteed to happen, such objects also provide an explicit way to\nrelease the external resource, usually a ``close()`` method. Programs\nare strongly recommended to explicitly close such objects. 
The\n\'``try``...``finally``\' statement provides a convenient way to do\nthis.\n\nSome objects contain references to other objects; these are called\n*containers*. Examples of containers are tuples, lists and\ndictionaries. The references are part of a container\'s value. In\nmost cases, when we talk about the value of a container, we imply the\nvalues, not the identities of the contained objects; however, when we\ntalk about the mutability of a container, only the identities of the\nimmediately contained objects are implied. So, if an immutable\ncontainer (like a tuple) contains a reference to a mutable object, its\nvalue changes if that mutable object is changed.\n\nTypes affect almost all aspects of object behavior. Even the\nimportance of object identity is affected in some sense: for immutable\ntypes, operations that compute new values may actually return a\nreference to any existing object with the same type and value, while\nfor mutable objects this is not allowed. E.g., after ``a = 1; b =\n1``, ``a`` and ``b`` may or may not refer to the same object with the\nvalue one, depending on the implementation, but after ``c = []; d =\n[]``, ``c`` and ``d`` are guaranteed to refer to two different,\nunique, newly created empty lists. (Note that ``c = d = []`` assigns\nthe same object to both ``c`` and ``d``.)\n', - 'operator-summary': u'\nSummary\n*******\n\nThe following table summarizes the operator precedences in Python,\nfrom lowest precedence (least binding) to highest precedence (most\nbinding). Operators in the same box have the same precedence. Unless\nthe syntax is explicitly given, operators are binary. 
Operators in\nthe same box group left to right (except for comparisons, including\ntests, which all have the same precedence and chain from left to right\n--- see section *Comparisons* --- and exponentiation, which groups\nfrom right to left).\n\n+-------------------------------------------------+---------------------------------------+\n| Operator | Description |\n+=================================================+=======================================+\n| ``lambda`` | Lambda expression |\n+-------------------------------------------------+---------------------------------------+\n| ``if`` -- ``else`` | Conditional expression |\n+-------------------------------------------------+---------------------------------------+\n| ``or`` | Boolean OR |\n+-------------------------------------------------+---------------------------------------+\n| ``and`` | Boolean AND |\n+-------------------------------------------------+---------------------------------------+\n| ``not`` *x* | Boolean NOT |\n+-------------------------------------------------+---------------------------------------+\n| ``in``, ``not`` ``in``, ``is``, ``is not``, | Comparisons, including membership |\n| ``<``, ``<=``, ``>``, ``>=``, ``<>``, ``!=``, | tests and identity tests, |\n| ``==`` | |\n+-------------------------------------------------+---------------------------------------+\n| ``|`` | Bitwise OR |\n+-------------------------------------------------+---------------------------------------+\n| ``^`` | Bitwise XOR |\n+-------------------------------------------------+---------------------------------------+\n| ``&`` | Bitwise AND |\n+-------------------------------------------------+---------------------------------------+\n| ``<<``, ``>>`` | Shifts |\n+-------------------------------------------------+---------------------------------------+\n| ``+``, ``-`` | Addition and subtraction |\n+-------------------------------------------------+---------------------------------------+\n| ``*``, ``/``, 
``//``, ``%`` | Multiplication, division, remainder |\n+-------------------------------------------------+---------------------------------------+\n| ``+x``, ``-x``, ``~x`` | Positive, negative, bitwise NOT |\n+-------------------------------------------------+---------------------------------------+\n| ``**`` | Exponentiation [8] |\n+-------------------------------------------------+---------------------------------------+\n| ``x[index]``, ``x[index:index]``, | Subscription, slicing, call, |\n| ``x(arguments...)``, ``x.attribute`` | attribute reference |\n+-------------------------------------------------+---------------------------------------+\n| ``(expressions...)``, ``[expressions...]``, | Binding or tuple display, list |\n| ``{key:datum...}``, ```expressions...``` | display, dictionary display, string |\n| | conversion |\n+-------------------------------------------------+---------------------------------------+\n\n-[ Footnotes ]-\n\n[1] In Python 2.3 and later releases, a list comprehension "leaks" the\n control variables of each ``for`` it contains into the containing\n scope. However, this behavior is deprecated, and relying on it\n will not work in Python 3.0\n\n[2] While ``abs(x%y) < abs(y)`` is true mathematically, for floats it\n may not be true numerically due to roundoff. For example, and\n assuming a platform on which a Python float is an IEEE 754 double-\n precision number, in order that ``-1e-100 % 1e100`` have the same\n sign as ``1e100``, the computed result is ``-1e-100 + 1e100``,\n which is numerically exactly equal to ``1e100``. Function\n ``fmod()`` in the ``math`` module returns a result whose sign\n matches the sign of the first argument instead, and so returns\n ``-1e-100`` in this case. Which approach is more appropriate\n depends on the application.\n\n[3] If x is very close to an exact integer multiple of y, it\'s\n possible for ``floor(x/y)`` to be one larger than ``(x-x%y)/y``\n due to rounding. 
In such cases, Python returns the latter result,\n in order to preserve that ``divmod(x,y)[0] * y + x % y`` be very\n close to ``x``.\n\n[4] While comparisons between unicode strings make sense at the byte\n level, they may be counter-intuitive to users. For example, the\n strings ``u"\\u00C7"`` and ``u"\\u0043\\u0327"`` compare differently,\n even though they both represent the same unicode character (LATIN\n CAPITAL LETTER C WITH CEDILLA). To compare strings in a human\n recognizable way, compare using ``unicodedata.normalize()``.\n\n[5] The implementation computes this efficiently, without constructing\n lists or sorting.\n\n[6] Earlier versions of Python used lexicographic comparison of the\n sorted (key, value) lists, but this was very expensive for the\n common case of comparing for equality. An even earlier version of\n Python compared dictionaries by identity only, but this caused\n surprises because people expected to be able to test a dictionary\n for emptiness by comparing it to ``{}``.\n\n[7] Due to automatic garbage-collection, free lists, and the dynamic\n nature of descriptors, you may notice seemingly unusual behaviour\n in certain uses of the ``is`` operator, like those involving\n comparisons between instance methods, or constants. Check their\n documentation for more info.\n\n[8] The power operator ``**`` binds less tightly than an arithmetic or\n bitwise unary operator on its right, that is, ``2**-1`` is\n ``0.5``.\n', + 'objects': u'\nObjects, values and types\n*************************\n\n*Objects* are Python\'s abstraction for data. All data in a Python\nprogram is represented by objects or by relations between objects. (In\na sense, and in conformance to Von Neumann\'s model of a "stored\nprogram computer," code is also represented by objects.)\n\nEvery object has an identity, a type and a value. An object\'s\n*identity* never changes once it has been created; you may think of it\nas the object\'s address in memory. 
The \'``is``\' operator compares the\nidentity of two objects; the ``id()`` function returns an integer\nrepresenting its identity (currently implemented as its address). An\nobject\'s *type* is also unchangeable. [1] An object\'s type determines\nthe operations that the object supports (e.g., "does it have a\nlength?") and also defines the possible values for objects of that\ntype. The ``type()`` function returns an object\'s type (which is an\nobject itself). The *value* of some objects can change. Objects\nwhose value can change are said to be *mutable*; objects whose value\nis unchangeable once they are created are called *immutable*. (The\nvalue of an immutable container object that contains a reference to a\nmutable object can change when the latter\'s value is changed; however\nthe container is still considered immutable, because the collection of\nobjects it contains cannot be changed. So, immutability is not\nstrictly the same as having an unchangeable value, it is more subtle.)\nAn object\'s mutability is determined by its type; for instance,\nnumbers, strings and tuples are immutable, while dictionaries and\nlists are mutable.\n\nObjects are never explicitly destroyed; however, when they become\nunreachable they may be garbage-collected. An implementation is\nallowed to postpone garbage collection or omit it altogether --- it is\na matter of implementation quality how garbage collection is\nimplemented, as long as no objects are collected that are still\nreachable.\n\n**CPython implementation detail:** CPython currently uses a reference-\ncounting scheme with (optional) delayed detection of cyclically linked\ngarbage, which collects most objects as soon as they become\nunreachable, but is not guaranteed to collect garbage containing\ncircular references. See the documentation of the ``gc`` module for\ninformation on controlling the collection of cyclic garbage. Other\nimplementations act differently and CPython may change. 
Do not depend\non immediate finalization of objects when they become unreachable (ex:\nalways close files).\n\nNote that the use of the implementation\'s tracing or debugging\nfacilities may keep objects alive that would normally be collectable.\nAlso note that catching an exception with a \'``try``...``except``\'\nstatement may keep objects alive.\n\nSome objects contain references to "external" resources such as open\nfiles or windows. It is understood that these resources are freed\nwhen the object is garbage-collected, but since garbage collection is\nnot guaranteed to happen, such objects also provide an explicit way to\nrelease the external resource, usually a ``close()`` method. Programs\nare strongly recommended to explicitly close such objects. The\n\'``try``...``finally``\' statement provides a convenient way to do\nthis.\n\nSome objects contain references to other objects; these are called\n*containers*. Examples of containers are tuples, lists and\ndictionaries. The references are part of a container\'s value. In\nmost cases, when we talk about the value of a container, we imply the\nvalues, not the identities of the contained objects; however, when we\ntalk about the mutability of a container, only the identities of the\nimmediately contained objects are implied. So, if an immutable\ncontainer (like a tuple) contains a reference to a mutable object, its\nvalue changes if that mutable object is changed.\n\nTypes affect almost all aspects of object behavior. Even the\nimportance of object identity is affected in some sense: for immutable\ntypes, operations that compute new values may actually return a\nreference to any existing object with the same type and value, while\nfor mutable objects this is not allowed. 
E.g., after ``a = 1; b =\n1``, ``a`` and ``b`` may or may not refer to the same object with the\nvalue one, depending on the implementation, but after ``c = []; d =\n[]``, ``c`` and ``d`` are guaranteed to refer to two different,\nunique, newly created empty lists. (Note that ``c = d = []`` assigns\nthe same object to both ``c`` and ``d``.)\n', + 'operator-summary': u'\nSummary\n*******\n\nThe following table summarizes the operator precedences in Python,\nfrom lowest precedence (least binding) to highest precedence (most\nbinding). Operators in the same box have the same precedence. Unless\nthe syntax is explicitly given, operators are binary. Operators in\nthe same box group left to right (except for comparisons, including\ntests, which all have the same precedence and chain from left to right\n--- see section *Comparisons* --- and exponentiation, which groups\nfrom right to left).\n\n+-------------------------------------------------+---------------------------------------+\n| Operator | Description |\n+=================================================+=======================================+\n| ``lambda`` | Lambda expression |\n+-------------------------------------------------+---------------------------------------+\n| ``if`` -- ``else`` | Conditional expression |\n+-------------------------------------------------+---------------------------------------+\n| ``or`` | Boolean OR |\n+-------------------------------------------------+---------------------------------------+\n| ``and`` | Boolean AND |\n+-------------------------------------------------+---------------------------------------+\n| ``not`` *x* | Boolean NOT |\n+-------------------------------------------------+---------------------------------------+\n| ``in``, ``not`` ``in``, ``is``, ``is not``, | Comparisons, including membership |\n| ``<``, ``<=``, ``>``, ``>=``, ``<>``, ``!=``, | tests and identity tests, |\n| ``==`` | 
|\n+-------------------------------------------------+---------------------------------------+\n| ``|`` | Bitwise OR |\n+-------------------------------------------------+---------------------------------------+\n| ``^`` | Bitwise XOR |\n+-------------------------------------------------+---------------------------------------+\n| ``&`` | Bitwise AND |\n+-------------------------------------------------+---------------------------------------+\n| ``<<``, ``>>`` | Shifts |\n+-------------------------------------------------+---------------------------------------+\n| ``+``, ``-`` | Addition and subtraction |\n+-------------------------------------------------+---------------------------------------+\n| ``*``, ``/``, ``//``, ``%`` | Multiplication, division, remainder |\n| | [8] |\n+-------------------------------------------------+---------------------------------------+\n| ``+x``, ``-x``, ``~x`` | Positive, negative, bitwise NOT |\n+-------------------------------------------------+---------------------------------------+\n| ``**`` | Exponentiation [9] |\n+-------------------------------------------------+---------------------------------------+\n| ``x[index]``, ``x[index:index]``, | Subscription, slicing, call, |\n| ``x(arguments...)``, ``x.attribute`` | attribute reference |\n+-------------------------------------------------+---------------------------------------+\n| ``(expressions...)``, ``[expressions...]``, | Binding or tuple display, list |\n| ``{key:datum...}``, ```expressions...``` | display, dictionary display, string |\n| | conversion |\n+-------------------------------------------------+---------------------------------------+\n\n-[ Footnotes ]-\n\n[1] In Python 2.3 and later releases, a list comprehension "leaks" the\n control variables of each ``for`` it contains into the containing\n scope. 
However, this behavior is deprecated, and relying on it\n will not work in Python 3.0\n\n[2] While ``abs(x%y) < abs(y)`` is true mathematically, for floats it\n may not be true numerically due to roundoff. For example, and\n assuming a platform on which a Python float is an IEEE 754 double-\n precision number, in order that ``-1e-100 % 1e100`` have the same\n sign as ``1e100``, the computed result is ``-1e-100 + 1e100``,\n which is numerically exactly equal to ``1e100``. The function\n ``math.fmod()`` returns a result whose sign matches the sign of\n the first argument instead, and so returns ``-1e-100`` in this\n case. Which approach is more appropriate depends on the\n application.\n\n[3] If x is very close to an exact integer multiple of y, it\'s\n possible for ``floor(x/y)`` to be one larger than ``(x-x%y)/y``\n due to rounding. In such cases, Python returns the latter result,\n in order to preserve that ``divmod(x,y)[0] * y + x % y`` be very\n close to ``x``.\n\n[4] While comparisons between unicode strings make sense at the byte\n level, they may be counter-intuitive to users. For example, the\n strings ``u"\\u00C7"`` and ``u"\\u0043\\u0327"`` compare differently,\n even though they both represent the same unicode character (LATIN\n CAPITAL LETTER C WITH CEDILLA). To compare strings in a human\n recognizable way, compare using ``unicodedata.normalize()``.\n\n[5] The implementation computes this efficiently, without constructing\n lists or sorting.\n\n[6] Earlier versions of Python used lexicographic comparison of the\n sorted (key, value) lists, but this was very expensive for the\n common case of comparing for equality. 
An even earlier version of\n Python compared dictionaries by identity only, but this caused\n surprises because people expected to be able to test a dictionary\n for emptiness by comparing it to ``{}``.\n\n[7] Due to automatic garbage-collection, free lists, and the dynamic\n nature of descriptors, you may notice seemingly unusual behaviour\n in certain uses of the ``is`` operator, like those involving\n comparisons between instance methods, or constants. Check their\n documentation for more info.\n\n[8] The ``%`` operator is also used for string formatting; the same\n precedence applies.\n\n[9] The power operator ``**`` binds less tightly than an arithmetic or\n bitwise unary operator on its right, that is, ``2**-1`` is\n ``0.5``.\n', 'pass': u'\nThe ``pass`` statement\n**********************\n\n pass_stmt ::= "pass"\n\n``pass`` is a null operation --- when it is executed, nothing happens.\nIt is useful as a placeholder when a statement is required\nsyntactically, but no code needs to be executed, for example:\n\n def f(arg): pass # a function that does nothing (yet)\n\n class C: pass # a class with no methods (yet)\n', 'power': u'\nThe power operator\n******************\n\nThe power operator binds more tightly than unary operators on its\nleft; it binds less tightly than unary operators on its right. The\nsyntax is:\n\n power ::= primary ["**" u_expr]\n\nThus, in an unparenthesized sequence of power and unary operators, the\noperators are evaluated from right to left (this does not constrain\nthe evaluation order for the operands): ``-1**2`` results in ``-1``.\n\nThe power operator has the same semantics as the built-in ``pow()``\nfunction, when called with two arguments: it yields its left argument\nraised to the power of its right argument. The numeric arguments are\nfirst converted to a common type. The result type is that of the\narguments after coercion.\n\nWith mixed operand types, the coercion rules for binary arithmetic\noperators apply. 
For int and long int operands, the result has the\nsame type as the operands (after coercion) unless the second argument\nis negative; in that case, all arguments are converted to float and a\nfloat result is delivered. For example, ``10**2`` returns ``100``, but\n``10**-2`` returns ``0.01``. (This last feature was added in Python\n2.2. In Python 2.1 and before, if both arguments were of integer types\nand the second argument was negative, an exception was raised).\n\nRaising ``0.0`` to a negative power results in a\n``ZeroDivisionError``. Raising a negative number to a fractional power\nresults in a ``ValueError``.\n', 'print': u'\nThe ``print`` statement\n***********************\n\n print_stmt ::= "print" ([expression ("," expression)* [","]]\n | ">>" expression [("," expression)+ [","]])\n\n``print`` evaluates each expression in turn and writes the resulting\nobject to standard output (see below). If an object is not a string,\nit is first converted to a string using the rules for string\nconversions. The (resulting or original) string is then written. A\nspace is written before each object is (converted and) written, unless\nthe output system believes it is positioned at the beginning of a\nline. This is the case (1) when no characters have yet been written\nto standard output, (2) when the last character written to standard\noutput is a whitespace character except ``\' \'``, or (3) when the last\nwrite operation on standard output was not a ``print`` statement. (In\nsome cases it may be functional to write an empty string to standard\noutput for this reason.)\n\nNote: Objects which act like file objects but which are not the built-in\n file objects often do not properly emulate this aspect of the file\n object\'s behavior, so it is best not to rely on this.\n\nA ``\'\\n\'`` character is written at the end, unless the ``print``\nstatement ends with a comma. 
This is the only action if the statement\ncontains just the keyword ``print``.\n\nStandard output is defined as the file object named ``stdout`` in the\nbuilt-in module ``sys``. If no such object exists, or if it does not\nhave a ``write()`` method, a ``RuntimeError`` exception is raised.\n\n``print`` also has an extended form, defined by the second portion of\nthe syntax described above. This form is sometimes referred to as\n"``print`` chevron." In this form, the first expression after the\n``>>`` must evaluate to a "file-like" object, specifically an object\nthat has a ``write()`` method as described above. With this extended\nform, the subsequent expressions are printed to this file object. If\nthe first expression evaluates to ``None``, then ``sys.stdout`` is\nused as the file for output.\n', @@ -63,21 +63,21 @@ 'shifting': u'\nShifting operations\n*******************\n\nThe shifting operations have lower priority than the arithmetic\noperations:\n\n shift_expr ::= a_expr | shift_expr ( "<<" | ">>" ) a_expr\n\nThese operators accept plain or long integers as arguments. The\narguments are converted to a common type. They shift the first\nargument to the left or right by the number of bits given by the\nsecond argument.\n\nA right shift by *n* bits is defined as division by ``pow(2, n)``. A\nleft shift by *n* bits is defined as multiplication with ``pow(2,\nn)``. Negative shift counts raise a ``ValueError`` exception.\n\nNote: In the current implementation, the right-hand operand is required to\n be at most ``sys.maxsize``. If the right-hand operand is larger\n than ``sys.maxsize`` an ``OverflowError`` exception is raised.\n', 'slicings': u'\nSlicings\n********\n\nA slicing selects a range of items in a sequence object (e.g., a\nstring, tuple or list). Slicings may be used as expressions or as\ntargets in assignment or ``del`` statements. 
The syntax for a\nslicing:\n\n slicing ::= simple_slicing | extended_slicing\n simple_slicing ::= primary "[" short_slice "]"\n extended_slicing ::= primary "[" slice_list "]"\n slice_list ::= slice_item ("," slice_item)* [","]\n slice_item ::= expression | proper_slice | ellipsis\n proper_slice ::= short_slice | long_slice\n short_slice ::= [lower_bound] ":" [upper_bound]\n long_slice ::= short_slice ":" [stride]\n lower_bound ::= expression\n upper_bound ::= expression\n stride ::= expression\n ellipsis ::= "..."\n\nThere is ambiguity in the formal syntax here: anything that looks like\nan expression list also looks like a slice list, so any subscription\ncan be interpreted as a slicing. Rather than further complicating the\nsyntax, this is disambiguated by defining that in this case the\ninterpretation as a subscription takes priority over the\ninterpretation as a slicing (this is the case if the slice list\ncontains no proper slice nor ellipses). Similarly, when the slice\nlist has exactly one short slice and no trailing comma, the\ninterpretation as a simple slicing takes priority over that as an\nextended slicing.\n\nThe semantics for a simple slicing are as follows. The primary must\nevaluate to a sequence object. The lower and upper bound expressions,\nif present, must evaluate to plain integers; defaults are zero and the\n``sys.maxint``, respectively. If either bound is negative, the\nsequence\'s length is added to it. The slicing now selects all items\nwith index *k* such that ``i <= k < j`` where *i* and *j* are the\nspecified lower and upper bounds. This may be an empty sequence. It\nis not an error if *i* or *j* lie outside the range of valid indexes\n(such items don\'t exist so they aren\'t selected).\n\nThe semantics for an extended slicing are as follows. The primary\nmust evaluate to a mapping object, and it is indexed with a key that\nis constructed from the slice list, as follows. 
If the slice list\ncontains at least one comma, the key is a tuple containing the\nconversion of the slice items; otherwise, the conversion of the lone\nslice item is the key. The conversion of a slice item that is an\nexpression is that expression. The conversion of an ellipsis slice\nitem is the built-in ``Ellipsis`` object. The conversion of a proper\nslice is a slice object (see section *The standard type hierarchy*)\nwhose ``start``, ``stop`` and ``step`` attributes are the values of\nthe expressions given as lower bound, upper bound and stride,\nrespectively, substituting ``None`` for missing expressions.\n', 'specialattrs': u"\nSpecial Attributes\n******************\n\nThe implementation adds a few special read-only attributes to several\nobject types, where they are relevant. Some of these are not reported\nby the ``dir()`` built-in function.\n\nobject.__dict__\n\n A dictionary or other mapping object used to store an object's\n (writable) attributes.\n\nobject.__methods__\n\n Deprecated since version 2.2: Use the built-in function ``dir()``\n to get a list of an object's attributes. This attribute is no\n longer available.\n\nobject.__members__\n\n Deprecated since version 2.2: Use the built-in function ``dir()``\n to get a list of an object's attributes. This attribute is no\n longer available.\n\ninstance.__class__\n\n The class to which a class instance belongs.\n\nclass.__bases__\n\n The tuple of base classes of a class object.\n\nclass.__name__\n\n The name of the class or type.\n\nThe following attributes are only supported by *new-style class*es.\n\nclass.__mro__\n\n This attribute is a tuple of classes that are considered when\n looking for base classes during method resolution.\n\nclass.mro()\n\n This method can be overridden by a metaclass to customize the\n method resolution order for its instances. 
It is called at class\n instantiation, and its result is stored in ``__mro__``.\n\nclass.__subclasses__()\n\n Each new-style class keeps a list of weak references to its\n immediate subclasses. This method returns a list of all those\n references still alive. Example:\n\n >>> int.__subclasses__()\n []\n\n-[ Footnotes ]-\n\n[1] Additional information on these special methods may be found in\n the Python Reference Manual (*Basic customization*).\n\n[2] As a consequence, the list ``[1, 2]`` is considered equal to\n ``[1.0, 2.0]``, and similarly for tuples.\n\n[3] They must have since the parser can't tell the type of the\n operands.\n\n[4] To format only a tuple you should therefore provide a singleton\n tuple whose only element is the tuple to be formatted.\n\n[5] The advantage of leaving the newline on is that returning an empty\n string is then an unambiguous EOF indication. It is also possible\n (in cases where it might matter, for example, if you want to make\n an exact copy of a file while scanning its lines) to tell whether\n the last line of a file ended in a newline or not (yes this\n happens!).\n", - 'specialnames': u'\nSpecial method names\n********************\n\nA class can implement certain operations that are invoked by special\nsyntax (such as arithmetic operations or subscripting and slicing) by\ndefining methods with special names. This is Python\'s approach to\n*operator overloading*, allowing classes to define their own behavior\nwith respect to language operators. For instance, if a class defines\na method named ``__getitem__()``, and ``x`` is an instance of this\nclass, then ``x[i]`` is roughly equivalent to ``x.__getitem__(i)`` for\nold-style classes and ``type(x).__getitem__(x, i)`` for new-style\nclasses. 
Except where mentioned, attempts to execute an operation\nraise an exception when no appropriate method is defined (typically\n``AttributeError`` or ``TypeError``).\n\nWhen implementing a class that emulates any built-in type, it is\nimportant that the emulation only be implemented to the degree that it\nmakes sense for the object being modelled. For example, some\nsequences may work well with retrieval of individual elements, but\nextracting a slice may not make sense. (One example of this is the\n``NodeList`` interface in the W3C\'s Document Object Model.)\n\n\nBasic customization\n===================\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. ``__new__()`` is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). The return\n value of ``__new__()`` should be the new object instance (usually\n an instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s ``__new__()`` method using\n ``super(currentclass, cls).__new__(cls[, ...])`` with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If ``__new__()`` returns an instance of *cls*, then the new\n instance\'s ``__init__()`` method will be invoked like\n ``__init__(self[, ...])``, where *self* is the new instance and the\n remaining arguments are the same as were passed to ``__new__()``.\n\n If ``__new__()`` does not return an instance of *cls*, then the new\n instance\'s ``__init__()`` method will not be invoked.\n\n ``__new__()`` is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. 
It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called when the instance is created. The arguments are those\n passed to the class constructor expression. If a base class has an\n ``__init__()`` method, the derived class\'s ``__init__()`` method,\n if any, must explicitly call it to ensure proper initialization of\n the base class part of the instance; for example:\n ``BaseClass.__init__(self, [args...])``. As a special constraint\n on constructors, no value may be returned; doing so will cause a\n ``TypeError`` to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a ``__del__()`` method,\n the derived class\'s ``__del__()`` method, if any, must explicitly\n call it to ensure proper deletion of the base class part of the\n instance. Note that it is possible (though not recommended!) for\n the ``__del__()`` method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n ``__del__()`` methods are called for objects that still exist when\n the interpreter exits.\n\n Note: ``del x`` doesn\'t directly call ``x.__del__()`` --- the former\n decrements the reference count for ``x`` by one, and the latter\n is only called when ``x``\'s reference count reaches zero. 
Some\n common situations that may prevent the reference count of an\n object from going to zero include: circular references between\n objects (e.g., a doubly-linked list or a tree data structure with\n parent and child pointers); a reference to the object on the\n stack frame of a function that caught an exception (the traceback\n stored in ``sys.exc_traceback`` keeps the stack frame alive); or\n a reference to the object on the stack frame that raised an\n unhandled exception in interactive mode (the traceback stored in\n ``sys.last_traceback`` keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing ``None`` in\n ``sys.exc_traceback`` or ``sys.last_traceback``. Circular\n references which are garbage are detected when the option cycle\n detector is enabled (it\'s on by default), but can only be cleaned\n up if there are no Python-level ``__del__()`` methods involved.\n Refer to the documentation for the ``gc`` module for more\n information about how ``__del__()`` methods are handled by the\n cycle detector, particularly the description of the ``garbage``\n value.\n\n Warning: Due to the precarious circumstances under which ``__del__()``\n methods are invoked, exceptions that occur during their execution\n are ignored, and a warning is printed to ``sys.stderr`` instead.\n Also, when ``__del__()`` is invoked in response to a module being\n deleted (e.g., when execution of the program is done), other\n globals referenced by the ``__del__()`` method may already have\n been deleted or in the process of being torn down (e.g. the\n import machinery shutting down). For this reason, ``__del__()``\n methods should do the absolute minimum needed to maintain\n external invariants. 
Starting with version 1.5, Python\n guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the ``__del__()`` method is called.\n\nobject.__repr__(self)\n\n Called by the ``repr()`` built-in function and by string\n conversions (reverse quotes) to compute the "official" string\n representation of an object. If at all possible, this should look\n like a valid Python expression that could be used to recreate an\n object with the same value (given an appropriate environment). If\n this is not possible, a string of the form ``<...some useful\n description...>`` should be returned. The return value must be a\n string object. If a class defines ``__repr__()`` but not\n ``__str__()``, then ``__repr__()`` is also used when an "informal"\n string representation of instances of that class is required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by the ``str()`` built-in function and by the ``print``\n statement to compute the "informal" string representation of an\n object. This differs from ``__repr__()`` in that it does not have\n to be a valid Python expression: a more convenient or concise\n representation may be used instead. The return value must be a\n string object.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n New in version 2.1.\n\n These are the so-called "rich comparison" methods, and are called\n for comparison operators in preference to ``__cmp__()`` below. 
The\n correspondence between operator symbols and method names is as\n follows: ``xy`` call ``x.__ne__(y)``, ``x>y`` calls ``x.__gt__(y)``, and\n ``x>=y`` calls ``x.__ge__(y)``.\n\n A rich comparison method may return the singleton\n ``NotImplemented`` if it does not implement the operation for a\n given pair of arguments. By convention, ``False`` and ``True`` are\n returned for a successful comparison. However, these methods can\n return any value, so if the comparison operator is used in a\n Boolean context (e.g., in the condition of an ``if`` statement),\n Python will call ``bool()`` on the value to determine if the result\n is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of ``x==y`` does not imply that ``x!=y`` is false.\n Accordingly, when defining ``__eq__()``, one should also define\n ``__ne__()`` so that the operators will behave as expected. See\n the paragraph on ``__hash__()`` for some important notes on\n creating *hashable* objects which support custom comparison\n operations and are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, ``__lt__()`` and ``__gt__()`` are each\n other\'s reflection, ``__le__()`` and ``__ge__()`` are each other\'s\n reflection, and ``__eq__()`` and ``__ne__()`` are their own\n reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see ``functools.total_ordering()``.\n\nobject.__cmp__(self, other)\n\n Called by comparison operations if rich comparison (see above) is\n not defined. 
Should return a negative integer if ``self < other``,\n zero if ``self == other``, a positive integer if ``self > other``.\n If no ``__cmp__()``, ``__eq__()`` or ``__ne__()`` operation is\n defined, class instances are compared by object identity\n ("address"). See also the description of ``__hash__()`` for some\n important notes on creating *hashable* objects which support custom\n comparison operations and are usable as dictionary keys. (Note: the\n restriction that exceptions are not propagated by ``__cmp__()`` has\n been removed since Python 1.5.)\n\nobject.__rcmp__(self, other)\n\n Changed in version 2.1: No longer supported.\n\nobject.__hash__(self)\n\n Called by built-in function ``hash()`` and for operations on\n members of hashed collections including ``set``, ``frozenset``, and\n ``dict``. ``__hash__()`` should return an integer. The only\n required property is that objects which compare equal have the same\n hash value; it is advised to somehow mix together (e.g. using\n exclusive or) the hash values for the components of the object that\n also play a part in comparison of objects.\n\n If a class does not define a ``__cmp__()`` or ``__eq__()`` method\n it should not define a ``__hash__()`` operation either; if it\n defines ``__cmp__()`` or ``__eq__()`` but not ``__hash__()``, its\n instances will not be usable in hashed collections. 
If a class\n defines mutable objects and implements a ``__cmp__()`` or\n ``__eq__()`` method, it should not implement ``__hash__()``, since\n hashable collection implementations require that a object\'s hash\n value is immutable (if the object\'s hash value changes, it will be\n in the wrong hash bucket).\n\n User-defined classes have ``__cmp__()`` and ``__hash__()`` methods\n by default; with them, all objects compare unequal (except with\n themselves) and ``x.__hash__()`` returns ``id(x)``.\n\n Classes which inherit a ``__hash__()`` method from a parent class\n but change the meaning of ``__cmp__()`` or ``__eq__()`` such that\n the hash value returned is no longer appropriate (e.g. by switching\n to a value-based concept of equality instead of the default\n identity based equality) can explicitly flag themselves as being\n unhashable by setting ``__hash__ = None`` in the class definition.\n Doing so means that not only will instances of the class raise an\n appropriate ``TypeError`` when a program attempts to retrieve their\n hash value, but they will also be correctly identified as\n unhashable when checking ``isinstance(obj, collections.Hashable)``\n (unlike classes which define their own ``__hash__()`` to explicitly\n raise ``TypeError``).\n\n Changed in version 2.5: ``__hash__()`` may now also return a long\n integer object; the 32-bit integer is then derived from the hash of\n that object.\n\n Changed in version 2.6: ``__hash__`` may now be set to ``None`` to\n explicitly flag instances of a class as unhashable.\n\nobject.__nonzero__(self)\n\n Called to implement truth value testing and the built-in operation\n ``bool()``; should return ``False`` or ``True``, or their integer\n equivalents ``0`` or ``1``. When this method is not defined,\n ``__len__()`` is called, if it is defined, and the object is\n considered true if its result is nonzero. 
If a class defines\n neither ``__len__()`` nor ``__nonzero__()``, all its instances are\n considered true.\n\nobject.__unicode__(self)\n\n Called to implement ``unicode()`` built-in; should return a Unicode\n object. When this method is not defined, string conversion is\n attempted, and the result of string conversion is converted to\n Unicode using the system default encoding.\n\n\nCustomizing attribute access\n============================\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of ``x.name``)\nfor class instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. it is not an instance attribute nor is it found\n in the class tree for ``self``). ``name`` is the attribute name.\n This method should return the (computed) attribute value or raise\n an ``AttributeError`` exception.\n\n Note that if the attribute is found through the normal mechanism,\n ``__getattr__()`` is not called. (This is an intentional asymmetry\n between ``__getattr__()`` and ``__setattr__()``.) This is done both\n for efficiency reasons and because otherwise ``__getattr__()``\n would have no way to access other attributes of the instance. Note\n that at least for instance variables, you can fake total control by\n not inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). See the\n ``__getattribute__()`` method below for a way to actually get total\n control in new-style classes.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). 
*name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If ``__setattr__()`` wants to assign to an instance attribute, it\n should not simply execute ``self.name = value`` --- this would\n cause a recursive call to itself. Instead, it should insert the\n value in the dictionary of instance attributes, e.g.,\n ``self.__dict__[name] = value``. For new-style classes, rather\n than accessing the instance dictionary, it should call the base\n class method with the same name, for example,\n ``object.__setattr__(self, name, value)``.\n\nobject.__delattr__(self, name)\n\n Like ``__setattr__()`` but for attribute deletion instead of\n assignment. This should only be implemented if ``del obj.name`` is\n meaningful for the object.\n\n\nMore attribute access for new-style classes\n-------------------------------------------\n\nThe following methods only apply to new-style classes.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines\n ``__getattr__()``, the latter will not be called unless\n ``__getattribute__()`` either calls it explicitly or raises an\n ``AttributeError``. This method should return the (computed)\n attribute value or raise an ``AttributeError`` exception. In order\n to avoid infinite recursion in this method, its implementation\n should always call the base class method with the same name to\n access any attributes it needs, for example,\n ``object.__getattribute__(self, name)``.\n\n Note: This method may still be bypassed when looking up special methods\n as the result of implicit invocation via language syntax or\n built-in functions. 
See *Special method lookup for new-style\n classes*.\n\n\nImplementing Descriptors\n------------------------\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in the\nclass dictionary of another new-style class, known as the *owner*\nclass. In the examples below, "the attribute" refers to the attribute\nwhose name is the key of the property in the owner class\'\n``__dict__``. Descriptors can only be implemented as new-style\nclasses themselves.\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). *owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or ``None`` when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an\n ``AttributeError`` exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\n\nInvoking Descriptors\n--------------------\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: ``__get__()``, ``__set__()``, and\n``__delete__()``. If any of those methods are defined for an object,\nit is said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. 
For instance, ``a.x`` has a\nlookup chain starting with ``a.__dict__[\'x\']``, then\n``type(a).__dict__[\'x\']``, and continuing through the base classes of\n``type(a)`` excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called. Note that descriptors are only invoked for new\nstyle objects or classes (ones that subclass ``object()`` or\n``type()``).\n\nThe starting point for descriptor invocation is a binding, ``a.x``.\nHow the arguments are assembled depends on ``a``:\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: ``x.__get__(a)``.\n\nInstance Binding\n If binding to a new-style object instance, ``a.x`` is transformed\n into the call: ``type(a).__dict__[\'x\'].__get__(a, type(a))``.\n\nClass Binding\n If binding to a new-style class, ``A.x`` is transformed into the\n call: ``A.__dict__[\'x\'].__get__(None, A)``.\n\nSuper Binding\n If ``a`` is an instance of ``super``, then the binding ``super(B,\n obj).m()`` searches ``obj.__class__.__mro__`` for the base class\n ``A`` immediately preceding ``B`` and then invokes the descriptor\n with the call: ``A.__dict__[\'m\'].__get__(obj, A)``.\n\nFor instance bindings, the precedence of descriptor invocation depends\non the which descriptor methods are defined. A descriptor can define\nany combination of ``__get__()``, ``__set__()`` and ``__delete__()``.\nIf it does not define ``__get__()``, then accessing the attribute will\nreturn the descriptor object itself unless there is a value in the\nobject\'s instance dictionary. If the descriptor defines ``__set__()``\nand/or ``__delete__()``, it is a data descriptor; if it defines\nneither, it is a non-data descriptor. 
Normally, data descriptors\ndefine both ``__get__()`` and ``__set__()``, while non-data\ndescriptors have just the ``__get__()`` method. Data descriptors with\n``__set__()`` and ``__get__()`` defined always override a redefinition\nin an instance dictionary. In contrast, non-data descriptors can be\noverridden by instances.\n\nPython methods (including ``staticmethod()`` and ``classmethod()``)\nare implemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe ``property()`` function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n---------\n\nBy default, instances of both old and new-style classes have a\ndictionary for attribute storage. This wastes space for objects\nhaving very few instance variables. The space consumption can become\nacute when creating large numbers of instances.\n\nThe default can be overridden by defining *__slots__* in a new-style\nclass definition. The *__slots__* declaration takes a sequence of\ninstance variables and reserves just enough space in each instance to\nhold a value for each variable. Space is saved because *__dict__* is\nnot created for each instance.\n\n__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. 
If defined in a\n new-style class, *__slots__* reserves space for the declared\n variables and prevents the automatic creation of *__dict__* and\n *__weakref__* for each instance.\n\n New in version 2.2.\n\nNotes on using *__slots__*\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raises ``AttributeError``. If\n dynamic assignment of new variables is desired, then add\n ``\'__dict__\'`` to the sequence of strings in the *__slots__*\n declaration.\n\n Changed in version 2.3: Previously, adding ``\'__dict__\'`` to the\n *__slots__* declaration would not enable the assignment of new\n attributes not specifically listed in the sequence of instance\n variable names.\n\n* Without a *__weakref__* variable for each instance, classes defining\n *__slots__* do not support weak references to its instances. If weak\n reference support is needed, then add ``\'__weakref__\'`` to the\n sequence of strings in the *__slots__* declaration.\n\n Changed in version 2.3: Previously, adding ``\'__weakref__\'`` to the\n *__slots__* declaration would not enable support for weak\n references.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (*Implementing Descriptors*) for each variable name. As\n a result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. 
As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the instance\n variable defined by the base class slot is inaccessible (except by\n retrieving its descriptor directly from the base class). This\n renders the meaning of the program undefined. In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as ``long``, ``str`` and\n ``tuple``.\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings may\n also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n\n Changed in version 2.6: Previously, *__class__* assignment raised an\n error if either new or old class had *__slots__*.\n\n\nCustomizing class creation\n==========================\n\nBy default, new-style classes are constructed using ``type()``. A\nclass definition is read into a separate namespace and the value of\nclass name is bound to the result of ``type(name, bases, dict)``.\n\nWhen the class definition is read, if *__metaclass__* is defined then\nthe callable assigned to it will be called instead of ``type()``. This\nallows classes or functions to be written which monitor or alter the\nclass creation process:\n\n* Modifying the class dictionary prior to the class being created.\n\n* Returning an instance of another class -- essentially performing the\n role of a factory function.\n\nThese steps will have to be performed in the metaclass\'s ``__new__()``\nmethod -- ``type.__new__()`` can then be called from this method to\ncreate a class with different properties. 
This example adds a new\nelement to the class dictionary before creating the class:\n\n class metacls(type):\n def __new__(mcs, name, bases, dict):\n dict[\'foo\'] = \'metacls was here\'\n return type.__new__(mcs, name, bases, dict)\n\nYou can of course also override other class methods (or add new\nmethods); for example defining a custom ``__call__()`` method in the\nmetaclass allows custom behavior when the class is called, e.g. not\nalways creating a new instance.\n\n__metaclass__\n\n This variable can be any callable accepting arguments for ``name``,\n ``bases``, and ``dict``. Upon class creation, the callable is used\n instead of the built-in ``type()``.\n\n New in version 2.2.\n\nThe appropriate metaclass is determined by the following precedence\nrules:\n\n* If ``dict[\'__metaclass__\']`` exists, it is used.\n\n* Otherwise, if there is at least one base class, its metaclass is\n used (this looks for a *__class__* attribute first and if not found,\n uses its type).\n\n* Otherwise, if a global variable named __metaclass__ exists, it is\n used.\n\n* Otherwise, the old-style, classic metaclass (types.ClassType) is\n used.\n\nThe potential uses for metaclasses are boundless. 
Some ideas that have\nbeen explored including logging, interface checking, automatic\ndelegation, automatic property creation, proxies, frameworks, and\nautomatic resource locking/synchronization.\n\n\nCustomizing instance and subclass checks\n========================================\n\nNew in version 2.6.\n\nThe following methods are used to override the default behavior of the\n``isinstance()`` and ``issubclass()`` built-in functions.\n\nIn particular, the metaclass ``abc.ABCMeta`` implements these methods\nin order to allow the addition of Abstract Base Classes (ABCs) as\n"virtual base classes" to any class or type (including built-in\ntypes), including other ABCs.\n\nclass.__instancecheck__(self, instance)\n\n Return true if *instance* should be considered a (direct or\n indirect) instance of *class*. If defined, called to implement\n ``isinstance(instance, class)``.\n\nclass.__subclasscheck__(self, subclass)\n\n Return true if *subclass* should be considered a (direct or\n indirect) subclass of *class*. If defined, called to implement\n ``issubclass(subclass, class)``.\n\nNote that these methods are looked up on the type (metaclass) of a\nclass. 
They cannot be defined as class methods in the actual class.\nThis is consistent with the lookup of special methods that are called\non instances, only in this case the instance is itself a class.\n\nSee also:\n\n **PEP 3119** - Introducing Abstract Base Classes\n Includes the specification for customizing ``isinstance()`` and\n ``issubclass()`` behavior through ``__instancecheck__()`` and\n ``__subclasscheck__()``, with motivation for this functionality\n in the context of adding Abstract Base Classes (see the ``abc``\n module) to the language.\n\n\nEmulating callable objects\n==========================\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, ``x(arg1, arg2, ...)`` is a shorthand for\n ``x.__call__(arg1, arg2, ...)``.\n\n\nEmulating container types\n=========================\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which ``0 <= k < N``\nwhere *N* is the length of the sequence, or slice objects, which\ndefine a range of items. (For backwards compatibility, the method\n``__getslice__()`` (see below) can also be defined to handle simple,\nbut not extended slices.) It is also recommended that mappings provide\nthe methods ``keys()``, ``values()``, ``items()``, ``has_key()``,\n``get()``, ``clear()``, ``setdefault()``, ``iterkeys()``,\n``itervalues()``, ``iteritems()``, ``pop()``, ``popitem()``,\n``copy()``, and ``update()`` behaving similar to those for Python\'s\nstandard dictionary objects. 
The ``UserDict`` module provides a\n``DictMixin`` class to help create those methods from a base set of\n``__getitem__()``, ``__setitem__()``, ``__delitem__()``, and\n``keys()``. Mutable sequences should provide methods ``append()``,\n``count()``, ``index()``, ``extend()``, ``insert()``, ``pop()``,\n``remove()``, ``reverse()`` and ``sort()``, like Python standard list\nobjects. Finally, sequence types should implement addition (meaning\nconcatenation) and multiplication (meaning repetition) by defining the\nmethods ``__add__()``, ``__radd__()``, ``__iadd__()``, ``__mul__()``,\n``__rmul__()`` and ``__imul__()`` described below; they should not\ndefine ``__coerce__()`` or other numerical operators. It is\nrecommended that both mappings and sequences implement the\n``__contains__()`` method to allow efficient use of the ``in``\noperator; for mappings, ``in`` should be equivalent of ``has_key()``;\nfor sequences, it should search through the values. It is further\nrecommended that both mappings and sequences implement the\n``__iter__()`` method to allow efficient iteration through the\ncontainer; for mappings, ``__iter__()`` should be the same as\n``iterkeys()``; for sequences, it should iterate through the values.\n\nobject.__len__(self)\n\n Called to implement the built-in function ``len()``. Should return\n the length of the object, an integer ``>=`` 0. Also, an object\n that doesn\'t define a ``__nonzero__()`` method and whose\n ``__len__()`` method returns zero is considered to be false in a\n Boolean context.\n\nobject.__getitem__(self, key)\n\n Called to implement evaluation of ``self[key]``. For sequence\n types, the accepted keys should be integers and slice objects.\n Note that the special interpretation of negative indexes (if the\n class wishes to emulate a sequence type) is up to the\n ``__getitem__()`` method. 
If *key* is of an inappropriate type,\n ``TypeError`` may be raised; if of a value outside the set of\n indexes for the sequence (after any special interpretation of\n negative values), ``IndexError`` should be raised. For mapping\n types, if *key* is missing (not in the container), ``KeyError``\n should be raised.\n\n Note: ``for`` loops expect that an ``IndexError`` will be raised for\n illegal indexes to allow proper detection of the end of the\n sequence.\n\nobject.__setitem__(self, key, value)\n\n Called to implement assignment to ``self[key]``. Same note as for\n ``__getitem__()``. This should only be implemented for mappings if\n the objects support changes to the values for keys, or if new keys\n can be added, or for sequences if elements can be replaced. The\n same exceptions should be raised for improper *key* values as for\n the ``__getitem__()`` method.\n\nobject.__delitem__(self, key)\n\n Called to implement deletion of ``self[key]``. Same note as for\n ``__getitem__()``. This should only be implemented for mappings if\n the objects support removal of keys, or for sequences if elements\n can be removed from the sequence. The same exceptions should be\n raised for improper *key* values as for the ``__getitem__()``\n method.\n\nobject.__iter__(self)\n\n This method is called when an iterator is required for a container.\n This method should return a new iterator object that can iterate\n over all the objects in the container. For mappings, it should\n iterate over the keys of the container, and should also be made\n available as the method ``iterkeys()``.\n\n Iterator objects also need to implement this method; they are\n required to return themselves. For more information on iterator\n objects, see *Iterator Types*.\n\nobject.__reversed__(self)\n\n Called (if present) by the ``reversed()`` built-in to implement\n reverse iteration. 
It should return a new iterator object that\n iterates over all the objects in the container in reverse order.\n\n If the ``__reversed__()`` method is not provided, the\n ``reversed()`` built-in will fall back to using the sequence\n protocol (``__len__()`` and ``__getitem__()``). Objects that\n support the sequence protocol should only provide\n ``__reversed__()`` if they can provide an implementation that is\n more efficient than the one provided by ``reversed()``.\n\n New in version 2.6.\n\nThe membership test operators (``in`` and ``not in``) are normally\nimplemented as an iteration through a sequence. However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n\n For objects that don\'t define ``__contains__()``, the membership\n test first tries iteration via ``__iter__()``, then the old\n sequence iteration protocol via ``__getitem__()``, see *this\n section in the language reference*.\n\n\nAdditional methods for emulation of sequence types\n==================================================\n\nThe following optional methods can be defined to further emulate\nsequence objects. Immutable sequences methods should at most only\ndefine ``__getslice__()``; mutable sequences might define all three\nmethods.\n\nobject.__getslice__(self, i, j)\n\n Deprecated since version 2.0: Support slice objects as parameters\n to the ``__getitem__()`` method. (However, built-in types in\n CPython currently still implement ``__getslice__()``. Therefore,\n you have to override it in derived classes when implementing\n slicing.)\n\n Called to implement evaluation of ``self[i:j]``. 
The returned\n object should be of the same type as *self*. Note that missing *i*\n or *j* in the slice expression are replaced by zero or\n ``sys.maxint``, respectively. If negative indexes are used in the\n slice, the length of the sequence is added to that index. If the\n instance does not implement the ``__len__()`` method, an\n ``AttributeError`` is raised. No guarantee is made that indexes\n adjusted this way are not still negative. Indexes which are\n greater than the length of the sequence are not modified. If no\n ``__getslice__()`` is found, a slice object is created instead, and\n passed to ``__getitem__()`` instead.\n\nobject.__setslice__(self, i, j, sequence)\n\n Called to implement assignment to ``self[i:j]``. Same notes for *i*\n and *j* as for ``__getslice__()``.\n\n This method is deprecated. If no ``__setslice__()`` is found, or\n for extended slicing of the form ``self[i:j:k]``, a slice object is\n created, and passed to ``__setitem__()``, instead of\n ``__setslice__()`` being called.\n\nobject.__delslice__(self, i, j)\n\n Called to implement deletion of ``self[i:j]``. Same notes for *i*\n and *j* as for ``__getslice__()``. This method is deprecated. If no\n ``__delslice__()`` is found, or for extended slicing of the form\n ``self[i:j:k]``, a slice object is created, and passed to\n ``__delitem__()``, instead of ``__delslice__()`` being called.\n\nNotice that these methods are only invoked when a single slice with a\nsingle colon is used, and the slice method is available. 
For slice\noperations involving extended slice notation, or in absence of the\nslice methods, ``__getitem__()``, ``__setitem__()`` or\n``__delitem__()`` is called with a slice object as argument.\n\nThe following example demonstrate how to make your program or module\ncompatible with earlier versions of Python (assuming that methods\n``__getitem__()``, ``__setitem__()`` and ``__delitem__()`` support\nslice objects as arguments):\n\n class MyClass:\n ...\n def __getitem__(self, index):\n ...\n def __setitem__(self, index, value):\n ...\n def __delitem__(self, index):\n ...\n\n if sys.version_info < (2, 0):\n # They won\'t be defined if version is at least 2.0 final\n\n def __getslice__(self, i, j):\n return self[max(0, i):max(0, j):]\n def __setslice__(self, i, j, seq):\n self[max(0, i):max(0, j):] = seq\n def __delslice__(self, i, j):\n del self[max(0, i):max(0, j):]\n ...\n\nNote the calls to ``max()``; these are necessary because of the\nhandling of negative indices before the ``__*slice__()`` methods are\ncalled. When negative indexes are used, the ``__*item__()`` methods\nreceive them as provided, but the ``__*slice__()`` methods get a\n"cooked" form of the index values. For each negative index value, the\nlength of the sequence is added to the index before calling the method\n(which may still result in a negative index); this is the customary\nhandling of negative indexes by the built-in sequence types, and the\n``__*item__()`` methods are expected to do this as well. However,\nsince they should already be doing that, negative indexes cannot be\npassed in; they must be constrained to the bounds of the sequence\nbefore being passed to the ``__*item__()`` methods. 
Calling ``max(0,\ni)`` conveniently returns the proper value.\n\n\nEmulating numeric types\n=======================\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations (``+``, ``-``, ``*``, ``//``, ``%``, ``divmod()``,\n ``pow()``, ``**``, ``<<``, ``>>``, ``&``, ``^``, ``|``). For\n instance, to evaluate the expression ``x + y``, where *x* is an\n instance of a class that has an ``__add__()`` method,\n ``x.__add__(y)`` is called. The ``__divmod__()`` method should be\n the equivalent to using ``__floordiv__()`` and ``__mod__()``; it\n should not be related to ``__truediv__()`` (described below). Note\n that ``__pow__()`` should be defined to accept an optional third\n argument if the ternary version of the built-in ``pow()`` function\n is to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return ``NotImplemented``.\n\nobject.__div__(self, other)\nobject.__truediv__(self, other)\n\n The division operator (``/``) is implemented by these methods. The\n ``__truediv__()`` method is used when ``__future__.division`` is in\n effect, otherwise ``__div__()`` is used. 
If only one of these two\n methods is defined, the object will not support division in the\n alternate context; ``TypeError`` will be raised instead.\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rdiv__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations (``+``, ``-``, ``*``, ``/``, ``%``, ``divmod()``,\n ``pow()``, ``**``, ``<<``, ``>>``, ``&``, ``^``, ``|``) with\n reflected (swapped) operands. These functions are only called if\n the left operand does not support the corresponding operation and\n the operands are of different types. [2] For instance, to evaluate\n the expression ``x - y``, where *y* is an instance of a class that\n has an ``__rsub__()`` method, ``y.__rsub__(x)`` is called if\n ``x.__sub__(y)`` returns *NotImplemented*.\n\n Note that ternary ``pow()`` will not try calling ``__rpow__()``\n (the coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left operand\'s\n type and that subclass provides the reflected method for the\n operation, this method will be called before the left operand\'s\n non-reflected method. 
This behavior allows subclasses to\n override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__idiv__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments (``+=``, ``-=``, ``*=``, ``/=``, ``//=``, ``%=``,\n ``**=``, ``<<=``, ``>>=``, ``&=``, ``^=``, ``|=``). These methods\n should attempt to do the operation in-place (modifying *self*) and\n return the result (which could be, but does not have to be,\n *self*). If a specific method is not defined, the augmented\n assignment falls back to the normal methods. For instance, to\n execute the statement ``x += y``, where *x* is an instance of a\n class that has an ``__iadd__()`` method, ``x.__iadd__(y)`` is\n called. If *x* is an instance of a class that does not define a\n ``__iadd__()`` method, ``x.__add__(y)`` and ``y.__radd__(x)`` are\n considered, as with the evaluation of ``x + y``.\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations (``-``, ``+``,\n ``abs()`` and ``~``).\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__long__(self)\nobject.__float__(self)\n\n Called to implement the built-in functions ``complex()``,\n ``int()``, ``long()``, and ``float()``. Should return a value of\n the appropriate type.\n\nobject.__oct__(self)\nobject.__hex__(self)\n\n Called to implement the built-in functions ``oct()`` and ``hex()``.\n Should return a string value.\n\nobject.__index__(self)\n\n Called to implement ``operator.index()``. Also called whenever\n Python needs an integer object (such as in slicing). 
Must return\n an integer (int or long).\n\n New in version 2.5.\n\nobject.__coerce__(self, other)\n\n Called to implement "mixed-mode" numeric arithmetic. Should either\n return a 2-tuple containing *self* and *other* converted to a\n common numeric type, or ``None`` if conversion is impossible. When\n the common type would be the type of ``other``, it is sufficient to\n return ``None``, since the interpreter will also ask the other\n object to attempt a coercion (but sometimes, if the implementation\n of the other type cannot be changed, it is useful to do the\n conversion to the other type here). A return value of\n ``NotImplemented`` is equivalent to returning ``None``.\n\n\nCoercion rules\n==============\n\nThis section used to document the rules for coercion. As the language\nhas evolved, the coercion rules have become hard to document\nprecisely; documenting what one version of one particular\nimplementation does is undesirable. Instead, here are some informal\nguidelines regarding coercion. In Python 3.0, coercion will not be\nsupported.\n\n* If the left operand of a % operator is a string or Unicode object,\n no coercion takes place and the string formatting operation is\n invoked instead.\n\n* It is no longer recommended to define a coercion operation. Mixed-\n mode operations on types that don\'t define coercion pass the\n original arguments to the operation.\n\n* New-style classes (those derived from ``object``) never invoke the\n ``__coerce__()`` method in response to a binary operator; the only\n time ``__coerce__()`` is invoked is when the built-in function\n ``coerce()`` is called.\n\n* For most intents and purposes, an operator that returns\n ``NotImplemented`` is treated the same as one that is not\n implemented at all.\n\n* Below, ``__op__()`` and ``__rop__()`` are used to signify the\n generic method names corresponding to an operator; ``__iop__()`` is\n used for the corresponding in-place operator. 
For example, for the\n operator \'``+``\', ``__add__()`` and ``__radd__()`` are used for the\n left and right variant of the binary operator, and ``__iadd__()``\n for the in-place variant.\n\n* For objects *x* and *y*, first ``x.__op__(y)`` is tried. If this is\n not implemented or returns ``NotImplemented``, ``y.__rop__(x)`` is\n tried. If this is also not implemented or returns\n ``NotImplemented``, a ``TypeError`` exception is raised. But see\n the following exception:\n\n* Exception to the previous item: if the left operand is an instance\n of a built-in type or a new-style class, and the right operand is an\n instance of a proper subclass of that type or class and overrides\n the base\'s ``__rop__()`` method, the right operand\'s ``__rop__()``\n method is tried *before* the left operand\'s ``__op__()`` method.\n\n This is done so that a subclass can completely override binary\n operators. Otherwise, the left operand\'s ``__op__()`` method would\n always accept the right operand: when an instance of a given class\n is expected, an instance of a subclass of that class is always\n acceptable.\n\n* When either operand type defines a coercion, this coercion is called\n before that type\'s ``__op__()`` or ``__rop__()`` method is called,\n but no sooner. If the coercion returns an object of a different\n type for the operand whose coercion is invoked, part of the process\n is redone using the new object.\n\n* When an in-place operator (like \'``+=``\') is used, if the left\n operand implements ``__iop__()``, it is invoked without any\n coercion. 
When the operation falls back to ``__op__()`` and/or\n ``__rop__()``, the normal coercion rules apply.\n\n* In ``x + y``, if *x* is a sequence that implements sequence\n concatenation, sequence concatenation is invoked.\n\n* In ``x * y``, if one operator is a sequence that implements sequence\n repetition, and the other is an integer (``int`` or ``long``),\n sequence repetition is invoked.\n\n* Rich comparisons (implemented by methods ``__eq__()`` and so on)\n never use coercion. Three-way comparison (implemented by\n ``__cmp__()``) does use coercion under the same conditions as other\n binary operations use it.\n\n* In the current implementation, the built-in numeric types ``int``,\n ``long``, ``float``, and ``complex`` do not use coercion. All these\n types implement a ``__coerce__()`` method, for use by the built-in\n ``coerce()`` function.\n\n Changed in version 2.7.\n\n\nWith Statement Context Managers\n===============================\n\nNew in version 2.5.\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a ``with`` statement. The context\nmanager handles the entry into, and the exit from, the desired runtime\ncontext for the execution of the block of code. Context managers are\nnormally invoked using the ``with`` statement (described in section\n*The with statement*), but can also be used by directly invoking their\nmethods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The ``with``\n statement will bind this method\'s return value to the target(s)\n specified in the ``as`` clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. 
The parameters\n describe the exception that caused the context to be exited. If the\n context was exited without an exception, all three arguments will\n be ``None``.\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that ``__exit__()`` methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n\n\nSpecial method lookup for old-style classes\n===========================================\n\nFor old-style classes, special methods are always looked up in exactly\nthe same way as any other method or attribute. This is the case\nregardless of whether the method is being looked up explicitly as in\n``x.__getitem__(i)`` or implicitly as in ``x[i]``.\n\nThis behaviour means that special methods may exhibit different\nbehaviour for different instances of a single old-style class if the\nappropriate special attributes are set differently:\n\n >>> class C:\n ... pass\n ...\n >>> c1 = C()\n >>> c2 = C()\n >>> c1.__len__ = lambda: 5\n >>> c2.__len__ = lambda: 9\n >>> len(c1)\n 5\n >>> len(c2)\n 9\n\n\nSpecial method lookup for new-style classes\n===========================================\n\nFor new-style classes, implicit invocations of special methods are\nonly guaranteed to work correctly if defined on an object\'s type, not\nin the object\'s instance dictionary. That behaviour is the reason why\nthe following code raises an exception (unlike the equivalent example\nwith old-style classes):\n\n >>> class C(object):\n ... 
pass\n ...\n >>> c = C()\n >>> c.__len__ = lambda: 5\n >>> len(c)\n Traceback (most recent call last):\n File "", line 1, in \n TypeError: object of type \'C\' has no len()\n\nThe rationale behind this behaviour lies with a number of special\nmethods such as ``__hash__()`` and ``__repr__()`` that are implemented\nby all objects, including type objects. If the implicit lookup of\nthese methods used the conventional lookup process, they would fail\nwhen invoked on the type object itself:\n\n >>> 1 .__hash__() == hash(1)\n True\n >>> int.__hash__() == hash(int)\n Traceback (most recent call last):\n File "", line 1, in \n TypeError: descriptor \'__hash__\' of \'int\' object needs an argument\n\nIncorrectly attempting to invoke an unbound method of a class in this\nway is sometimes referred to as \'metaclass confusion\', and is avoided\nby bypassing the instance when looking up special methods:\n\n >>> type(1).__hash__(1) == hash(1)\n True\n >>> type(int).__hash__(int) == hash(int)\n True\n\nIn addition to bypassing any instance attributes in the interest of\ncorrectness, implicit special method lookup generally also bypasses\nthe ``__getattribute__()`` method even of the object\'s metaclass:\n\n >>> class Meta(type):\n ... def __getattribute__(*args):\n ... print "Metaclass getattribute invoked"\n ... return type.__getattribute__(*args)\n ...\n >>> class C(object):\n ... __metaclass__ = Meta\n ... def __len__(self):\n ... return 10\n ... def __getattribute__(*args):\n ... print "Class getattribute invoked"\n ... 
return object.__getattribute__(*args)\n ...\n >>> c = C()\n >>> c.__len__() # Explicit lookup via instance\n Class getattribute invoked\n 10\n >>> type(c).__len__(c) # Explicit lookup via type\n Metaclass getattribute invoked\n 10\n >>> len(c) # Implicit lookup\n 10\n\nBypassing the ``__getattribute__()`` machinery in this fashion\nprovides significant scope for speed optimisations within the\ninterpreter, at the cost of some flexibility in the handling of\nspecial methods (the special method *must* be set on the class object\nitself in order to be consistently invoked by the interpreter).\n\n-[ Footnotes ]-\n\n[1] It *is* possible in some cases to change an object\'s type, under\n certain controlled conditions. It generally isn\'t a good idea\n though, since it can lead to some very strange behaviour if it is\n handled incorrectly.\n\n[2] For operands of the same type, it is assumed that if the non-\n reflected method (such as ``__add__()``) fails the operation is\n not supported, which is why the reflected method is not called.\n', + 'specialnames': u'\nSpecial method names\n********************\n\nA class can implement certain operations that are invoked by special\nsyntax (such as arithmetic operations or subscripting and slicing) by\ndefining methods with special names. This is Python\'s approach to\n*operator overloading*, allowing classes to define their own behavior\nwith respect to language operators. For instance, if a class defines\na method named ``__getitem__()``, and ``x`` is an instance of this\nclass, then ``x[i]`` is roughly equivalent to ``x.__getitem__(i)`` for\nold-style classes and ``type(x).__getitem__(x, i)`` for new-style\nclasses. 
Except where mentioned, attempts to execute an operation\nraise an exception when no appropriate method is defined (typically\n``AttributeError`` or ``TypeError``).\n\nWhen implementing a class that emulates any built-in type, it is\nimportant that the emulation only be implemented to the degree that it\nmakes sense for the object being modelled. For example, some\nsequences may work well with retrieval of individual elements, but\nextracting a slice may not make sense. (One example of this is the\n``NodeList`` interface in the W3C\'s Document Object Model.)\n\n\nBasic customization\n===================\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. ``__new__()`` is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). The return\n value of ``__new__()`` should be the new object instance (usually\n an instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s ``__new__()`` method using\n ``super(currentclass, cls).__new__(cls[, ...])`` with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If ``__new__()`` returns an instance of *cls*, then the new\n instance\'s ``__init__()`` method will be invoked like\n ``__init__(self[, ...])``, where *self* is the new instance and the\n remaining arguments are the same as were passed to ``__new__()``.\n\n If ``__new__()`` does not return an instance of *cls*, then the new\n instance\'s ``__init__()`` method will not be invoked.\n\n ``__new__()`` is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. 
It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called when the instance is created. The arguments are those\n passed to the class constructor expression. If a base class has an\n ``__init__()`` method, the derived class\'s ``__init__()`` method,\n if any, must explicitly call it to ensure proper initialization of\n the base class part of the instance; for example:\n ``BaseClass.__init__(self, [args...])``. As a special constraint\n on constructors, no value may be returned; doing so will cause a\n ``TypeError`` to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a ``__del__()`` method,\n the derived class\'s ``__del__()`` method, if any, must explicitly\n call it to ensure proper deletion of the base class part of the\n instance. Note that it is possible (though not recommended!) for\n the ``__del__()`` method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n ``__del__()`` methods are called for objects that still exist when\n the interpreter exits.\n\n Note: ``del x`` doesn\'t directly call ``x.__del__()`` --- the former\n decrements the reference count for ``x`` by one, and the latter\n is only called when ``x``\'s reference count reaches zero. 
Some\n common situations that may prevent the reference count of an\n object from going to zero include: circular references between\n objects (e.g., a doubly-linked list or a tree data structure with\n parent and child pointers); a reference to the object on the\n stack frame of a function that caught an exception (the traceback\n stored in ``sys.exc_traceback`` keeps the stack frame alive); or\n a reference to the object on the stack frame that raised an\n unhandled exception in interactive mode (the traceback stored in\n ``sys.last_traceback`` keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing ``None`` in\n ``sys.exc_traceback`` or ``sys.last_traceback``. Circular\n references which are garbage are detected when the option cycle\n detector is enabled (it\'s on by default), but can only be cleaned\n up if there are no Python-level ``__del__()`` methods involved.\n Refer to the documentation for the ``gc`` module for more\n information about how ``__del__()`` methods are handled by the\n cycle detector, particularly the description of the ``garbage``\n value.\n\n Warning: Due to the precarious circumstances under which ``__del__()``\n methods are invoked, exceptions that occur during their execution\n are ignored, and a warning is printed to ``sys.stderr`` instead.\n Also, when ``__del__()`` is invoked in response to a module being\n deleted (e.g., when execution of the program is done), other\n globals referenced by the ``__del__()`` method may already have\n been deleted or in the process of being torn down (e.g. the\n import machinery shutting down). For this reason, ``__del__()``\n methods should do the absolute minimum needed to maintain\n external invariants. 
Starting with version 1.5, Python\n guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the ``__del__()`` method is called.\n\nobject.__repr__(self)\n\n Called by the ``repr()`` built-in function and by string\n conversions (reverse quotes) to compute the "official" string\n representation of an object. If at all possible, this should look\n like a valid Python expression that could be used to recreate an\n object with the same value (given an appropriate environment). If\n this is not possible, a string of the form ``<...some useful\n description...>`` should be returned. The return value must be a\n string object. If a class defines ``__repr__()`` but not\n ``__str__()``, then ``__repr__()`` is also used when an "informal"\n string representation of instances of that class is required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by the ``str()`` built-in function and by the ``print``\n statement to compute the "informal" string representation of an\n object. This differs from ``__repr__()`` in that it does not have\n to be a valid Python expression: a more convenient or concise\n representation may be used instead. The return value must be a\n string object.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n New in version 2.1.\n\n These are the so-called "rich comparison" methods, and are called\n for comparison operators in preference to ``__cmp__()`` below. 
The\n correspondence between operator symbols and method names is as\n follows: ``xy`` call ``x.__ne__(y)``, ``x>y`` calls ``x.__gt__(y)``, and\n ``x>=y`` calls ``x.__ge__(y)``.\n\n A rich comparison method may return the singleton\n ``NotImplemented`` if it does not implement the operation for a\n given pair of arguments. By convention, ``False`` and ``True`` are\n returned for a successful comparison. However, these methods can\n return any value, so if the comparison operator is used in a\n Boolean context (e.g., in the condition of an ``if`` statement),\n Python will call ``bool()`` on the value to determine if the result\n is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of ``x==y`` does not imply that ``x!=y`` is false.\n Accordingly, when defining ``__eq__()``, one should also define\n ``__ne__()`` so that the operators will behave as expected. See\n the paragraph on ``__hash__()`` for some important notes on\n creating *hashable* objects which support custom comparison\n operations and are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, ``__lt__()`` and ``__gt__()`` are each\n other\'s reflection, ``__le__()`` and ``__ge__()`` are each other\'s\n reflection, and ``__eq__()`` and ``__ne__()`` are their own\n reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see ``functools.total_ordering()``.\n\nobject.__cmp__(self, other)\n\n Called by comparison operations if rich comparison (see above) is\n not defined. 
Should return a negative integer if ``self < other``,\n zero if ``self == other``, a positive integer if ``self > other``.\n If no ``__cmp__()``, ``__eq__()`` or ``__ne__()`` operation is\n defined, class instances are compared by object identity\n ("address"). See also the description of ``__hash__()`` for some\n important notes on creating *hashable* objects which support custom\n comparison operations and are usable as dictionary keys. (Note: the\n restriction that exceptions are not propagated by ``__cmp__()`` has\n been removed since Python 1.5.)\n\nobject.__rcmp__(self, other)\n\n Changed in version 2.1: No longer supported.\n\nobject.__hash__(self)\n\n Called by built-in function ``hash()`` and for operations on\n members of hashed collections including ``set``, ``frozenset``, and\n ``dict``. ``__hash__()`` should return an integer. The only\n required property is that objects which compare equal have the same\n hash value; it is advised to somehow mix together (e.g. using\n exclusive or) the hash values for the components of the object that\n also play a part in comparison of objects.\n\n If a class does not define a ``__cmp__()`` or ``__eq__()`` method\n it should not define a ``__hash__()`` operation either; if it\n defines ``__cmp__()`` or ``__eq__()`` but not ``__hash__()``, its\n instances will not be usable in hashed collections. 
If a class\n defines mutable objects and implements a ``__cmp__()`` or\n ``__eq__()`` method, it should not implement ``__hash__()``, since\n hashable collection implementations require that a object\'s hash\n value is immutable (if the object\'s hash value changes, it will be\n in the wrong hash bucket).\n\n User-defined classes have ``__cmp__()`` and ``__hash__()`` methods\n by default; with them, all objects compare unequal (except with\n themselves) and ``x.__hash__()`` returns ``id(x)``.\n\n Classes which inherit a ``__hash__()`` method from a parent class\n but change the meaning of ``__cmp__()`` or ``__eq__()`` such that\n the hash value returned is no longer appropriate (e.g. by switching\n to a value-based concept of equality instead of the default\n identity based equality) can explicitly flag themselves as being\n unhashable by setting ``__hash__ = None`` in the class definition.\n Doing so means that not only will instances of the class raise an\n appropriate ``TypeError`` when a program attempts to retrieve their\n hash value, but they will also be correctly identified as\n unhashable when checking ``isinstance(obj, collections.Hashable)``\n (unlike classes which define their own ``__hash__()`` to explicitly\n raise ``TypeError``).\n\n Changed in version 2.5: ``__hash__()`` may now also return a long\n integer object; the 32-bit integer is then derived from the hash of\n that object.\n\n Changed in version 2.6: ``__hash__`` may now be set to ``None`` to\n explicitly flag instances of a class as unhashable.\n\nobject.__nonzero__(self)\n\n Called to implement truth value testing and the built-in operation\n ``bool()``; should return ``False`` or ``True``, or their integer\n equivalents ``0`` or ``1``. When this method is not defined,\n ``__len__()`` is called, if it is defined, and the object is\n considered true if its result is nonzero. 
If a class defines\n neither ``__len__()`` nor ``__nonzero__()``, all its instances are\n considered true.\n\nobject.__unicode__(self)\n\n Called to implement ``unicode()`` built-in; should return a Unicode\n object. When this method is not defined, string conversion is\n attempted, and the result of string conversion is converted to\n Unicode using the system default encoding.\n\n\nCustomizing attribute access\n============================\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of ``x.name``)\nfor class instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. it is not an instance attribute nor is it found\n in the class tree for ``self``). ``name`` is the attribute name.\n This method should return the (computed) attribute value or raise\n an ``AttributeError`` exception.\n\n Note that if the attribute is found through the normal mechanism,\n ``__getattr__()`` is not called. (This is an intentional asymmetry\n between ``__getattr__()`` and ``__setattr__()``.) This is done both\n for efficiency reasons and because otherwise ``__getattr__()``\n would have no way to access other attributes of the instance. Note\n that at least for instance variables, you can fake total control by\n not inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). See the\n ``__getattribute__()`` method below for a way to actually get total\n control in new-style classes.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). 
*name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If ``__setattr__()`` wants to assign to an instance attribute, it\n should not simply execute ``self.name = value`` --- this would\n cause a recursive call to itself. Instead, it should insert the\n value in the dictionary of instance attributes, e.g.,\n ``self.__dict__[name] = value``. For new-style classes, rather\n than accessing the instance dictionary, it should call the base\n class method with the same name, for example,\n ``object.__setattr__(self, name, value)``.\n\nobject.__delattr__(self, name)\n\n Like ``__setattr__()`` but for attribute deletion instead of\n assignment. This should only be implemented if ``del obj.name`` is\n meaningful for the object.\n\n\nMore attribute access for new-style classes\n-------------------------------------------\n\nThe following methods only apply to new-style classes.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines\n ``__getattr__()``, the latter will not be called unless\n ``__getattribute__()`` either calls it explicitly or raises an\n ``AttributeError``. This method should return the (computed)\n attribute value or raise an ``AttributeError`` exception. In order\n to avoid infinite recursion in this method, its implementation\n should always call the base class method with the same name to\n access any attributes it needs, for example,\n ``object.__getattribute__(self, name)``.\n\n Note: This method may still be bypassed when looking up special methods\n as the result of implicit invocation via language syntax or\n built-in functions. 
See *Special method lookup for new-style\n classes*.\n\n\nImplementing Descriptors\n------------------------\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents). In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' ``__dict__``.\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). *owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or ``None`` when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an\n ``AttributeError`` exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\n\nInvoking Descriptors\n--------------------\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: ``__get__()``, ``__set__()``, and\n``__delete__()``. If any of those methods are defined for an object,\nit is said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. 
For instance, ``a.x`` has a\nlookup chain starting with ``a.__dict__[\'x\']``, then\n``type(a).__dict__[\'x\']``, and continuing through the base classes of\n``type(a)`` excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called. Note that descriptors are only invoked for new\nstyle objects or classes (ones that subclass ``object()`` or\n``type()``).\n\nThe starting point for descriptor invocation is a binding, ``a.x``.\nHow the arguments are assembled depends on ``a``:\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: ``x.__get__(a)``.\n\nInstance Binding\n If binding to a new-style object instance, ``a.x`` is transformed\n into the call: ``type(a).__dict__[\'x\'].__get__(a, type(a))``.\n\nClass Binding\n If binding to a new-style class, ``A.x`` is transformed into the\n call: ``A.__dict__[\'x\'].__get__(None, A)``.\n\nSuper Binding\n If ``a`` is an instance of ``super``, then the binding ``super(B,\n obj).m()`` searches ``obj.__class__.__mro__`` for the base class\n ``A`` immediately preceding ``B`` and then invokes the descriptor\n with the call: ``A.__dict__[\'m\'].__get__(obj, obj.__class__)``.\n\nFor instance bindings, the precedence of descriptor invocation depends\non the which descriptor methods are defined. A descriptor can define\nany combination of ``__get__()``, ``__set__()`` and ``__delete__()``.\nIf it does not define ``__get__()``, then accessing the attribute will\nreturn the descriptor object itself unless there is a value in the\nobject\'s instance dictionary. If the descriptor defines ``__set__()``\nand/or ``__delete__()``, it is a data descriptor; if it defines\nneither, it is a non-data descriptor. 
Normally, data descriptors\ndefine both ``__get__()`` and ``__set__()``, while non-data\ndescriptors have just the ``__get__()`` method. Data descriptors with\n``__set__()`` and ``__get__()`` defined always override a redefinition\nin an instance dictionary. In contrast, non-data descriptors can be\noverridden by instances.\n\nPython methods (including ``staticmethod()`` and ``classmethod()``)\nare implemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe ``property()`` function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n---------\n\nBy default, instances of both old and new-style classes have a\ndictionary for attribute storage. This wastes space for objects\nhaving very few instance variables. The space consumption can become\nacute when creating large numbers of instances.\n\nThe default can be overridden by defining *__slots__* in a new-style\nclass definition. The *__slots__* declaration takes a sequence of\ninstance variables and reserves just enough space in each instance to\nhold a value for each variable. Space is saved because *__dict__* is\nnot created for each instance.\n\n__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. 
If defined in a\n new-style class, *__slots__* reserves space for the declared\n variables and prevents the automatic creation of *__dict__* and\n *__weakref__* for each instance.\n\n New in version 2.2.\n\nNotes on using *__slots__*\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raises ``AttributeError``. If\n dynamic assignment of new variables is desired, then add\n ``\'__dict__\'`` to the sequence of strings in the *__slots__*\n declaration.\n\n Changed in version 2.3: Previously, adding ``\'__dict__\'`` to the\n *__slots__* declaration would not enable the assignment of new\n attributes not specifically listed in the sequence of instance\n variable names.\n\n* Without a *__weakref__* variable for each instance, classes defining\n *__slots__* do not support weak references to its instances. If weak\n reference support is needed, then add ``\'__weakref__\'`` to the\n sequence of strings in the *__slots__* declaration.\n\n Changed in version 2.3: Previously, adding ``\'__weakref__\'`` to the\n *__slots__* declaration would not enable support for weak\n references.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (*Implementing Descriptors*) for each variable name. As\n a result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. 
As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the instance\n variable defined by the base class slot is inaccessible (except by\n retrieving its descriptor directly from the base class). This\n renders the meaning of the program undefined. In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as ``long``, ``str`` and\n ``tuple``.\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings may\n also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n\n Changed in version 2.6: Previously, *__class__* assignment raised an\n error if either new or old class had *__slots__*.\n\n\nCustomizing class creation\n==========================\n\nBy default, new-style classes are constructed using ``type()``. A\nclass definition is read into a separate namespace and the value of\nclass name is bound to the result of ``type(name, bases, dict)``.\n\nWhen the class definition is read, if *__metaclass__* is defined then\nthe callable assigned to it will be called instead of ``type()``. This\nallows classes or functions to be written which monitor or alter the\nclass creation process:\n\n* Modifying the class dictionary prior to the class being created.\n\n* Returning an instance of another class -- essentially performing the\n role of a factory function.\n\nThese steps will have to be performed in the metaclass\'s ``__new__()``\nmethod -- ``type.__new__()`` can then be called from this method to\ncreate a class with different properties. 
This example adds a new\nelement to the class dictionary before creating the class:\n\n class metacls(type):\n def __new__(mcs, name, bases, dict):\n dict[\'foo\'] = \'metacls was here\'\n return type.__new__(mcs, name, bases, dict)\n\nYou can of course also override other class methods (or add new\nmethods); for example defining a custom ``__call__()`` method in the\nmetaclass allows custom behavior when the class is called, e.g. not\nalways creating a new instance.\n\n__metaclass__\n\n This variable can be any callable accepting arguments for ``name``,\n ``bases``, and ``dict``. Upon class creation, the callable is used\n instead of the built-in ``type()``.\n\n New in version 2.2.\n\nThe appropriate metaclass is determined by the following precedence\nrules:\n\n* If ``dict[\'__metaclass__\']`` exists, it is used.\n\n* Otherwise, if there is at least one base class, its metaclass is\n used (this looks for a *__class__* attribute first and if not found,\n uses its type).\n\n* Otherwise, if a global variable named __metaclass__ exists, it is\n used.\n\n* Otherwise, the old-style, classic metaclass (types.ClassType) is\n used.\n\nThe potential uses for metaclasses are boundless. 
Some ideas that have\nbeen explored including logging, interface checking, automatic\ndelegation, automatic property creation, proxies, frameworks, and\nautomatic resource locking/synchronization.\n\n\nCustomizing instance and subclass checks\n========================================\n\nNew in version 2.6.\n\nThe following methods are used to override the default behavior of the\n``isinstance()`` and ``issubclass()`` built-in functions.\n\nIn particular, the metaclass ``abc.ABCMeta`` implements these methods\nin order to allow the addition of Abstract Base Classes (ABCs) as\n"virtual base classes" to any class or type (including built-in\ntypes), including other ABCs.\n\nclass.__instancecheck__(self, instance)\n\n Return true if *instance* should be considered a (direct or\n indirect) instance of *class*. If defined, called to implement\n ``isinstance(instance, class)``.\n\nclass.__subclasscheck__(self, subclass)\n\n Return true if *subclass* should be considered a (direct or\n indirect) subclass of *class*. If defined, called to implement\n ``issubclass(subclass, class)``.\n\nNote that these methods are looked up on the type (metaclass) of a\nclass. 
They cannot be defined as class methods in the actual class.\nThis is consistent with the lookup of special methods that are called\non instances, only in this case the instance is itself a class.\n\nSee also:\n\n **PEP 3119** - Introducing Abstract Base Classes\n Includes the specification for customizing ``isinstance()`` and\n ``issubclass()`` behavior through ``__instancecheck__()`` and\n ``__subclasscheck__()``, with motivation for this functionality\n in the context of adding Abstract Base Classes (see the ``abc``\n module) to the language.\n\n\nEmulating callable objects\n==========================\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, ``x(arg1, arg2, ...)`` is a shorthand for\n ``x.__call__(arg1, arg2, ...)``.\n\n\nEmulating container types\n=========================\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which ``0 <= k < N``\nwhere *N* is the length of the sequence, or slice objects, which\ndefine a range of items. (For backwards compatibility, the method\n``__getslice__()`` (see below) can also be defined to handle simple,\nbut not extended slices.) It is also recommended that mappings provide\nthe methods ``keys()``, ``values()``, ``items()``, ``has_key()``,\n``get()``, ``clear()``, ``setdefault()``, ``iterkeys()``,\n``itervalues()``, ``iteritems()``, ``pop()``, ``popitem()``,\n``copy()``, and ``update()`` behaving similar to those for Python\'s\nstandard dictionary objects. 
The ``UserDict`` module provides a\n``DictMixin`` class to help create those methods from a base set of\n``__getitem__()``, ``__setitem__()``, ``__delitem__()``, and\n``keys()``. Mutable sequences should provide methods ``append()``,\n``count()``, ``index()``, ``extend()``, ``insert()``, ``pop()``,\n``remove()``, ``reverse()`` and ``sort()``, like Python standard list\nobjects. Finally, sequence types should implement addition (meaning\nconcatenation) and multiplication (meaning repetition) by defining the\nmethods ``__add__()``, ``__radd__()``, ``__iadd__()``, ``__mul__()``,\n``__rmul__()`` and ``__imul__()`` described below; they should not\ndefine ``__coerce__()`` or other numerical operators. It is\nrecommended that both mappings and sequences implement the\n``__contains__()`` method to allow efficient use of the ``in``\noperator; for mappings, ``in`` should be equivalent of ``has_key()``;\nfor sequences, it should search through the values. It is further\nrecommended that both mappings and sequences implement the\n``__iter__()`` method to allow efficient iteration through the\ncontainer; for mappings, ``__iter__()`` should be the same as\n``iterkeys()``; for sequences, it should iterate through the values.\n\nobject.__len__(self)\n\n Called to implement the built-in function ``len()``. Should return\n the length of the object, an integer ``>=`` 0. Also, an object\n that doesn\'t define a ``__nonzero__()`` method and whose\n ``__len__()`` method returns zero is considered to be false in a\n Boolean context.\n\nobject.__getitem__(self, key)\n\n Called to implement evaluation of ``self[key]``. For sequence\n types, the accepted keys should be integers and slice objects.\n Note that the special interpretation of negative indexes (if the\n class wishes to emulate a sequence type) is up to the\n ``__getitem__()`` method. 
If *key* is of an inappropriate type,\n ``TypeError`` may be raised; if of a value outside the set of\n indexes for the sequence (after any special interpretation of\n negative values), ``IndexError`` should be raised. For mapping\n types, if *key* is missing (not in the container), ``KeyError``\n should be raised.\n\n Note: ``for`` loops expect that an ``IndexError`` will be raised for\n illegal indexes to allow proper detection of the end of the\n sequence.\n\nobject.__setitem__(self, key, value)\n\n Called to implement assignment to ``self[key]``. Same note as for\n ``__getitem__()``. This should only be implemented for mappings if\n the objects support changes to the values for keys, or if new keys\n can be added, or for sequences if elements can be replaced. The\n same exceptions should be raised for improper *key* values as for\n the ``__getitem__()`` method.\n\nobject.__delitem__(self, key)\n\n Called to implement deletion of ``self[key]``. Same note as for\n ``__getitem__()``. This should only be implemented for mappings if\n the objects support removal of keys, or for sequences if elements\n can be removed from the sequence. The same exceptions should be\n raised for improper *key* values as for the ``__getitem__()``\n method.\n\nobject.__iter__(self)\n\n This method is called when an iterator is required for a container.\n This method should return a new iterator object that can iterate\n over all the objects in the container. For mappings, it should\n iterate over the keys of the container, and should also be made\n available as the method ``iterkeys()``.\n\n Iterator objects also need to implement this method; they are\n required to return themselves. For more information on iterator\n objects, see *Iterator Types*.\n\nobject.__reversed__(self)\n\n Called (if present) by the ``reversed()`` built-in to implement\n reverse iteration. 
It should return a new iterator object that\n iterates over all the objects in the container in reverse order.\n\n If the ``__reversed__()`` method is not provided, the\n ``reversed()`` built-in will fall back to using the sequence\n protocol (``__len__()`` and ``__getitem__()``). Objects that\n support the sequence protocol should only provide\n ``__reversed__()`` if they can provide an implementation that is\n more efficient than the one provided by ``reversed()``.\n\n New in version 2.6.\n\nThe membership test operators (``in`` and ``not in``) are normally\nimplemented as an iteration through a sequence. However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n\n For objects that don\'t define ``__contains__()``, the membership\n test first tries iteration via ``__iter__()``, then the old\n sequence iteration protocol via ``__getitem__()``, see *this\n section in the language reference*.\n\n\nAdditional methods for emulation of sequence types\n==================================================\n\nThe following optional methods can be defined to further emulate\nsequence objects. Immutable sequences methods should at most only\ndefine ``__getslice__()``; mutable sequences might define all three\nmethods.\n\nobject.__getslice__(self, i, j)\n\n Deprecated since version 2.0: Support slice objects as parameters\n to the ``__getitem__()`` method. (However, built-in types in\n CPython currently still implement ``__getslice__()``. Therefore,\n you have to override it in derived classes when implementing\n slicing.)\n\n Called to implement evaluation of ``self[i:j]``. 
The returned\n object should be of the same type as *self*. Note that missing *i*\n or *j* in the slice expression are replaced by zero or\n ``sys.maxint``, respectively. If negative indexes are used in the\n slice, the length of the sequence is added to that index. If the\n instance does not implement the ``__len__()`` method, an\n ``AttributeError`` is raised. No guarantee is made that indexes\n adjusted this way are not still negative. Indexes which are\n greater than the length of the sequence are not modified. If no\n ``__getslice__()`` is found, a slice object is created instead, and\n passed to ``__getitem__()`` instead.\n\nobject.__setslice__(self, i, j, sequence)\n\n Called to implement assignment to ``self[i:j]``. Same notes for *i*\n and *j* as for ``__getslice__()``.\n\n This method is deprecated. If no ``__setslice__()`` is found, or\n for extended slicing of the form ``self[i:j:k]``, a slice object is\n created, and passed to ``__setitem__()``, instead of\n ``__setslice__()`` being called.\n\nobject.__delslice__(self, i, j)\n\n Called to implement deletion of ``self[i:j]``. Same notes for *i*\n and *j* as for ``__getslice__()``. This method is deprecated. If no\n ``__delslice__()`` is found, or for extended slicing of the form\n ``self[i:j:k]``, a slice object is created, and passed to\n ``__delitem__()``, instead of ``__delslice__()`` being called.\n\nNotice that these methods are only invoked when a single slice with a\nsingle colon is used, and the slice method is available. 
For slice\noperations involving extended slice notation, or in absence of the\nslice methods, ``__getitem__()``, ``__setitem__()`` or\n``__delitem__()`` is called with a slice object as argument.\n\nThe following example demonstrates how to make your program or module\ncompatible with earlier versions of Python (assuming that methods\n``__getitem__()``, ``__setitem__()`` and ``__delitem__()`` support\nslice objects as arguments):\n\n class MyClass:\n ...\n def __getitem__(self, index):\n ...\n def __setitem__(self, index, value):\n ...\n def __delitem__(self, index):\n ...\n\n if sys.version_info < (2, 0):\n # They won\'t be defined if version is at least 2.0 final\n\n def __getslice__(self, i, j):\n return self[max(0, i):max(0, j):]\n def __setslice__(self, i, j, seq):\n self[max(0, i):max(0, j):] = seq\n def __delslice__(self, i, j):\n del self[max(0, i):max(0, j):]\n ...\n\nNote the calls to ``max()``; these are necessary because of the\nhandling of negative indices before the ``__*slice__()`` methods are\ncalled. When negative indexes are used, the ``__*item__()`` methods\nreceive them as provided, but the ``__*slice__()`` methods get a\n"cooked" form of the index values. For each negative index value, the\nlength of the sequence is added to the index before calling the method\n(which may still result in a negative index); this is the customary\nhandling of negative indexes by the built-in sequence types, and the\n``__*item__()`` methods are expected to do this as well. However,\nsince they should already be doing that, negative indexes cannot be\npassed in; they must be constrained to the bounds of the sequence\nbefore being passed to the ``__*item__()`` methods. 
Calling ``max(0,\ni)`` conveniently returns the proper value.\n\n\nEmulating numeric types\n=======================\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations (``+``, ``-``, ``*``, ``//``, ``%``, ``divmod()``,\n ``pow()``, ``**``, ``<<``, ``>>``, ``&``, ``^``, ``|``). For\n instance, to evaluate the expression ``x + y``, where *x* is an\n instance of a class that has an ``__add__()`` method,\n ``x.__add__(y)`` is called. The ``__divmod__()`` method should be\n the equivalent to using ``__floordiv__()`` and ``__mod__()``; it\n should not be related to ``__truediv__()`` (described below). Note\n that ``__pow__()`` should be defined to accept an optional third\n argument if the ternary version of the built-in ``pow()`` function\n is to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return ``NotImplemented``.\n\nobject.__div__(self, other)\nobject.__truediv__(self, other)\n\n The division operator (``/``) is implemented by these methods. The\n ``__truediv__()`` method is used when ``__future__.division`` is in\n effect, otherwise ``__div__()`` is used. 
If only one of these two\n methods is defined, the object will not support division in the\n alternate context; ``TypeError`` will be raised instead.\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rdiv__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations (``+``, ``-``, ``*``, ``/``, ``%``, ``divmod()``,\n ``pow()``, ``**``, ``<<``, ``>>``, ``&``, ``^``, ``|``) with\n reflected (swapped) operands. These functions are only called if\n the left operand does not support the corresponding operation and\n the operands are of different types. [2] For instance, to evaluate\n the expression ``x - y``, where *y* is an instance of a class that\n has an ``__rsub__()`` method, ``y.__rsub__(x)`` is called if\n ``x.__sub__(y)`` returns *NotImplemented*.\n\n Note that ternary ``pow()`` will not try calling ``__rpow__()``\n (the coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left operand\'s\n type and that subclass provides the reflected method for the\n operation, this method will be called before the left operand\'s\n non-reflected method. 
This behavior allows subclasses to\n override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__idiv__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments (``+=``, ``-=``, ``*=``, ``/=``, ``//=``, ``%=``,\n ``**=``, ``<<=``, ``>>=``, ``&=``, ``^=``, ``|=``). These methods\n should attempt to do the operation in-place (modifying *self*) and\n return the result (which could be, but does not have to be,\n *self*). If a specific method is not defined, the augmented\n assignment falls back to the normal methods. For instance, to\n execute the statement ``x += y``, where *x* is an instance of a\n class that has an ``__iadd__()`` method, ``x.__iadd__(y)`` is\n called. If *x* is an instance of a class that does not define a\n ``__iadd__()`` method, ``x.__add__(y)`` and ``y.__radd__(x)`` are\n considered, as with the evaluation of ``x + y``.\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations (``-``, ``+``,\n ``abs()`` and ``~``).\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__long__(self)\nobject.__float__(self)\n\n Called to implement the built-in functions ``complex()``,\n ``int()``, ``long()``, and ``float()``. Should return a value of\n the appropriate type.\n\nobject.__oct__(self)\nobject.__hex__(self)\n\n Called to implement the built-in functions ``oct()`` and ``hex()``.\n Should return a string value.\n\nobject.__index__(self)\n\n Called to implement ``operator.index()``. Also called whenever\n Python needs an integer object (such as in slicing). 
Must return\n an integer (int or long).\n\n New in version 2.5.\n\nobject.__coerce__(self, other)\n\n Called to implement "mixed-mode" numeric arithmetic. Should either\n return a 2-tuple containing *self* and *other* converted to a\n common numeric type, or ``None`` if conversion is impossible. When\n the common type would be the type of ``other``, it is sufficient to\n return ``None``, since the interpreter will also ask the other\n object to attempt a coercion (but sometimes, if the implementation\n of the other type cannot be changed, it is useful to do the\n conversion to the other type here). A return value of\n ``NotImplemented`` is equivalent to returning ``None``.\n\n\nCoercion rules\n==============\n\nThis section used to document the rules for coercion. As the language\nhas evolved, the coercion rules have become hard to document\nprecisely; documenting what one version of one particular\nimplementation does is undesirable. Instead, here are some informal\nguidelines regarding coercion. In Python 3.0, coercion will not be\nsupported.\n\n* If the left operand of a % operator is a string or Unicode object,\n no coercion takes place and the string formatting operation is\n invoked instead.\n\n* It is no longer recommended to define a coercion operation. Mixed-\n mode operations on types that don\'t define coercion pass the\n original arguments to the operation.\n\n* New-style classes (those derived from ``object``) never invoke the\n ``__coerce__()`` method in response to a binary operator; the only\n time ``__coerce__()`` is invoked is when the built-in function\n ``coerce()`` is called.\n\n* For most intents and purposes, an operator that returns\n ``NotImplemented`` is treated the same as one that is not\n implemented at all.\n\n* Below, ``__op__()`` and ``__rop__()`` are used to signify the\n generic method names corresponding to an operator; ``__iop__()`` is\n used for the corresponding in-place operator. 
For example, for the\n operator \'``+``\', ``__add__()`` and ``__radd__()`` are used for the\n left and right variant of the binary operator, and ``__iadd__()``\n for the in-place variant.\n\n* For objects *x* and *y*, first ``x.__op__(y)`` is tried. If this is\n not implemented or returns ``NotImplemented``, ``y.__rop__(x)`` is\n tried. If this is also not implemented or returns\n ``NotImplemented``, a ``TypeError`` exception is raised. But see\n the following exception:\n\n* Exception to the previous item: if the left operand is an instance\n of a built-in type or a new-style class, and the right operand is an\n instance of a proper subclass of that type or class and overrides\n the base\'s ``__rop__()`` method, the right operand\'s ``__rop__()``\n method is tried *before* the left operand\'s ``__op__()`` method.\n\n This is done so that a subclass can completely override binary\n operators. Otherwise, the left operand\'s ``__op__()`` method would\n always accept the right operand: when an instance of a given class\n is expected, an instance of a subclass of that class is always\n acceptable.\n\n* When either operand type defines a coercion, this coercion is called\n before that type\'s ``__op__()`` or ``__rop__()`` method is called,\n but no sooner. If the coercion returns an object of a different\n type for the operand whose coercion is invoked, part of the process\n is redone using the new object.\n\n* When an in-place operator (like \'``+=``\') is used, if the left\n operand implements ``__iop__()``, it is invoked without any\n coercion. 
When the operation falls back to ``__op__()`` and/or\n ``__rop__()``, the normal coercion rules apply.\n\n* In ``x + y``, if *x* is a sequence that implements sequence\n concatenation, sequence concatenation is invoked.\n\n* In ``x * y``, if one operator is a sequence that implements sequence\n repetition, and the other is an integer (``int`` or ``long``),\n sequence repetition is invoked.\n\n* Rich comparisons (implemented by methods ``__eq__()`` and so on)\n never use coercion. Three-way comparison (implemented by\n ``__cmp__()``) does use coercion under the same conditions as other\n binary operations use it.\n\n* In the current implementation, the built-in numeric types ``int``,\n ``long``, ``float``, and ``complex`` do not use coercion. All these\n types implement a ``__coerce__()`` method, for use by the built-in\n ``coerce()`` function.\n\n Changed in version 2.7.\n\n\nWith Statement Context Managers\n===============================\n\nNew in version 2.5.\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a ``with`` statement. The context\nmanager handles the entry into, and the exit from, the desired runtime\ncontext for the execution of the block of code. Context managers are\nnormally invoked using the ``with`` statement (described in section\n*The with statement*), but can also be used by directly invoking their\nmethods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The ``with``\n statement will bind this method\'s return value to the target(s)\n specified in the ``as`` clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. 
The parameters\n describe the exception that caused the context to be exited. If the\n context was exited without an exception, all three arguments will\n be ``None``.\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that ``__exit__()`` methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n\n\nSpecial method lookup for old-style classes\n===========================================\n\nFor old-style classes, special methods are always looked up in exactly\nthe same way as any other method or attribute. This is the case\nregardless of whether the method is being looked up explicitly as in\n``x.__getitem__(i)`` or implicitly as in ``x[i]``.\n\nThis behaviour means that special methods may exhibit different\nbehaviour for different instances of a single old-style class if the\nappropriate special attributes are set differently:\n\n >>> class C:\n ... pass\n ...\n >>> c1 = C()\n >>> c2 = C()\n >>> c1.__len__ = lambda: 5\n >>> c2.__len__ = lambda: 9\n >>> len(c1)\n 5\n >>> len(c2)\n 9\n\n\nSpecial method lookup for new-style classes\n===========================================\n\nFor new-style classes, implicit invocations of special methods are\nonly guaranteed to work correctly if defined on an object\'s type, not\nin the object\'s instance dictionary. That behaviour is the reason why\nthe following code raises an exception (unlike the equivalent example\nwith old-style classes):\n\n >>> class C(object):\n ... 
pass\n ...\n >>> c = C()\n >>> c.__len__ = lambda: 5\n >>> len(c)\n Traceback (most recent call last):\n File "", line 1, in \n TypeError: object of type \'C\' has no len()\n\nThe rationale behind this behaviour lies with a number of special\nmethods such as ``__hash__()`` and ``__repr__()`` that are implemented\nby all objects, including type objects. If the implicit lookup of\nthese methods used the conventional lookup process, they would fail\nwhen invoked on the type object itself:\n\n >>> 1 .__hash__() == hash(1)\n True\n >>> int.__hash__() == hash(int)\n Traceback (most recent call last):\n File "", line 1, in \n TypeError: descriptor \'__hash__\' of \'int\' object needs an argument\n\nIncorrectly attempting to invoke an unbound method of a class in this\nway is sometimes referred to as \'metaclass confusion\', and is avoided\nby bypassing the instance when looking up special methods:\n\n >>> type(1).__hash__(1) == hash(1)\n True\n >>> type(int).__hash__(int) == hash(int)\n True\n\nIn addition to bypassing any instance attributes in the interest of\ncorrectness, implicit special method lookup generally also bypasses\nthe ``__getattribute__()`` method even of the object\'s metaclass:\n\n >>> class Meta(type):\n ... def __getattribute__(*args):\n ... print "Metaclass getattribute invoked"\n ... return type.__getattribute__(*args)\n ...\n >>> class C(object):\n ... __metaclass__ = Meta\n ... def __len__(self):\n ... return 10\n ... def __getattribute__(*args):\n ... print "Class getattribute invoked"\n ... 
return object.__getattribute__(*args)\n ...\n >>> c = C()\n >>> c.__len__() # Explicit lookup via instance\n Class getattribute invoked\n 10\n >>> type(c).__len__(c) # Explicit lookup via type\n Metaclass getattribute invoked\n 10\n >>> len(c) # Implicit lookup\n 10\n\nBypassing the ``__getattribute__()`` machinery in this fashion\nprovides significant scope for speed optimisations within the\ninterpreter, at the cost of some flexibility in the handling of\nspecial methods (the special method *must* be set on the class object\nitself in order to be consistently invoked by the interpreter).\n\n-[ Footnotes ]-\n\n[1] It *is* possible in some cases to change an object\'s type, under\n certain controlled conditions. It generally isn\'t a good idea\n though, since it can lead to some very strange behaviour if it is\n handled incorrectly.\n\n[2] For operands of the same type, it is assumed that if the non-\n reflected method (such as ``__add__()``) fails the operation is\n not supported, which is why the reflected method is not called.\n', 'string-conversions': u'\nString conversions\n******************\n\nA string conversion is an expression list enclosed in reverse (a.k.a.\nbackward) quotes:\n\n string_conversion ::= "`" expression_list "`"\n\nA string conversion evaluates the contained expression list and\nconverts the resulting object into a string according to rules\nspecific to its type.\n\nIf the object is a string, a number, ``None``, or a tuple, list or\ndictionary containing only objects whose type is one of these, the\nresulting string is a valid Python expression which can be passed to\nthe built-in function ``eval()`` to yield an expression with the same\nvalue (or an approximation, if floating point numbers are involved).\n\n(In particular, converting a string adds quotes around it and converts\n"funny" characters to escape sequences that are safe to print.)\n\nRecursive objects (for example, lists or dictionaries that contain a\nreference to themselves, 
directly or indirectly) use ``...`` to\nindicate a recursive reference, and the result cannot be passed to\n``eval()`` to get an equal value (``SyntaxError`` will be raised\ninstead).\n\nThe built-in function ``repr()`` performs exactly the same conversion\nin its argument as enclosing it in parentheses and reverse quotes\ndoes. The built-in function ``str()`` performs a similar but more\nuser-friendly conversion.\n', - 'string-methods': u'\nString Methods\n**************\n\nBelow are listed the string methods which both 8-bit strings and\nUnicode objects support.\n\nIn addition, Python\'s strings support the sequence type methods\ndescribed in the *Sequence Types --- str, unicode, list, tuple,\nbuffer, xrange* section. To output formatted strings use template\nstrings or the ``%`` operator described in the *String Formatting\nOperations* section. Also, see the ``re`` module for string functions\nbased on regular expressions.\n\nstr.capitalize()\n\n Return a copy of the string with only its first character\n capitalized.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.center(width[, fillchar])\n\n Return centered in a string of length *width*. Padding is done\n using the specified *fillchar* (default is a space).\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.count(sub[, start[, end]])\n\n Return the number of non-overlapping occurrences of substring *sub*\n in the range [*start*, *end*]. Optional arguments *start* and\n *end* are interpreted as in slice notation.\n\nstr.decode([encoding[, errors]])\n\n Decodes the string using the codec registered for *encoding*.\n *encoding* defaults to the default string encoding. *errors* may\n be given to set a different error handling scheme. 
The default is\n ``\'strict\'``, meaning that encoding errors raise ``UnicodeError``.\n Other possible values are ``\'ignore\'``, ``\'replace\'`` and any other\n name registered via ``codecs.register_error()``, see section *Codec\n Base Classes*.\n\n New in version 2.2.\n\n Changed in version 2.3: Support for other error handling schemes\n added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.encode([encoding[, errors]])\n\n Return an encoded version of the string. Default encoding is the\n current default string encoding. *errors* may be given to set a\n different error handling scheme. The default for *errors* is\n ``\'strict\'``, meaning that encoding errors raise a\n ``UnicodeError``. Other possible values are ``\'ignore\'``,\n ``\'replace\'``, ``\'xmlcharrefreplace\'``, ``\'backslashreplace\'`` and\n any other name registered via ``codecs.register_error()``, see\n section *Codec Base Classes*. For a list of possible encodings, see\n section *Standard Encodings*.\n\n New in version 2.0.\n\n Changed in version 2.3: Support for ``\'xmlcharrefreplace\'`` and\n ``\'backslashreplace\'`` and other error handling schemes added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n Return ``True`` if the string ends with the specified *suffix*,\n otherwise return ``False``. *suffix* can also be a tuple of\n suffixes to look for. With optional *start*, test beginning at\n that position. With optional *end*, stop comparing at that\n position.\n\n Changed in version 2.5: Accept tuples as *suffix*.\n\nstr.expandtabs([tabsize])\n\n Return a copy of the string where all tab characters are replaced\n by one or more spaces, depending on the current column and the\n given tab size. The column number is reset to zero after each\n newline occurring in the string. If *tabsize* is not given, a tab\n size of ``8`` characters is assumed. 
This doesn\'t understand other\n non-printing characters or escape sequences.\n\nstr.find(sub[, start[, end]])\n\n Return the lowest index in the string where substring *sub* is\n found, such that *sub* is contained in the slice ``s[start:end]``.\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` if *sub* is not found.\n\nstr.format(*args, **kwargs)\n\n Perform a string formatting operation. The string on which this\n method is called can contain literal text or replacement fields\n delimited by braces ``{}``. Each replacement field contains either\n the numeric index of a positional argument, or the name of a\n keyword argument. Returns a copy of the string where each\n replacement field is replaced with the string value of the\n corresponding argument.\n\n >>> "The sum of 1 + 2 is {0}".format(1+2)\n \'The sum of 1 + 2 is 3\'\n\n See *Format String Syntax* for a description of the various\n formatting options that can be specified in format strings.\n\n This method of string formatting is the new standard in Python 3.0,\n and should be preferred to the ``%`` formatting described in\n *String Formatting Operations* in new code.\n\n New in version 2.6.\n\nstr.index(sub[, start[, end]])\n\n Like ``find()``, but raise ``ValueError`` when the substring is not\n found.\n\nstr.isalnum()\n\n Return true if all characters in the string are alphanumeric and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isalpha()\n\n Return true if all characters in the string are alphabetic and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isdigit()\n\n Return true if all characters in the string are digits and there is\n at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.islower()\n\n Return true if all cased characters in the string are lowercase and\n there 
is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isspace()\n\n Return true if there are only whitespace characters in the string\n and there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.istitle()\n\n Return true if the string is a titlecased string and there is at\n least one character, for example uppercase characters may only\n follow uncased characters and lowercase characters only cased ones.\n Return false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isupper()\n\n Return true if all cased characters in the string are uppercase and\n there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.join(iterable)\n\n Return a string which is the concatenation of the strings in the\n *iterable* *iterable*. The separator between elements is the\n string providing this method.\n\nstr.ljust(width[, fillchar])\n\n Return the string left justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than\n ``len(s)``.\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.lower()\n\n Return a copy of the string converted to lowercase.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.lstrip([chars])\n\n Return a copy of the string with leading characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. 
The *chars* argument is not a prefix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.lstrip()\n \'spacious \'\n >>> \'www.example.com\'.lstrip(\'cmowz.\')\n \'example.com\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.partition(sep)\n\n Split the string at the first occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing the string itself, followed by\n two empty strings.\n\n New in version 2.5.\n\nstr.replace(old, new[, count])\n\n Return a copy of the string with all occurrences of substring *old*\n replaced by *new*. If the optional argument *count* is given, only\n the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n Return the highest index in the string where substring *sub* is\n found, such that *sub* is contained within ``s[start:end]``.\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` on failure.\n\nstr.rindex(sub[, start[, end]])\n\n Like ``rfind()`` but raises ``ValueError`` when the substring *sub*\n is not found.\n\nstr.rjust(width[, fillchar])\n\n Return the string right justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than\n ``len(s)``.\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.rpartition(sep)\n\n Split the string at the last occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing two empty strings, followed by\n the string itself.\n\n New in version 2.5.\n\nstr.rsplit([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. 
If *maxsplit* is given, at most *maxsplit* splits\n are done, the *rightmost* ones. If *sep* is not specified or\n ``None``, any whitespace string is a separator. Except for\n splitting from the right, ``rsplit()`` behaves like ``split()``\n which is described in detail below.\n\n New in version 2.4.\n\nstr.rstrip([chars])\n\n Return a copy of the string with trailing characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a suffix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.rstrip()\n \' spacious\'\n >>> \'mississippi\'.rstrip(\'ipz\')\n \'mississ\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.split([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit*\n splits are done (thus, the list will have at most ``maxsplit+1``\n elements). If *maxsplit* is not specified, then there is no limit\n on the number of splits (all possible splits are made).\n\n If *sep* is given, consecutive delimiters are not grouped together\n and are deemed to delimit empty strings (for example,\n ``\'1,,2\'.split(\',\')`` returns ``[\'1\', \'\', \'2\']``). The *sep*\n argument may consist of multiple characters (for example,\n ``\'1<>2<>3\'.split(\'<>\')`` returns ``[\'1\', \'2\', \'3\']``). Splitting\n an empty string with a specified separator returns ``[\'\']``.\n\n If *sep* is not specified or is ``None``, a different splitting\n algorithm is applied: runs of consecutive whitespace are regarded\n as a single separator, and the result will contain no empty strings\n at the start or end if the string has leading or trailing\n whitespace. 
Consequently, splitting an empty string or a string\n consisting of just whitespace with a ``None`` separator returns\n ``[]``.\n\n For example, ``\' 1 2 3 \'.split()`` returns ``[\'1\', \'2\', \'3\']``,\n and ``\' 1 2 3 \'.split(None, 1)`` returns ``[\'1\', \'2 3 \']``.\n\nstr.splitlines([keepends])\n\n Return a list of the lines in the string, breaking at line\n boundaries. Line breaks are not included in the resulting list\n unless *keepends* is given and true.\n\nstr.startswith(prefix[, start[, end]])\n\n Return ``True`` if string starts with the *prefix*, otherwise\n return ``False``. *prefix* can also be a tuple of prefixes to look\n for. With optional *start*, test string beginning at that\n position. With optional *end*, stop comparing string at that\n position.\n\n Changed in version 2.5: Accept tuples as *prefix*.\n\nstr.strip([chars])\n\n Return a copy of the string with the leading and trailing\n characters removed. The *chars* argument is a string specifying the\n set of characters to be removed. If omitted or ``None``, the\n *chars* argument defaults to removing whitespace. The *chars*\n argument is not a prefix or suffix; rather, all combinations of its\n values are stripped:\n\n >>> \' spacious \'.strip()\n \'spacious\'\n >>> \'www.example.com\'.strip(\'cmowz.\')\n \'example\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.swapcase()\n\n Return a copy of the string with uppercase characters converted to\n lowercase and vice versa.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.title()\n\n Return a titlecased version of the string where words start with an\n uppercase character and the remaining characters are lowercase.\n\n The algorithm uses a simple language-independent definition of a\n word as groups of consecutive letters. 
The definition works in\n many contexts but it means that apostrophes in contractions and\n possessives form word boundaries, which may not be the desired\n result:\n\n >>> "they\'re bill\'s friends from the UK".title()\n "They\'Re Bill\'S Friends From The Uk"\n\n A workaround for apostrophes can be constructed using regular\n expressions:\n\n >>> import re\n >>> def titlecase(s):\n return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n lambda mo: mo.group(0)[0].upper() +\n mo.group(0)[1:].lower(),\n s)\n\n >>> titlecase("they\'re bill\'s friends.")\n "They\'re Bill\'s Friends."\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.translate(table[, deletechars])\n\n Return a copy of the string where all characters occurring in the\n optional argument *deletechars* are removed, and the remaining\n characters have been mapped through the given translation table,\n which must be a string of length 256.\n\n You can use the ``maketrans()`` helper function in the ``string``\n module to create a translation table. For string objects, set the\n *table* argument to ``None`` for translations that only delete\n characters:\n\n >>> \'read this short text\'.translate(None, \'aeiou\')\n \'rd ths shrt txt\'\n\n New in version 2.6: Support for a ``None`` *table* argument.\n\n For Unicode objects, the ``translate()`` method does not accept the\n optional *deletechars* argument. Instead, it returns a copy of the\n *s* where all characters have been mapped through the given\n translation table which must be a mapping of Unicode ordinals to\n Unicode ordinals, Unicode strings or ``None``. Unmapped characters\n are left untouched. 
Characters mapped to ``None`` are deleted.\n Note, a more flexible approach is to create a custom character\n mapping codec using the ``codecs`` module (see ``encodings.cp1251``\n for an example).\n\nstr.upper()\n\n Return a copy of the string converted to uppercase.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.zfill(width)\n\n Return the numeric string left filled with zeros in a string of\n length *width*. A sign prefix is handled correctly. The original\n string is returned if *width* is less than ``len(s)``.\n\n New in version 2.2.2.\n\nThe following methods are present only on unicode objects:\n\nunicode.isnumeric()\n\n Return ``True`` if there are only numeric characters in S,\n ``False`` otherwise. Numeric characters include digit characters,\n and all characters that have the Unicode numeric value property,\n e.g. U+2155, VULGAR FRACTION ONE FIFTH.\n\nunicode.isdecimal()\n\n Return ``True`` if there are only decimal characters in S,\n ``False`` otherwise. Decimal characters include digit characters,\n and all characters that that can be used to form decimal-radix\n numbers, e.g. U+0660, ARABIC-INDIC DIGIT ZERO.\n', - 'strings': u'\nString literals\n***************\n\nString literals are described by the following lexical definitions:\n\n stringliteral ::= [stringprefix](shortstring | longstring)\n stringprefix ::= "r" | "u" | "ur" | "R" | "U" | "UR" | "Ur" | "uR"\n shortstring ::= "\'" shortstringitem* "\'" | \'"\' shortstringitem* \'"\'\n longstring ::= "\'\'\'" longstringitem* "\'\'\'"\n | \'"""\' longstringitem* \'"""\'\n shortstringitem ::= shortstringchar | escapeseq\n longstringitem ::= longstringchar | escapeseq\n shortstringchar ::= \n longstringchar ::= \n escapeseq ::= "\\" \n\nOne syntactic restriction not indicated by these productions is that\nwhitespace is not allowed between the **stringprefix** and the rest of\nthe string literal. 
The source character set is defined by the\nencoding declaration; it is ASCII if no encoding declaration is given\nin the source file; see section *Encoding declarations*.\n\nIn plain English: String literals can be enclosed in matching single\nquotes (``\'``) or double quotes (``"``). They can also be enclosed in\nmatching groups of three single or double quotes (these are generally\nreferred to as *triple-quoted strings*). The backslash (``\\``)\ncharacter is used to escape characters that otherwise have a special\nmeaning, such as newline, backslash itself, or the quote character.\nString literals may optionally be prefixed with a letter ``\'r\'`` or\n``\'R\'``; such strings are called *raw strings* and use different rules\nfor interpreting backslash escape sequences. A prefix of ``\'u\'`` or\n``\'U\'`` makes the string a Unicode string. Unicode strings use the\nUnicode character set as defined by the Unicode Consortium and ISO\n10646. Some additional escape sequences, described below, are\navailable in Unicode strings. The two prefix characters may be\ncombined; in this case, ``\'u\'`` must appear before ``\'r\'``.\n\nIn triple-quoted strings, unescaped newlines and quotes are allowed\n(and are retained), except that three unescaped quotes in a row\nterminate the string. (A "quote" is the character used to open the\nstring, i.e. either ``\'`` or ``"``.)\n\nUnless an ``\'r\'`` or ``\'R\'`` prefix is present, escape sequences in\nstrings are interpreted according to rules similar to those used by\nStandard C. 
The recognized escape sequences are:\n\n+-------------------+-----------------------------------+---------+\n| Escape Sequence | Meaning | Notes |\n+===================+===================================+=========+\n| ``\\newline`` | Ignored | |\n+-------------------+-----------------------------------+---------+\n| ``\\\\`` | Backslash (``\\``) | |\n+-------------------+-----------------------------------+---------+\n| ``\\\'`` | Single quote (``\'``) | |\n+-------------------+-----------------------------------+---------+\n| ``\\"`` | Double quote (``"``) | |\n+-------------------+-----------------------------------+---------+\n| ``\\a`` | ASCII Bell (BEL) | |\n+-------------------+-----------------------------------+---------+\n| ``\\b`` | ASCII Backspace (BS) | |\n+-------------------+-----------------------------------+---------+\n| ``\\f`` | ASCII Formfeed (FF) | |\n+-------------------+-----------------------------------+---------+\n| ``\\n`` | ASCII Linefeed (LF) | |\n+-------------------+-----------------------------------+---------+\n| ``\\N{name}`` | Character named *name* in the | |\n| | Unicode database (Unicode only) | |\n+-------------------+-----------------------------------+---------+\n| ``\\r`` | ASCII Carriage Return (CR) | |\n+-------------------+-----------------------------------+---------+\n| ``\\t`` | ASCII Horizontal Tab (TAB) | |\n+-------------------+-----------------------------------+---------+\n| ``\\uxxxx`` | Character with 16-bit hex value | (1) |\n| | *xxxx* (Unicode only) | |\n+-------------------+-----------------------------------+---------+\n| ``\\Uxxxxxxxx`` | Character with 32-bit hex value | (2) |\n| | *xxxxxxxx* (Unicode only) | |\n+-------------------+-----------------------------------+---------+\n| ``\\v`` | ASCII Vertical Tab (VT) | |\n+-------------------+-----------------------------------+---------+\n| ``\\ooo`` | Character with octal value *ooo* | (3,5) 
|\n+-------------------+-----------------------------------+---------+\n| ``\\xhh`` | Character with hex value *hh* | (4,5) |\n+-------------------+-----------------------------------+---------+\n\nNotes:\n\n1. Individual code units which form parts of a surrogate pair can be\n encoded using this escape sequence.\n\n2. Any Unicode character can be encoded this way, but characters\n outside the Basic Multilingual Plane (BMP) will be encoded using a\n surrogate pair if Python is compiled to use 16-bit code units (the\n default). Individual code units which form parts of a surrogate\n pair can be encoded using this escape sequence.\n\n3. As in Standard C, up to three octal digits are accepted.\n\n4. Unlike in Standard C, exactly two hex digits are required.\n\n5. In a string literal, hexadecimal and octal escapes denote the byte\n with the given value; it is not necessary that the byte encodes a\n character in the source character set. In a Unicode literal, these\n escapes denote a Unicode character with the given value.\n\nUnlike Standard C, all unrecognized escape sequences are left in the\nstring unchanged, i.e., *the backslash is left in the string*. (This\nbehavior is useful when debugging: if an escape sequence is mistyped,\nthe resulting output is more easily recognized as broken.) It is also\nimportant to note that the escape sequences marked as "(Unicode only)"\nin the table above fall into the category of unrecognized escapes for\nnon-Unicode string literals.\n\nWhen an ``\'r\'`` or ``\'R\'`` prefix is present, a character following a\nbackslash is included in the string without change, and *all\nbackslashes are left in the string*. For example, the string literal\n``r"\\n"`` consists of two characters: a backslash and a lowercase\n``\'n\'``. 
String quotes can be escaped with a backslash, but the\nbackslash remains in the string; for example, ``r"\\""`` is a valid\nstring literal consisting of two characters: a backslash and a double\nquote; ``r"\\"`` is not a valid string literal (even a raw string\ncannot end in an odd number of backslashes). Specifically, *a raw\nstring cannot end in a single backslash* (since the backslash would\nescape the following quote character). Note also that a single\nbackslash followed by a newline is interpreted as those two characters\nas part of the string, *not* as a line continuation.\n\nWhen an ``\'r\'`` or ``\'R\'`` prefix is used in conjunction with a\n``\'u\'`` or ``\'U\'`` prefix, then the ``\\uXXXX`` and ``\\UXXXXXXXX``\nescape sequences are processed while *all other backslashes are left\nin the string*. For example, the string literal ``ur"\\u0062\\n"``\nconsists of three Unicode characters: \'LATIN SMALL LETTER B\', \'REVERSE\nSOLIDUS\', and \'LATIN SMALL LETTER N\'. Backslashes can be escaped with\na preceding backslash; however, both remain in the string. As a\nresult, ``\\uXXXX`` escape sequences are only recognized when there are\nan odd number of backslashes.\n', + 'string-methods': u'\nString Methods\n**************\n\nBelow are listed the string methods which both 8-bit strings and\nUnicode objects support. Some of them are also available on\n``bytearray`` objects.\n\nIn addition, Python\'s strings support the sequence type methods\ndescribed in the *Sequence Types --- str, unicode, list, tuple,\nbytearray, buffer, xrange* section. To output formatted strings use\ntemplate strings or the ``%`` operator described in the *String\nFormatting Operations* section. 
Also, see the ``re`` module for string\nfunctions based on regular expressions.\n\nstr.capitalize()\n\n Return a copy of the string with its first character capitalized\n and the rest lowercased.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.center(width[, fillchar])\n\n Return centered in a string of length *width*. Padding is done\n using the specified *fillchar* (default is a space).\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.count(sub[, start[, end]])\n\n Return the number of non-overlapping occurrences of substring *sub*\n in the range [*start*, *end*]. Optional arguments *start* and\n *end* are interpreted as in slice notation.\n\nstr.decode([encoding[, errors]])\n\n Decodes the string using the codec registered for *encoding*.\n *encoding* defaults to the default string encoding. *errors* may\n be given to set a different error handling scheme. The default is\n ``\'strict\'``, meaning that encoding errors raise ``UnicodeError``.\n Other possible values are ``\'ignore\'``, ``\'replace\'`` and any other\n name registered via ``codecs.register_error()``, see section *Codec\n Base Classes*.\n\n New in version 2.2.\n\n Changed in version 2.3: Support for other error handling schemes\n added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.encode([encoding[, errors]])\n\n Return an encoded version of the string. Default encoding is the\n current default string encoding. *errors* may be given to set a\n different error handling scheme. The default for *errors* is\n ``\'strict\'``, meaning that encoding errors raise a\n ``UnicodeError``. Other possible values are ``\'ignore\'``,\n ``\'replace\'``, ``\'xmlcharrefreplace\'``, ``\'backslashreplace\'`` and\n any other name registered via ``codecs.register_error()``, see\n section *Codec Base Classes*. 
For a list of possible encodings, see\n section *Standard Encodings*.\n\n New in version 2.0.\n\n Changed in version 2.3: Support for ``\'xmlcharrefreplace\'`` and\n ``\'backslashreplace\'`` and other error handling schemes added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n Return ``True`` if the string ends with the specified *suffix*,\n otherwise return ``False``. *suffix* can also be a tuple of\n suffixes to look for. With optional *start*, test beginning at\n that position. With optional *end*, stop comparing at that\n position.\n\n Changed in version 2.5: Accept tuples as *suffix*.\n\nstr.expandtabs([tabsize])\n\n Return a copy of the string where all tab characters are replaced\n by one or more spaces, depending on the current column and the\n given tab size. The column number is reset to zero after each\n newline occurring in the string. If *tabsize* is not given, a tab\n size of ``8`` characters is assumed. This doesn\'t understand other\n non-printing characters or escape sequences.\n\nstr.find(sub[, start[, end]])\n\n Return the lowest index in the string where substring *sub* is\n found, such that *sub* is contained in the slice ``s[start:end]``.\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` if *sub* is not found.\n\n Note: The ``find()`` method should be used only if you need to know the\n position of *sub*. To check if *sub* is a substring or not, use\n the ``in`` operator:\n\n >>> \'Py\' in \'Python\'\n True\n\nstr.format(*args, **kwargs)\n\n Perform a string formatting operation. The string on which this\n method is called can contain literal text or replacement fields\n delimited by braces ``{}``. Each replacement field contains either\n the numeric index of a positional argument, or the name of a\n keyword argument. 
Returns a copy of the string where each\n replacement field is replaced with the string value of the\n corresponding argument.\n\n >>> "The sum of 1 + 2 is {0}".format(1+2)\n \'The sum of 1 + 2 is 3\'\n\n See *Format String Syntax* for a description of the various\n formatting options that can be specified in format strings.\n\n This method of string formatting is the new standard in Python 3.0,\n and should be preferred to the ``%`` formatting described in\n *String Formatting Operations* in new code.\n\n New in version 2.6.\n\nstr.index(sub[, start[, end]])\n\n Like ``find()``, but raise ``ValueError`` when the substring is not\n found.\n\nstr.isalnum()\n\n Return true if all characters in the string are alphanumeric and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isalpha()\n\n Return true if all characters in the string are alphabetic and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isdigit()\n\n Return true if all characters in the string are digits and there is\n at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.islower()\n\n Return true if all cased characters in the string are lowercase and\n there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isspace()\n\n Return true if there are only whitespace characters in the string\n and there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.istitle()\n\n Return true if the string is a titlecased string and there is at\n least one character, for example uppercase characters may only\n follow uncased characters and lowercase characters only cased ones.\n Return false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isupper()\n\n Return true if all cased characters in the string 
are uppercase and\n there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.join(iterable)\n\n Return a string which is the concatenation of the strings in the\n *iterable* *iterable*. The separator between elements is the\n string providing this method.\n\nstr.ljust(width[, fillchar])\n\n Return the string left justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than\n ``len(s)``.\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.lower()\n\n Return a copy of the string converted to lowercase.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.lstrip([chars])\n\n Return a copy of the string with leading characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a prefix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.lstrip()\n \'spacious \'\n >>> \'www.example.com\'.lstrip(\'cmowz.\')\n \'example.com\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.partition(sep)\n\n Split the string at the first occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing the string itself, followed by\n two empty strings.\n\n New in version 2.5.\n\nstr.replace(old, new[, count])\n\n Return a copy of the string with all occurrences of substring *old*\n replaced by *new*. 
If the optional argument *count* is given, only\n the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n Return the highest index in the string where substring *sub* is\n found, such that *sub* is contained within ``s[start:end]``.\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` on failure.\n\nstr.rindex(sub[, start[, end]])\n\n Like ``rfind()`` but raises ``ValueError`` when the substring *sub*\n is not found.\n\nstr.rjust(width[, fillchar])\n\n Return the string right justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than\n ``len(s)``.\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.rpartition(sep)\n\n Split the string at the last occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing two empty strings, followed by\n the string itself.\n\n New in version 2.5.\n\nstr.rsplit([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit* splits\n are done, the *rightmost* ones. If *sep* is not specified or\n ``None``, any whitespace string is a separator. Except for\n splitting from the right, ``rsplit()`` behaves like ``split()``\n which is described in detail below.\n\n New in version 2.4.\n\nstr.rstrip([chars])\n\n Return a copy of the string with trailing characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. 
The *chars* argument is not a suffix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.rstrip()\n \' spacious\'\n >>> \'mississippi\'.rstrip(\'ipz\')\n \'mississ\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.split([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit*\n splits are done (thus, the list will have at most ``maxsplit+1``\n elements). If *maxsplit* is not specified, then there is no limit\n on the number of splits (all possible splits are made).\n\n If *sep* is given, consecutive delimiters are not grouped together\n and are deemed to delimit empty strings (for example,\n ``\'1,,2\'.split(\',\')`` returns ``[\'1\', \'\', \'2\']``). The *sep*\n argument may consist of multiple characters (for example,\n ``\'1<>2<>3\'.split(\'<>\')`` returns ``[\'1\', \'2\', \'3\']``). Splitting\n an empty string with a specified separator returns ``[\'\']``.\n\n If *sep* is not specified or is ``None``, a different splitting\n algorithm is applied: runs of consecutive whitespace are regarded\n as a single separator, and the result will contain no empty strings\n at the start or end if the string has leading or trailing\n whitespace. Consequently, splitting an empty string or a string\n consisting of just whitespace with a ``None`` separator returns\n ``[]``.\n\n For example, ``\' 1 2 3 \'.split()`` returns ``[\'1\', \'2\', \'3\']``,\n and ``\' 1 2 3 \'.split(None, 1)`` returns ``[\'1\', \'2 3 \']``.\n\nstr.splitlines([keepends])\n\n Return a list of the lines in the string, breaking at line\n boundaries. Line breaks are not included in the resulting list\n unless *keepends* is given and true.\n\nstr.startswith(prefix[, start[, end]])\n\n Return ``True`` if string starts with the *prefix*, otherwise\n return ``False``. *prefix* can also be a tuple of prefixes to look\n for. 
With optional *start*, test string beginning at that\n position. With optional *end*, stop comparing string at that\n position.\n\n Changed in version 2.5: Accept tuples as *prefix*.\n\nstr.strip([chars])\n\n Return a copy of the string with the leading and trailing\n characters removed. The *chars* argument is a string specifying the\n set of characters to be removed. If omitted or ``None``, the\n *chars* argument defaults to removing whitespace. The *chars*\n argument is not a prefix or suffix; rather, all combinations of its\n values are stripped:\n\n >>> \' spacious \'.strip()\n \'spacious\'\n >>> \'www.example.com\'.strip(\'cmowz.\')\n \'example\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.swapcase()\n\n Return a copy of the string with uppercase characters converted to\n lowercase and vice versa.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.title()\n\n Return a titlecased version of the string where words start with an\n uppercase character and the remaining characters are lowercase.\n\n The algorithm uses a simple language-independent definition of a\n word as groups of consecutive letters. 
The definition works in\n many contexts but it means that apostrophes in contractions and\n possessives form word boundaries, which may not be the desired\n result:\n\n >>> "they\'re bill\'s friends from the UK".title()\n "They\'Re Bill\'S Friends From The Uk"\n\n A workaround for apostrophes can be constructed using regular\n expressions:\n\n >>> import re\n >>> def titlecase(s):\n return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n lambda mo: mo.group(0)[0].upper() +\n mo.group(0)[1:].lower(),\n s)\n\n >>> titlecase("they\'re bill\'s friends.")\n "They\'re Bill\'s Friends."\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.translate(table[, deletechars])\n\n Return a copy of the string where all characters occurring in the\n optional argument *deletechars* are removed, and the remaining\n characters have been mapped through the given translation table,\n which must be a string of length 256.\n\n You can use the ``maketrans()`` helper function in the ``string``\n module to create a translation table. For string objects, set the\n *table* argument to ``None`` for translations that only delete\n characters:\n\n >>> \'read this short text\'.translate(None, \'aeiou\')\n \'rd ths shrt txt\'\n\n New in version 2.6: Support for a ``None`` *table* argument.\n\n For Unicode objects, the ``translate()`` method does not accept the\n optional *deletechars* argument. Instead, it returns a copy of the\n *s* where all characters have been mapped through the given\n translation table which must be a mapping of Unicode ordinals to\n Unicode ordinals, Unicode strings or ``None``. Unmapped characters\n are left untouched. 
Characters mapped to ``None`` are deleted.\n Note, a more flexible approach is to create a custom character\n mapping codec using the ``codecs`` module (see ``encodings.cp1251``\n for an example).\n\nstr.upper()\n\n Return a copy of the string converted to uppercase.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.zfill(width)\n\n Return the numeric string left filled with zeros in a string of\n length *width*. A sign prefix is handled correctly. The original\n string is returned if *width* is less than ``len(s)``.\n\n New in version 2.2.2.\n\nThe following methods are present only on unicode objects:\n\nunicode.isnumeric()\n\n Return ``True`` if there are only numeric characters in S,\n ``False`` otherwise. Numeric characters include digit characters,\n and all characters that have the Unicode numeric value property,\n e.g. U+2155, VULGAR FRACTION ONE FIFTH.\n\nunicode.isdecimal()\n\n Return ``True`` if there are only decimal characters in S,\n ``False`` otherwise. Decimal characters include digit characters,\n and all characters that that can be used to form decimal-radix\n numbers, e.g. U+0660, ARABIC-INDIC DIGIT ZERO.\n', + 'strings': u'\nString literals\n***************\n\nString literals are described by the following lexical definitions:\n\n stringliteral ::= [stringprefix](shortstring | longstring)\n stringprefix ::= "r" | "u" | "ur" | "R" | "U" | "UR" | "Ur" | "uR"\n | "b" | "B" | "br" | "Br" | "bR" | "BR"\n shortstring ::= "\'" shortstringitem* "\'" | \'"\' shortstringitem* \'"\'\n longstring ::= "\'\'\'" longstringitem* "\'\'\'"\n | \'"""\' longstringitem* \'"""\'\n shortstringitem ::= shortstringchar | escapeseq\n longstringitem ::= longstringchar | escapeseq\n shortstringchar ::= \n longstringchar ::= \n escapeseq ::= "\\" \n\nOne syntactic restriction not indicated by these productions is that\nwhitespace is not allowed between the **stringprefix** and the rest of\nthe string literal. 
The source character set is defined by the\nencoding declaration; it is ASCII if no encoding declaration is given\nin the source file; see section *Encoding declarations*.\n\nIn plain English: String literals can be enclosed in matching single\nquotes (``\'``) or double quotes (``"``). They can also be enclosed in\nmatching groups of three single or double quotes (these are generally\nreferred to as *triple-quoted strings*). The backslash (``\\``)\ncharacter is used to escape characters that otherwise have a special\nmeaning, such as newline, backslash itself, or the quote character.\nString literals may optionally be prefixed with a letter ``\'r\'`` or\n``\'R\'``; such strings are called *raw strings* and use different rules\nfor interpreting backslash escape sequences. A prefix of ``\'u\'`` or\n``\'U\'`` makes the string a Unicode string. Unicode strings use the\nUnicode character set as defined by the Unicode Consortium and ISO\n10646. Some additional escape sequences, described below, are\navailable in Unicode strings. A prefix of ``\'b\'`` or ``\'B\'`` is\nignored in Python 2; it indicates that the literal should become a\nbytes literal in Python 3 (e.g. when code is automatically converted\nwith 2to3). A ``\'u\'`` or ``\'b\'`` prefix may be followed by an ``\'r\'``\nprefix.\n\nIn triple-quoted strings, unescaped newlines and quotes are allowed\n(and are retained), except that three unescaped quotes in a row\nterminate the string. (A "quote" is the character used to open the\nstring, i.e. either ``\'`` or ``"``.)\n\nUnless an ``\'r\'`` or ``\'R\'`` prefix is present, escape sequences in\nstrings are interpreted according to rules similar to those used by\nStandard C. 
The recognized escape sequences are:\n\n+-------------------+-----------------------------------+---------+\n| Escape Sequence | Meaning | Notes |\n+===================+===================================+=========+\n| ``\\newline`` | Ignored | |\n+-------------------+-----------------------------------+---------+\n| ``\\\\`` | Backslash (``\\``) | |\n+-------------------+-----------------------------------+---------+\n| ``\\\'`` | Single quote (``\'``) | |\n+-------------------+-----------------------------------+---------+\n| ``\\"`` | Double quote (``"``) | |\n+-------------------+-----------------------------------+---------+\n| ``\\a`` | ASCII Bell (BEL) | |\n+-------------------+-----------------------------------+---------+\n| ``\\b`` | ASCII Backspace (BS) | |\n+-------------------+-----------------------------------+---------+\n| ``\\f`` | ASCII Formfeed (FF) | |\n+-------------------+-----------------------------------+---------+\n| ``\\n`` | ASCII Linefeed (LF) | |\n+-------------------+-----------------------------------+---------+\n| ``\\N{name}`` | Character named *name* in the | |\n| | Unicode database (Unicode only) | |\n+-------------------+-----------------------------------+---------+\n| ``\\r`` | ASCII Carriage Return (CR) | |\n+-------------------+-----------------------------------+---------+\n| ``\\t`` | ASCII Horizontal Tab (TAB) | |\n+-------------------+-----------------------------------+---------+\n| ``\\uxxxx`` | Character with 16-bit hex value | (1) |\n| | *xxxx* (Unicode only) | |\n+-------------------+-----------------------------------+---------+\n| ``\\Uxxxxxxxx`` | Character with 32-bit hex value | (2) |\n| | *xxxxxxxx* (Unicode only) | |\n+-------------------+-----------------------------------+---------+\n| ``\\v`` | ASCII Vertical Tab (VT) | |\n+-------------------+-----------------------------------+---------+\n| ``\\ooo`` | Character with octal value *ooo* | (3,5) 
|\n+-------------------+-----------------------------------+---------+\n| ``\\xhh`` | Character with hex value *hh* | (4,5) |\n+-------------------+-----------------------------------+---------+\n\nNotes:\n\n1. Individual code units which form parts of a surrogate pair can be\n encoded using this escape sequence.\n\n2. Any Unicode character can be encoded this way, but characters\n outside the Basic Multilingual Plane (BMP) will be encoded using a\n surrogate pair if Python is compiled to use 16-bit code units (the\n default). Individual code units which form parts of a surrogate\n pair can be encoded using this escape sequence.\n\n3. As in Standard C, up to three octal digits are accepted.\n\n4. Unlike in Standard C, exactly two hex digits are required.\n\n5. In a string literal, hexadecimal and octal escapes denote the byte\n with the given value; it is not necessary that the byte encodes a\n character in the source character set. In a Unicode literal, these\n escapes denote a Unicode character with the given value.\n\nUnlike Standard C, all unrecognized escape sequences are left in the\nstring unchanged, i.e., *the backslash is left in the string*. (This\nbehavior is useful when debugging: if an escape sequence is mistyped,\nthe resulting output is more easily recognized as broken.) It is also\nimportant to note that the escape sequences marked as "(Unicode only)"\nin the table above fall into the category of unrecognized escapes for\nnon-Unicode string literals.\n\nWhen an ``\'r\'`` or ``\'R\'`` prefix is present, a character following a\nbackslash is included in the string without change, and *all\nbackslashes are left in the string*. For example, the string literal\n``r"\\n"`` consists of two characters: a backslash and a lowercase\n``\'n\'``. 
String quotes can be escaped with a backslash, but the\nbackslash remains in the string; for example, ``r"\\""`` is a valid\nstring literal consisting of two characters: a backslash and a double\nquote; ``r"\\"`` is not a valid string literal (even a raw string\ncannot end in an odd number of backslashes). Specifically, *a raw\nstring cannot end in a single backslash* (since the backslash would\nescape the following quote character). Note also that a single\nbackslash followed by a newline is interpreted as those two characters\nas part of the string, *not* as a line continuation.\n\nWhen an ``\'r\'`` or ``\'R\'`` prefix is used in conjunction with a\n``\'u\'`` or ``\'U\'`` prefix, then the ``\\uXXXX`` and ``\\UXXXXXXXX``\nescape sequences are processed while *all other backslashes are left\nin the string*. For example, the string literal ``ur"\\u0062\\n"``\nconsists of three Unicode characters: \'LATIN SMALL LETTER B\', \'REVERSE\nSOLIDUS\', and \'LATIN SMALL LETTER N\'. Backslashes can be escaped with\na preceding backslash; however, both remain in the string. As a\nresult, ``\\uXXXX`` escape sequences are only recognized when there are\nan odd number of backslashes.\n', 'subscriptions': u'\nSubscriptions\n*************\n\nA subscription selects an item of a sequence (string, tuple or list)\nor mapping (dictionary) object:\n\n subscription ::= primary "[" expression_list "]"\n\nThe primary must evaluate to an object of a sequence or mapping type.\n\nIf the primary is a mapping, the expression list must evaluate to an\nobject whose value is one of the keys of the mapping, and the\nsubscription selects the value in the mapping that corresponds to that\nkey. (The expression list is a tuple except if it has exactly one\nitem.)\n\nIf the primary is a sequence, the expression (list) must evaluate to a\nplain integer. If this value is negative, the length of the sequence\nis added to it (so that, e.g., ``x[-1]`` selects the last item of\n``x``.) 
The resulting value must be a nonnegative integer less than\nthe number of items in the sequence, and the subscription selects the\nitem whose index is that value (counting from zero).\n\nA string\'s items are characters. A character is not a separate data\ntype but a string of exactly one character.\n', 'truth': u"\nTruth Value Testing\n*******************\n\nAny object can be tested for truth value, for use in an ``if`` or\n``while`` condition or as operand of the Boolean operations below. The\nfollowing values are considered false:\n\n* ``None``\n\n* ``False``\n\n* zero of any numeric type, for example, ``0``, ``0L``, ``0.0``,\n ``0j``.\n\n* any empty sequence, for example, ``''``, ``()``, ``[]``.\n\n* any empty mapping, for example, ``{}``.\n\n* instances of user-defined classes, if the class defines a\n ``__nonzero__()`` or ``__len__()`` method, when that method returns\n the integer zero or ``bool`` value ``False``. [1]\n\nAll other values are considered true --- so objects of many types are\nalways true.\n\nOperations and built-in functions that have a Boolean result always\nreturn ``0`` or ``False`` for false and ``1`` or ``True`` for true,\nunless otherwise stated. (Important exception: the Boolean operations\n``or`` and ``and`` always return one of their operands.)\n", 'try': u'\nThe ``try`` statement\n*********************\n\nThe ``try`` statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression [("as" | ",") target]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nChanged in version 2.5: In previous versions of Python,\n``try``...``except``...``finally`` did not work. ``try``...``except``\nhad to be nested in ``try``...``finally``.\n\nThe ``except`` clause(s) specify one or more exception handlers. 
When\nno exception occurs in the ``try`` clause, no exception handler is\nexecuted. When an exception occurs in the ``try`` suite, a search for\nan exception handler is started. This search inspects the except\nclauses in turn until one is found that matches the exception. An\nexpression-less except clause, if present, must be last; it matches\nany exception. For an except clause with an expression, that\nexpression is evaluated, and the clause matches the exception if the\nresulting object is "compatible" with the exception. An object is\ncompatible with an exception if it is the class or a base class of the\nexception object, a tuple containing an item compatible with the\nexception, or, in the (deprecated) case of string exceptions, is the\nraised string itself (note that the object identities must match, i.e.\nit must be the same string object, not just a string with the same\nvalue).\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire ``try`` statement\nraised the exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified in that except clause, if present, and the except\nclause\'s suite is executed. All except clauses must have an\nexecutable block. When the end of this block is reached, execution\ncontinues normally after the entire try statement. 
(This means that\nif two nested handlers exist for the same exception, and the exception\noccurs in the try clause of the inner handler, the outer handler will\nnot handle the exception.)\n\nBefore an except clause\'s suite is executed, details about the\nexception are assigned to three variables in the ``sys`` module:\n``sys.exc_type`` receives the object identifying the exception;\n``sys.exc_value`` receives the exception\'s parameter;\n``sys.exc_traceback`` receives a traceback object (see section *The\nstandard type hierarchy*) identifying the point in the program where\nthe exception occurred. These details are also available through the\n``sys.exc_info()`` function, which returns a tuple ``(exc_type,\nexc_value, exc_traceback)``. Use of the corresponding variables is\ndeprecated in favor of this function, since their use is unsafe in a\nthreaded program. As of Python 1.5, the variables are restored to\ntheir previous values (before the call) when returning from a function\nthat handled an exception.\n\nThe optional ``else`` clause is executed if and when control flows off\nthe end of the ``try`` clause. [2] Exceptions in the ``else`` clause\nare not handled by the preceding ``except`` clauses.\n\nIf ``finally`` is present, it specifies a \'cleanup\' handler. The\n``try`` clause is executed, including any ``except`` and ``else``\nclauses. If an exception occurs in any of the clauses and is not\nhandled, the exception is temporarily saved. The ``finally`` clause is\nexecuted. If there is a saved exception, it is re-raised at the end\nof the ``finally`` clause. If the ``finally`` clause raises another\nexception or executes a ``return`` or ``break`` statement, the saved\nexception is lost. 
The exception information is not available to the\nprogram during execution of the ``finally`` clause.\n\nWhen a ``return``, ``break`` or ``continue`` statement is executed in\nthe ``try`` suite of a ``try``...``finally`` statement, the\n``finally`` clause is also executed \'on the way out.\' A ``continue``\nstatement is illegal in the ``finally`` clause. (The reason is a\nproblem with the current implementation --- this restriction may be\nlifted in the future).\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the ``raise`` statement to\ngenerate exceptions may be found in section *The raise statement*.\n', - 'types': u'\nThe standard type hierarchy\n***************************\n\nBelow is a list of the types that are built into Python. Extension\nmodules (written in C, Java, or other languages, depending on the\nimplementation) can define additional types. Future versions of\nPython may add types to the type hierarchy (e.g., rational numbers,\nefficiently stored arrays of integers, etc.).\n\nSome of the type descriptions below contain a paragraph listing\n\'special attributes.\' These are attributes that provide access to the\nimplementation and are not intended for general use. Their definition\nmay change in the future.\n\nNone\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name ``None``.\n It is used to signify the absence of a value in many situations,\n e.g., it is returned from functions that don\'t explicitly return\n anything. Its truth value is false.\n\nNotImplemented\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name\n ``NotImplemented``. Numeric methods and rich comparison methods may\n return this value if they do not implement the operation for the\n operands provided. 
(The interpreter will then try the reflected\n operation, or some other fallback, depending on the operator.) Its\n truth value is true.\n\nEllipsis\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name\n ``Ellipsis``. It is used to indicate the presence of the ``...``\n syntax in a slice. Its truth value is true.\n\n``numbers.Number``\n These are created by numeric literals and returned as results by\n arithmetic operators and arithmetic built-in functions. Numeric\n objects are immutable; once created their value never changes.\n Python numbers are of course strongly related to mathematical\n numbers, but subject to the limitations of numerical representation\n in computers.\n\n Python distinguishes between integers, floating point numbers, and\n complex numbers:\n\n ``numbers.Integral``\n These represent elements from the mathematical set of integers\n (positive and negative).\n\n There are three types of integers:\n\n Plain integers\n These represent numbers in the range -2147483648 through\n 2147483647. (The range may be larger on machines with a\n larger natural word size, but not smaller.) When the result\n of an operation would fall outside this range, the result is\n normally returned as a long integer (in some cases, the\n exception ``OverflowError`` is raised instead). For the\n purpose of shift and mask operations, integers are assumed to\n have a binary, 2\'s complement notation using 32 or more bits,\n and hiding no bits from the user (i.e., all 4294967296\n different bit patterns correspond to different values).\n\n Long integers\n These represent numbers in an unlimited range, subject to\n available (virtual) memory only. 
For the purpose of shift\n and mask operations, a binary representation is assumed, and\n negative numbers are represented in a variant of 2\'s\n complement which gives the illusion of an infinite string of\n sign bits extending to the left.\n\n Booleans\n These represent the truth values False and True. The two\n objects representing the values False and True are the only\n Boolean objects. The Boolean type is a subtype of plain\n integers, and Boolean values behave like the values 0 and 1,\n respectively, in almost all contexts, the exception being\n that when converted to a string, the strings ``"False"`` or\n ``"True"`` are returned, respectively.\n\n The rules for integer representation are intended to give the\n most meaningful interpretation of shift and mask operations\n involving negative integers and the least surprises when\n switching between the plain and long integer domains. Any\n operation, if it yields a result in the plain integer domain,\n will yield the same result in the long integer domain or when\n using mixed operands. The switch between domains is transparent\n to the programmer.\n\n ``numbers.Real`` (``float``)\n These represent machine-level double precision floating point\n numbers. You are at the mercy of the underlying machine\n architecture (and C or Java implementation) for the accepted\n range and handling of overflow. Python does not support single-\n precision floating point numbers; the savings in processor and\n memory usage that are usually the reason for using these is\n dwarfed by the overhead of using objects in Python, so there is\n no reason to complicate the language with two kinds of floating\n point numbers.\n\n ``numbers.Complex``\n These represent complex numbers as a pair of machine-level\n double precision floating point numbers. The same caveats apply\n as for floating point numbers. 
The real and imaginary parts of a\n complex number ``z`` can be retrieved through the read-only\n attributes ``z.real`` and ``z.imag``.\n\nSequences\n These represent finite ordered sets indexed by non-negative\n numbers. The built-in function ``len()`` returns the number of\n items of a sequence. When the length of a sequence is *n*, the\n index set contains the numbers 0, 1, ..., *n*-1. Item *i* of\n sequence *a* is selected by ``a[i]``.\n\n Sequences also support slicing: ``a[i:j]`` selects all items with\n index *k* such that *i* ``<=`` *k* ``<`` *j*. When used as an\n expression, a slice is a sequence of the same type. This implies\n that the index set is renumbered so that it starts at 0.\n\n Some sequences also support "extended slicing" with a third "step"\n parameter: ``a[i:j:k]`` selects all items of *a* with index *x*\n where ``x = i + n*k``, *n* ``>=`` ``0`` and *i* ``<=`` *x* ``<``\n *j*.\n\n Sequences are distinguished according to their mutability:\n\n Immutable sequences\n An object of an immutable sequence type cannot change once it is\n created. (If the object contains references to other objects,\n these other objects may be mutable and may be changed; however,\n the collection of objects directly referenced by an immutable\n object cannot change.)\n\n The following types are immutable sequences:\n\n Strings\n The items of a string are characters. There is no separate\n character type; a character is represented by a string of one\n item. Characters represent (at least) 8-bit bytes. The\n built-in functions ``chr()`` and ``ord()`` convert between\n characters and nonnegative integers representing the byte\n values. Bytes with the values 0-127 usually represent the\n corresponding ASCII values, but the interpretation of values\n is up to the program. 
The string data type is also used to\n represent arrays of bytes, e.g., to hold data read from a\n file.\n\n (On systems whose native character set is not ASCII, strings\n may use EBCDIC in their internal representation, provided the\n functions ``chr()`` and ``ord()`` implement a mapping between\n ASCII and EBCDIC, and string comparison preserves the ASCII\n order. Or perhaps someone can propose a better rule?)\n\n Unicode\n The items of a Unicode object are Unicode code units. A\n Unicode code unit is represented by a Unicode object of one\n item and can hold either a 16-bit or 32-bit value\n representing a Unicode ordinal (the maximum value for the\n ordinal is given in ``sys.maxunicode``, and depends on how\n Python is configured at compile time). Surrogate pairs may\n be present in the Unicode object, and will be reported as two\n separate items. The built-in functions ``unichr()`` and\n ``ord()`` convert between code units and nonnegative integers\n representing the Unicode ordinals as defined in the Unicode\n Standard 3.0. Conversion from and to other encodings are\n possible through the Unicode method ``encode()`` and the\n built-in function ``unicode()``.\n\n Tuples\n The items of a tuple are arbitrary Python objects. Tuples of\n two or more items are formed by comma-separated lists of\n expressions. A tuple of one item (a \'singleton\') can be\n formed by affixing a comma to an expression (an expression by\n itself does not create a tuple, since parentheses must be\n usable for grouping of expressions). An empty tuple can be\n formed by an empty pair of parentheses.\n\n Mutable sequences\n Mutable sequences can be changed after they are created. The\n subscription and slicing notations can be used as the target of\n assignment and ``del`` (delete) statements.\n\n There are currently two intrinsic mutable sequence types:\n\n Lists\n The items of a list are arbitrary Python objects. 
Lists are\n formed by placing a comma-separated list of expressions in\n square brackets. (Note that there are no special cases needed\n to form lists of length 0 or 1.)\n\n Byte Arrays\n A bytearray object is a mutable array. They are created by\n the built-in ``bytearray()`` constructor. Aside from being\n mutable (and hence unhashable), byte arrays otherwise provide\n the same interface and functionality as immutable bytes\n objects.\n\n The extension module ``array`` provides an additional example of\n a mutable sequence type.\n\nSet types\n These represent unordered, finite sets of unique, immutable\n objects. As such, they cannot be indexed by any subscript. However,\n they can be iterated over, and the built-in function ``len()``\n returns the number of items in a set. Common uses for sets are fast\n membership testing, removing duplicates from a sequence, and\n computing mathematical operations such as intersection, union,\n difference, and symmetric difference.\n\n For set elements, the same immutability rules apply as for\n dictionary keys. Note that numeric types obey the normal rules for\n numeric comparison: if two numbers compare equal (e.g., ``1`` and\n ``1.0``), only one of them can be contained in a set.\n\n There are currently two intrinsic set types:\n\n Sets\n These represent a mutable set. They are created by the built-in\n ``set()`` constructor and can be modified afterwards by several\n methods, such as ``add()``.\n\n Frozen sets\n These represent an immutable set. They are created by the\n built-in ``frozenset()`` constructor. As a frozenset is\n immutable and *hashable*, it can be used again as an element of\n another set, or as a dictionary key.\n\nMappings\n These represent finite sets of objects indexed by arbitrary index\n sets. The subscript notation ``a[k]`` selects the item indexed by\n ``k`` from the mapping ``a``; this can be used in expressions and\n as the target of assignments or ``del`` statements. 
The built-in\n function ``len()`` returns the number of items in a mapping.\n\n There is currently a single intrinsic mapping type:\n\n Dictionaries\n These represent finite sets of objects indexed by nearly\n arbitrary values. The only types of values not acceptable as\n keys are values containing lists or dictionaries or other\n mutable types that are compared by value rather than by object\n identity, the reason being that the efficient implementation of\n dictionaries requires a key\'s hash value to remain constant.\n Numeric types used for keys obey the normal rules for numeric\n comparison: if two numbers compare equal (e.g., ``1`` and\n ``1.0``) then they can be used interchangeably to index the same\n dictionary entry.\n\n Dictionaries are mutable; they can be created by the ``{...}``\n notation (see section *Dictionary displays*).\n\n The extension modules ``dbm``, ``gdbm``, and ``bsddb`` provide\n additional examples of mapping types.\n\nCallable types\n These are the types to which the function call operation (see\n section *Calls*) can be applied:\n\n User-defined functions\n A user-defined function object is created by a function\n definition (see section *Function definitions*). 
It should be\n called with an argument list containing the same number of items\n as the function\'s formal parameter list.\n\n Special attributes:\n\n +-------------------------+---------------------------------+-------------+\n | Attribute | Meaning | |\n +=========================+=================================+=============+\n | ``func_doc`` | The function\'s documentation | Writable |\n | | string, or ``None`` if | |\n | | unavailable | |\n +-------------------------+---------------------------------+-------------+\n | ``__doc__`` | Another way of spelling | Writable |\n | | ``func_doc`` | |\n +-------------------------+---------------------------------+-------------+\n | ``func_name`` | The function\'s name | Writable |\n +-------------------------+---------------------------------+-------------+\n | ``__name__`` | Another way of spelling | Writable |\n | | ``func_name`` | |\n +-------------------------+---------------------------------+-------------+\n | ``__module__`` | The name of the module the | Writable |\n | | function was defined in, or | |\n | | ``None`` if unavailable. | |\n +-------------------------+---------------------------------+-------------+\n | ``func_defaults`` | A tuple containing default | Writable |\n | | argument values for those | |\n | | arguments that have defaults, | |\n | | or ``None`` if no arguments | |\n | | have a default value | |\n +-------------------------+---------------------------------+-------------+\n | ``func_code`` | The code object representing | Writable |\n | | the compiled function body. | |\n +-------------------------+---------------------------------+-------------+\n | ``func_globals`` | A reference to the dictionary | Read-only |\n | | that holds the function\'s | |\n | | global variables --- the global | |\n | | namespace of the module in | |\n | | which the function was defined. 
| |\n +-------------------------+---------------------------------+-------------+\n | ``func_dict`` | The namespace supporting | Writable |\n | | arbitrary function attributes. | |\n +-------------------------+---------------------------------+-------------+\n | ``func_closure`` | ``None`` or a tuple of cells | Read-only |\n | | that contain bindings for the | |\n | | function\'s free variables. | |\n +-------------------------+---------------------------------+-------------+\n\n Most of the attributes labelled "Writable" check the type of the\n assigned value.\n\n Changed in version 2.4: ``func_name`` is now writable.\n\n Function objects also support getting and setting arbitrary\n attributes, which can be used, for example, to attach metadata\n to functions. Regular attribute dot-notation is used to get and\n set such attributes. *Note that the current implementation only\n supports function attributes on user-defined functions. Function\n attributes on built-in functions may be supported in the\n future.*\n\n Additional information about a function\'s definition can be\n retrieved from its code object; see the description of internal\n types below.\n\n User-defined methods\n A user-defined method object combines a class, a class instance\n (or ``None``) and any callable object (normally a user-defined\n function).\n\n Special read-only attributes: ``im_self`` is the class instance\n object, ``im_func`` is the function object; ``im_class`` is the\n class of ``im_self`` for bound methods or the class that asked\n for the method for unbound methods; ``__doc__`` is the method\'s\n documentation (same as ``im_func.__doc__``); ``__name__`` is the\n method name (same as ``im_func.__name__``); ``__module__`` is\n the name of the module the method was defined in, or ``None`` if\n unavailable.\n\n Changed in version 2.2: ``im_self`` used to refer to the class\n that defined the method.\n\n Changed in version 2.6: For 3.0 forward-compatibility,\n ``im_func`` is also 
available as ``__func__``, and ``im_self``\n as ``__self__``.\n\n Methods also support accessing (but not setting) the arbitrary\n function attributes on the underlying function object.\n\n User-defined method objects may be created when getting an\n attribute of a class (perhaps via an instance of that class), if\n that attribute is a user-defined function object, an unbound\n user-defined method object, or a class method object. When the\n attribute is a user-defined method object, a new method object\n is only created if the class from which it is being retrieved is\n the same as, or a derived class of, the class stored in the\n original method object; otherwise, the original method object is\n used as it is.\n\n When a user-defined method object is created by retrieving a\n user-defined function object from a class, its ``im_self``\n attribute is ``None`` and the method object is said to be\n unbound. When one is created by retrieving a user-defined\n function object from a class via one of its instances, its\n ``im_self`` attribute is the instance, and the method object is\n said to be bound. 
In either case, the new method\'s ``im_class``\n attribute is the class from which the retrieval takes place, and\n its ``im_func`` attribute is the original function object.\n\n When a user-defined method object is created by retrieving\n another method object from a class or instance, the behaviour is\n the same as for a function object, except that the ``im_func``\n attribute of the new instance is not the original method object\n but its ``im_func`` attribute.\n\n When a user-defined method object is created by retrieving a\n class method object from a class or instance, its ``im_self``\n attribute is the class itself (the same as the ``im_class``\n attribute), and its ``im_func`` attribute is the function object\n underlying the class method.\n\n When an unbound user-defined method object is called, the\n underlying function (``im_func``) is called, with the\n restriction that the first argument must be an instance of the\n proper class (``im_class``) or of a derived class thereof.\n\n When a bound user-defined method object is called, the\n underlying function (``im_func``) is called, inserting the class\n instance (``im_self``) in front of the argument list. For\n instance, when ``C`` is a class which contains a definition for\n a function ``f()``, and ``x`` is an instance of ``C``, calling\n ``x.f(1)`` is equivalent to calling ``C.f(x, 1)``.\n\n When a user-defined method object is derived from a class method\n object, the "class instance" stored in ``im_self`` will actually\n be the class itself, so that calling either ``x.f(1)`` or\n ``C.f(1)`` is equivalent to calling ``f(C,1)`` where ``f`` is\n the underlying function.\n\n Note that the transformation from function object to (unbound or\n bound) method object happens each time the attribute is\n retrieved from the class or instance. In some cases, a fruitful\n optimization is to assign the attribute to a local variable and\n call that local variable. 
Also notice that this transformation\n only happens for user-defined functions; other callable objects\n (and all non-callable objects) are retrieved without\n transformation. It is also important to note that user-defined\n functions which are attributes of a class instance are not\n converted to bound methods; this *only* happens when the\n function is an attribute of the class.\n\n Generator functions\n A function or method which uses the ``yield`` statement (see\n section *The yield statement*) is called a *generator function*.\n Such a function, when called, always returns an iterator object\n which can be used to execute the body of the function: calling\n the iterator\'s ``next()`` method will cause the function to\n execute until it provides a value using the ``yield`` statement.\n When the function executes a ``return`` statement or falls off\n the end, a ``StopIteration`` exception is raised and the\n iterator will have reached the end of the set of values to be\n returned.\n\n Built-in functions\n A built-in function object is a wrapper around a C function.\n Examples of built-in functions are ``len()`` and ``math.sin()``\n (``math`` is a standard built-in module). The number and type of\n the arguments are determined by the C function. Special read-\n only attributes: ``__doc__`` is the function\'s documentation\n string, or ``None`` if unavailable; ``__name__`` is the\n function\'s name; ``__self__`` is set to ``None`` (but see the\n next item); ``__module__`` is the name of the module the\n function was defined in or ``None`` if unavailable.\n\n Built-in methods\n This is really a different disguise of a built-in function, this\n time containing an object passed to the C function as an\n implicit extra argument. An example of a built-in method is\n ``alist.append()``, assuming *alist* is a list object. 
In this\n case, the special read-only attribute ``__self__`` is set to the\n object denoted by *list*.\n\n Class Types\n Class types, or "new-style classes," are callable. These\n objects normally act as factories for new instances of\n themselves, but variations are possible for class types that\n override ``__new__()``. The arguments of the call are passed to\n ``__new__()`` and, in the typical case, to ``__init__()`` to\n initialize the new instance.\n\n Classic Classes\n Class objects are described below. When a class object is\n called, a new class instance (also described below) is created\n and returned. This implies a call to the class\'s ``__init__()``\n method if it has one. Any arguments are passed on to the\n ``__init__()`` method. If there is no ``__init__()`` method,\n the class must be called without arguments.\n\n Class instances\n Class instances are described below. Class instances are\n callable only when the class has a ``__call__()`` method;\n ``x(arguments)`` is a shorthand for ``x.__call__(arguments)``.\n\nModules\n Modules are imported by the ``import`` statement (see section *The\n import statement*). A module object has a namespace implemented by\n a dictionary object (this is the dictionary referenced by the\n func_globals attribute of functions defined in the module).\n Attribute references are translated to lookups in this dictionary,\n e.g., ``m.x`` is equivalent to ``m.__dict__["x"]``. 
A module object\n does not contain the code object used to initialize the module\n (since it isn\'t needed once the initialization is done).\n\n Attribute assignment updates the module\'s namespace dictionary,\n e.g., ``m.x = 1`` is equivalent to ``m.__dict__["x"] = 1``.\n\n Special read-only attribute: ``__dict__`` is the module\'s namespace\n as a dictionary object.\n\n Predefined (writable) attributes: ``__name__`` is the module\'s\n name; ``__doc__`` is the module\'s documentation string, or ``None``\n if unavailable; ``__file__`` is the pathname of the file from which\n the module was loaded, if it was loaded from a file. The\n ``__file__`` attribute is not present for C modules that are\n statically linked into the interpreter; for extension modules\n loaded dynamically from a shared library, it is the pathname of the\n shared library file.\n\nClasses\n Both class types (new-style classes) and class objects (old-\n style/classic classes) are typically created by class definitions\n (see section *Class definitions*). A class has a namespace\n implemented by a dictionary object. Class attribute references are\n translated to lookups in this dictionary, e.g., ``C.x`` is\n translated to ``C.__dict__["x"]`` (although for new-style classes\n in particular there are a number of hooks which allow for other\n means of locating attributes). When the attribute name is not found\n there, the attribute search continues in the base classes. For\n old-style classes, the search is depth-first, left-to-right in the\n order of occurrence in the base class list. New-style classes use\n the more complex C3 method resolution order which behaves correctly\n even in the presence of \'diamond\' inheritance structures where\n there are multiple inheritance paths leading back to a common\n ancestor. 
Additional details on the C3 MRO used by new-style\n classes can be found in the documentation accompanying the 2.3\n release at http://www.python.org/download/releases/2.3/mro/.\n\n When a class attribute reference (for class ``C``, say) would yield\n a user-defined function object or an unbound user-defined method\n object whose associated class is either ``C`` or one of its base\n classes, it is transformed into an unbound user-defined method\n object whose ``im_class`` attribute is ``C``. When it would yield a\n class method object, it is transformed into a bound user-defined\n method object whose ``im_class`` and ``im_self`` attributes are\n both ``C``. When it would yield a static method object, it is\n transformed into the object wrapped by the static method object.\n See section *Implementing Descriptors* for another way in which\n attributes retrieved from a class may differ from those actually\n contained in its ``__dict__`` (note that only new-style classes\n support descriptors).\n\n Class attribute assignments update the class\'s dictionary, never\n the dictionary of a base class.\n\n A class object can be called (see above) to yield a class instance\n (see below).\n\n Special attributes: ``__name__`` is the class name; ``__module__``\n is the module name in which the class was defined; ``__dict__`` is\n the dictionary containing the class\'s namespace; ``__bases__`` is a\n tuple (possibly empty or a singleton) containing the base classes,\n in the order of their occurrence in the base class list;\n ``__doc__`` is the class\'s documentation string, or None if\n undefined.\n\nClass instances\n A class instance is created by calling a class object (see above).\n A class instance has a namespace implemented as a dictionary which\n is the first place in which attribute references are searched.\n When an attribute is not found there, and the instance\'s class has\n an attribute by that name, the search continues with the class\n attributes. 
If a class attribute is found that is a user-defined\n function object or an unbound user-defined method object whose\n associated class is the class (call it ``C``) of the instance for\n which the attribute reference was initiated or one of its bases, it\n is transformed into a bound user-defined method object whose\n ``im_class`` attribute is ``C`` and whose ``im_self`` attribute is\n the instance. Static method and class method objects are also\n transformed, as if they had been retrieved from class ``C``; see\n above under "Classes". See section *Implementing Descriptors* for\n another way in which attributes of a class retrieved via its\n instances may differ from the objects actually stored in the\n class\'s ``__dict__``. If no class attribute is found, and the\n object\'s class has a ``__getattr__()`` method, that is called to\n satisfy the lookup.\n\n Attribute assignments and deletions update the instance\'s\n dictionary, never a class\'s dictionary. If the class has a\n ``__setattr__()`` or ``__delattr__()`` method, this is called\n instead of updating the instance dictionary directly.\n\n Class instances can pretend to be numbers, sequences, or mappings\n if they have methods with certain special names. See section\n *Special method names*.\n\n Special attributes: ``__dict__`` is the attribute dictionary;\n ``__class__`` is the instance\'s class.\n\nFiles\n A file object represents an open file. File objects are created by\n the ``open()`` built-in function, and also by ``os.popen()``,\n ``os.fdopen()``, and the ``makefile()`` method of socket objects\n (and perhaps by other functions or methods provided by extension\n modules). The objects ``sys.stdin``, ``sys.stdout`` and\n ``sys.stderr`` are initialized to file objects corresponding to the\n interpreter\'s standard input, output and error streams. 
See *File\n Objects* for complete documentation of file objects.\n\nInternal types\n A few types used internally by the interpreter are exposed to the\n user. Their definitions may change with future versions of the\n interpreter, but they are mentioned here for completeness.\n\n Code objects\n Code objects represent *byte-compiled* executable Python code,\n or *bytecode*. The difference between a code object and a\n function object is that the function object contains an explicit\n reference to the function\'s globals (the module in which it was\n defined), while a code object contains no context; also the\n default argument values are stored in the function object, not\n in the code object (because they represent values calculated at\n run-time). Unlike function objects, code objects are immutable\n and contain no references (directly or indirectly) to mutable\n objects.\n\n Special read-only attributes: ``co_name`` gives the function\n name; ``co_argcount`` is the number of positional arguments\n (including arguments with default values); ``co_nlocals`` is the\n number of local variables used by the function (including\n arguments); ``co_varnames`` is a tuple containing the names of\n the local variables (starting with the argument names);\n ``co_cellvars`` is a tuple containing the names of local\n variables that are referenced by nested functions;\n ``co_freevars`` is a tuple containing the names of free\n variables; ``co_code`` is a string representing the sequence of\n bytecode instructions; ``co_consts`` is a tuple containing the\n literals used by the bytecode; ``co_names`` is a tuple\n containing the names used by the bytecode; ``co_filename`` is\n the filename from which the code was compiled;\n ``co_firstlineno`` is the first line number of the function;\n ``co_lnotab`` is a string encoding the mapping from bytecode\n offsets to line numbers (for details see the source code of the\n interpreter); ``co_stacksize`` is the required stack size\n (including 
local variables); ``co_flags`` is an integer encoding\n a number of flags for the interpreter.\n\n The following flag bits are defined for ``co_flags``: bit\n ``0x04`` is set if the function uses the ``*arguments`` syntax\n to accept an arbitrary number of positional arguments; bit\n ``0x08`` is set if the function uses the ``**keywords`` syntax\n to accept arbitrary keyword arguments; bit ``0x20`` is set if\n the function is a generator.\n\n Future feature declarations (``from __future__ import\n division``) also use bits in ``co_flags`` to indicate whether a\n code object was compiled with a particular feature enabled: bit\n ``0x2000`` is set if the function was compiled with future\n division enabled; bits ``0x10`` and ``0x1000`` were used in\n earlier versions of Python.\n\n Other bits in ``co_flags`` are reserved for internal use.\n\n If a code object represents a function, the first item in\n ``co_consts`` is the documentation string of the function, or\n ``None`` if undefined.\n\n Frame objects\n Frame objects represent execution frames. 
They may occur in\n traceback objects (see below).\n\n Special read-only attributes: ``f_back`` is to the previous\n stack frame (towards the caller), or ``None`` if this is the\n bottom stack frame; ``f_code`` is the code object being executed\n in this frame; ``f_locals`` is the dictionary used to look up\n local variables; ``f_globals`` is used for global variables;\n ``f_builtins`` is used for built-in (intrinsic) names;\n ``f_restricted`` is a flag indicating whether the function is\n executing in restricted execution mode; ``f_lasti`` gives the\n precise instruction (this is an index into the bytecode string\n of the code object).\n\n Special writable attributes: ``f_trace``, if not ``None``, is a\n function called at the start of each source code line (this is\n used by the debugger); ``f_exc_type``, ``f_exc_value``,\n ``f_exc_traceback`` represent the last exception raised in the\n parent frame provided another exception was ever raised in the\n current frame (in all other cases they are None); ``f_lineno``\n is the current line number of the frame --- writing to this from\n within a trace function jumps to the given line (only for the\n bottom-most frame). A debugger can implement a Jump command\n (aka Set Next Statement) by writing to f_lineno.\n\n Traceback objects\n Traceback objects represent a stack trace of an exception. A\n traceback object is created when an exception occurs. When the\n search for an exception handler unwinds the execution stack, at\n each unwound level a traceback object is inserted in front of\n the current traceback. When an exception handler is entered,\n the stack trace is made available to the program. (See section\n *The try statement*.) It is accessible as ``sys.exc_traceback``,\n and also as the third item of the tuple returned by\n ``sys.exc_info()``. The latter is the preferred interface,\n since it works correctly when the program is using multiple\n threads. 
When the program contains no suitable handler, the\n stack trace is written (nicely formatted) to the standard error\n stream; if the interpreter is interactive, it is also made\n available to the user as ``sys.last_traceback``.\n\n Special read-only attributes: ``tb_next`` is the next level in\n the stack trace (towards the frame where the exception\n occurred), or ``None`` if there is no next level; ``tb_frame``\n points to the execution frame of the current level;\n ``tb_lineno`` gives the line number where the exception\n occurred; ``tb_lasti`` indicates the precise instruction. The\n line number and last instruction in the traceback may differ\n from the line number of its frame object if the exception\n occurred in a ``try`` statement with no matching except clause\n or with a finally clause.\n\n Slice objects\n Slice objects are used to represent slices when *extended slice\n syntax* is used. This is a slice using two colons, or multiple\n slices or ellipses separated by commas, e.g., ``a[i:j:step]``,\n ``a[i:j, k:l]``, or ``a[..., i:j]``. They are also created by\n the built-in ``slice()`` function.\n\n Special read-only attributes: ``start`` is the lower bound;\n ``stop`` is the upper bound; ``step`` is the step value; each is\n ``None`` if omitted. These attributes can have any type.\n\n Slice objects support one method:\n\n slice.indices(self, length)\n\n This method takes a single integer argument *length* and\n computes information about the extended slice that the slice\n object would describe if applied to a sequence of *length*\n items. It returns a tuple of three integers; respectively\n these are the *start* and *stop* indices and the *step* or\n stride length of the slice. Missing or out-of-bounds indices\n are handled in a manner consistent with regular slices.\n\n New in version 2.3.\n\n Static method objects\n Static method objects provide a way of defeating the\n transformation of function objects to method objects described\n above. 
A static method object is a wrapper around any other\n object, usually a user-defined method object. When a static\n method object is retrieved from a class or a class instance, the\n object actually returned is the wrapped object, which is not\n subject to any further transformation. Static method objects are\n not themselves callable, although the objects they wrap usually\n are. Static method objects are created by the built-in\n ``staticmethod()`` constructor.\n\n Class method objects\n A class method object, like a static method object, is a wrapper\n around another object that alters the way in which that object\n is retrieved from classes and class instances. The behaviour of\n class method objects upon such retrieval is described above,\n under "User-defined methods". Class method objects are created\n by the built-in ``classmethod()`` constructor.\n', + 'types': u'\nThe standard type hierarchy\n***************************\n\nBelow is a list of the types that are built into Python. Extension\nmodules (written in C, Java, or other languages, depending on the\nimplementation) can define additional types. Future versions of\nPython may add types to the type hierarchy (e.g., rational numbers,\nefficiently stored arrays of integers, etc.).\n\nSome of the type descriptions below contain a paragraph listing\n\'special attributes.\' These are attributes that provide access to the\nimplementation and are not intended for general use. Their definition\nmay change in the future.\n\nNone\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name ``None``.\n It is used to signify the absence of a value in many situations,\n e.g., it is returned from functions that don\'t explicitly return\n anything. Its truth value is false.\n\nNotImplemented\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name\n ``NotImplemented``. 
Numeric methods and rich comparison methods may\n return this value if they do not implement the operation for the\n operands provided. (The interpreter will then try the reflected\n operation, or some other fallback, depending on the operator.) Its\n truth value is true.\n\nEllipsis\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name\n ``Ellipsis``. It is used to indicate the presence of the ``...``\n syntax in a slice. Its truth value is true.\n\n``numbers.Number``\n These are created by numeric literals and returned as results by\n arithmetic operators and arithmetic built-in functions. Numeric\n objects are immutable; once created their value never changes.\n Python numbers are of course strongly related to mathematical\n numbers, but subject to the limitations of numerical representation\n in computers.\n\n Python distinguishes between integers, floating point numbers, and\n complex numbers:\n\n ``numbers.Integral``\n These represent elements from the mathematical set of integers\n (positive and negative).\n\n There are three types of integers:\n\n Plain integers\n These represent numbers in the range -2147483648 through\n 2147483647. (The range may be larger on machines with a\n larger natural word size, but not smaller.) When the result\n of an operation would fall outside this range, the result is\n normally returned as a long integer (in some cases, the\n exception ``OverflowError`` is raised instead). For the\n purpose of shift and mask operations, integers are assumed to\n have a binary, 2\'s complement notation using 32 or more bits,\n and hiding no bits from the user (i.e., all 4294967296\n different bit patterns correspond to different values).\n\n Long integers\n These represent numbers in an unlimited range, subject to\n available (virtual) memory only. 
For the purpose of shift\n and mask operations, a binary representation is assumed, and\n negative numbers are represented in a variant of 2\'s\n complement which gives the illusion of an infinite string of\n sign bits extending to the left.\n\n Booleans\n These represent the truth values False and True. The two\n objects representing the values False and True are the only\n Boolean objects. The Boolean type is a subtype of plain\n integers, and Boolean values behave like the values 0 and 1,\n respectively, in almost all contexts, the exception being\n that when converted to a string, the strings ``"False"`` or\n ``"True"`` are returned, respectively.\n\n The rules for integer representation are intended to give the\n most meaningful interpretation of shift and mask operations\n involving negative integers and the least surprises when\n switching between the plain and long integer domains. Any\n operation, if it yields a result in the plain integer domain,\n will yield the same result in the long integer domain or when\n using mixed operands. The switch between domains is transparent\n to the programmer.\n\n ``numbers.Real`` (``float``)\n These represent machine-level double precision floating point\n numbers. You are at the mercy of the underlying machine\n architecture (and C or Java implementation) for the accepted\n range and handling of overflow. Python does not support single-\n precision floating point numbers; the savings in processor and\n memory usage that are usually the reason for using these is\n dwarfed by the overhead of using objects in Python, so there is\n no reason to complicate the language with two kinds of floating\n point numbers.\n\n ``numbers.Complex``\n These represent complex numbers as a pair of machine-level\n double precision floating point numbers. The same caveats apply\n as for floating point numbers. 
The real and imaginary parts of a\n complex number ``z`` can be retrieved through the read-only\n attributes ``z.real`` and ``z.imag``.\n\nSequences\n These represent finite ordered sets indexed by non-negative\n numbers. The built-in function ``len()`` returns the number of\n items of a sequence. When the length of a sequence is *n*, the\n index set contains the numbers 0, 1, ..., *n*-1. Item *i* of\n sequence *a* is selected by ``a[i]``.\n\n Sequences also support slicing: ``a[i:j]`` selects all items with\n index *k* such that *i* ``<=`` *k* ``<`` *j*. When used as an\n expression, a slice is a sequence of the same type. This implies\n that the index set is renumbered so that it starts at 0.\n\n Some sequences also support "extended slicing" with a third "step"\n parameter: ``a[i:j:k]`` selects all items of *a* with index *x*\n where ``x = i + n*k``, *n* ``>=`` ``0`` and *i* ``<=`` *x* ``<``\n *j*.\n\n Sequences are distinguished according to their mutability:\n\n Immutable sequences\n An object of an immutable sequence type cannot change once it is\n created. (If the object contains references to other objects,\n these other objects may be mutable and may be changed; however,\n the collection of objects directly referenced by an immutable\n object cannot change.)\n\n The following types are immutable sequences:\n\n Strings\n The items of a string are characters. There is no separate\n character type; a character is represented by a string of one\n item. Characters represent (at least) 8-bit bytes. The\n built-in functions ``chr()`` and ``ord()`` convert between\n characters and nonnegative integers representing the byte\n values. Bytes with the values 0-127 usually represent the\n corresponding ASCII values, but the interpretation of values\n is up to the program. 
The string data type is also used to\n represent arrays of bytes, e.g., to hold data read from a\n file.\n\n (On systems whose native character set is not ASCII, strings\n may use EBCDIC in their internal representation, provided the\n functions ``chr()`` and ``ord()`` implement a mapping between\n ASCII and EBCDIC, and string comparison preserves the ASCII\n order. Or perhaps someone can propose a better rule?)\n\n Unicode\n The items of a Unicode object are Unicode code units. A\n Unicode code unit is represented by a Unicode object of one\n item and can hold either a 16-bit or 32-bit value\n representing a Unicode ordinal (the maximum value for the\n ordinal is given in ``sys.maxunicode``, and depends on how\n Python is configured at compile time). Surrogate pairs may\n be present in the Unicode object, and will be reported as two\n separate items. The built-in functions ``unichr()`` and\n ``ord()`` convert between code units and nonnegative integers\n representing the Unicode ordinals as defined in the Unicode\n Standard 3.0. Conversion from and to other encodings are\n possible through the Unicode method ``encode()`` and the\n built-in function ``unicode()``.\n\n Tuples\n The items of a tuple are arbitrary Python objects. Tuples of\n two or more items are formed by comma-separated lists of\n expressions. A tuple of one item (a \'singleton\') can be\n formed by affixing a comma to an expression (an expression by\n itself does not create a tuple, since parentheses must be\n usable for grouping of expressions). An empty tuple can be\n formed by an empty pair of parentheses.\n\n Mutable sequences\n Mutable sequences can be changed after they are created. The\n subscription and slicing notations can be used as the target of\n assignment and ``del`` (delete) statements.\n\n There are currently two intrinsic mutable sequence types:\n\n Lists\n The items of a list are arbitrary Python objects. 
Lists are\n formed by placing a comma-separated list of expressions in\n square brackets. (Note that there are no special cases needed\n to form lists of length 0 or 1.)\n\n Byte Arrays\n A bytearray object is a mutable array. They are created by\n the built-in ``bytearray()`` constructor. Aside from being\n mutable (and hence unhashable), byte arrays otherwise provide\n the same interface and functionality as immutable bytes\n objects.\n\n The extension module ``array`` provides an additional example of\n a mutable sequence type.\n\nSet types\n These represent unordered, finite sets of unique, immutable\n objects. As such, they cannot be indexed by any subscript. However,\n they can be iterated over, and the built-in function ``len()``\n returns the number of items in a set. Common uses for sets are fast\n membership testing, removing duplicates from a sequence, and\n computing mathematical operations such as intersection, union,\n difference, and symmetric difference.\n\n For set elements, the same immutability rules apply as for\n dictionary keys. Note that numeric types obey the normal rules for\n numeric comparison: if two numbers compare equal (e.g., ``1`` and\n ``1.0``), only one of them can be contained in a set.\n\n There are currently two intrinsic set types:\n\n Sets\n These represent a mutable set. They are created by the built-in\n ``set()`` constructor and can be modified afterwards by several\n methods, such as ``add()``.\n\n Frozen sets\n These represent an immutable set. They are created by the\n built-in ``frozenset()`` constructor. As a frozenset is\n immutable and *hashable*, it can be used again as an element of\n another set, or as a dictionary key.\n\nMappings\n These represent finite sets of objects indexed by arbitrary index\n sets. The subscript notation ``a[k]`` selects the item indexed by\n ``k`` from the mapping ``a``; this can be used in expressions and\n as the target of assignments or ``del`` statements. 
The built-in\n function ``len()`` returns the number of items in a mapping.\n\n There is currently a single intrinsic mapping type:\n\n Dictionaries\n These represent finite sets of objects indexed by nearly\n arbitrary values. The only types of values not acceptable as\n keys are values containing lists or dictionaries or other\n mutable types that are compared by value rather than by object\n identity, the reason being that the efficient implementation of\n dictionaries requires a key\'s hash value to remain constant.\n Numeric types used for keys obey the normal rules for numeric\n comparison: if two numbers compare equal (e.g., ``1`` and\n ``1.0``) then they can be used interchangeably to index the same\n dictionary entry.\n\n Dictionaries are mutable; they can be created by the ``{...}``\n notation (see section *Dictionary displays*).\n\n The extension modules ``dbm``, ``gdbm``, and ``bsddb`` provide\n additional examples of mapping types.\n\nCallable types\n These are the types to which the function call operation (see\n section *Calls*) can be applied:\n\n User-defined functions\n A user-defined function object is created by a function\n definition (see section *Function definitions*). 
It should be\n called with an argument list containing the same number of items\n as the function\'s formal parameter list.\n\n Special attributes:\n\n +-------------------------+---------------------------------+-------------+\n | Attribute | Meaning | |\n +=========================+=================================+=============+\n | ``func_doc`` | The function\'s documentation | Writable |\n | | string, or ``None`` if | |\n | | unavailable | |\n +-------------------------+---------------------------------+-------------+\n | ``__doc__`` | Another way of spelling | Writable |\n | | ``func_doc`` | |\n +-------------------------+---------------------------------+-------------+\n | ``func_name`` | The function\'s name | Writable |\n +-------------------------+---------------------------------+-------------+\n | ``__name__`` | Another way of spelling | Writable |\n | | ``func_name`` | |\n +-------------------------+---------------------------------+-------------+\n | ``__module__`` | The name of the module the | Writable |\n | | function was defined in, or | |\n | | ``None`` if unavailable. | |\n +-------------------------+---------------------------------+-------------+\n | ``func_defaults`` | A tuple containing default | Writable |\n | | argument values for those | |\n | | arguments that have defaults, | |\n | | or ``None`` if no arguments | |\n | | have a default value | |\n +-------------------------+---------------------------------+-------------+\n | ``func_code`` | The code object representing | Writable |\n | | the compiled function body. | |\n +-------------------------+---------------------------------+-------------+\n | ``func_globals`` | A reference to the dictionary | Read-only |\n | | that holds the function\'s | |\n | | global variables --- the global | |\n | | namespace of the module in | |\n | | which the function was defined. 
| |\n +-------------------------+---------------------------------+-------------+\n | ``func_dict`` | The namespace supporting | Writable |\n | | arbitrary function attributes. | |\n +-------------------------+---------------------------------+-------------+\n | ``func_closure`` | ``None`` or a tuple of cells | Read-only |\n | | that contain bindings for the | |\n | | function\'s free variables. | |\n +-------------------------+---------------------------------+-------------+\n\n Most of the attributes labelled "Writable" check the type of the\n assigned value.\n\n Changed in version 2.4: ``func_name`` is now writable.\n\n Function objects also support getting and setting arbitrary\n attributes, which can be used, for example, to attach metadata\n to functions. Regular attribute dot-notation is used to get and\n set such attributes. *Note that the current implementation only\n supports function attributes on user-defined functions. Function\n attributes on built-in functions may be supported in the\n future.*\n\n Additional information about a function\'s definition can be\n retrieved from its code object; see the description of internal\n types below.\n\n User-defined methods\n A user-defined method object combines a class, a class instance\n (or ``None``) and any callable object (normally a user-defined\n function).\n\n Special read-only attributes: ``im_self`` is the class instance\n object, ``im_func`` is the function object; ``im_class`` is the\n class of ``im_self`` for bound methods or the class that asked\n for the method for unbound methods; ``__doc__`` is the method\'s\n documentation (same as ``im_func.__doc__``); ``__name__`` is the\n method name (same as ``im_func.__name__``); ``__module__`` is\n the name of the module the method was defined in, or ``None`` if\n unavailable.\n\n Changed in version 2.2: ``im_self`` used to refer to the class\n that defined the method.\n\n Changed in version 2.6: For 3.0 forward-compatibility,\n ``im_func`` is also 
available as ``__func__``, and ``im_self``\n as ``__self__``.\n\n Methods also support accessing (but not setting) the arbitrary\n function attributes on the underlying function object.\n\n User-defined method objects may be created when getting an\n attribute of a class (perhaps via an instance of that class), if\n that attribute is a user-defined function object, an unbound\n user-defined method object, or a class method object. When the\n attribute is a user-defined method object, a new method object\n is only created if the class from which it is being retrieved is\n the same as, or a derived class of, the class stored in the\n original method object; otherwise, the original method object is\n used as it is.\n\n When a user-defined method object is created by retrieving a\n user-defined function object from a class, its ``im_self``\n attribute is ``None`` and the method object is said to be\n unbound. When one is created by retrieving a user-defined\n function object from a class via one of its instances, its\n ``im_self`` attribute is the instance, and the method object is\n said to be bound. 
In either case, the new method\'s ``im_class``\n attribute is the class from which the retrieval takes place, and\n its ``im_func`` attribute is the original function object.\n\n When a user-defined method object is created by retrieving\n another method object from a class or instance, the behaviour is\n the same as for a function object, except that the ``im_func``\n attribute of the new instance is not the original method object\n but its ``im_func`` attribute.\n\n When a user-defined method object is created by retrieving a\n class method object from a class or instance, its ``im_self``\n attribute is the class itself (the same as the ``im_class``\n attribute), and its ``im_func`` attribute is the function object\n underlying the class method.\n\n When an unbound user-defined method object is called, the\n underlying function (``im_func``) is called, with the\n restriction that the first argument must be an instance of the\n proper class (``im_class``) or of a derived class thereof.\n\n When a bound user-defined method object is called, the\n underlying function (``im_func``) is called, inserting the class\n instance (``im_self``) in front of the argument list. For\n instance, when ``C`` is a class which contains a definition for\n a function ``f()``, and ``x`` is an instance of ``C``, calling\n ``x.f(1)`` is equivalent to calling ``C.f(x, 1)``.\n\n When a user-defined method object is derived from a class method\n object, the "class instance" stored in ``im_self`` will actually\n be the class itself, so that calling either ``x.f(1)`` or\n ``C.f(1)`` is equivalent to calling ``f(C,1)`` where ``f`` is\n the underlying function.\n\n Note that the transformation from function object to (unbound or\n bound) method object happens each time the attribute is\n retrieved from the class or instance. In some cases, a fruitful\n optimization is to assign the attribute to a local variable and\n call that local variable. 
Also notice that this transformation\n only happens for user-defined functions; other callable objects\n (and all non-callable objects) are retrieved without\n transformation. It is also important to note that user-defined\n functions which are attributes of a class instance are not\n converted to bound methods; this *only* happens when the\n function is an attribute of the class.\n\n Generator functions\n A function or method which uses the ``yield`` statement (see\n section *The yield statement*) is called a *generator function*.\n Such a function, when called, always returns an iterator object\n which can be used to execute the body of the function: calling\n the iterator\'s ``next()`` method will cause the function to\n execute until it provides a value using the ``yield`` statement.\n When the function executes a ``return`` statement or falls off\n the end, a ``StopIteration`` exception is raised and the\n iterator will have reached the end of the set of values to be\n returned.\n\n Built-in functions\n A built-in function object is a wrapper around a C function.\n Examples of built-in functions are ``len()`` and ``math.sin()``\n (``math`` is a standard built-in module). The number and type of\n the arguments are determined by the C function. Special read-\n only attributes: ``__doc__`` is the function\'s documentation\n string, or ``None`` if unavailable; ``__name__`` is the\n function\'s name; ``__self__`` is set to ``None`` (but see the\n next item); ``__module__`` is the name of the module the\n function was defined in or ``None`` if unavailable.\n\n Built-in methods\n This is really a different disguise of a built-in function, this\n time containing an object passed to the C function as an\n implicit extra argument. An example of a built-in method is\n ``alist.append()``, assuming *alist* is a list object. 
In this\n case, the special read-only attribute ``__self__`` is set to the\n object denoted by *alist*.\n\n Class Types\n Class types, or "new-style classes," are callable. These\n objects normally act as factories for new instances of\n themselves, but variations are possible for class types that\n override ``__new__()``. The arguments of the call are passed to\n ``__new__()`` and, in the typical case, to ``__init__()`` to\n initialize the new instance.\n\n Classic Classes\n Class objects are described below. When a class object is\n called, a new class instance (also described below) is created\n and returned. This implies a call to the class\'s ``__init__()``\n method if it has one. Any arguments are passed on to the\n ``__init__()`` method. If there is no ``__init__()`` method,\n the class must be called without arguments.\n\n Class instances\n Class instances are described below. Class instances are\n callable only when the class has a ``__call__()`` method;\n ``x(arguments)`` is a shorthand for ``x.__call__(arguments)``.\n\nModules\n Modules are imported by the ``import`` statement (see section *The\n import statement*). A module object has a namespace implemented by\n a dictionary object (this is the dictionary referenced by the\n func_globals attribute of functions defined in the module).\n Attribute references are translated to lookups in this dictionary,\n e.g., ``m.x`` is equivalent to ``m.__dict__["x"]``. 
A module object\n does not contain the code object used to initialize the module\n (since it isn\'t needed once the initialization is done).\n\n Attribute assignment updates the module\'s namespace dictionary,\n e.g., ``m.x = 1`` is equivalent to ``m.__dict__["x"] = 1``.\n\n Special read-only attribute: ``__dict__`` is the module\'s namespace\n as a dictionary object.\n\n **CPython implementation detail:** Because of the way CPython\n clears module dictionaries, the module dictionary will be cleared\n when the module falls out of scope even if the dictionary still has\n live references. To avoid this, copy the dictionary or keep the\n module around while using its dictionary directly.\n\n Predefined (writable) attributes: ``__name__`` is the module\'s\n name; ``__doc__`` is the module\'s documentation string, or ``None``\n if unavailable; ``__file__`` is the pathname of the file from which\n the module was loaded, if it was loaded from a file. The\n ``__file__`` attribute is not present for C modules that are\n statically linked into the interpreter; for extension modules\n loaded dynamically from a shared library, it is the pathname of the\n shared library file.\n\nClasses\n Both class types (new-style classes) and class objects (old-\n style/classic classes) are typically created by class definitions\n (see section *Class definitions*). A class has a namespace\n implemented by a dictionary object. Class attribute references are\n translated to lookups in this dictionary, e.g., ``C.x`` is\n translated to ``C.__dict__["x"]`` (although for new-style classes\n in particular there are a number of hooks which allow for other\n means of locating attributes). When the attribute name is not found\n there, the attribute search continues in the base classes. For\n old-style classes, the search is depth-first, left-to-right in the\n order of occurrence in the base class list. 
New-style classes use\n the more complex C3 method resolution order which behaves correctly\n even in the presence of \'diamond\' inheritance structures where\n there are multiple inheritance paths leading back to a common\n ancestor. Additional details on the C3 MRO used by new-style\n classes can be found in the documentation accompanying the 2.3\n release at http://www.python.org/download/releases/2.3/mro/.\n\n When a class attribute reference (for class ``C``, say) would yield\n a user-defined function object or an unbound user-defined method\n object whose associated class is either ``C`` or one of its base\n classes, it is transformed into an unbound user-defined method\n object whose ``im_class`` attribute is ``C``. When it would yield a\n class method object, it is transformed into a bound user-defined\n method object whose ``im_class`` and ``im_self`` attributes are\n both ``C``. When it would yield a static method object, it is\n transformed into the object wrapped by the static method object.\n See section *Implementing Descriptors* for another way in which\n attributes retrieved from a class may differ from those actually\n contained in its ``__dict__`` (note that only new-style classes\n support descriptors).\n\n Class attribute assignments update the class\'s dictionary, never\n the dictionary of a base class.\n\n A class object can be called (see above) to yield a class instance\n (see below).\n\n Special attributes: ``__name__`` is the class name; ``__module__``\n is the module name in which the class was defined; ``__dict__`` is\n the dictionary containing the class\'s namespace; ``__bases__`` is a\n tuple (possibly empty or a singleton) containing the base classes,\n in the order of their occurrence in the base class list;\n ``__doc__`` is the class\'s documentation string, or None if\n undefined.\n\nClass instances\n A class instance is created by calling a class object (see above).\n A class instance has a namespace implemented as a dictionary 
which\n is the first place in which attribute references are searched.\n When an attribute is not found there, and the instance\'s class has\n an attribute by that name, the search continues with the class\n attributes. If a class attribute is found that is a user-defined\n function object or an unbound user-defined method object whose\n associated class is the class (call it ``C``) of the instance for\n which the attribute reference was initiated or one of its bases, it\n is transformed into a bound user-defined method object whose\n ``im_class`` attribute is ``C`` and whose ``im_self`` attribute is\n the instance. Static method and class method objects are also\n transformed, as if they had been retrieved from class ``C``; see\n above under "Classes". See section *Implementing Descriptors* for\n another way in which attributes of a class retrieved via its\n instances may differ from the objects actually stored in the\n class\'s ``__dict__``. If no class attribute is found, and the\n object\'s class has a ``__getattr__()`` method, that is called to\n satisfy the lookup.\n\n Attribute assignments and deletions update the instance\'s\n dictionary, never a class\'s dictionary. If the class has a\n ``__setattr__()`` or ``__delattr__()`` method, this is called\n instead of updating the instance dictionary directly.\n\n Class instances can pretend to be numbers, sequences, or mappings\n if they have methods with certain special names. See section\n *Special method names*.\n\n Special attributes: ``__dict__`` is the attribute dictionary;\n ``__class__`` is the instance\'s class.\n\nFiles\n A file object represents an open file. File objects are created by\n the ``open()`` built-in function, and also by ``os.popen()``,\n ``os.fdopen()``, and the ``makefile()`` method of socket objects\n (and perhaps by other functions or methods provided by extension\n modules). 
The objects ``sys.stdin``, ``sys.stdout`` and\n ``sys.stderr`` are initialized to file objects corresponding to the\n interpreter\'s standard input, output and error streams. See *File\n Objects* for complete documentation of file objects.\n\nInternal types\n A few types used internally by the interpreter are exposed to the\n user. Their definitions may change with future versions of the\n interpreter, but they are mentioned here for completeness.\n\n Code objects\n Code objects represent *byte-compiled* executable Python code,\n or *bytecode*. The difference between a code object and a\n function object is that the function object contains an explicit\n reference to the function\'s globals (the module in which it was\n defined), while a code object contains no context; also the\n default argument values are stored in the function object, not\n in the code object (because they represent values calculated at\n run-time). Unlike function objects, code objects are immutable\n and contain no references (directly or indirectly) to mutable\n objects.\n\n Special read-only attributes: ``co_name`` gives the function\n name; ``co_argcount`` is the number of positional arguments\n (including arguments with default values); ``co_nlocals`` is the\n number of local variables used by the function (including\n arguments); ``co_varnames`` is a tuple containing the names of\n the local variables (starting with the argument names);\n ``co_cellvars`` is a tuple containing the names of local\n variables that are referenced by nested functions;\n ``co_freevars`` is a tuple containing the names of free\n variables; ``co_code`` is a string representing the sequence of\n bytecode instructions; ``co_consts`` is a tuple containing the\n literals used by the bytecode; ``co_names`` is a tuple\n containing the names used by the bytecode; ``co_filename`` is\n the filename from which the code was compiled;\n ``co_firstlineno`` is the first line number of the function;\n ``co_lnotab`` is a string 
encoding the mapping from bytecode\n offsets to line numbers (for details see the source code of the\n interpreter); ``co_stacksize`` is the required stack size\n (including local variables); ``co_flags`` is an integer encoding\n a number of flags for the interpreter.\n\n The following flag bits are defined for ``co_flags``: bit\n ``0x04`` is set if the function uses the ``*arguments`` syntax\n to accept an arbitrary number of positional arguments; bit\n ``0x08`` is set if the function uses the ``**keywords`` syntax\n to accept arbitrary keyword arguments; bit ``0x20`` is set if\n the function is a generator.\n\n Future feature declarations (``from __future__ import\n division``) also use bits in ``co_flags`` to indicate whether a\n code object was compiled with a particular feature enabled: bit\n ``0x2000`` is set if the function was compiled with future\n division enabled; bits ``0x10`` and ``0x1000`` were used in\n earlier versions of Python.\n\n Other bits in ``co_flags`` are reserved for internal use.\n\n If a code object represents a function, the first item in\n ``co_consts`` is the documentation string of the function, or\n ``None`` if undefined.\n\n Frame objects\n Frame objects represent execution frames. 
They may occur in\n traceback objects (see below).\n\n Special read-only attributes: ``f_back`` is to the previous\n stack frame (towards the caller), or ``None`` if this is the\n bottom stack frame; ``f_code`` is the code object being executed\n in this frame; ``f_locals`` is the dictionary used to look up\n local variables; ``f_globals`` is used for global variables;\n ``f_builtins`` is used for built-in (intrinsic) names;\n ``f_restricted`` is a flag indicating whether the function is\n executing in restricted execution mode; ``f_lasti`` gives the\n precise instruction (this is an index into the bytecode string\n of the code object).\n\n Special writable attributes: ``f_trace``, if not ``None``, is a\n function called at the start of each source code line (this is\n used by the debugger); ``f_exc_type``, ``f_exc_value``,\n ``f_exc_traceback`` represent the last exception raised in the\n parent frame provided another exception was ever raised in the\n current frame (in all other cases they are None); ``f_lineno``\n is the current line number of the frame --- writing to this from\n within a trace function jumps to the given line (only for the\n bottom-most frame). A debugger can implement a Jump command\n (aka Set Next Statement) by writing to f_lineno.\n\n Traceback objects\n Traceback objects represent a stack trace of an exception. A\n traceback object is created when an exception occurs. When the\n search for an exception handler unwinds the execution stack, at\n each unwound level a traceback object is inserted in front of\n the current traceback. When an exception handler is entered,\n the stack trace is made available to the program. (See section\n *The try statement*.) It is accessible as ``sys.exc_traceback``,\n and also as the third item of the tuple returned by\n ``sys.exc_info()``. The latter is the preferred interface,\n since it works correctly when the program is using multiple\n threads. 
When the program contains no suitable handler, the\n stack trace is written (nicely formatted) to the standard error\n stream; if the interpreter is interactive, it is also made\n available to the user as ``sys.last_traceback``.\n\n Special read-only attributes: ``tb_next`` is the next level in\n the stack trace (towards the frame where the exception\n occurred), or ``None`` if there is no next level; ``tb_frame``\n points to the execution frame of the current level;\n ``tb_lineno`` gives the line number where the exception\n occurred; ``tb_lasti`` indicates the precise instruction. The\n line number and last instruction in the traceback may differ\n from the line number of its frame object if the exception\n occurred in a ``try`` statement with no matching except clause\n or with a finally clause.\n\n Slice objects\n Slice objects are used to represent slices when *extended slice\n syntax* is used. This is a slice using two colons, or multiple\n slices or ellipses separated by commas, e.g., ``a[i:j:step]``,\n ``a[i:j, k:l]``, or ``a[..., i:j]``. They are also created by\n the built-in ``slice()`` function.\n\n Special read-only attributes: ``start`` is the lower bound;\n ``stop`` is the upper bound; ``step`` is the step value; each is\n ``None`` if omitted. These attributes can have any type.\n\n Slice objects support one method:\n\n slice.indices(self, length)\n\n This method takes a single integer argument *length* and\n computes information about the extended slice that the slice\n object would describe if applied to a sequence of *length*\n items. It returns a tuple of three integers; respectively\n these are the *start* and *stop* indices and the *step* or\n stride length of the slice. Missing or out-of-bounds indices\n are handled in a manner consistent with regular slices.\n\n New in version 2.3.\n\n Static method objects\n Static method objects provide a way of defeating the\n transformation of function objects to method objects described\n above. 
A static method object is a wrapper around any other\n object, usually a user-defined method object. When a static\n method object is retrieved from a class or a class instance, the\n object actually returned is the wrapped object, which is not\n subject to any further transformation. Static method objects are\n not themselves callable, although the objects they wrap usually\n are. Static method objects are created by the built-in\n ``staticmethod()`` constructor.\n\n Class method objects\n A class method object, like a static method object, is a wrapper\n around another object that alters the way in which that object\n is retrieved from classes and class instances. The behaviour of\n class method objects upon such retrieval is described above,\n under "User-defined methods". Class method objects are created\n by the built-in ``classmethod()`` constructor.\n', 'typesfunctions': u'\nFunctions\n*********\n\nFunction objects are created by function definitions. The only\noperation on a function object is to call it: ``func(argument-list)``.\n\nThere are really two flavors of function objects: built-in functions\nand user-defined functions. Both support the same operation (to call\nthe function), but the implementation is different, hence the\ndifferent object types.\n\nSee *Function definitions* for more information.\n', - 'typesmapping': u'\nMapping Types --- ``dict``\n**************************\n\nA *mapping* object maps *hashable* values to arbitrary objects.\nMappings are mutable objects. There is currently only one standard\nmapping type, the *dictionary*. (For other containers see the built\nin ``list``, ``set``, and ``tuple`` classes, and the ``collections``\nmodule.)\n\nA dictionary\'s keys are *almost* arbitrary values. Values that are\nnot *hashable*, that is, values containing lists, dictionaries or\nother mutable types (that are compared by value rather than by object\nidentity) may not be used as keys. 
Numeric types used for keys obey\nthe normal rules for numeric comparison: if two numbers compare equal\n(such as ``1`` and ``1.0``) then they can be used interchangeably to\nindex the same dictionary entry. (Note however, that since computers\nstore floating-point numbers as approximations it is usually unwise to\nuse them as dictionary keys.)\n\nDictionaries can be created by placing a comma-separated list of\n``key: value`` pairs within braces, for example: ``{\'jack\': 4098,\n\'sjoerd\': 4127}`` or ``{4098: \'jack\', 4127: \'sjoerd\'}``, or by the\n``dict`` constructor.\n\nclass class dict([arg])\n\n Return a new dictionary initialized from an optional positional\n argument or from a set of keyword arguments. If no arguments are\n given, return a new empty dictionary. If the positional argument\n *arg* is a mapping object, return a dictionary mapping the same\n keys to the same values as does the mapping object. Otherwise the\n positional argument must be a sequence, a container that supports\n iteration, or an iterator object. The elements of the argument\n must each also be of one of those kinds, and each must in turn\n contain exactly two objects. The first is used as a key in the new\n dictionary, and the second as the key\'s value. If a given key is\n seen more than once, the last value associated with it is retained\n in the new dictionary.\n\n If keyword arguments are given, the keywords themselves with their\n associated values are added as items to the dictionary. If a key is\n specified both in the positional argument and as a keyword\n argument, the value associated with the keyword is retained in the\n dictionary. 
For example, these all return a dictionary equal to\n ``{"one": 2, "two": 3}``:\n\n * ``dict(one=2, two=3)``\n\n * ``dict({\'one\': 2, \'two\': 3})``\n\n * ``dict(zip((\'one\', \'two\'), (2, 3)))``\n\n * ``dict([[\'two\', 3], [\'one\', 2]])``\n\n The first example only works for keys that are valid Python\n identifiers; the others work with any valid keys.\n\n New in version 2.2.\n\n Changed in version 2.3: Support for building a dictionary from\n keyword arguments added.\n\n These are the operations that dictionaries support (and therefore,\n custom mapping types should support too):\n\n len(d)\n\n Return the number of items in the dictionary *d*.\n\n d[key]\n\n Return the item of *d* with key *key*. Raises a ``KeyError`` if\n *key* is not in the map.\n\n New in version 2.5: If a subclass of dict defines a method\n ``__missing__()``, if the key *key* is not present, the\n ``d[key]`` operation calls that method with the key *key* as\n argument. The ``d[key]`` operation then returns or raises\n whatever is returned or raised by the ``__missing__(key)`` call\n if the key is not present. No other operations or methods invoke\n ``__missing__()``. If ``__missing__()`` is not defined,\n ``KeyError`` is raised. ``__missing__()`` must be a method; it\n cannot be an instance variable. For an example, see\n ``collections.defaultdict``.\n\n d[key] = value\n\n Set ``d[key]`` to *value*.\n\n del d[key]\n\n Remove ``d[key]`` from *d*. Raises a ``KeyError`` if *key* is\n not in the map.\n\n key in d\n\n Return ``True`` if *d* has a key *key*, else ``False``.\n\n New in version 2.2.\n\n key not in d\n\n Equivalent to ``not key in d``.\n\n New in version 2.2.\n\n iter(d)\n\n Return an iterator over the keys of the dictionary. 
This is a\n shortcut for ``iterkeys()``.\n\n clear()\n\n Remove all items from the dictionary.\n\n copy()\n\n Return a shallow copy of the dictionary.\n\n fromkeys(seq[, value])\n\n Create a new dictionary with keys from *seq* and values set to\n *value*.\n\n ``fromkeys()`` is a class method that returns a new dictionary.\n *value* defaults to ``None``.\n\n New in version 2.3.\n\n get(key[, default])\n\n Return the value for *key* if *key* is in the dictionary, else\n *default*. If *default* is not given, it defaults to ``None``,\n so that this method never raises a ``KeyError``.\n\n has_key(key)\n\n Test for the presence of *key* in the dictionary. ``has_key()``\n is deprecated in favor of ``key in d``.\n\n items()\n\n Return a copy of the dictionary\'s list of ``(key, value)``\n pairs.\n\n **CPython implementation detail:** Keys and values are listed in\n an arbitrary order which is non-random, varies across Python\n implementations, and depends on the dictionary\'s history of\n insertions and deletions.\n\n If ``items()``, ``keys()``, ``values()``, ``iteritems()``,\n ``iterkeys()``, and ``itervalues()`` are called with no\n intervening modifications to the dictionary, the lists will\n directly correspond. This allows the creation of ``(value,\n key)`` pairs using ``zip()``: ``pairs = zip(d.values(),\n d.keys())``. The same relationship holds for the ``iterkeys()``\n and ``itervalues()`` methods: ``pairs = zip(d.itervalues(),\n d.iterkeys())`` provides the same value for ``pairs``. Another\n way to create the same list is ``pairs = [(v, k) for (k, v) in\n d.iteritems()]``.\n\n iteritems()\n\n Return an iterator over the dictionary\'s ``(key, value)`` pairs.\n See the note for ``dict.items()``.\n\n Using ``iteritems()`` while adding or deleting entries in the\n dictionary may raise a ``RuntimeError`` or fail to iterate over\n all entries.\n\n New in version 2.2.\n\n iterkeys()\n\n Return an iterator over the dictionary\'s keys. 
See the note for\n ``dict.items()``.\n\n Using ``iterkeys()`` while adding or deleting entries in the\n dictionary may raise a ``RuntimeError`` or fail to iterate over\n all entries.\n\n New in version 2.2.\n\n itervalues()\n\n Return an iterator over the dictionary\'s values. See the note\n for ``dict.items()``.\n\n Using ``itervalues()`` while adding or deleting entries in the\n dictionary may raise a ``RuntimeError`` or fail to iterate over\n all entries.\n\n New in version 2.2.\n\n keys()\n\n Return a copy of the dictionary\'s list of keys. See the note\n for ``dict.items()``.\n\n pop(key[, default])\n\n If *key* is in the dictionary, remove it and return its value,\n else return *default*. If *default* is not given and *key* is\n not in the dictionary, a ``KeyError`` is raised.\n\n New in version 2.3.\n\n popitem()\n\n Remove and return an arbitrary ``(key, value)`` pair from the\n dictionary.\n\n ``popitem()`` is useful to destructively iterate over a\n dictionary, as often used in set algorithms. If the dictionary\n is empty, calling ``popitem()`` raises a ``KeyError``.\n\n setdefault(key[, default])\n\n If *key* is in the dictionary, return its value. If not, insert\n *key* with a value of *default* and return *default*. *default*\n defaults to ``None``.\n\n update([other])\n\n Update the dictionary with the key/value pairs from *other*,\n overwriting existing keys. Return ``None``.\n\n ``update()`` accepts either another dictionary object or an\n iterable of key/value pairs (as a tuple or other iterable of\n length two). If keyword arguments are specified, the dictionary\n is then updated with those key/value pairs: ``d.update(red=1,\n blue=2)``.\n\n Changed in version 2.4: Allowed the argument to be an iterable\n of key/value pairs and allowed keyword arguments.\n\n values()\n\n Return a copy of the dictionary\'s list of values. 
See the note\n for ``dict.items()``.\n\n viewitems()\n\n Return a new view of the dictionary\'s items (``(key, value)``\n pairs). See below for documentation of view objects.\n\n New in version 2.7.\n\n viewkeys()\n\n Return a new view of the dictionary\'s keys. See below for\n documentation of view objects.\n\n New in version 2.7.\n\n viewvalues()\n\n Return a new view of the dictionary\'s values. See below for\n documentation of view objects.\n\n New in version 2.7.\n\n\nDictionary view objects\n=======================\n\nThe objects returned by ``dict.viewkeys()``, ``dict.viewvalues()`` and\n``dict.viewitems()`` are *view objects*. They provide a dynamic view\non the dictionary\'s entries, which means that when the dictionary\nchanges, the view reflects these changes.\n\nDictionary views can be iterated over to yield their respective data,\nand support membership tests:\n\nlen(dictview)\n\n Return the number of entries in the dictionary.\n\niter(dictview)\n\n Return an iterator over the keys, values or items (represented as\n tuples of ``(key, value)``) in the dictionary.\n\n Keys and values are iterated over in an arbitrary order which is\n non-random, varies across Python implementations, and depends on\n the dictionary\'s history of insertions and deletions. If keys,\n values and items views are iterated over with no intervening\n modifications to the dictionary, the order of items will directly\n correspond. This allows the creation of ``(value, key)`` pairs\n using ``zip()``: ``pairs = zip(d.values(), d.keys())``. 
Another\n way to create the same list is ``pairs = [(v, k) for (k, v) in\n d.items()]``.\n\n Iterating views while adding or deleting entries in the dictionary\n may raise a ``RuntimeError`` or fail to iterate over all entries.\n\nx in dictview\n\n Return ``True`` if *x* is in the underlying dictionary\'s keys,\n values or items (in the latter case, *x* should be a ``(key,\n value)`` tuple).\n\nKeys views are set-like since their entries are unique and hashable.\nIf all values are hashable, so that (key, value) pairs are unique and\nhashable, then the items view is also set-like. (Values views are not\ntreated as set-like since the entries are generally not unique.) Then\nthese set operations are available ("other" refers either to another\nview or a set):\n\ndictview & other\n\n Return the intersection of the dictview and the other object as a\n new set.\n\ndictview | other\n\n Return the union of the dictview and the other object as a new set.\n\ndictview - other\n\n Return the difference between the dictview and the other object\n (all elements in *dictview* that aren\'t in *other*) as a new set.\n\ndictview ^ other\n\n Return the symmetric difference (all elements either in *dictview*\n or *other*, but not in both) of the dictview and the other object\n as a new set.\n\nAn example of dictionary view usage:\n\n >>> dishes = {\'eggs\': 2, \'sausage\': 1, \'bacon\': 1, \'spam\': 500}\n >>> keys = dishes.viewkeys()\n >>> values = dishes.viewvalues()\n\n >>> # iteration\n >>> n = 0\n >>> for val in values:\n ... 
n += val\n >>> print(n)\n 504\n\n >>> # keys and values are iterated over in the same order\n >>> list(keys)\n [\'eggs\', \'bacon\', \'sausage\', \'spam\']\n >>> list(values)\n [2, 1, 1, 500]\n\n >>> # view objects are dynamic and reflect dict changes\n >>> del dishes[\'eggs\']\n >>> del dishes[\'sausage\']\n >>> list(keys)\n [\'spam\', \'bacon\']\n\n >>> # set operations\n >>> keys & {\'eggs\', \'bacon\', \'salad\'}\n {\'bacon\'}\n', + 'typesmapping': u'\nMapping Types --- ``dict``\n**************************\n\nA *mapping* object maps *hashable* values to arbitrary objects.\nMappings are mutable objects. There is currently only one standard\nmapping type, the *dictionary*. (For other containers see the built\nin ``list``, ``set``, and ``tuple`` classes, and the ``collections``\nmodule.)\n\nA dictionary\'s keys are *almost* arbitrary values. Values that are\nnot *hashable*, that is, values containing lists, dictionaries or\nother mutable types (that are compared by value rather than by object\nidentity) may not be used as keys. Numeric types used for keys obey\nthe normal rules for numeric comparison: if two numbers compare equal\n(such as ``1`` and ``1.0``) then they can be used interchangeably to\nindex the same dictionary entry. (Note however, that since computers\nstore floating-point numbers as approximations it is usually unwise to\nuse them as dictionary keys.)\n\nDictionaries can be created by placing a comma-separated list of\n``key: value`` pairs within braces, for example: ``{\'jack\': 4098,\n\'sjoerd\': 4127}`` or ``{4098: \'jack\', 4127: \'sjoerd\'}``, or by the\n``dict`` constructor.\n\nclass class dict([arg])\n\n Return a new dictionary initialized from an optional positional\n argument or from a set of keyword arguments. If no arguments are\n given, return a new empty dictionary. If the positional argument\n *arg* is a mapping object, return a dictionary mapping the same\n keys to the same values as does the mapping object. 
Otherwise the\n positional argument must be a sequence, a container that supports\n iteration, or an iterator object. The elements of the argument\n must each also be of one of those kinds, and each must in turn\n contain exactly two objects. The first is used as a key in the new\n dictionary, and the second as the key\'s value. If a given key is\n seen more than once, the last value associated with it is retained\n in the new dictionary.\n\n If keyword arguments are given, the keywords themselves with their\n associated values are added as items to the dictionary. If a key is\n specified both in the positional argument and as a keyword\n argument, the value associated with the keyword is retained in the\n dictionary. For example, these all return a dictionary equal to\n ``{"one": 1, "two": 2}``:\n\n * ``dict(one=1, two=2)``\n\n * ``dict({\'one\': 1, \'two\': 2})``\n\n * ``dict(zip((\'one\', \'two\'), (1, 2)))``\n\n * ``dict([[\'two\', 2], [\'one\', 1]])``\n\n The first example only works for keys that are valid Python\n identifiers; the others work with any valid keys.\n\n New in version 2.2.\n\n Changed in version 2.3: Support for building a dictionary from\n keyword arguments added.\n\n These are the operations that dictionaries support (and therefore,\n custom mapping types should support too):\n\n len(d)\n\n Return the number of items in the dictionary *d*.\n\n d[key]\n\n Return the item of *d* with key *key*. Raises a ``KeyError`` if\n *key* is not in the map.\n\n New in version 2.5: If a subclass of dict defines a method\n ``__missing__()``, if the key *key* is not present, the\n ``d[key]`` operation calls that method with the key *key* as\n argument. The ``d[key]`` operation then returns or raises\n whatever is returned or raised by the ``__missing__(key)`` call\n if the key is not present. No other operations or methods invoke\n ``__missing__()``. If ``__missing__()`` is not defined,\n ``KeyError`` is raised. 
``__missing__()`` must be a method; it\n cannot be an instance variable. For an example, see\n ``collections.defaultdict``.\n\n d[key] = value\n\n Set ``d[key]`` to *value*.\n\n del d[key]\n\n Remove ``d[key]`` from *d*. Raises a ``KeyError`` if *key* is\n not in the map.\n\n key in d\n\n Return ``True`` if *d* has a key *key*, else ``False``.\n\n New in version 2.2.\n\n key not in d\n\n Equivalent to ``not key in d``.\n\n New in version 2.2.\n\n iter(d)\n\n Return an iterator over the keys of the dictionary. This is a\n shortcut for ``iterkeys()``.\n\n clear()\n\n Remove all items from the dictionary.\n\n copy()\n\n Return a shallow copy of the dictionary.\n\n fromkeys(seq[, value])\n\n Create a new dictionary with keys from *seq* and values set to\n *value*.\n\n ``fromkeys()`` is a class method that returns a new dictionary.\n *value* defaults to ``None``.\n\n New in version 2.3.\n\n get(key[, default])\n\n Return the value for *key* if *key* is in the dictionary, else\n *default*. If *default* is not given, it defaults to ``None``,\n so that this method never raises a ``KeyError``.\n\n has_key(key)\n\n Test for the presence of *key* in the dictionary. ``has_key()``\n is deprecated in favor of ``key in d``.\n\n items()\n\n Return a copy of the dictionary\'s list of ``(key, value)``\n pairs.\n\n **CPython implementation detail:** Keys and values are listed in\n an arbitrary order which is non-random, varies across Python\n implementations, and depends on the dictionary\'s history of\n insertions and deletions.\n\n If ``items()``, ``keys()``, ``values()``, ``iteritems()``,\n ``iterkeys()``, and ``itervalues()`` are called with no\n intervening modifications to the dictionary, the lists will\n directly correspond. This allows the creation of ``(value,\n key)`` pairs using ``zip()``: ``pairs = zip(d.values(),\n d.keys())``. 
The same relationship holds for the ``iterkeys()``\n and ``itervalues()`` methods: ``pairs = zip(d.itervalues(),\n d.iterkeys())`` provides the same value for ``pairs``. Another\n way to create the same list is ``pairs = [(v, k) for (k, v) in\n d.iteritems()]``.\n\n iteritems()\n\n Return an iterator over the dictionary\'s ``(key, value)`` pairs.\n See the note for ``dict.items()``.\n\n Using ``iteritems()`` while adding or deleting entries in the\n dictionary may raise a ``RuntimeError`` or fail to iterate over\n all entries.\n\n New in version 2.2.\n\n iterkeys()\n\n Return an iterator over the dictionary\'s keys. See the note for\n ``dict.items()``.\n\n Using ``iterkeys()`` while adding or deleting entries in the\n dictionary may raise a ``RuntimeError`` or fail to iterate over\n all entries.\n\n New in version 2.2.\n\n itervalues()\n\n Return an iterator over the dictionary\'s values. See the note\n for ``dict.items()``.\n\n Using ``itervalues()`` while adding or deleting entries in the\n dictionary may raise a ``RuntimeError`` or fail to iterate over\n all entries.\n\n New in version 2.2.\n\n keys()\n\n Return a copy of the dictionary\'s list of keys. See the note\n for ``dict.items()``.\n\n pop(key[, default])\n\n If *key* is in the dictionary, remove it and return its value,\n else return *default*. If *default* is not given and *key* is\n not in the dictionary, a ``KeyError`` is raised.\n\n New in version 2.3.\n\n popitem()\n\n Remove and return an arbitrary ``(key, value)`` pair from the\n dictionary.\n\n ``popitem()`` is useful to destructively iterate over a\n dictionary, as often used in set algorithms. If the dictionary\n is empty, calling ``popitem()`` raises a ``KeyError``.\n\n setdefault(key[, default])\n\n If *key* is in the dictionary, return its value. If not, insert\n *key* with a value of *default* and return *default*. 
*default*\n defaults to ``None``.\n\n update([other])\n\n Update the dictionary with the key/value pairs from *other*,\n overwriting existing keys. Return ``None``.\n\n ``update()`` accepts either another dictionary object or an\n iterable of key/value pairs (as tuples or other iterables of\n length two). If keyword arguments are specified, the dictionary\n is then updated with those key/value pairs: ``d.update(red=1,\n blue=2)``.\n\n Changed in version 2.4: Allowed the argument to be an iterable\n of key/value pairs and allowed keyword arguments.\n\n values()\n\n Return a copy of the dictionary\'s list of values. See the note\n for ``dict.items()``.\n\n viewitems()\n\n Return a new view of the dictionary\'s items (``(key, value)``\n pairs). See below for documentation of view objects.\n\n New in version 2.7.\n\n viewkeys()\n\n Return a new view of the dictionary\'s keys. See below for\n documentation of view objects.\n\n New in version 2.7.\n\n viewvalues()\n\n Return a new view of the dictionary\'s values. See below for\n documentation of view objects.\n\n New in version 2.7.\n\n\nDictionary view objects\n=======================\n\nThe objects returned by ``dict.viewkeys()``, ``dict.viewvalues()`` and\n``dict.viewitems()`` are *view objects*. They provide a dynamic view\non the dictionary\'s entries, which means that when the dictionary\nchanges, the view reflects these changes.\n\nDictionary views can be iterated over to yield their respective data,\nand support membership tests:\n\nlen(dictview)\n\n Return the number of entries in the dictionary.\n\niter(dictview)\n\n Return an iterator over the keys, values or items (represented as\n tuples of ``(key, value)``) in the dictionary.\n\n Keys and values are iterated over in an arbitrary order which is\n non-random, varies across Python implementations, and depends on\n the dictionary\'s history of insertions and deletions. 
If keys,\n values and items views are iterated over with no intervening\n modifications to the dictionary, the order of items will directly\n correspond. This allows the creation of ``(value, key)`` pairs\n using ``zip()``: ``pairs = zip(d.values(), d.keys())``. Another\n way to create the same list is ``pairs = [(v, k) for (k, v) in\n d.items()]``.\n\n Iterating views while adding or deleting entries in the dictionary\n may raise a ``RuntimeError`` or fail to iterate over all entries.\n\nx in dictview\n\n Return ``True`` if *x* is in the underlying dictionary\'s keys,\n values or items (in the latter case, *x* should be a ``(key,\n value)`` tuple).\n\nKeys views are set-like since their entries are unique and hashable.\nIf all values are hashable, so that (key, value) pairs are unique and\nhashable, then the items view is also set-like. (Values views are not\ntreated as set-like since the entries are generally not unique.) Then\nthese set operations are available ("other" refers either to another\nview or a set):\n\ndictview & other\n\n Return the intersection of the dictview and the other object as a\n new set.\n\ndictview | other\n\n Return the union of the dictview and the other object as a new set.\n\ndictview - other\n\n Return the difference between the dictview and the other object\n (all elements in *dictview* that aren\'t in *other*) as a new set.\n\ndictview ^ other\n\n Return the symmetric difference (all elements either in *dictview*\n or *other*, but not in both) of the dictview and the other object\n as a new set.\n\nAn example of dictionary view usage:\n\n >>> dishes = {\'eggs\': 2, \'sausage\': 1, \'bacon\': 1, \'spam\': 500}\n >>> keys = dishes.viewkeys()\n >>> values = dishes.viewvalues()\n\n >>> # iteration\n >>> n = 0\n >>> for val in values:\n ... 
n += val\n >>> print(n)\n 504\n\n >>> # keys and values are iterated over in the same order\n >>> list(keys)\n [\'eggs\', \'bacon\', \'sausage\', \'spam\']\n >>> list(values)\n [2, 1, 1, 500]\n\n >>> # view objects are dynamic and reflect dict changes\n >>> del dishes[\'eggs\']\n >>> del dishes[\'sausage\']\n >>> list(keys)\n [\'spam\', \'bacon\']\n\n >>> # set operations\n >>> keys & {\'eggs\', \'bacon\', \'salad\'}\n {\'bacon\'}\n', 'typesmethods': u"\nMethods\n*******\n\nMethods are functions that are called using the attribute notation.\nThere are two flavors: built-in methods (such as ``append()`` on\nlists) and class instance methods. Built-in methods are described\nwith the types that support them.\n\nThe implementation adds two special read-only attributes to class\ninstance methods: ``m.im_self`` is the object on which the method\noperates, and ``m.im_func`` is the function implementing the method.\nCalling ``m(arg-1, arg-2, ..., arg-n)`` is completely equivalent to\ncalling ``m.im_func(m.im_self, arg-1, arg-2, ..., arg-n)``.\n\nClass instance methods are either *bound* or *unbound*, referring to\nwhether the method was accessed through an instance or a class,\nrespectively. When a method is unbound, its ``im_self`` attribute\nwill be ``None`` and if called, an explicit ``self`` object must be\npassed as the first argument. In this case, ``self`` must be an\ninstance of the unbound method's class (or a subclass of that class),\notherwise a ``TypeError`` is raised.\n\nLike function objects, methods objects support getting arbitrary\nattributes. However, since method attributes are actually stored on\nthe underlying function object (``meth.im_func``), setting method\nattributes on either bound or unbound methods is disallowed.\nAttempting to set a method attribute results in a ``TypeError`` being\nraised. 
In order to set a method attribute, you need to explicitly\nset it on the underlying function object:\n\n class C:\n def method(self):\n pass\n\n c = C()\n c.method.im_func.whoami = 'my name is c'\n\nSee *The standard type hierarchy* for more information.\n", 'typesmodules': u"\nModules\n*******\n\nThe only special operation on a module is attribute access:\n``m.name``, where *m* is a module and *name* accesses a name defined\nin *m*'s symbol table. Module attributes can be assigned to. (Note\nthat the ``import`` statement is not, strictly speaking, an operation\non a module object; ``import foo`` does not require a module object\nnamed *foo* to exist, rather it requires an (external) *definition*\nfor a module named *foo* somewhere.)\n\nA special member of every module is ``__dict__``. This is the\ndictionary containing the module's symbol table. Modifying this\ndictionary will actually change the module's symbol table, but direct\nassignment to the ``__dict__`` attribute is not possible (you can\nwrite ``m.__dict__['a'] = 1``, which defines ``m.a`` to be ``1``, but\nyou can't write ``m.__dict__ = {}``). Modifying ``__dict__`` directly\nis not recommended.\n\nModules built into the interpreter are written like this: ````. If loaded from a file, they are written as\n````.\n", - 'typesseq': u'\nSequence Types --- ``str``, ``unicode``, ``list``, ``tuple``, ``buffer``, ``xrange``\n************************************************************************************\n\nThere are six sequence types: strings, Unicode strings, lists, tuples,\nbuffers, and xrange objects.\n\nFor other containers see the built in ``dict`` and ``set`` classes,\nand the ``collections`` module.\n\nString literals are written in single or double quotes: ``\'xyzzy\'``,\n``"frobozz"``. See *String literals* for more about string literals.\nUnicode strings are much like strings, but are specified in the syntax\nusing a preceding ``\'u\'`` character: ``u\'abc\'``, ``u"def"``. 
In\naddition to the functionality described here, there are also string-\nspecific methods described in the *String Methods* section. Lists are\nconstructed with square brackets, separating items with commas: ``[a,\nb, c]``. Tuples are constructed by the comma operator (not within\nsquare brackets), with or without enclosing parentheses, but an empty\ntuple must have the enclosing parentheses, such as ``a, b, c`` or\n``()``. A single item tuple must have a trailing comma, such as\n``(d,)``.\n\nBuffer objects are not directly supported by Python syntax, but can be\ncreated by calling the built-in function ``buffer()``. They don\'t\nsupport concatenation or repetition.\n\nObjects of type xrange are similar to buffers in that there is no\nspecific syntax to create them, but they are created using the\n``xrange()`` function. They don\'t support slicing, concatenation or\nrepetition, and using ``in``, ``not in``, ``min()`` or ``max()`` on\nthem is inefficient.\n\nMost sequence types support the following operations. The ``in`` and\n``not in`` operations have the same priorities as the comparison\noperations. The ``+`` and ``*`` operations have the same priority as\nthe corresponding numeric operations. [3] Additional methods are\nprovided for *Mutable Sequence Types*.\n\nThis table lists the sequence operations sorted in ascending priority\n(operations in the same box have the same priority). 
In the table,\n*s* and *t* are sequences of the same type; *n*, *i* and *j* are\nintegers:\n\n+--------------------+----------------------------------+------------+\n| Operation | Result | Notes |\n+====================+==================================+============+\n| ``x in s`` | ``True`` if an item of *s* is | (1) |\n| | equal to *x*, else ``False`` | |\n+--------------------+----------------------------------+------------+\n| ``x not in s`` | ``False`` if an item of *s* is | (1) |\n| | equal to *x*, else ``True`` | |\n+--------------------+----------------------------------+------------+\n| ``s + t`` | the concatenation of *s* and *t* | (6) |\n+--------------------+----------------------------------+------------+\n| ``s * n, n * s`` | *n* shallow copies of *s* | (2) |\n| | concatenated | |\n+--------------------+----------------------------------+------------+\n| ``s[i]`` | *i*\'th item of *s*, origin 0 | (3) |\n+--------------------+----------------------------------+------------+\n| ``s[i:j]`` | slice of *s* from *i* to *j* | (3)(4) |\n+--------------------+----------------------------------+------------+\n| ``s[i:j:k]`` | slice of *s* from *i* to *j* | (3)(5) |\n| | with step *k* | |\n+--------------------+----------------------------------+------------+\n| ``len(s)`` | length of *s* | |\n+--------------------+----------------------------------+------------+\n| ``min(s)`` | smallest item of *s* | |\n+--------------------+----------------------------------+------------+\n| ``max(s)`` | largest item of *s* | |\n+--------------------+----------------------------------+------------+\n\nSequence types also support comparisons. In particular, tuples and\nlists are compared lexicographically by comparing corresponding\nelements. This means that to compare equal, every element must compare\nequal and the two sequences must be of the same type and have the same\nlength. (For full details see *Comparisons* in the language\nreference.)\n\nNotes:\n\n1. 
When *s* is a string or Unicode string object the ``in`` and ``not\n in`` operations act like a substring test. In Python versions\n before 2.3, *x* had to be a string of length 1. In Python 2.3 and\n beyond, *x* may be a string of any length.\n\n2. Values of *n* less than ``0`` are treated as ``0`` (which yields an\n empty sequence of the same type as *s*). Note also that the copies\n are shallow; nested structures are not copied. This often haunts\n new Python programmers; consider:\n\n >>> lists = [[]] * 3\n >>> lists\n [[], [], []]\n >>> lists[0].append(3)\n >>> lists\n [[3], [3], [3]]\n\n What has happened is that ``[[]]`` is a one-element list containing\n an empty list, so all three elements of ``[[]] * 3`` are (pointers\n to) this single empty list. Modifying any of the elements of\n ``lists`` modifies this single list. You can create a list of\n different lists this way:\n\n >>> lists = [[] for i in range(3)]\n >>> lists[0].append(3)\n >>> lists[1].append(5)\n >>> lists[2].append(7)\n >>> lists\n [[3], [5], [7]]\n\n3. If *i* or *j* is negative, the index is relative to the end of the\n string: ``len(s) + i`` or ``len(s) + j`` is substituted. But note\n that ``-0`` is still ``0``.\n\n4. The slice of *s* from *i* to *j* is defined as the sequence of\n items with index *k* such that ``i <= k < j``. If *i* or *j* is\n greater than ``len(s)``, use ``len(s)``. If *i* is omitted or\n ``None``, use ``0``. If *j* is omitted or ``None``, use\n ``len(s)``. If *i* is greater than or equal to *j*, the slice is\n empty.\n\n5. The slice of *s* from *i* to *j* with step *k* is defined as the\n sequence of items with index ``x = i + n*k`` such that ``0 <= n <\n (j-i)/k``. In other words, the indices are ``i``, ``i+k``,\n ``i+2*k``, ``i+3*k`` and so on, stopping when *j* is reached (but\n never including *j*). If *i* or *j* is greater than ``len(s)``,\n use ``len(s)``. If *i* or *j* are omitted or ``None``, they become\n "end" values (which end depends on the sign of *k*). 
Note, *k*\n cannot be zero. If *k* is ``None``, it is treated like ``1``.\n\n6. **CPython implementation detail:** If *s* and *t* are both strings,\n some Python implementations such as CPython can usually perform an\n in-place optimization for assignments of the form ``s = s + t`` or\n ``s += t``. When applicable, this optimization makes quadratic\n run-time much less likely. This optimization is both version and\n implementation dependent. For performance sensitive code, it is\n preferable to use the ``str.join()`` method which assures\n consistent linear concatenation performance across versions and\n implementations.\n\n Changed in version 2.4: Formerly, string concatenation never\n occurred in-place.\n\n\nString Methods\n==============\n\nBelow are listed the string methods which both 8-bit strings and\nUnicode objects support.\n\nIn addition, Python\'s strings support the sequence type methods\ndescribed in the *Sequence Types --- str, unicode, list, tuple,\nbuffer, xrange* section. To output formatted strings use template\nstrings or the ``%`` operator described in the *String Formatting\nOperations* section. Also, see the ``re`` module for string functions\nbased on regular expressions.\n\nstr.capitalize()\n\n Return a copy of the string with only its first character\n capitalized.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.center(width[, fillchar])\n\n Return centered in a string of length *width*. Padding is done\n using the specified *fillchar* (default is a space).\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.count(sub[, start[, end]])\n\n Return the number of non-overlapping occurrences of substring *sub*\n in the range [*start*, *end*]. Optional arguments *start* and\n *end* are interpreted as in slice notation.\n\nstr.decode([encoding[, errors]])\n\n Decodes the string using the codec registered for *encoding*.\n *encoding* defaults to the default string encoding. 
*errors* may\n be given to set a different error handling scheme. The default is\n ``\'strict\'``, meaning that encoding errors raise ``UnicodeError``.\n Other possible values are ``\'ignore\'``, ``\'replace\'`` and any other\n name registered via ``codecs.register_error()``, see section *Codec\n Base Classes*.\n\n New in version 2.2.\n\n Changed in version 2.3: Support for other error handling schemes\n added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.encode([encoding[, errors]])\n\n Return an encoded version of the string. Default encoding is the\n current default string encoding. *errors* may be given to set a\n different error handling scheme. The default for *errors* is\n ``\'strict\'``, meaning that encoding errors raise a\n ``UnicodeError``. Other possible values are ``\'ignore\'``,\n ``\'replace\'``, ``\'xmlcharrefreplace\'``, ``\'backslashreplace\'`` and\n any other name registered via ``codecs.register_error()``, see\n section *Codec Base Classes*. For a list of possible encodings, see\n section *Standard Encodings*.\n\n New in version 2.0.\n\n Changed in version 2.3: Support for ``\'xmlcharrefreplace\'`` and\n ``\'backslashreplace\'`` and other error handling schemes added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n Return ``True`` if the string ends with the specified *suffix*,\n otherwise return ``False``. *suffix* can also be a tuple of\n suffixes to look for. With optional *start*, test beginning at\n that position. With optional *end*, stop comparing at that\n position.\n\n Changed in version 2.5: Accept tuples as *suffix*.\n\nstr.expandtabs([tabsize])\n\n Return a copy of the string where all tab characters are replaced\n by one or more spaces, depending on the current column and the\n given tab size. The column number is reset to zero after each\n newline occurring in the string. If *tabsize* is not given, a tab\n size of ``8`` characters is assumed. 
This doesn\'t understand other\n non-printing characters or escape sequences.\n\nstr.find(sub[, start[, end]])\n\n Return the lowest index in the string where substring *sub* is\n found, such that *sub* is contained in the slice ``s[start:end]``.\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` if *sub* is not found.\n\nstr.format(*args, **kwargs)\n\n Perform a string formatting operation. The string on which this\n method is called can contain literal text or replacement fields\n delimited by braces ``{}``. Each replacement field contains either\n the numeric index of a positional argument, or the name of a\n keyword argument. Returns a copy of the string where each\n replacement field is replaced with the string value of the\n corresponding argument.\n\n >>> "The sum of 1 + 2 is {0}".format(1+2)\n \'The sum of 1 + 2 is 3\'\n\n See *Format String Syntax* for a description of the various\n formatting options that can be specified in format strings.\n\n This method of string formatting is the new standard in Python 3.0,\n and should be preferred to the ``%`` formatting described in\n *String Formatting Operations* in new code.\n\n New in version 2.6.\n\nstr.index(sub[, start[, end]])\n\n Like ``find()``, but raise ``ValueError`` when the substring is not\n found.\n\nstr.isalnum()\n\n Return true if all characters in the string are alphanumeric and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isalpha()\n\n Return true if all characters in the string are alphabetic and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isdigit()\n\n Return true if all characters in the string are digits and there is\n at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.islower()\n\n Return true if all cased characters in the string are lowercase and\n there 
is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isspace()\n\n Return true if there are only whitespace characters in the string\n and there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.istitle()\n\n Return true if the string is a titlecased string and there is at\n least one character, for example uppercase characters may only\n follow uncased characters and lowercase characters only cased ones.\n Return false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isupper()\n\n Return true if all cased characters in the string are uppercase and\n there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.join(iterable)\n\n Return a string which is the concatenation of the strings in the\n *iterable* *iterable*. The separator between elements is the\n string providing this method.\n\nstr.ljust(width[, fillchar])\n\n Return the string left justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than\n ``len(s)``.\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.lower()\n\n Return a copy of the string converted to lowercase.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.lstrip([chars])\n\n Return a copy of the string with leading characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. 
The *chars* argument is not a prefix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.lstrip()\n \'spacious \'\n >>> \'www.example.com\'.lstrip(\'cmowz.\')\n \'example.com\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.partition(sep)\n\n Split the string at the first occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing the string itself, followed by\n two empty strings.\n\n New in version 2.5.\n\nstr.replace(old, new[, count])\n\n Return a copy of the string with all occurrences of substring *old*\n replaced by *new*. If the optional argument *count* is given, only\n the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n Return the highest index in the string where substring *sub* is\n found, such that *sub* is contained within ``s[start:end]``.\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` on failure.\n\nstr.rindex(sub[, start[, end]])\n\n Like ``rfind()`` but raises ``ValueError`` when the substring *sub*\n is not found.\n\nstr.rjust(width[, fillchar])\n\n Return the string right justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than\n ``len(s)``.\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.rpartition(sep)\n\n Split the string at the last occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing two empty strings, followed by\n the string itself.\n\n New in version 2.5.\n\nstr.rsplit([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. 
If *maxsplit* is given, at most *maxsplit* splits\n are done, the *rightmost* ones. If *sep* is not specified or\n ``None``, any whitespace string is a separator. Except for\n splitting from the right, ``rsplit()`` behaves like ``split()``\n which is described in detail below.\n\n New in version 2.4.\n\nstr.rstrip([chars])\n\n Return a copy of the string with trailing characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a suffix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.rstrip()\n \' spacious\'\n >>> \'mississippi\'.rstrip(\'ipz\')\n \'mississ\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.split([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit*\n splits are done (thus, the list will have at most ``maxsplit+1``\n elements). If *maxsplit* is not specified, then there is no limit\n on the number of splits (all possible splits are made).\n\n If *sep* is given, consecutive delimiters are not grouped together\n and are deemed to delimit empty strings (for example,\n ``\'1,,2\'.split(\',\')`` returns ``[\'1\', \'\', \'2\']``). The *sep*\n argument may consist of multiple characters (for example,\n ``\'1<>2<>3\'.split(\'<>\')`` returns ``[\'1\', \'2\', \'3\']``). Splitting\n an empty string with a specified separator returns ``[\'\']``.\n\n If *sep* is not specified or is ``None``, a different splitting\n algorithm is applied: runs of consecutive whitespace are regarded\n as a single separator, and the result will contain no empty strings\n at the start or end if the string has leading or trailing\n whitespace. 
Consequently, splitting an empty string or a string\n consisting of just whitespace with a ``None`` separator returns\n ``[]``.\n\n For example, ``\' 1 2 3 \'.split()`` returns ``[\'1\', \'2\', \'3\']``,\n and ``\' 1 2 3 \'.split(None, 1)`` returns ``[\'1\', \'2 3 \']``.\n\nstr.splitlines([keepends])\n\n Return a list of the lines in the string, breaking at line\n boundaries. Line breaks are not included in the resulting list\n unless *keepends* is given and true.\n\nstr.startswith(prefix[, start[, end]])\n\n Return ``True`` if string starts with the *prefix*, otherwise\n return ``False``. *prefix* can also be a tuple of prefixes to look\n for. With optional *start*, test string beginning at that\n position. With optional *end*, stop comparing string at that\n position.\n\n Changed in version 2.5: Accept tuples as *prefix*.\n\nstr.strip([chars])\n\n Return a copy of the string with the leading and trailing\n characters removed. The *chars* argument is a string specifying the\n set of characters to be removed. If omitted or ``None``, the\n *chars* argument defaults to removing whitespace. The *chars*\n argument is not a prefix or suffix; rather, all combinations of its\n values are stripped:\n\n >>> \' spacious \'.strip()\n \'spacious\'\n >>> \'www.example.com\'.strip(\'cmowz.\')\n \'example\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.swapcase()\n\n Return a copy of the string with uppercase characters converted to\n lowercase and vice versa.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.title()\n\n Return a titlecased version of the string where words start with an\n uppercase character and the remaining characters are lowercase.\n\n The algorithm uses a simple language-independent definition of a\n word as groups of consecutive letters. 
The definition works in\n many contexts but it means that apostrophes in contractions and\n possessives form word boundaries, which may not be the desired\n result:\n\n >>> "they\'re bill\'s friends from the UK".title()\n "They\'Re Bill\'S Friends From The Uk"\n\n A workaround for apostrophes can be constructed using regular\n expressions:\n\n >>> import re\n >>> def titlecase(s):\n return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n lambda mo: mo.group(0)[0].upper() +\n mo.group(0)[1:].lower(),\n s)\n\n >>> titlecase("they\'re bill\'s friends.")\n "They\'re Bill\'s Friends."\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.translate(table[, deletechars])\n\n Return a copy of the string where all characters occurring in the\n optional argument *deletechars* are removed, and the remaining\n characters have been mapped through the given translation table,\n which must be a string of length 256.\n\n You can use the ``maketrans()`` helper function in the ``string``\n module to create a translation table. For string objects, set the\n *table* argument to ``None`` for translations that only delete\n characters:\n\n >>> \'read this short text\'.translate(None, \'aeiou\')\n \'rd ths shrt txt\'\n\n New in version 2.6: Support for a ``None`` *table* argument.\n\n For Unicode objects, the ``translate()`` method does not accept the\n optional *deletechars* argument. Instead, it returns a copy of the\n *s* where all characters have been mapped through the given\n translation table which must be a mapping of Unicode ordinals to\n Unicode ordinals, Unicode strings or ``None``. Unmapped characters\n are left untouched. 
Characters mapped to ``None`` are deleted.\n Note, a more flexible approach is to create a custom character\n mapping codec using the ``codecs`` module (see ``encodings.cp1251``\n for an example).\n\nstr.upper()\n\n Return a copy of the string converted to uppercase.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.zfill(width)\n\n Return the numeric string left filled with zeros in a string of\n length *width*. A sign prefix is handled correctly. The original\n string is returned if *width* is less than ``len(s)``.\n\n New in version 2.2.2.\n\nThe following methods are present only on unicode objects:\n\nunicode.isnumeric()\n\n Return ``True`` if there are only numeric characters in S,\n ``False`` otherwise. Numeric characters include digit characters,\n and all characters that have the Unicode numeric value property,\n e.g. U+2155, VULGAR FRACTION ONE FIFTH.\n\nunicode.isdecimal()\n\n Return ``True`` if there are only decimal characters in S,\n ``False`` otherwise. Decimal characters include digit characters,\n and all characters that that can be used to form decimal-radix\n numbers, e.g. U+0660, ARABIC-INDIC DIGIT ZERO.\n\n\nString Formatting Operations\n============================\n\nString and Unicode objects have one unique built-in operation: the\n``%`` operator (modulo). This is also known as the string\n*formatting* or *interpolation* operator. Given ``format % values``\n(where *format* is a string or Unicode object), ``%`` conversion\nspecifications in *format* are replaced with zero or more elements of\n*values*. The effect is similar to the using ``sprintf()`` in the C\nlanguage. If *format* is a Unicode object, or if any of the objects\nbeing converted using the ``%s`` conversion are Unicode objects, the\nresult will also be a Unicode object.\n\nIf *format* requires a single argument, *values* may be a single non-\ntuple object. 
[4] Otherwise, *values* must be a tuple with exactly\nthe number of items specified by the format string, or a single\nmapping object (for example, a dictionary).\n\nA conversion specifier contains two or more characters and has the\nfollowing components, which must occur in this order:\n\n1. The ``\'%\'`` character, which marks the start of the specifier.\n\n2. Mapping key (optional), consisting of a parenthesised sequence of\n characters (for example, ``(somename)``).\n\n3. Conversion flags (optional), which affect the result of some\n conversion types.\n\n4. Minimum field width (optional). If specified as an ``\'*\'``\n (asterisk), the actual width is read from the next element of the\n tuple in *values*, and the object to convert comes after the\n minimum field width and optional precision.\n\n5. Precision (optional), given as a ``\'.\'`` (dot) followed by the\n precision. If specified as ``\'*\'`` (an asterisk), the actual width\n is read from the next element of the tuple in *values*, and the\n value to convert comes after the precision.\n\n6. Length modifier (optional).\n\n7. Conversion type.\n\nWhen the right argument is a dictionary (or other mapping type), then\nthe formats in the string *must* include a parenthesised mapping key\ninto that dictionary inserted immediately after the ``\'%\'`` character.\nThe mapping key selects the value to be formatted from the mapping.\nFor example:\n\n>>> print \'%(language)s has %(#)03d quote types.\' % \\\n... {\'language\': "Python", "#": 2}\nPython has 002 quote types.\n\nIn this case no ``*`` specifiers may occur in a format (since they\nrequire a sequential parameter list).\n\nThe conversion flag characters are:\n\n+-----------+-----------------------------------------------------------------------+\n| Flag | Meaning |\n+===========+=======================================================================+\n| ``\'#\'`` | The value conversion will use the "alternate form" (where defined |\n| | below). 
|\n+-----------+-----------------------------------------------------------------------+\n| ``\'0\'`` | The conversion will be zero padded for numeric values. |\n+-----------+-----------------------------------------------------------------------+\n| ``\'-\'`` | The converted value is left adjusted (overrides the ``\'0\'`` |\n| | conversion if both are given). |\n+-----------+-----------------------------------------------------------------------+\n| ``\' \'`` | (a space) A blank should be left before a positive number (or empty |\n| | string) produced by a signed conversion. |\n+-----------+-----------------------------------------------------------------------+\n| ``\'+\'`` | A sign character (``\'+\'`` or ``\'-\'``) will precede the conversion |\n| | (overrides a "space" flag). |\n+-----------+-----------------------------------------------------------------------+\n\nA length modifier (``h``, ``l``, or ``L``) may be present, but is\nignored as it is not necessary for Python -- so e.g. ``%ld`` is\nidentical to ``%d``.\n\nThe conversion types are:\n\n+--------------+-------------------------------------------------------+---------+\n| Conversion | Meaning | Notes |\n+==============+=======================================================+=========+\n| ``\'d\'`` | Signed integer decimal. | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'i\'`` | Signed integer decimal. | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'o\'`` | Signed octal value. | (1) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'u\'`` | Obsolete type -- it is identical to ``\'d\'``. | (7) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'x\'`` | Signed hexadecimal (lowercase). | (2) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'X\'`` | Signed hexadecimal (uppercase). 
| (2) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'e\'`` | Floating point exponential format (lowercase). | (3) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'E\'`` | Floating point exponential format (uppercase). | (3) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'f\'`` | Floating point decimal format. | (3) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'F\'`` | Floating point decimal format. | (3) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'g\'`` | Floating point format. Uses lowercase exponential | (4) |\n| | format if exponent is less than -4 or not less than | |\n| | precision, decimal format otherwise. | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'G\'`` | Floating point format. Uses uppercase exponential | (4) |\n| | format if exponent is less than -4 or not less than | |\n| | precision, decimal format otherwise. | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'c\'`` | Single character (accepts integer or single character | |\n| | string). | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'r\'`` | String (converts any Python object using ``repr()``). | (5) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'s\'`` | String (converts any Python object using ``str()``). | (6) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'%\'`` | No argument is converted, results in a ``\'%\'`` | |\n| | character in the result. | |\n+--------------+-------------------------------------------------------+---------+\n\nNotes:\n\n1. 
The alternate form causes a leading zero (``\'0\'``) to be inserted\n between left-hand padding and the formatting of the number if the\n leading character of the result is not already a zero.\n\n2. The alternate form causes a leading ``\'0x\'`` or ``\'0X\'`` (depending\n on whether the ``\'x\'`` or ``\'X\'`` format was used) to be inserted\n between left-hand padding and the formatting of the number if the\n leading character of the result is not already a zero.\n\n3. The alternate form causes the result to always contain a decimal\n point, even if no digits follow it.\n\n The precision determines the number of digits after the decimal\n point and defaults to 6.\n\n4. The alternate form causes the result to always contain a decimal\n point, and trailing zeroes are not removed as they would otherwise\n be.\n\n The precision determines the number of significant digits before\n and after the decimal point and defaults to 6.\n\n5. The ``%r`` conversion was added in Python 2.0.\n\n The precision determines the maximal number of characters used.\n\n6. If the object or format provided is a ``unicode`` string, the\n resulting string will also be ``unicode``.\n\n The precision determines the maximal number of characters used.\n\n7. See **PEP 237**.\n\nSince Python strings have an explicit length, ``%s`` conversions do\nnot assume that ``\'\\0\'`` is the end of the string.\n\nChanged in version 2.7: ``%f`` conversions for numbers whose absolute\nvalue is over 1e50 are no longer replaced by ``%g`` conversions.\n\nAdditional string operations are defined in standard modules\n``string`` and ``re``.\n\n\nXRange Type\n===========\n\nThe ``xrange`` type is an immutable sequence which is commonly used\nfor looping. The advantage of the ``xrange`` type is that an\n``xrange`` object will always take the same amount of memory, no\nmatter the size of the range it represents. 
There are no consistent\nperformance advantages.\n\nXRange objects have very little behavior: they only support indexing,\niteration, and the ``len()`` function.\n\n\nMutable Sequence Types\n======================\n\nList objects support additional operations that allow in-place\nmodification of the object. Other mutable sequence types (when added\nto the language) should also support these operations. Strings and\ntuples are immutable sequence types: such objects cannot be modified\nonce created. The following operations are defined on mutable sequence\ntypes (where *x* is an arbitrary object):\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| ``s[i] = x`` | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j] = t`` | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j]`` | same as ``s[i:j] = []`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j:k] = t`` | the elements of ``s[i:j:k]`` are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j:k]`` | removes the elements of | |\n| | ``s[i:j:k]`` from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.append(x)`` | same as ``s[len(s):len(s)] = | (2) |\n| | [x]`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.extend(x)`` | same as ``s[len(s):len(s)] = x`` | (3) 
|\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.count(x)`` | return number of *i*\'s for which | |\n| | ``s[i] == x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.index(x[, i[, j]])`` | return smallest *k* such that | (4) |\n| | ``s[k] == x`` and ``i <= k < j`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.insert(i, x)`` | same as ``s[i:i] = [x]`` | (5) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.pop([i])`` | same as ``x = s[i]; del s[i]; | (6) |\n| | return x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.remove(x)`` | same as ``del s[s.index(x)]`` | (4) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.reverse()`` | reverses the items of *s* in | (7) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.sort([cmp[, key[, | sort the items of *s* in place | (7)(8)(9)(10) |\n| reverse]]])`` | | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The C implementation of Python has historically accepted multiple\n parameters and implicitly joined them into a tuple; this no longer\n works in Python 2.0. Use of this misfeature has been deprecated\n since Python 1.4.\n\n3. *x* can be any iterable object.\n\n4. Raises ``ValueError`` when *x* is not found in *s*. When a negative\n index is passed as the second or third parameter to the ``index()``\n method, the list length is added, as for slice indices. 
If it is\n still negative, it is truncated to zero, as for slice indices.\n\n Changed in version 2.3: Previously, ``index()`` didn\'t have\n arguments for specifying start and stop positions.\n\n5. When a negative index is passed as the first parameter to the\n ``insert()`` method, the list length is added, as for slice\n indices. If it is still negative, it is truncated to zero, as for\n slice indices.\n\n Changed in version 2.3: Previously, all negative indices were\n truncated to zero.\n\n6. The ``pop()`` method is only supported by the list and array types.\n The optional argument *i* defaults to ``-1``, so that by default\n the last item is removed and returned.\n\n7. The ``sort()`` and ``reverse()`` methods modify the list in place\n for economy of space when sorting or reversing a large list. To\n remind you that they operate by side effect, they don\'t return the\n sorted or reversed list.\n\n8. The ``sort()`` method takes optional arguments for controlling the\n comparisons.\n\n *cmp* specifies a custom comparison function of two arguments (list\n items) which should return a negative, zero or positive number\n depending on whether the first argument is considered smaller than,\n equal to, or larger than the second argument: ``cmp=lambda x,y:\n cmp(x.lower(), y.lower())``. The default value is ``None``.\n\n *key* specifies a function of one argument that is used to extract\n a comparison key from each list element: ``key=str.lower``. The\n default value is ``None``.\n\n *reverse* is a boolean value. If set to ``True``, then the list\n elements are sorted as if each comparison were reversed.\n\n In general, the *key* and *reverse* conversion processes are much\n faster than specifying an equivalent *cmp* function. This is\n because *cmp* is called multiple times for each list element while\n *key* and *reverse* touch each element only once. 
Use\n ``functools.cmp_to_key()`` to convert an old-style *cmp* function\n to a *key* function.\n\n Changed in version 2.3: Support for ``None`` as an equivalent to\n omitting *cmp* was added.\n\n Changed in version 2.4: Support for *key* and *reverse* was added.\n\n9. Starting with Python 2.3, the ``sort()`` method is guaranteed to be\n stable. A sort is stable if it guarantees not to change the\n relative order of elements that compare equal --- this is helpful\n for sorting in multiple passes (for example, sort by department,\n then by salary grade).\n\n10. **CPython implementation detail:** While a list is being sorted,\n the effect of attempting to mutate, or even inspect, the list is\n undefined. The C implementation of Python 2.3 and newer makes the\n list appear empty for the duration, and raises ``ValueError`` if\n it can detect that the list has been mutated during a sort.\n', - 'typesseq-mutable': u"\nMutable Sequence Types\n**********************\n\nList objects support additional operations that allow in-place\nmodification of the object. Other mutable sequence types (when added\nto the language) should also support these operations. Strings and\ntuples are immutable sequence types: such objects cannot be modified\nonce created. 
The following operations are defined on mutable sequence\ntypes (where *x* is an arbitrary object):\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| ``s[i] = x`` | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j] = t`` | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j]`` | same as ``s[i:j] = []`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j:k] = t`` | the elements of ``s[i:j:k]`` are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j:k]`` | removes the elements of | |\n| | ``s[i:j:k]`` from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.append(x)`` | same as ``s[len(s):len(s)] = | (2) |\n| | [x]`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.extend(x)`` | same as ``s[len(s):len(s)] = x`` | (3) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.count(x)`` | return number of *i*'s for which | |\n| | ``s[i] == x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.index(x[, i[, j]])`` | return smallest *k* such that | (4) |\n| | ``s[k] == x`` and ``i <= k < j`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.insert(i, x)`` | same as ``s[i:i] = [x]`` | (5) 
|\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.pop([i])`` | same as ``x = s[i]; del s[i]; | (6) |\n| | return x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.remove(x)`` | same as ``del s[s.index(x)]`` | (4) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.reverse()`` | reverses the items of *s* in | (7) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.sort([cmp[, key[, | sort the items of *s* in place | (7)(8)(9)(10) |\n| reverse]]])`` | | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The C implementation of Python has historically accepted multiple\n parameters and implicitly joined them into a tuple; this no longer\n works in Python 2.0. Use of this misfeature has been deprecated\n since Python 1.4.\n\n3. *x* can be any iterable object.\n\n4. Raises ``ValueError`` when *x* is not found in *s*. When a negative\n index is passed as the second or third parameter to the ``index()``\n method, the list length is added, as for slice indices. If it is\n still negative, it is truncated to zero, as for slice indices.\n\n Changed in version 2.3: Previously, ``index()`` didn't have\n arguments for specifying start and stop positions.\n\n5. When a negative index is passed as the first parameter to the\n ``insert()`` method, the list length is added, as for slice\n indices. If it is still negative, it is truncated to zero, as for\n slice indices.\n\n Changed in version 2.3: Previously, all negative indices were\n truncated to zero.\n\n6. 
The ``pop()`` method is only supported by the list and array types.\n The optional argument *i* defaults to ``-1``, so that by default\n the last item is removed and returned.\n\n7. The ``sort()`` and ``reverse()`` methods modify the list in place\n for economy of space when sorting or reversing a large list. To\n remind you that they operate by side effect, they don't return the\n sorted or reversed list.\n\n8. The ``sort()`` method takes optional arguments for controlling the\n comparisons.\n\n *cmp* specifies a custom comparison function of two arguments (list\n items) which should return a negative, zero or positive number\n depending on whether the first argument is considered smaller than,\n equal to, or larger than the second argument: ``cmp=lambda x,y:\n cmp(x.lower(), y.lower())``. The default value is ``None``.\n\n *key* specifies a function of one argument that is used to extract\n a comparison key from each list element: ``key=str.lower``. The\n default value is ``None``.\n\n *reverse* is a boolean value. If set to ``True``, then the list\n elements are sorted as if each comparison were reversed.\n\n In general, the *key* and *reverse* conversion processes are much\n faster than specifying an equivalent *cmp* function. This is\n because *cmp* is called multiple times for each list element while\n *key* and *reverse* touch each element only once. Use\n ``functools.cmp_to_key()`` to convert an old-style *cmp* function\n to a *key* function.\n\n Changed in version 2.3: Support for ``None`` as an equivalent to\n omitting *cmp* was added.\n\n Changed in version 2.4: Support for *key* and *reverse* was added.\n\n9. Starting with Python 2.3, the ``sort()`` method is guaranteed to be\n stable. A sort is stable if it guarantees not to change the\n relative order of elements that compare equal --- this is helpful\n for sorting in multiple passes (for example, sort by department,\n then by salary grade).\n\n10. 
**CPython implementation detail:** While a list is being sorted,\n the effect of attempting to mutate, or even inspect, the list is\n undefined. The C implementation of Python 2.3 and newer makes the\n list appear empty for the duration, and raises ``ValueError`` if\n it can detect that the list has been mutated during a sort.\n", + 'typesseq': u'\nSequence Types --- ``str``, ``unicode``, ``list``, ``tuple``, ``bytearray``, ``buffer``, ``xrange``\n***************************************************************************************************\n\nThere are seven sequence types: strings, Unicode strings, lists,\ntuples, bytearrays, buffers, and xrange objects.\n\nFor other containers see the built in ``dict`` and ``set`` classes,\nand the ``collections`` module.\n\nString literals are written in single or double quotes: ``\'xyzzy\'``,\n``"frobozz"``. See *String literals* for more about string literals.\nUnicode strings are much like strings, but are specified in the syntax\nusing a preceding ``\'u\'`` character: ``u\'abc\'``, ``u"def"``. In\naddition to the functionality described here, there are also string-\nspecific methods described in the *String Methods* section. Lists are\nconstructed with square brackets, separating items with commas: ``[a,\nb, c]``. Tuples are constructed by the comma operator (not within\nsquare brackets), with or without enclosing parentheses, but an empty\ntuple must have the enclosing parentheses, such as ``a, b, c`` or\n``()``. A single item tuple must have a trailing comma, such as\n``(d,)``.\n\nBytearray objects are created with the built-in function\n``bytearray()``.\n\nBuffer objects are not directly supported by Python syntax, but can be\ncreated by calling the built-in function ``buffer()``. They don\'t\nsupport concatenation or repetition.\n\nObjects of type xrange are similar to buffers in that there is no\nspecific syntax to create them, but they are created using the\n``xrange()`` function. 
They don\'t support slicing, concatenation or\nrepetition, and using ``in``, ``not in``, ``min()`` or ``max()`` on\nthem is inefficient.\n\nMost sequence types support the following operations. The ``in`` and\n``not in`` operations have the same priorities as the comparison\noperations. The ``+`` and ``*`` operations have the same priority as\nthe corresponding numeric operations. [3] Additional methods are\nprovided for *Mutable Sequence Types*.\n\nThis table lists the sequence operations sorted in ascending priority\n(operations in the same box have the same priority). In the table,\n*s* and *t* are sequences of the same type; *n*, *i* and *j* are\nintegers:\n\n+--------------------+----------------------------------+------------+\n| Operation | Result | Notes |\n+====================+==================================+============+\n| ``x in s`` | ``True`` if an item of *s* is | (1) |\n| | equal to *x*, else ``False`` | |\n+--------------------+----------------------------------+------------+\n| ``x not in s`` | ``False`` if an item of *s* is | (1) |\n| | equal to *x*, else ``True`` | |\n+--------------------+----------------------------------+------------+\n| ``s + t`` | the concatenation of *s* and *t* | (6) |\n+--------------------+----------------------------------+------------+\n| ``s * n, n * s`` | *n* shallow copies of *s* | (2) |\n| | concatenated | |\n+--------------------+----------------------------------+------------+\n| ``s[i]`` | *i*\'th item of *s*, origin 0 | (3) |\n+--------------------+----------------------------------+------------+\n| ``s[i:j]`` | slice of *s* from *i* to *j* | (3)(4) |\n+--------------------+----------------------------------+------------+\n| ``s[i:j:k]`` | slice of *s* from *i* to *j* | (3)(5) |\n| | with step *k* | |\n+--------------------+----------------------------------+------------+\n| ``len(s)`` | length of *s* | |\n+--------------------+----------------------------------+------------+\n| ``min(s)`` | smallest item 
of *s* | |\n+--------------------+----------------------------------+------------+\n| ``max(s)`` | largest item of *s* | |\n+--------------------+----------------------------------+------------+\n| ``s.index(i)`` | index of the first occurence of | |\n| | *i* in *s* | |\n+--------------------+----------------------------------+------------+\n| ``s.count(i)`` | total number of occurences of | |\n| | *i* in *s* | |\n+--------------------+----------------------------------+------------+\n\nSequence types also support comparisons. In particular, tuples and\nlists are compared lexicographically by comparing corresponding\nelements. This means that to compare equal, every element must compare\nequal and the two sequences must be of the same type and have the same\nlength. (For full details see *Comparisons* in the language\nreference.)\n\nNotes:\n\n1. When *s* is a string or Unicode string object the ``in`` and ``not\n in`` operations act like a substring test. In Python versions\n before 2.3, *x* had to be a string of length 1. In Python 2.3 and\n beyond, *x* may be a string of any length.\n\n2. Values of *n* less than ``0`` are treated as ``0`` (which yields an\n empty sequence of the same type as *s*). Note also that the copies\n are shallow; nested structures are not copied. This often haunts\n new Python programmers; consider:\n\n >>> lists = [[]] * 3\n >>> lists\n [[], [], []]\n >>> lists[0].append(3)\n >>> lists\n [[3], [3], [3]]\n\n What has happened is that ``[[]]`` is a one-element list containing\n an empty list, so all three elements of ``[[]] * 3`` are (pointers\n to) this single empty list. Modifying any of the elements of\n ``lists`` modifies this single list. You can create a list of\n different lists this way:\n\n >>> lists = [[] for i in range(3)]\n >>> lists[0].append(3)\n >>> lists[1].append(5)\n >>> lists[2].append(7)\n >>> lists\n [[3], [5], [7]]\n\n3. 
If *i* or *j* is negative, the index is relative to the end of the\n string: ``len(s) + i`` or ``len(s) + j`` is substituted. But note\n that ``-0`` is still ``0``.\n\n4. The slice of *s* from *i* to *j* is defined as the sequence of\n items with index *k* such that ``i <= k < j``. If *i* or *j* is\n greater than ``len(s)``, use ``len(s)``. If *i* is omitted or\n ``None``, use ``0``. If *j* is omitted or ``None``, use\n ``len(s)``. If *i* is greater than or equal to *j*, the slice is\n empty.\n\n5. The slice of *s* from *i* to *j* with step *k* is defined as the\n sequence of items with index ``x = i + n*k`` such that ``0 <= n <\n (j-i)/k``. In other words, the indices are ``i``, ``i+k``,\n ``i+2*k``, ``i+3*k`` and so on, stopping when *j* is reached (but\n never including *j*). If *i* or *j* is greater than ``len(s)``,\n use ``len(s)``. If *i* or *j* are omitted or ``None``, they become\n "end" values (which end depends on the sign of *k*). Note, *k*\n cannot be zero. If *k* is ``None``, it is treated like ``1``.\n\n6. **CPython implementation detail:** If *s* and *t* are both strings,\n some Python implementations such as CPython can usually perform an\n in-place optimization for assignments of the form ``s = s + t`` or\n ``s += t``. When applicable, this optimization makes quadratic\n run-time much less likely. This optimization is both version and\n implementation dependent. For performance sensitive code, it is\n preferable to use the ``str.join()`` method which assures\n consistent linear concatenation performance across versions and\n implementations.\n\n Changed in version 2.4: Formerly, string concatenation never\n occurred in-place.\n\n\nString Methods\n==============\n\nBelow are listed the string methods which both 8-bit strings and\nUnicode objects support. 
Some of them are also available on\n``bytearray`` objects.\n\nIn addition, Python\'s strings support the sequence type methods\ndescribed in the *Sequence Types --- str, unicode, list, tuple,\nbytearray, buffer, xrange* section. To output formatted strings use\ntemplate strings or the ``%`` operator described in the *String\nFormatting Operations* section. Also, see the ``re`` module for string\nfunctions based on regular expressions.\n\nstr.capitalize()\n\n Return a copy of the string with its first character capitalized\n and the rest lowercased.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.center(width[, fillchar])\n\n Return centered in a string of length *width*. Padding is done\n using the specified *fillchar* (default is a space).\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.count(sub[, start[, end]])\n\n Return the number of non-overlapping occurrences of substring *sub*\n in the range [*start*, *end*]. Optional arguments *start* and\n *end* are interpreted as in slice notation.\n\nstr.decode([encoding[, errors]])\n\n Decodes the string using the codec registered for *encoding*.\n *encoding* defaults to the default string encoding. *errors* may\n be given to set a different error handling scheme. The default is\n ``\'strict\'``, meaning that encoding errors raise ``UnicodeError``.\n Other possible values are ``\'ignore\'``, ``\'replace\'`` and any other\n name registered via ``codecs.register_error()``, see section *Codec\n Base Classes*.\n\n New in version 2.2.\n\n Changed in version 2.3: Support for other error handling schemes\n added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.encode([encoding[, errors]])\n\n Return an encoded version of the string. Default encoding is the\n current default string encoding. *errors* may be given to set a\n different error handling scheme. The default for *errors* is\n ``\'strict\'``, meaning that encoding errors raise a\n ``UnicodeError``. 
Other possible values are ``\'ignore\'``,\n ``\'replace\'``, ``\'xmlcharrefreplace\'``, ``\'backslashreplace\'`` and\n any other name registered via ``codecs.register_error()``, see\n section *Codec Base Classes*. For a list of possible encodings, see\n section *Standard Encodings*.\n\n New in version 2.0.\n\n Changed in version 2.3: Support for ``\'xmlcharrefreplace\'`` and\n ``\'backslashreplace\'`` and other error handling schemes added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n Return ``True`` if the string ends with the specified *suffix*,\n otherwise return ``False``. *suffix* can also be a tuple of\n suffixes to look for. With optional *start*, test beginning at\n that position. With optional *end*, stop comparing at that\n position.\n\n Changed in version 2.5: Accept tuples as *suffix*.\n\nstr.expandtabs([tabsize])\n\n Return a copy of the string where all tab characters are replaced\n by one or more spaces, depending on the current column and the\n given tab size. The column number is reset to zero after each\n newline occurring in the string. If *tabsize* is not given, a tab\n size of ``8`` characters is assumed. This doesn\'t understand other\n non-printing characters or escape sequences.\n\nstr.find(sub[, start[, end]])\n\n Return the lowest index in the string where substring *sub* is\n found, such that *sub* is contained in the slice ``s[start:end]``.\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` if *sub* is not found.\n\n Note: The ``find()`` method should be used only if you need to know the\n position of *sub*. To check if *sub* is a substring or not, use\n the ``in`` operator:\n\n >>> \'Py\' in \'Python\'\n True\n\nstr.format(*args, **kwargs)\n\n Perform a string formatting operation. The string on which this\n method is called can contain literal text or replacement fields\n delimited by braces ``{}``. 
Each replacement field contains either\n the numeric index of a positional argument, or the name of a\n keyword argument. Returns a copy of the string where each\n replacement field is replaced with the string value of the\n corresponding argument.\n\n >>> "The sum of 1 + 2 is {0}".format(1+2)\n \'The sum of 1 + 2 is 3\'\n\n See *Format String Syntax* for a description of the various\n formatting options that can be specified in format strings.\n\n This method of string formatting is the new standard in Python 3.0,\n and should be preferred to the ``%`` formatting described in\n *String Formatting Operations* in new code.\n\n New in version 2.6.\n\nstr.index(sub[, start[, end]])\n\n Like ``find()``, but raise ``ValueError`` when the substring is not\n found.\n\nstr.isalnum()\n\n Return true if all characters in the string are alphanumeric and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isalpha()\n\n Return true if all characters in the string are alphabetic and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isdigit()\n\n Return true if all characters in the string are digits and there is\n at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.islower()\n\n Return true if all cased characters in the string are lowercase and\n there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isspace()\n\n Return true if there are only whitespace characters in the string\n and there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.istitle()\n\n Return true if the string is a titlecased string and there is at\n least one character, for example uppercase characters may only\n follow uncased characters and lowercase characters only cased ones.\n Return false otherwise.\n\n For 
8-bit strings, this method is locale-dependent.\n\nstr.isupper()\n\n Return true if all cased characters in the string are uppercase and\n there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.join(iterable)\n\n Return a string which is the concatenation of the strings in the\n *iterable* *iterable*. The separator between elements is the\n string providing this method.\n\nstr.ljust(width[, fillchar])\n\n Return the string left justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than\n ``len(s)``.\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.lower()\n\n Return a copy of the string converted to lowercase.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.lstrip([chars])\n\n Return a copy of the string with leading characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a prefix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.lstrip()\n \'spacious \'\n >>> \'www.example.com\'.lstrip(\'cmowz.\')\n \'example.com\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.partition(sep)\n\n Split the string at the first occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing the string itself, followed by\n two empty strings.\n\n New in version 2.5.\n\nstr.replace(old, new[, count])\n\n Return a copy of the string with all occurrences of substring *old*\n replaced by *new*. 
If the optional argument *count* is given, only\n the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n Return the highest index in the string where substring *sub* is\n found, such that *sub* is contained within ``s[start:end]``.\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` on failure.\n\nstr.rindex(sub[, start[, end]])\n\n Like ``rfind()`` but raises ``ValueError`` when the substring *sub*\n is not found.\n\nstr.rjust(width[, fillchar])\n\n Return the string right justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than\n ``len(s)``.\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.rpartition(sep)\n\n Split the string at the last occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing two empty strings, followed by\n the string itself.\n\n New in version 2.5.\n\nstr.rsplit([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit* splits\n are done, the *rightmost* ones. If *sep* is not specified or\n ``None``, any whitespace string is a separator. Except for\n splitting from the right, ``rsplit()`` behaves like ``split()``\n which is described in detail below.\n\n New in version 2.4.\n\nstr.rstrip([chars])\n\n Return a copy of the string with trailing characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. 
The *chars* argument is not a suffix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.rstrip()\n \' spacious\'\n >>> \'mississippi\'.rstrip(\'ipz\')\n \'mississ\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.split([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit*\n splits are done (thus, the list will have at most ``maxsplit+1``\n elements). If *maxsplit* is not specified, then there is no limit\n on the number of splits (all possible splits are made).\n\n If *sep* is given, consecutive delimiters are not grouped together\n and are deemed to delimit empty strings (for example,\n ``\'1,,2\'.split(\',\')`` returns ``[\'1\', \'\', \'2\']``). The *sep*\n argument may consist of multiple characters (for example,\n ``\'1<>2<>3\'.split(\'<>\')`` returns ``[\'1\', \'2\', \'3\']``). Splitting\n an empty string with a specified separator returns ``[\'\']``.\n\n If *sep* is not specified or is ``None``, a different splitting\n algorithm is applied: runs of consecutive whitespace are regarded\n as a single separator, and the result will contain no empty strings\n at the start or end if the string has leading or trailing\n whitespace. Consequently, splitting an empty string or a string\n consisting of just whitespace with a ``None`` separator returns\n ``[]``.\n\n For example, ``\' 1 2 3 \'.split()`` returns ``[\'1\', \'2\', \'3\']``,\n and ``\' 1 2 3 \'.split(None, 1)`` returns ``[\'1\', \'2 3 \']``.\n\nstr.splitlines([keepends])\n\n Return a list of the lines in the string, breaking at line\n boundaries. Line breaks are not included in the resulting list\n unless *keepends* is given and true.\n\nstr.startswith(prefix[, start[, end]])\n\n Return ``True`` if string starts with the *prefix*, otherwise\n return ``False``. *prefix* can also be a tuple of prefixes to look\n for. 
With optional *start*, test string beginning at that\n position. With optional *end*, stop comparing string at that\n position.\n\n Changed in version 2.5: Accept tuples as *prefix*.\n\nstr.strip([chars])\n\n Return a copy of the string with the leading and trailing\n characters removed. The *chars* argument is a string specifying the\n set of characters to be removed. If omitted or ``None``, the\n *chars* argument defaults to removing whitespace. The *chars*\n argument is not a prefix or suffix; rather, all combinations of its\n values are stripped:\n\n >>> \' spacious \'.strip()\n \'spacious\'\n >>> \'www.example.com\'.strip(\'cmowz.\')\n \'example\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.swapcase()\n\n Return a copy of the string with uppercase characters converted to\n lowercase and vice versa.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.title()\n\n Return a titlecased version of the string where words start with an\n uppercase character and the remaining characters are lowercase.\n\n The algorithm uses a simple language-independent definition of a\n word as groups of consecutive letters. 
The definition works in\n many contexts but it means that apostrophes in contractions and\n possessives form word boundaries, which may not be the desired\n result:\n\n >>> "they\'re bill\'s friends from the UK".title()\n "They\'Re Bill\'S Friends From The Uk"\n\n A workaround for apostrophes can be constructed using regular\n expressions:\n\n >>> import re\n >>> def titlecase(s):\n return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n lambda mo: mo.group(0)[0].upper() +\n mo.group(0)[1:].lower(),\n s)\n\n >>> titlecase("they\'re bill\'s friends.")\n "They\'re Bill\'s Friends."\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.translate(table[, deletechars])\n\n Return a copy of the string where all characters occurring in the\n optional argument *deletechars* are removed, and the remaining\n characters have been mapped through the given translation table,\n which must be a string of length 256.\n\n You can use the ``maketrans()`` helper function in the ``string``\n module to create a translation table. For string objects, set the\n *table* argument to ``None`` for translations that only delete\n characters:\n\n >>> \'read this short text\'.translate(None, \'aeiou\')\n \'rd ths shrt txt\'\n\n New in version 2.6: Support for a ``None`` *table* argument.\n\n For Unicode objects, the ``translate()`` method does not accept the\n optional *deletechars* argument. Instead, it returns a copy of the\n *s* where all characters have been mapped through the given\n translation table which must be a mapping of Unicode ordinals to\n Unicode ordinals, Unicode strings or ``None``. Unmapped characters\n are left untouched. 
Characters mapped to ``None`` are deleted.\n Note, a more flexible approach is to create a custom character\n mapping codec using the ``codecs`` module (see ``encodings.cp1251``\n for an example).\n\nstr.upper()\n\n Return a copy of the string converted to uppercase.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.zfill(width)\n\n Return the numeric string left filled with zeros in a string of\n length *width*. A sign prefix is handled correctly. The original\n string is returned if *width* is less than ``len(s)``.\n\n New in version 2.2.2.\n\nThe following methods are present only on unicode objects:\n\nunicode.isnumeric()\n\n Return ``True`` if there are only numeric characters in S,\n ``False`` otherwise. Numeric characters include digit characters,\n and all characters that have the Unicode numeric value property,\n e.g. U+2155, VULGAR FRACTION ONE FIFTH.\n\nunicode.isdecimal()\n\n Return ``True`` if there are only decimal characters in S,\n ``False`` otherwise. Decimal characters include digit characters,\n and all characters that that can be used to form decimal-radix\n numbers, e.g. U+0660, ARABIC-INDIC DIGIT ZERO.\n\n\nString Formatting Operations\n============================\n\nString and Unicode objects have one unique built-in operation: the\n``%`` operator (modulo). This is also known as the string\n*formatting* or *interpolation* operator. Given ``format % values``\n(where *format* is a string or Unicode object), ``%`` conversion\nspecifications in *format* are replaced with zero or more elements of\n*values*. The effect is similar to the using ``sprintf()`` in the C\nlanguage. If *format* is a Unicode object, or if any of the objects\nbeing converted using the ``%s`` conversion are Unicode objects, the\nresult will also be a Unicode object.\n\nIf *format* requires a single argument, *values* may be a single non-\ntuple object. 
[4] Otherwise, *values* must be a tuple with exactly\nthe number of items specified by the format string, or a single\nmapping object (for example, a dictionary).\n\nA conversion specifier contains two or more characters and has the\nfollowing components, which must occur in this order:\n\n1. The ``\'%\'`` character, which marks the start of the specifier.\n\n2. Mapping key (optional), consisting of a parenthesised sequence of\n characters (for example, ``(somename)``).\n\n3. Conversion flags (optional), which affect the result of some\n conversion types.\n\n4. Minimum field width (optional). If specified as an ``\'*\'``\n (asterisk), the actual width is read from the next element of the\n tuple in *values*, and the object to convert comes after the\n minimum field width and optional precision.\n\n5. Precision (optional), given as a ``\'.\'`` (dot) followed by the\n precision. If specified as ``\'*\'`` (an asterisk), the actual width\n is read from the next element of the tuple in *values*, and the\n value to convert comes after the precision.\n\n6. Length modifier (optional).\n\n7. Conversion type.\n\nWhen the right argument is a dictionary (or other mapping type), then\nthe formats in the string *must* include a parenthesised mapping key\ninto that dictionary inserted immediately after the ``\'%\'`` character.\nThe mapping key selects the value to be formatted from the mapping.\nFor example:\n\n>>> print \'%(language)s has %(number)03d quote types.\' % \\\n... {"language": "Python", "number": 2}\nPython has 002 quote types.\n\nIn this case no ``*`` specifiers may occur in a format (since they\nrequire a sequential parameter list).\n\nThe conversion flag characters are:\n\n+-----------+-----------------------------------------------------------------------+\n| Flag | Meaning |\n+===========+=======================================================================+\n| ``\'#\'`` | The value conversion will use the "alternate form" (where defined |\n| | below). 
|\n+-----------+-----------------------------------------------------------------------+\n| ``\'0\'`` | The conversion will be zero padded for numeric values. |\n+-----------+-----------------------------------------------------------------------+\n| ``\'-\'`` | The converted value is left adjusted (overrides the ``\'0\'`` |\n| | conversion if both are given). |\n+-----------+-----------------------------------------------------------------------+\n| ``\' \'`` | (a space) A blank should be left before a positive number (or empty |\n| | string) produced by a signed conversion. |\n+-----------+-----------------------------------------------------------------------+\n| ``\'+\'`` | A sign character (``\'+\'`` or ``\'-\'``) will precede the conversion |\n| | (overrides a "space" flag). |\n+-----------+-----------------------------------------------------------------------+\n\nA length modifier (``h``, ``l``, or ``L``) may be present, but is\nignored as it is not necessary for Python -- so e.g. ``%ld`` is\nidentical to ``%d``.\n\nThe conversion types are:\n\n+--------------+-------------------------------------------------------+---------+\n| Conversion | Meaning | Notes |\n+==============+=======================================================+=========+\n| ``\'d\'`` | Signed integer decimal. | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'i\'`` | Signed integer decimal. | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'o\'`` | Signed octal value. | (1) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'u\'`` | Obsolete type -- it is identical to ``\'d\'``. | (7) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'x\'`` | Signed hexadecimal (lowercase). | (2) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'X\'`` | Signed hexadecimal (uppercase). 
| (2) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'e\'`` | Floating point exponential format (lowercase). | (3) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'E\'`` | Floating point exponential format (uppercase). | (3) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'f\'`` | Floating point decimal format. | (3) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'F\'`` | Floating point decimal format. | (3) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'g\'`` | Floating point format. Uses lowercase exponential | (4) |\n| | format if exponent is less than -4 or not less than | |\n| | precision, decimal format otherwise. | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'G\'`` | Floating point format. Uses uppercase exponential | (4) |\n| | format if exponent is less than -4 or not less than | |\n| | precision, decimal format otherwise. | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'c\'`` | Single character (accepts integer or single character | |\n| | string). | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'r\'`` | String (converts any Python object using ``repr()``). | (5) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'s\'`` | String (converts any Python object using ``str()``). | (6) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'%\'`` | No argument is converted, results in a ``\'%\'`` | |\n| | character in the result. | |\n+--------------+-------------------------------------------------------+---------+\n\nNotes:\n\n1. 
The alternate form causes a leading zero (``\'0\'``) to be inserted\n between left-hand padding and the formatting of the number if the\n leading character of the result is not already a zero.\n\n2. The alternate form causes a leading ``\'0x\'`` or ``\'0X\'`` (depending\n on whether the ``\'x\'`` or ``\'X\'`` format was used) to be inserted\n between left-hand padding and the formatting of the number if the\n leading character of the result is not already a zero.\n\n3. The alternate form causes the result to always contain a decimal\n point, even if no digits follow it.\n\n The precision determines the number of digits after the decimal\n point and defaults to 6.\n\n4. The alternate form causes the result to always contain a decimal\n point, and trailing zeroes are not removed as they would otherwise\n be.\n\n The precision determines the number of significant digits before\n and after the decimal point and defaults to 6.\n\n5. The ``%r`` conversion was added in Python 2.0.\n\n The precision determines the maximal number of characters used.\n\n6. If the object or format provided is a ``unicode`` string, the\n resulting string will also be ``unicode``.\n\n The precision determines the maximal number of characters used.\n\n7. See **PEP 237**.\n\nSince Python strings have an explicit length, ``%s`` conversions do\nnot assume that ``\'\\0\'`` is the end of the string.\n\nChanged in version 2.7: ``%f`` conversions for numbers whose absolute\nvalue is over 1e50 are no longer replaced by ``%g`` conversions.\n\nAdditional string operations are defined in standard modules\n``string`` and ``re``.\n\n\nXRange Type\n===========\n\nThe ``xrange`` type is an immutable sequence which is commonly used\nfor looping. The advantage of the ``xrange`` type is that an\n``xrange`` object will always take the same amount of memory, no\nmatter the size of the range it represents. 
There are no consistent\nperformance advantages.\n\nXRange objects have very little behavior: they only support indexing,\niteration, and the ``len()`` function.\n\n\nMutable Sequence Types\n======================\n\nList and ``bytearray`` objects support additional operations that\nallow in-place modification of the object. Other mutable sequence\ntypes (when added to the language) should also support these\noperations. Strings and tuples are immutable sequence types: such\nobjects cannot be modified once created. The following operations are\ndefined on mutable sequence types (where *x* is an arbitrary object):\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| ``s[i] = x`` | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j] = t`` | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j]`` | same as ``s[i:j] = []`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j:k] = t`` | the elements of ``s[i:j:k]`` are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j:k]`` | removes the elements of | |\n| | ``s[i:j:k]`` from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.append(x)`` | same as ``s[len(s):len(s)] = | (2) |\n| | [x]`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.extend(x)`` | same as ``s[len(s):len(s)] = x`` | (3) 
|\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.count(x)`` | return number of *i*\'s for which | |\n| | ``s[i] == x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.index(x[, i[, j]])`` | return smallest *k* such that | (4) |\n| | ``s[k] == x`` and ``i <= k < j`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.insert(i, x)`` | same as ``s[i:i] = [x]`` | (5) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.pop([i])`` | same as ``x = s[i]; del s[i]; | (6) |\n| | return x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.remove(x)`` | same as ``del s[s.index(x)]`` | (4) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.reverse()`` | reverses the items of *s* in | (7) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.sort([cmp[, key[, | sort the items of *s* in place | (7)(8)(9)(10) |\n| reverse]]])`` | | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The C implementation of Python has historically accepted multiple\n parameters and implicitly joined them into a tuple; this no longer\n works in Python 2.0. Use of this misfeature has been deprecated\n since Python 1.4.\n\n3. *x* can be any iterable object.\n\n4. Raises ``ValueError`` when *x* is not found in *s*. When a negative\n index is passed as the second or third parameter to the ``index()``\n method, the list length is added, as for slice indices. 
If it is\n still negative, it is truncated to zero, as for slice indices.\n\n Changed in version 2.3: Previously, ``index()`` didn\'t have\n arguments for specifying start and stop positions.\n\n5. When a negative index is passed as the first parameter to the\n ``insert()`` method, the list length is added, as for slice\n indices. If it is still negative, it is truncated to zero, as for\n slice indices.\n\n Changed in version 2.3: Previously, all negative indices were\n truncated to zero.\n\n6. The ``pop()`` method is only supported by the list and array types.\n The optional argument *i* defaults to ``-1``, so that by default\n the last item is removed and returned.\n\n7. The ``sort()`` and ``reverse()`` methods modify the list in place\n for economy of space when sorting or reversing a large list. To\n remind you that they operate by side effect, they don\'t return the\n sorted or reversed list.\n\n8. The ``sort()`` method takes optional arguments for controlling the\n comparisons.\n\n *cmp* specifies a custom comparison function of two arguments (list\n items) which should return a negative, zero or positive number\n depending on whether the first argument is considered smaller than,\n equal to, or larger than the second argument: ``cmp=lambda x,y:\n cmp(x.lower(), y.lower())``. The default value is ``None``.\n\n *key* specifies a function of one argument that is used to extract\n a comparison key from each list element: ``key=str.lower``. The\n default value is ``None``.\n\n *reverse* is a boolean value. If set to ``True``, then the list\n elements are sorted as if each comparison were reversed.\n\n In general, the *key* and *reverse* conversion processes are much\n faster than specifying an equivalent *cmp* function. This is\n because *cmp* is called multiple times for each list element while\n *key* and *reverse* touch each element only once. 
Use\n ``functools.cmp_to_key()`` to convert an old-style *cmp* function\n to a *key* function.\n\n Changed in version 2.3: Support for ``None`` as an equivalent to\n omitting *cmp* was added.\n\n Changed in version 2.4: Support for *key* and *reverse* was added.\n\n9. Starting with Python 2.3, the ``sort()`` method is guaranteed to be\n stable. A sort is stable if it guarantees not to change the\n relative order of elements that compare equal --- this is helpful\n for sorting in multiple passes (for example, sort by department,\n then by salary grade).\n\n10. **CPython implementation detail:** While a list is being sorted,\n the effect of attempting to mutate, or even inspect, the list is\n undefined. The C implementation of Python 2.3 and newer makes the\n list appear empty for the duration, and raises ``ValueError`` if\n it can detect that the list has been mutated during a sort.\n', + 'typesseq-mutable': u"\nMutable Sequence Types\n**********************\n\nList and ``bytearray`` objects support additional operations that\nallow in-place modification of the object. Other mutable sequence\ntypes (when added to the language) should also support these\noperations. Strings and tuples are immutable sequence types: such\nobjects cannot be modified once created. 
The following operations are\ndefined on mutable sequence types (where *x* is an arbitrary object):\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| ``s[i] = x`` | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j] = t`` | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j]`` | same as ``s[i:j] = []`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j:k] = t`` | the elements of ``s[i:j:k]`` are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j:k]`` | removes the elements of | |\n| | ``s[i:j:k]`` from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.append(x)`` | same as ``s[len(s):len(s)] = | (2) |\n| | [x]`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.extend(x)`` | same as ``s[len(s):len(s)] = x`` | (3) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.count(x)`` | return number of *i*'s for which | |\n| | ``s[i] == x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.index(x[, i[, j]])`` | return smallest *k* such that | (4) |\n| | ``s[k] == x`` and ``i <= k < j`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.insert(i, x)`` | same as ``s[i:i] = [x]`` | (5) 
|\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.pop([i])`` | same as ``x = s[i]; del s[i]; | (6) |\n| | return x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.remove(x)`` | same as ``del s[s.index(x)]`` | (4) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.reverse()`` | reverses the items of *s* in | (7) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.sort([cmp[, key[, | sort the items of *s* in place | (7)(8)(9)(10) |\n| reverse]]])`` | | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The C implementation of Python has historically accepted multiple\n parameters and implicitly joined them into a tuple; this no longer\n works in Python 2.0. Use of this misfeature has been deprecated\n since Python 1.4.\n\n3. *x* can be any iterable object.\n\n4. Raises ``ValueError`` when *x* is not found in *s*. When a negative\n index is passed as the second or third parameter to the ``index()``\n method, the list length is added, as for slice indices. If it is\n still negative, it is truncated to zero, as for slice indices.\n\n Changed in version 2.3: Previously, ``index()`` didn't have\n arguments for specifying start and stop positions.\n\n5. When a negative index is passed as the first parameter to the\n ``insert()`` method, the list length is added, as for slice\n indices. If it is still negative, it is truncated to zero, as for\n slice indices.\n\n Changed in version 2.3: Previously, all negative indices were\n truncated to zero.\n\n6. 
The ``pop()`` method is only supported by the list and array types.\n The optional argument *i* defaults to ``-1``, so that by default\n the last item is removed and returned.\n\n7. The ``sort()`` and ``reverse()`` methods modify the list in place\n for economy of space when sorting or reversing a large list. To\n remind you that they operate by side effect, they don't return the\n sorted or reversed list.\n\n8. The ``sort()`` method takes optional arguments for controlling the\n comparisons.\n\n *cmp* specifies a custom comparison function of two arguments (list\n items) which should return a negative, zero or positive number\n depending on whether the first argument is considered smaller than,\n equal to, or larger than the second argument: ``cmp=lambda x,y:\n cmp(x.lower(), y.lower())``. The default value is ``None``.\n\n *key* specifies a function of one argument that is used to extract\n a comparison key from each list element: ``key=str.lower``. The\n default value is ``None``.\n\n *reverse* is a boolean value. If set to ``True``, then the list\n elements are sorted as if each comparison were reversed.\n\n In general, the *key* and *reverse* conversion processes are much\n faster than specifying an equivalent *cmp* function. This is\n because *cmp* is called multiple times for each list element while\n *key* and *reverse* touch each element only once. Use\n ``functools.cmp_to_key()`` to convert an old-style *cmp* function\n to a *key* function.\n\n Changed in version 2.3: Support for ``None`` as an equivalent to\n omitting *cmp* was added.\n\n Changed in version 2.4: Support for *key* and *reverse* was added.\n\n9. Starting with Python 2.3, the ``sort()`` method is guaranteed to be\n stable. A sort is stable if it guarantees not to change the\n relative order of elements that compare equal --- this is helpful\n for sorting in multiple passes (for example, sort by department,\n then by salary grade).\n\n10. 
**CPython implementation detail:** While a list is being sorted,\n the effect of attempting to mutate, or even inspect, the list is\n undefined. The C implementation of Python 2.3 and newer makes the\n list appear empty for the duration, and raises ``ValueError`` if\n it can detect that the list has been mutated during a sort.\n", 'unary': u'\nUnary arithmetic and bitwise operations\n***************************************\n\nAll unary arithmetic and bitwise operations have the same priority:\n\n u_expr ::= power | "-" u_expr | "+" u_expr | "~" u_expr\n\nThe unary ``-`` (minus) operator yields the negation of its numeric\nargument.\n\nThe unary ``+`` (plus) operator yields its numeric argument unchanged.\n\nThe unary ``~`` (invert) operator yields the bitwise inversion of its\nplain or long integer argument. The bitwise inversion of ``x`` is\ndefined as ``-(x+1)``. It only applies to integral numbers.\n\nIn all three cases, if the argument does not have the proper type, a\n``TypeError`` exception is raised.\n', 'while': u'\nThe ``while`` statement\n***********************\n\nThe ``while`` statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the ``else`` clause, if present, is\nexecuted and the loop terminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ngoes back to testing the expression.\n', - 'with': u'\nThe ``with`` statement\n**********************\n\nNew in version 2.5.\n\nThe ``with`` statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). 
This allows common\n``try``...``except``...``finally`` usage patterns to be encapsulated\nfor convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the ``with`` statement with one "item" proceeds as\nfollows:\n\n1. The context expression is evaluated to obtain a context manager.\n\n2. The context manager\'s ``__exit__()`` is loaded for later use.\n\n3. The context manager\'s ``__enter__()`` method is invoked.\n\n4. If a target was included in the ``with`` statement, the return\n value from ``__enter__()`` is assigned to it.\n\n Note: The ``with`` statement guarantees that if the ``__enter__()``\n method returns without an error, then ``__exit__()`` will always\n be called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s ``__exit__()`` method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to ``__exit__()``. Otherwise,\n three ``None`` arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the ``__exit__()`` method was false, the exception is\n reraised. 
If the return value was true, the exception is\n suppressed, and execution continues with the statement following\n the ``with`` statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from ``__exit__()`` is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple ``with`` statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nNote: In Python 2.5, the ``with`` statement is only allowed when the\n ``with_statement`` feature has been enabled. It is always enabled\n in Python 2.6.\n\nChanged in version 2.7: Support for multiple context expressions.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n', + 'with': u'\nThe ``with`` statement\n**********************\n\nNew in version 2.5.\n\nThe ``with`` statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). This allows common\n``try``...``except``...``finally`` usage patterns to be encapsulated\nfor convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the ``with`` statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the **with_item**)\n is evaluated to obtain a context manager.\n\n2. The context manager\'s ``__exit__()`` is loaded for later use.\n\n3. The context manager\'s ``__enter__()`` method is invoked.\n\n4. If a target was included in the ``with`` statement, the return\n value from ``__enter__()`` is assigned to it.\n\n Note: The ``with`` statement guarantees that if the ``__enter__()``\n method returns without an error, then ``__exit__()`` will always\n be called. 
Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s ``__exit__()`` method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to ``__exit__()``. Otherwise,\n three ``None`` arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the ``__exit__()`` method was false, the exception is\n reraised. If the return value was true, the exception is\n suppressed, and execution continues with the statement following\n the ``with`` statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from ``__exit__()`` is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple ``with`` statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nNote: In Python 2.5, the ``with`` statement is only allowed when the\n ``with_statement`` feature has been enabled. 
It is always enabled\n in Python 2.6.\n\nChanged in version 2.7: Support for multiple context expressions.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n', 'yield': u'\nThe ``yield`` statement\n***********************\n\n yield_stmt ::= yield_expression\n\nThe ``yield`` statement is only used when defining a generator\nfunction, and is only used in the body of the generator function.\nUsing a ``yield`` statement in a function definition is sufficient to\ncause that definition to create a generator function instead of a\nnormal function.\n\nWhen a generator function is called, it returns an iterator known as a\ngenerator iterator, or more commonly, a generator. The body of the\ngenerator function is executed by calling the generator\'s ``next()``\nmethod repeatedly until it raises an exception.\n\nWhen a ``yield`` statement is executed, the state of the generator is\nfrozen and the value of **expression_list** is returned to\n``next()``\'s caller. By "frozen" we mean that all local state is\nretained, including the current bindings of local variables, the\ninstruction pointer, and the internal evaluation stack: enough\ninformation is saved so that the next time ``next()`` is invoked, the\nfunction can proceed exactly as if the ``yield`` statement were just\nanother external call.\n\nAs of Python version 2.5, the ``yield`` statement is now allowed in\nthe ``try`` clause of a ``try`` ... ``finally`` construct. If the\ngenerator is not resumed before it is finalized (by reaching a zero\nreference count or by being garbage collected), the generator-\niterator\'s ``close()`` method will be called, allowing any pending\n``finally`` clauses to execute.\n\nNote: In Python 2.2, the ``yield`` statement was only allowed when the\n ``generators`` feature has been enabled. 
This ``__future__`` import\n statement was used to enable the feature:\n\n from __future__ import generators\n\nSee also:\n\n **PEP 0255** - Simple Generators\n The proposal for adding generators and the ``yield`` statement\n to Python.\n\n **PEP 0342** - Coroutines via Enhanced Generators\n The proposal that, among other generator enhancements, proposed\n allowing ``yield`` to appear inside a ``try`` ... ``finally``\n block.\n'} diff --git a/lib-python/2.7/random.py b/lib-python/2.7/random.py --- a/lib-python/2.7/random.py +++ b/lib-python/2.7/random.py @@ -317,7 +317,7 @@ n = len(population) if not 0 <= k <= n: - raise ValueError, "sample larger than population" + raise ValueError("sample larger than population") random = self.random _int = int result = [None] * k @@ -490,6 +490,12 @@ Conditions on the parameters are alpha > 0 and beta > 0. + The probability distribution function is: + + x ** (alpha - 1) * math.exp(-x / beta) + pdf(x) = -------------------------------------- + math.gamma(alpha) * beta ** alpha + """ # alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2 @@ -592,7 +598,7 @@ ## -------------------- beta -------------------- ## See -## http://sourceforge.net/bugs/?func=detailbug&bug_id=130030&group_id=5470 +## http://mail.python.org/pipermail/python-bugs-list/2001-January/003752.html ## for Ivan Frohne's insightful analysis of why the original implementation: ## ## def betavariate(self, alpha, beta): diff --git a/lib-python/2.7/re.py b/lib-python/2.7/re.py --- a/lib-python/2.7/re.py +++ b/lib-python/2.7/re.py @@ -207,8 +207,7 @@ "Escape all non-alphanumeric characters in pattern." 
s = list(pattern) alphanum = _alphanum - for i in range(len(pattern)): - c = pattern[i] + for i, c in enumerate(pattern): if c not in alphanum: if c == "\000": s[i] = "\\000" diff --git a/lib-python/2.7/shutil.py b/lib-python/2.7/shutil.py --- a/lib-python/2.7/shutil.py +++ b/lib-python/2.7/shutil.py @@ -277,6 +277,12 @@ """ real_dst = dst if os.path.isdir(dst): + if _samefile(src, dst): + # We might be on a case insensitive filesystem, + # perform the rename anyway. + os.rename(src, dst) + return + real_dst = os.path.join(dst, _basename(src)) if os.path.exists(real_dst): raise Error, "Destination path '%s' already exists" % real_dst @@ -336,7 +342,7 @@ archive that is being built. If not provided, the current owner and group will be used. - The output tar file will be named 'base_dir' + ".tar", possibly plus + The output tar file will be named 'base_name' + ".tar", possibly plus the appropriate compression extension (".gz", or ".bz2"). Returns the output filename. @@ -406,7 +412,7 @@ def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None): """Create a zip file from all the files under 'base_dir'. - The output zip file will be named 'base_dir' + ".zip". Uses either the + The output zip file will be named 'base_name' + ".zip". Uses either the "zipfile" Python module (if available) or the InfoZIP "zip" utility (if installed and found on the default search path). If neither tool is available, raises ExecError. 
Returns the name of the output zip diff --git a/lib-python/2.7/site.py b/lib-python/2.7/site.py --- a/lib-python/2.7/site.py +++ b/lib-python/2.7/site.py @@ -61,6 +61,7 @@ import sys import os import __builtin__ +import traceback # Prefixes for site-packages; add additional prefixes like /usr/local here PREFIXES = [sys.prefix, sys.exec_prefix] @@ -155,17 +156,26 @@ except IOError: return with f: - for line in f: + for n, line in enumerate(f): if line.startswith("#"): continue - if line.startswith(("import ", "import\t")): - exec line - continue - line = line.rstrip() - dir, dircase = makepath(sitedir, line) - if not dircase in known_paths and os.path.exists(dir): - sys.path.append(dir) - known_paths.add(dircase) + try: + if line.startswith(("import ", "import\t")): + exec line + continue + line = line.rstrip() + dir, dircase = makepath(sitedir, line) + if not dircase in known_paths and os.path.exists(dir): + sys.path.append(dir) + known_paths.add(dircase) + except Exception as err: + print >>sys.stderr, "Error processing line {:d} of {}:\n".format( + n+1, fullname) + for record in traceback.format_exception(*sys.exc_info()): + for line in record.splitlines(): + print >>sys.stderr, ' '+line + print >>sys.stderr, "\nRemainder of file ignored" + break if reset: known_paths = None return known_paths diff --git a/lib-python/2.7/smtplib.py b/lib-python/2.7/smtplib.py --- a/lib-python/2.7/smtplib.py +++ b/lib-python/2.7/smtplib.py @@ -49,17 +49,18 @@ from email.base64mime import encode as encode_base64 from sys import stderr -__all__ = ["SMTPException","SMTPServerDisconnected","SMTPResponseException", - "SMTPSenderRefused","SMTPRecipientsRefused","SMTPDataError", - "SMTPConnectError","SMTPHeloError","SMTPAuthenticationError", - "quoteaddr","quotedata","SMTP"] +__all__ = ["SMTPException", "SMTPServerDisconnected", "SMTPResponseException", + "SMTPSenderRefused", "SMTPRecipientsRefused", "SMTPDataError", + "SMTPConnectError", "SMTPHeloError", "SMTPAuthenticationError", + 
"quoteaddr", "quotedata", "SMTP"] SMTP_PORT = 25 SMTP_SSL_PORT = 465 -CRLF="\r\n" +CRLF = "\r\n" OLDSTYLE_AUTH = re.compile(r"auth=(.*)", re.I) + # Exception classes used by this module. class SMTPException(Exception): """Base class for all exceptions raised by this module.""" @@ -109,7 +110,7 @@ def __init__(self, recipients): self.recipients = recipients - self.args = ( recipients,) + self.args = (recipients,) class SMTPDataError(SMTPResponseException): @@ -128,6 +129,7 @@ combination provided. """ + def quoteaddr(addr): """Quote a subset of the email addresses defined by RFC 821. @@ -138,7 +140,7 @@ m = email.utils.parseaddr(addr)[1] except AttributeError: pass - if m == (None, None): # Indicates parse failure or AttributeError + if m == (None, None): # Indicates parse failure or AttributeError # something weird here.. punt -ddm return "<%s>" % addr elif m is None: @@ -175,7 +177,8 @@ chr = None while chr != "\n": chr = self.sslobj.read(1) - if not chr: break + if not chr: + break str += chr return str @@ -219,6 +222,7 @@ ehlo_msg = "ehlo" ehlo_resp = None does_esmtp = 0 + default_port = SMTP_PORT def __init__(self, host='', port=0, local_hostname=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT): @@ -234,7 +238,6 @@ """ self.timeout = timeout self.esmtp_features = {} - self.default_port = SMTP_PORT if host: (code, msg) = self.connect(host, port) if code != 220: @@ -269,10 +272,11 @@ def _get_socket(self, port, host, timeout): # This makes it simpler for SMTP_SSL to use the SMTP connect code # and just alter the socket connection bit. - if self.debuglevel > 0: print>>stderr, 'connect:', (host, port) + if self.debuglevel > 0: + print>>stderr, 'connect:', (host, port) return socket.create_connection((port, host), timeout) - def connect(self, host='localhost', port = 0): + def connect(self, host='localhost', port=0): """Connect to a host on a given port. 
If the hostname ends with a colon (`:') followed by a number, and @@ -286,20 +290,25 @@ if not port and (host.find(':') == host.rfind(':')): i = host.rfind(':') if i >= 0: - host, port = host[:i], host[i+1:] - try: port = int(port) + host, port = host[:i], host[i + 1:] + try: + port = int(port) except ValueError: raise socket.error, "nonnumeric port" - if not port: port = self.default_port - if self.debuglevel > 0: print>>stderr, 'connect:', (host, port) + if not port: + port = self.default_port + if self.debuglevel > 0: + print>>stderr, 'connect:', (host, port) self.sock = self._get_socket(host, port, self.timeout) (code, msg) = self.getreply() - if self.debuglevel > 0: print>>stderr, "connect:", msg + if self.debuglevel > 0: + print>>stderr, "connect:", msg return (code, msg) def send(self, str): """Send `str' to the server.""" - if self.debuglevel > 0: print>>stderr, 'send:', repr(str) + if self.debuglevel > 0: + print>>stderr, 'send:', repr(str) if hasattr(self, 'sock') and self.sock: try: self.sock.sendall(str) @@ -330,7 +339,7 @@ Raises SMTPServerDisconnected if end-of-file is reached. """ - resp=[] + resp = [] if self.file is None: self.file = self.sock.makefile('rb') while 1: @@ -341,9 +350,10 @@ if line == '': self.close() raise SMTPServerDisconnected("Connection unexpectedly closed") - if self.debuglevel > 0: print>>stderr, 'reply:', repr(line) + if self.debuglevel > 0: + print>>stderr, 'reply:', repr(line) resp.append(line[4:].strip()) - code=line[:3] + code = line[:3] # Check that the error code is syntactically correct. # Don't attempt to read a continuation line if it is broken. try: @@ -352,17 +362,17 @@ errcode = -1 break # Check if multiline response. 
- if line[3:4]!="-": + if line[3:4] != "-": break errmsg = "\n".join(resp) if self.debuglevel > 0: - print>>stderr, 'reply: retcode (%s); Msg: %s' % (errcode,errmsg) + print>>stderr, 'reply: retcode (%s); Msg: %s' % (errcode, errmsg) return errcode, errmsg def docmd(self, cmd, args=""): """Send a command, and return its response code.""" - self.putcmd(cmd,args) + self.putcmd(cmd, args) return self.getreply() # std smtp commands @@ -372,9 +382,9 @@ host. """ self.putcmd("helo", name or self.local_hostname) - (code,msg)=self.getreply() - self.helo_resp=msg - return (code,msg) + (code, msg) = self.getreply() + self.helo_resp = msg + return (code, msg) def ehlo(self, name=''): """ SMTP 'ehlo' command. @@ -383,19 +393,19 @@ """ self.esmtp_features = {} self.putcmd(self.ehlo_msg, name or self.local_hostname) - (code,msg)=self.getreply() + (code, msg) = self.getreply() # According to RFC1869 some (badly written) # MTA's will disconnect on an ehlo. Toss an exception if # that happens -ddm if code == -1 and len(msg) == 0: self.close() raise SMTPServerDisconnected("Server not connected") - self.ehlo_resp=msg + self.ehlo_resp = msg if code != 250: - return (code,msg) - self.does_esmtp=1 + return (code, msg) + self.does_esmtp = 1 #parse the ehlo response -ddm - resp=self.ehlo_resp.split('\n') + resp = self.ehlo_resp.split('\n') del resp[0] for each in resp: # To be able to communicate with as many SMTP servers as possible, @@ -415,16 +425,16 @@ # It's actually stricter, in that only spaces are allowed between # parameters, but were not going to check for that here. Note # that the space isn't present if there are no parameters. 
- m=re.match(r'(?P[A-Za-z0-9][A-Za-z0-9\-]*) ?',each) + m = re.match(r'(?P[A-Za-z0-9][A-Za-z0-9\-]*) ?', each) if m: - feature=m.group("feature").lower() - params=m.string[m.end("feature"):].strip() + feature = m.group("feature").lower() + params = m.string[m.end("feature"):].strip() if feature == "auth": self.esmtp_features[feature] = self.esmtp_features.get(feature, "") \ + " " + params else: - self.esmtp_features[feature]=params - return (code,msg) + self.esmtp_features[feature] = params + return (code, msg) def has_extn(self, opt): """Does the server support a given SMTP service extension?""" @@ -444,23 +454,23 @@ """SMTP 'noop' command -- doesn't do anything :>""" return self.docmd("noop") - def mail(self,sender,options=[]): + def mail(self, sender, options=[]): """SMTP 'mail' command -- begins mail xfer session.""" optionlist = '' if options and self.does_esmtp: optionlist = ' ' + ' '.join(options) - self.putcmd("mail", "FROM:%s%s" % (quoteaddr(sender) ,optionlist)) + self.putcmd("mail", "FROM:%s%s" % (quoteaddr(sender), optionlist)) return self.getreply() - def rcpt(self,recip,options=[]): + def rcpt(self, recip, options=[]): """SMTP 'rcpt' command -- indicates 1 recipient for this mail.""" optionlist = '' if options and self.does_esmtp: optionlist = ' ' + ' '.join(options) - self.putcmd("rcpt","TO:%s%s" % (quoteaddr(recip),optionlist)) + self.putcmd("rcpt", "TO:%s%s" % (quoteaddr(recip), optionlist)) return self.getreply() - def data(self,msg): + def data(self, msg): """SMTP 'DATA' command -- sends message data to server. Automatically quotes lines beginning with a period per rfc821. @@ -469,26 +479,28 @@ response code received when the all data is sent. 
""" self.putcmd("data") - (code,repl)=self.getreply() - if self.debuglevel >0 : print>>stderr, "data:", (code,repl) + (code, repl) = self.getreply() + if self.debuglevel > 0: + print>>stderr, "data:", (code, repl) if code != 354: - raise SMTPDataError(code,repl) + raise SMTPDataError(code, repl) else: q = quotedata(msg) if q[-2:] != CRLF: q = q + CRLF q = q + "." + CRLF self.send(q) - (code,msg)=self.getreply() - if self.debuglevel >0 : print>>stderr, "data:", (code,msg) - return (code,msg) + (code, msg) = self.getreply() + if self.debuglevel > 0: + print>>stderr, "data:", (code, msg) + return (code, msg) def verify(self, address): """SMTP 'verify' command -- checks for address validity.""" self.putcmd("vrfy", quoteaddr(address)) return self.getreply() # a.k.a. - vrfy=verify + vrfy = verify def expn(self, address): """SMTP 'expn' command -- expands a mailing list.""" @@ -592,7 +604,7 @@ raise SMTPAuthenticationError(code, resp) return (code, resp) - def starttls(self, keyfile = None, certfile = None): + def starttls(self, keyfile=None, certfile=None): """Puts the connection to the SMTP server into TLS mode. 
If there has been no previous EHLO or HELO command this session, this @@ -695,22 +707,22 @@ for option in mail_options: esmtp_opts.append(option) - (code,resp) = self.mail(from_addr, esmtp_opts) + (code, resp) = self.mail(from_addr, esmtp_opts) if code != 250: self.rset() raise SMTPSenderRefused(code, resp, from_addr) - senderrs={} + senderrs = {} if isinstance(to_addrs, basestring): to_addrs = [to_addrs] for each in to_addrs: - (code,resp)=self.rcpt(each, rcpt_options) + (code, resp) = self.rcpt(each, rcpt_options) if (code != 250) and (code != 251): - senderrs[each]=(code,resp) - if len(senderrs)==len(to_addrs): + senderrs[each] = (code, resp) + if len(senderrs) == len(to_addrs): # the server refused all our recipients self.rset() raise SMTPRecipientsRefused(senderrs) - (code,resp) = self.data(msg) + (code, resp) = self.data(msg) if code != 250: self.rset() raise SMTPDataError(code, resp) @@ -744,16 +756,19 @@ are also optional - they can contain a PEM formatted private key and certificate chain file for the SSL connection. 
""" + + default_port = SMTP_SSL_PORT + def __init__(self, host='', port=0, local_hostname=None, keyfile=None, certfile=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT): self.keyfile = keyfile self.certfile = certfile SMTP.__init__(self, host, port, local_hostname, timeout) - self.default_port = SMTP_SSL_PORT def _get_socket(self, host, port, timeout): - if self.debuglevel > 0: print>>stderr, 'connect:', (host, port) + if self.debuglevel > 0: + print>>stderr, 'connect:', (host, port) new_socket = socket.create_connection((host, port), timeout) new_socket = ssl.wrap_socket(new_socket, self.keyfile, self.certfile) self.file = SSLFakeFile(new_socket) @@ -781,11 +796,11 @@ ehlo_msg = "lhlo" - def __init__(self, host = '', port = LMTP_PORT, local_hostname = None): + def __init__(self, host='', port=LMTP_PORT, local_hostname=None): """Initialize a new instance.""" SMTP.__init__(self, host, port, local_hostname) - def connect(self, host = 'localhost', port = 0): + def connect(self, host='localhost', port=0): """Connect to the LMTP daemon, on either a Unix or a TCP socket.""" if host[0] != '/': return SMTP.connect(self, host, port) @@ -795,13 +810,15 @@ self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) self.sock.connect(host) except socket.error, msg: - if self.debuglevel > 0: print>>stderr, 'connect fail:', host + if self.debuglevel > 0: + print>>stderr, 'connect fail:', host if self.sock: self.sock.close() self.sock = None raise socket.error, msg (code, msg) = self.getreply() - if self.debuglevel > 0: print>>stderr, "connect:", msg + if self.debuglevel > 0: + print>>stderr, "connect:", msg return (code, msg) @@ -815,7 +832,7 @@ return sys.stdin.readline().strip() fromaddr = prompt("From") - toaddrs = prompt("To").split(',') + toaddrs = prompt("To").split(',') print "Enter message, end with ^D:" msg = '' while 1: diff --git a/lib-python/2.7/ssl.py b/lib-python/2.7/ssl.py --- a/lib-python/2.7/ssl.py +++ b/lib-python/2.7/ssl.py @@ -121,9 +121,11 @@ if e.errno != 
errno.ENOTCONN: raise # no, no connection yet + self._connected = False self._sslobj = None else: # yes, create the SSL object + self._connected = True self._sslobj = _ssl.sslwrap(self._sock, server_side, keyfile, certfile, cert_reqs, ssl_version, ca_certs, @@ -293,21 +295,36 @@ self._sslobj.do_handshake() - def connect(self, addr): - - """Connects to remote ADDR, and then wraps the connection in - an SSL channel.""" - + def _real_connect(self, addr, return_errno): # Here we assume that the socket is client-side, and not # connected at the time of the call. We connect it, then wrap it. - if self._sslobj: + if self._connected: raise ValueError("attempt to connect already-connected SSLSocket!") - socket.connect(self, addr) self._sslobj = _ssl.sslwrap(self._sock, False, self.keyfile, self.certfile, self.cert_reqs, self.ssl_version, self.ca_certs, self.ciphers) - if self.do_handshake_on_connect: - self.do_handshake() + try: + socket.connect(self, addr) + if self.do_handshake_on_connect: + self.do_handshake() + except socket_error as e: + if return_errno: + return e.errno + else: + self._sslobj = None + raise e + self._connected = True + return 0 + + def connect(self, addr): + """Connects to remote ADDR, and then wraps the connection in + an SSL channel.""" + self._real_connect(addr, False) + + def connect_ex(self, addr): + """Connects to remote ADDR, and then wraps the connection in + an SSL channel.""" + return self._real_connect(addr, True) def accept(self): diff --git a/lib-python/2.7/subprocess.py b/lib-python/2.7/subprocess.py --- a/lib-python/2.7/subprocess.py +++ b/lib-python/2.7/subprocess.py @@ -396,6 +396,7 @@ import traceback import gc import signal +import errno # Exception classes used by this module. 
class CalledProcessError(Exception): @@ -427,7 +428,6 @@ else: import select _has_poll = hasattr(select, 'poll') - import errno import fcntl import pickle @@ -441,8 +441,15 @@ "check_output", "CalledProcessError"] if mswindows: - from _subprocess import CREATE_NEW_CONSOLE, CREATE_NEW_PROCESS_GROUP - __all__.extend(["CREATE_NEW_CONSOLE", "CREATE_NEW_PROCESS_GROUP"]) + from _subprocess import (CREATE_NEW_CONSOLE, CREATE_NEW_PROCESS_GROUP, + STD_INPUT_HANDLE, STD_OUTPUT_HANDLE, + STD_ERROR_HANDLE, SW_HIDE, + STARTF_USESTDHANDLES, STARTF_USESHOWWINDOW) + + __all__.extend(["CREATE_NEW_CONSOLE", "CREATE_NEW_PROCESS_GROUP", + "STD_INPUT_HANDLE", "STD_OUTPUT_HANDLE", + "STD_ERROR_HANDLE", "SW_HIDE", + "STARTF_USESTDHANDLES", "STARTF_USESHOWWINDOW"]) try: MAXFD = os.sysconf("SC_OPEN_MAX") except: @@ -726,7 +733,11 @@ stderr = None if self.stdin: if input: - self.stdin.write(input) + try: + self.stdin.write(input) + except IOError as e: + if e.errno != errno.EPIPE and e.errno != errno.EINVAL: + raise self.stdin.close() elif self.stdout: stdout = self.stdout.read() @@ -883,7 +894,7 @@ except pywintypes.error, e: # Translate pywintypes.error to WindowsError, which is # a subclass of OSError. FIXME: We should really - # translate errno using _sys_errlist (or simliar), but + # translate errno using _sys_errlist (or similar), but # how can this be done from Python? 
raise WindowsError(*e.args) finally: @@ -956,7 +967,11 @@ if self.stdin: if input is not None: - self.stdin.write(input) + try: + self.stdin.write(input) + except IOError as e: + if e.errno != errno.EPIPE: + raise self.stdin.close() if self.stdout: @@ -1051,14 +1066,17 @@ errread, errwrite) - def _set_cloexec_flag(self, fd): + def _set_cloexec_flag(self, fd, cloexec=True): try: cloexec_flag = fcntl.FD_CLOEXEC except AttributeError: cloexec_flag = 1 old = fcntl.fcntl(fd, fcntl.F_GETFD) - fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag) + if cloexec: + fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag) + else: + fcntl.fcntl(fd, fcntl.F_SETFD, old & ~cloexec_flag) def _close_fds(self, but): @@ -1128,21 +1146,25 @@ os.close(errpipe_read) # Dup fds for child - if p2cread is not None: - os.dup2(p2cread, 0) - if c2pwrite is not None: - os.dup2(c2pwrite, 1) - if errwrite is not None: - os.dup2(errwrite, 2) + def _dup2(a, b): + # dup2() removes the CLOEXEC flag but + # we must do it ourselves if dup2() + # would be a no-op (issue #10806). + if a == b: + self._set_cloexec_flag(a, False) + elif a is not None: + os.dup2(a, b) + _dup2(p2cread, 0) + _dup2(c2pwrite, 1) + _dup2(errwrite, 2) - # Close pipe fds. Make sure we don't close the same - # fd more than once, or standard fds. - if p2cread is not None and p2cread not in (0,): - os.close(p2cread) - if c2pwrite is not None and c2pwrite not in (p2cread, 1): - os.close(c2pwrite) - if errwrite is not None and errwrite not in (p2cread, c2pwrite, 2): - os.close(errwrite) + # Close pipe fds. Make sure we don't close the + # same fd more than once, or standard fds. 
+ closed = { None } + for fd in [p2cread, c2pwrite, errwrite]: + if fd not in closed and fd > 2: + os.close(fd) + closed.add(fd) # Close all other fds, if asked for if close_fds: @@ -1194,7 +1216,11 @@ os.close(errpipe_read) if data != "": - _eintr_retry_call(os.waitpid, self.pid, 0) + try: + _eintr_retry_call(os.waitpid, self.pid, 0) + except OSError as e: + if e.errno != errno.ECHILD: + raise child_exception = pickle.loads(data) for fd in (p2cwrite, c2pread, errread): if fd is not None: @@ -1240,7 +1266,15 @@ """Wait for child process to terminate. Returns returncode attribute.""" if self.returncode is None: - pid, sts = _eintr_retry_call(os.waitpid, self.pid, 0) + try: + pid, sts = _eintr_retry_call(os.waitpid, self.pid, 0) + except OSError as e: + if e.errno != errno.ECHILD: + raise + # This happens if SIGCLD is set to be ignored or waiting + # for child processes has otherwise been disabled for our + # process. This child is dead, we can't get the status. + sts = 0 self._handle_exitstatus(sts) return self.returncode @@ -1317,9 +1351,16 @@ for fd, mode in ready: if mode & select.POLLOUT: chunk = input[input_offset : input_offset + _PIPE_BUF] - input_offset += os.write(fd, chunk) - if input_offset >= len(input): - close_unregister_and_remove(fd) + try: + input_offset += os.write(fd, chunk) + except OSError as e: + if e.errno == errno.EPIPE: + close_unregister_and_remove(fd) + else: + raise + else: + if input_offset >= len(input): + close_unregister_and_remove(fd) elif mode & select_POLLIN_POLLPRI: data = os.read(fd, 4096) if not data: @@ -1358,11 +1399,19 @@ if self.stdin in wlist: chunk = input[input_offset : input_offset + _PIPE_BUF] - bytes_written = os.write(self.stdin.fileno(), chunk) - input_offset += bytes_written - if input_offset >= len(input): - self.stdin.close() - write_set.remove(self.stdin) + try: + bytes_written = os.write(self.stdin.fileno(), chunk) + except OSError as e: + if e.errno == errno.EPIPE: + self.stdin.close() + 
write_set.remove(self.stdin) + else: + raise + else: + input_offset += bytes_written + if input_offset >= len(input): + self.stdin.close() + write_set.remove(self.stdin) if self.stdout in rlist: data = os.read(self.stdout.fileno(), 1024) diff --git a/lib-python/2.7/symbol.py b/lib-python/2.7/symbol.py --- a/lib-python/2.7/symbol.py +++ b/lib-python/2.7/symbol.py @@ -82,20 +82,19 @@ sliceop = 325 exprlist = 326 testlist = 327 -dictmaker = 328 -dictorsetmaker = 329 -classdef = 330 -arglist = 331 -argument = 332 -list_iter = 333 -list_for = 334 -list_if = 335 -comp_iter = 336 -comp_for = 337 -comp_if = 338 -testlist1 = 339 -encoding_decl = 340 -yield_expr = 341 +dictorsetmaker = 328 +classdef = 329 +arglist = 330 +argument = 331 +list_iter = 332 +list_for = 333 +list_if = 334 +comp_iter = 335 +comp_for = 336 +comp_if = 337 +testlist1 = 338 +encoding_decl = 339 +yield_expr = 340 #--end constants-- sym_name = {} diff --git a/lib-python/2.7/sysconfig.py b/lib-python/2.7/sysconfig.py --- a/lib-python/2.7/sysconfig.py +++ b/lib-python/2.7/sysconfig.py @@ -271,7 +271,7 @@ def _get_makefile_filename(): if _PYTHON_BUILD: return os.path.join(_PROJECT_BASE, "Makefile") - return os.path.join(get_path('stdlib'), "config", "Makefile") + return os.path.join(get_path('platstdlib'), "config", "Makefile") def _init_posix(vars): @@ -297,21 +297,6 @@ msg = msg + " (%s)" % e.strerror raise IOError(msg) - # On MacOSX we need to check the setting of the environment variable - # MACOSX_DEPLOYMENT_TARGET: configure bases some choices on it so - # it needs to be compatible. 
- # If it isn't set we set it to the configure-time value - if sys.platform == 'darwin' and 'MACOSX_DEPLOYMENT_TARGET' in vars: - cfg_target = vars['MACOSX_DEPLOYMENT_TARGET'] - cur_target = os.getenv('MACOSX_DEPLOYMENT_TARGET', '') - if cur_target == '': - cur_target = cfg_target - os.putenv('MACOSX_DEPLOYMENT_TARGET', cfg_target) - elif map(int, cfg_target.split('.')) > map(int, cur_target.split('.')): - msg = ('$MACOSX_DEPLOYMENT_TARGET mismatch: now "%s" but "%s" ' - 'during configure' % (cur_target, cfg_target)) - raise IOError(msg) - # On AIX, there are wrong paths to the linker scripts in the Makefile # -- these paths are relative to the Python source, but when installed # the scripts are in another directory. @@ -616,9 +601,7 @@ # machine is going to compile and link as if it were # MACOSX_DEPLOYMENT_TARGET. cfgvars = get_config_vars() - macver = os.environ.get('MACOSX_DEPLOYMENT_TARGET') - if not macver: - macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET') + macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET') if 1: # Always calculate the release of the running machine, @@ -639,7 +622,6 @@ m = re.search( r'ProductUserVisibleVersion\s*' + r'(.*?)', f.read()) - f.close() if m is not None: macrelease = '.'.join(m.group(1).split('.')[:2]) # else: fall back to the default behaviour diff --git a/lib-python/2.7/tarfile.py b/lib-python/2.7/tarfile.py --- a/lib-python/2.7/tarfile.py +++ b/lib-python/2.7/tarfile.py @@ -2239,10 +2239,14 @@ if hasattr(os, "symlink") and hasattr(os, "link"): # For systems that support symbolic and hard links. if tarinfo.issym(): + if os.path.lexists(targetpath): + os.unlink(targetpath) os.symlink(tarinfo.linkname, targetpath) else: # See extract(). 
if os.path.exists(tarinfo._link_target): + if os.path.lexists(targetpath): + os.unlink(targetpath) os.link(tarinfo._link_target, targetpath) else: self._extract_member(self._find_link_target(tarinfo), targetpath) diff --git a/lib-python/2.7/telnetlib.py b/lib-python/2.7/telnetlib.py --- a/lib-python/2.7/telnetlib.py +++ b/lib-python/2.7/telnetlib.py @@ -236,7 +236,7 @@ """ if self.debuglevel > 0: - print 'Telnet(%s,%d):' % (self.host, self.port), + print 'Telnet(%s,%s):' % (self.host, self.port), if args: print msg % args else: diff --git a/lib-python/2.7/test/cjkencodings/big5-utf8.txt b/lib-python/2.7/test/cjkencodings/big5-utf8.txt new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/cjkencodings/big5-utf8.txt @@ -0,0 +1,9 @@ +如何在 Python 中使用既有的 C library? + 在資訊科技快速發展的今天, 開發及測試軟體的速度是不容忽視的 +課題. 為加快開發及測試的速度, 我們便常希望能利用一些已開發好的 +library, 並有一個 fast prototyping 的 programming language 可 +供使用. 目前有許許多多的 library 是以 C 寫成, 而 Python 是一個 +fast prototyping 的 programming language. 故我們希望能將既有的 +C library 拿到 Python 的環境中測試及整合. 其中最主要也是我們所 +要討論的問題就是: + diff --git a/lib-python/2.7/test/cjkencodings/big5.txt b/lib-python/2.7/test/cjkencodings/big5.txt new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/cjkencodings/big5.txt @@ -0,0 +1,9 @@ +�p��b Python ���ϥάJ���� C library? +�@�b��T��ާֳt�o�i������, �}�o�δ��ճn�骺�t�׬O���e������ +���D. ���[�ֶ}�o�δ��ժ��t��, �ڭ̫K�`�Ʊ��Q�Τ@�Ǥw�}�o�n�� +library, �æ��@�� fast prototyping �� programming language �i +�Ѩϥ�. �ثe���\�\�h�h�� library �O�H C �g��, �� Python �O�@�� +fast prototyping �� programming language. �G�ڭ̧Ʊ��N�J���� +C library ���� Python �����Ҥ����դξ�X. 
�䤤�̥D�n�]�O�ڭ̩� +�n�Q�ת����D�N�O: + diff --git a/lib-python/2.7/test/cjkencodings/big5hkscs-utf8.txt b/lib-python/2.7/test/cjkencodings/big5hkscs-utf8.txt new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/cjkencodings/big5hkscs-utf8.txt @@ -0,0 +1,2 @@ +𠄌Ě鵮罓洆 +ÊÊ̄ê êê̄ diff --git a/lib-python/2.7/test/cjkencodings/big5hkscs.txt b/lib-python/2.7/test/cjkencodings/big5hkscs.txt new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/cjkencodings/big5hkscs.txt @@ -0,0 +1,2 @@ +�E�\�s�ڍ� +�f�b�� ���� diff --git a/lib-python/2.7/test/cjkencodings/cp949-utf8.txt b/lib-python/2.7/test/cjkencodings/cp949-utf8.txt new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/cjkencodings/cp949-utf8.txt @@ -0,0 +1,9 @@ +똠방각하 펲시콜라 + +㉯㉯납!! 因九月패믤릔궈 ⓡⓖ훀¿¿¿ 긍뒙 ⓔ뎨 ㉯. . +亞영ⓔ능횹 . . . . 서울뤄 뎐학乙 家훀 ! ! !ㅠ.ㅠ +흐흐흐 ㄱㄱㄱ☆ㅠ_ㅠ 어릨 탸콰긐 뎌응 칑九들乙 ㉯드긐 +설릌 家훀 . . . . 굴애쉌 ⓔ궈 ⓡ릘㉱긐 因仁川女中까즼 +와쒀훀 ! ! 亞영ⓔ 家능궈 ☆上관 없능궈능 亞능뒈훀 글애듴 +ⓡ려듀九 싀풔숴훀 어릨 因仁川女中싁⑨들앜!! ㉯㉯납♡ ⌒⌒* + diff --git a/lib-python/2.7/test/cjkencodings/cp949.txt b/lib-python/2.7/test/cjkencodings/cp949.txt new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/cjkencodings/cp949.txt @@ -0,0 +1,9 @@ +�c�氢�� �����ݶ� + +������!! �������В�p�� �ި��R������ ���� �ѵ� ��. . +䬿��Ѵ��� . . . . ����� ������ ʫ�R ! ! !��.�� +������ �������٤�_�� � ����O ���� �h������ ����O +���j ʫ�R . . . . ���֚f �ѱ� �ސt�ƒO ���������� +�;��R ! ! 䬿��� ʫ�ɱ� ��߾�� ���ɱŴ� 䬴ɵ��R �۾֊� +�޷����� ��Ǵ���R � ����������Ĩ���!! 
�������� �ҡ�* + diff --git a/lib-python/2.7/test/cjkencodings/euc_jisx0213-utf8.txt b/lib-python/2.7/test/cjkencodings/euc_jisx0213-utf8.txt new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/cjkencodings/euc_jisx0213-utf8.txt @@ -0,0 +1,8 @@ +Python の開発は、1990 年ごろから開始されています。 +開発者の Guido van Rossum は教育用のプログラミング言語「ABC」の開発に参加していましたが、ABC は実用上の目的にはあまり適していませんでした。 +このため、Guido はより実用的なプログラミング言語の開発を開始し、英国 BBS 放送のコメディ番組「モンティ パイソン」のファンである Guido はこの言語を「Python」と名づけました。 +このような背景から生まれた Python の言語設計は、「シンプル」で「習得が容易」という目標に重点が置かれています。 +多くのスクリプト系言語ではユーザの目先の利便性を優先して色々な機能を言語要素として取り入れる場合が多いのですが、Python ではそういった小細工が追加されることはあまりありません。 +言語自体の機能は最小限に押さえ、必要な機能は拡張モジュールとして追加する、というのが Python のポリシーです。 + +ノか゚ ト゚ トキ喝塀 𡚴𪎌 麀齁𩛰 diff --git a/lib-python/2.7/test/cjkencodings/euc_jisx0213.txt b/lib-python/2.7/test/cjkencodings/euc_jisx0213.txt new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/cjkencodings/euc_jisx0213.txt @@ -0,0 +1,8 @@ +Python �γ�ȯ�ϡ�1990 ǯ�����鳫�Ϥ���Ƥ��ޤ��� +��ȯ�Ԥ� Guido van Rossum �϶����ѤΥץ���ߥ󥰸����ABC�פγ�ȯ�˻��ä��Ƥ��ޤ�������ABC �ϼ��Ѿ����Ū�ˤϤ��ޤ�Ŭ���Ƥ��ޤ���Ǥ����� +���Τ��ᡢGuido �Ϥ�����Ū�ʥץ���ߥ󥰸���γ�ȯ�򳫻Ϥ����ѹ� BBS �����Υ���ǥ����ȡ֥��ƥ� �ѥ�����פΥե���Ǥ��� Guido �Ϥ��θ�����Python�פ�̾�Ť��ޤ����� +���Τ褦���طʤ������ޤ줿 Python �θ����߷פϡ��֥���ץ�פǡֽ������ưספȤ�����ɸ�˽������֤���Ƥ��ޤ��� +¿���Υ�����ץȷϸ���Ǥϥ桼�����������������ͥ�褷�ƿ����ʵ�ǽ��������ǤȤ��Ƽ��������礬¿���ΤǤ�����Python �ǤϤ������ä����ٹ����ɲä���뤳�ȤϤ��ޤꤢ��ޤ��� +���켫�Τε�ǽ�ϺǾ��¤˲�������ɬ�פʵ�ǽ�ϳ�ĥ�⥸�塼��Ȥ����ɲä��롢�Ȥ����Τ� Python �Υݥꥷ���Ǥ��� + +�Τ� �� �ȥ����� ���� ��ԏ���� diff --git a/lib-python/2.7/test/cjkencodings/euc_jp-utf8.txt b/lib-python/2.7/test/cjkencodings/euc_jp-utf8.txt new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/cjkencodings/euc_jp-utf8.txt @@ -0,0 +1,7 @@ +Python の開発は、1990 年ごろから開始されています。 +開発者の Guido van Rossum は教育用のプログラミング言語「ABC」の開発に参加していましたが、ABC は実用上の目的にはあまり適していませんでした。 +このため、Guido はより実用的なプログラミング言語の開発を開始し、英国 BBS 放送のコメディ番組「モンティ パイソン」のファンである Guido はこの言語を「Python」と名づけました。 +このような背景から生まれた Python 
の言語設計は、「シンプル」で「習得が容易」という目標に重点が置かれています。 +多くのスクリプト系言語ではユーザの目先の利便性を優先して色々な機能を言語要素として取り入れる場合が多いのですが、Python ではそういった小細工が追加されることはあまりありません。 +言語自体の機能は最小限に押さえ、必要な機能は拡張モジュールとして追加する、というのが Python のポリシーです。 + diff --git a/lib-python/2.7/test/cjkencodings/euc_jp.txt b/lib-python/2.7/test/cjkencodings/euc_jp.txt new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/cjkencodings/euc_jp.txt @@ -0,0 +1,7 @@ +Python �γ�ȯ�ϡ�1990 ǯ�����鳫�Ϥ���Ƥ��ޤ��� +��ȯ�Ԥ� Guido van Rossum �϶����ѤΥץ���ߥ󥰸����ABC�פγ�ȯ�˻��ä��Ƥ��ޤ�������ABC �ϼ��Ѿ����Ū�ˤϤ��ޤ�Ŭ���Ƥ��ޤ���Ǥ����� +���Τ��ᡢGuido �Ϥ�����Ū�ʥץ���ߥ󥰸���γ�ȯ�򳫻Ϥ����ѹ� BBS �����Υ���ǥ����ȡ֥��ƥ� �ѥ�����פΥե���Ǥ��� Guido �Ϥ��θ�����Python�פ�̾�Ť��ޤ����� +���Τ褦���طʤ������ޤ줿 Python �θ����߷פϡ��֥���ץ�פǡֽ������ưספȤ�����ɸ�˽������֤���Ƥ��ޤ��� +¿���Υ�����ץȷϸ���Ǥϥ桼�����������������ͥ�褷�ƿ����ʵ�ǽ��������ǤȤ��Ƽ��������礬¿���ΤǤ�����Python �ǤϤ������ä����ٹ����ɲä���뤳�ȤϤ��ޤꤢ��ޤ��� +���켫�Τε�ǽ�ϺǾ��¤˲�������ɬ�פʵ�ǽ�ϳ�ĥ�⥸�塼��Ȥ����ɲä��롢�Ȥ����Τ� Python �Υݥꥷ���Ǥ��� + diff --git a/lib-python/2.7/test/cjkencodings/euc_kr-utf8.txt b/lib-python/2.7/test/cjkencodings/euc_kr-utf8.txt new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/cjkencodings/euc_kr-utf8.txt @@ -0,0 +1,7 @@ +◎ 파이썬(Python)은 배우기 쉽고, 강력한 프로그래밍 언어입니다. 파이썬은 +효율적인 고수준 데이터 구조와 간단하지만 효율적인 객체지향프로그래밍을 +지원합니다. 파이썬의 우아(優雅)한 문법과 동적 타이핑, 그리고 인터프리팅 +환경은 파이썬을 스크립팅과 여러 분야에서와 대부분의 플랫폼에서의 빠른 +애플리케이션 개발을 할 수 있는 이상적인 언어로 만들어줍니다. + +☆첫가끝: 날아라 쓔쓔쓩~ 닁큼! 뜽금없이 전홥니다. 뷁. 그런거 읎다. diff --git a/lib-python/2.7/test/cjkencodings/euc_kr.txt b/lib-python/2.7/test/cjkencodings/euc_kr.txt new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/cjkencodings/euc_kr.txt @@ -0,0 +1,7 @@ +�� ���̽�(Python)�� ���� ����, ������ ���α׷��� ����Դϴ�. ���̽��� +ȿ������ ����� ������ ������ ���������� ȿ������ ��ü�������α׷����� +�����մϴ�. ���̽��� ���(���)�� ������ ���� Ÿ����, �׸��� ���������� +ȯ���� ���̽��� ��ũ���ð� ���� �о߿����� ��κ��� �÷��������� ���� +���ø����̼� ������ �� �� �ִ� �̻����� ���� ������ݴϴ�. + +��ù����: ���ƶ� �Ԥ��ФԤԤ��ФԾ�~ �Ԥ��Ҥ�ŭ! 
�Ԥ��Ѥ��ݾ��� ���Ԥ��Ȥ��ϴ�. �Ԥ��Τ�. �׷��� �Ԥ��Ѥ���. diff --git a/lib-python/2.7/test/cjkencodings/gb18030-utf8.txt b/lib-python/2.7/test/cjkencodings/gb18030-utf8.txt new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/cjkencodings/gb18030-utf8.txt @@ -0,0 +1,15 @@ +Python(派森)语言是一种功能强大而完善的通用型计算机程序设计语言, +已经具有十多年的发展历史,成熟且稳定。这种语言具有非常简捷而清晰 +的语法特点,适合完成各种高层任务,几乎可以在所有的操作系统中 +运行。这种语言简单而强大,适合各种人士学习使用。目前,基于这 +种语言的相关技术正在飞速的发展,用户数量急剧扩大,相关的资源非常多。 +如何在 Python 中使用既有的 C library? + 在資訊科技快速發展的今天, 開發及測試軟體的速度是不容忽視的 +課題. 為加快開發及測試的速度, 我們便常希望能利用一些已開發好的 +library, 並有一個 fast prototyping 的 programming language 可 +供使用. 目前有許許多多的 library 是以 C 寫成, 而 Python 是一個 +fast prototyping 的 programming language. 故我們希望能將既有的 +C library 拿到 Python 的環境中測試及整合. 其中最主要也是我們所 +要討論的問題就是: +파이썬은 강력한 기능을 지닌 범용 컴퓨터 프로그래밍 언어다. + diff --git a/lib-python/2.7/test/cjkencodings/gb18030.txt b/lib-python/2.7/test/cjkencodings/gb18030.txt new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/cjkencodings/gb18030.txt @@ -0,0 +1,15 @@ +Python����ɭ��������һ�ֹ���ǿ������Ƶ�ͨ���ͼ��������������ԣ� +�Ѿ�����ʮ����ķ�չ��ʷ���������ȶ����������Ծ��зdz���ݶ����� +���﷨�ص㣬�ʺ���ɸ��ָ߲����񣬼������������еIJ���ϵͳ�� +���С��������Լ򵥶�ǿ���ʺϸ�����ʿѧϰʹ�á�Ŀǰ�������� +�����Ե���ؼ������ڷ��ٵķ�չ���û���������������ص���Դ�dz��ࡣ +����� Python ��ʹ�ü��е� C library? +�����YӍ�Ƽ����ٰlչ�Ľ���, �_�l���yԇܛ�w���ٶ��Dz��ݺ�ҕ�� +�n�}. ��ӿ��_�l���yԇ���ٶ�, �҂��㳣ϣ��������һЩ���_�l�õ� +library, �K��һ�� fast prototyping �� programming language �� +��ʹ��. Ŀǰ���S�S���� library ���� C ����, �� Python ��һ�� +fast prototyping �� programming language. ���҂�ϣ���܌����е� +C library �õ� Python �ĭh���Мyԇ������. ��������ҪҲ���҂��� +ҪӑՓ�Ć��}����: +�5�1�3�3�2�1�3�1 �7�6�0�4�6�3 �8�5�8�6�3�5 �3�1�9�5 �0�9�3�0 �4�3�5�7�5�5 �5�5�0�9�8�9�9�3�0�4 �2�9�2�5�9�9. 
+ diff --git a/lib-python/2.7/test/cjkencodings/gb2312-utf8.txt b/lib-python/2.7/test/cjkencodings/gb2312-utf8.txt new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/cjkencodings/gb2312-utf8.txt @@ -0,0 +1,6 @@ +Python(派森)语言是一种功能强大而完善的通用型计算机程序设计语言, +已经具有十多年的发展历史,成熟且稳定。这种语言具有非常简捷而清晰 +的语法特点,适合完成各种高层任务,几乎可以在所有的操作系统中 +运行。这种语言简单而强大,适合各种人士学习使用。目前,基于这 +种语言的相关技术正在飞速的发展,用户数量急剧扩大,相关的资源非常多。 + diff --git a/lib-python/2.7/test/cjkencodings/gb2312.txt b/lib-python/2.7/test/cjkencodings/gb2312.txt new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/cjkencodings/gb2312.txt @@ -0,0 +1,6 @@ +Python����ɭ��������һ�ֹ���ǿ������Ƶ�ͨ���ͼ��������������ԣ� +�Ѿ�����ʮ����ķ�չ��ʷ���������ȶ����������Ծ��зdz���ݶ����� +���﷨�ص㣬�ʺ���ɸ��ָ߲����񣬼������������еIJ���ϵͳ�� +���С��������Լ򵥶�ǿ���ʺϸ�����ʿѧϰʹ�á�Ŀǰ�������� +�����Ե���ؼ������ڷ��ٵķ�չ���û���������������ص���Դ�dz��ࡣ + diff --git a/lib-python/2.7/test/cjkencodings/gbk-utf8.txt b/lib-python/2.7/test/cjkencodings/gbk-utf8.txt new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/cjkencodings/gbk-utf8.txt @@ -0,0 +1,14 @@ +Python(派森)语言是一种功能强大而完善的通用型计算机程序设计语言, +已经具有十多年的发展历史,成熟且稳定。这种语言具有非常简捷而清晰 +的语法特点,适合完成各种高层任务,几乎可以在所有的操作系统中 +运行。这种语言简单而强大,适合各种人士学习使用。目前,基于这 +种语言的相关技术正在飞速的发展,用户数量急剧扩大,相关的资源非常多。 +如何在 Python 中使用既有的 C library? + 在資訊科技快速發展的今天, 開發及測試軟體的速度是不容忽視的 +課題. 為加快開發及測試的速度, 我們便常希望能利用一些已開發好的 +library, 並有一個 fast prototyping 的 programming language 可 +供使用. 目前有許許多多的 library 是以 C 寫成, 而 Python 是一個 +fast prototyping 的 programming language. 故我們希望能將既有的 +C library 拿到 Python 的環境中測試及整合. 
其中最主要也是我們所 +要討論的問題就是: + diff --git a/lib-python/2.7/test/cjkencodings/gbk.txt b/lib-python/2.7/test/cjkencodings/gbk.txt new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/cjkencodings/gbk.txt @@ -0,0 +1,14 @@ +Python����ɭ��������һ�ֹ���ǿ������Ƶ�ͨ���ͼ��������������ԣ� +�Ѿ�����ʮ����ķ�չ��ʷ���������ȶ����������Ծ��зdz���ݶ����� +���﷨�ص㣬�ʺ���ɸ��ָ߲����񣬼������������еIJ���ϵͳ�� +���С��������Լ򵥶�ǿ���ʺϸ�����ʿѧϰʹ�á�Ŀǰ�������� +�����Ե���ؼ������ڷ��ٵķ�չ���û���������������ص���Դ�dz��ࡣ +����� Python ��ʹ�ü��е� C library? +�����YӍ�Ƽ����ٰlչ�Ľ���, �_�l���yԇܛ�w���ٶ��Dz��ݺ�ҕ�� +�n�}. ��ӿ��_�l���yԇ���ٶ�, �҂��㳣ϣ��������һЩ���_�l�õ� +library, �K��һ�� fast prototyping �� programming language �� +��ʹ��. Ŀǰ���S�S���� library ���� C ����, �� Python ��һ�� +fast prototyping �� programming language. ���҂�ϣ���܌����е� +C library �õ� Python �ĭh���Мyԇ������. ��������ҪҲ���҂��� +ҪӑՓ�Ć��}����: + diff --git a/lib-python/2.7/test/cjkencodings/hz-utf8.txt b/lib-python/2.7/test/cjkencodings/hz-utf8.txt new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/cjkencodings/hz-utf8.txt @@ -0,0 +1,2 @@ +This sentence is in ASCII. +The next sentence is in GB.己所不欲,勿施於人。Bye. diff --git a/lib-python/2.7/test/cjkencodings/hz.txt b/lib-python/2.7/test/cjkencodings/hz.txt new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/cjkencodings/hz.txt @@ -0,0 +1,2 @@ +This sentence is in ASCII. +The next sentence is in GB.~{<:Ky2;S{#,NpJ)l6HK!#~}Bye. diff --git a/lib-python/2.7/test/cjkencodings/johab-utf8.txt b/lib-python/2.7/test/cjkencodings/johab-utf8.txt new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/cjkencodings/johab-utf8.txt @@ -0,0 +1,9 @@ +똠방각하 펲시콜라 + +㉯㉯납!! 因九月패믤릔궈 ⓡⓖ훀¿¿¿ 긍뒙 ⓔ뎨 ㉯. . +亞영ⓔ능횹 . . . . 서울뤄 뎐학乙 家훀 ! ! !ㅠ.ㅠ +흐흐흐 ㄱㄱㄱ☆ㅠ_ㅠ 어릨 탸콰긐 뎌응 칑九들乙 ㉯드긐 +설릌 家훀 . . . . 굴애쉌 ⓔ궈 ⓡ릘㉱긐 因仁川女中까즼 +와쒀훀 ! ! 亞영ⓔ 家능궈 ☆上관 없능궈능 亞능뒈훀 글애듴 +ⓡ려듀九 싀풔숴훀 어릨 因仁川女中싁⑨들앜!! 
㉯㉯납♡ ⌒⌒* + diff --git a/lib-python/2.7/test/cjkencodings/johab.txt b/lib-python/2.7/test/cjkencodings/johab.txt new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/cjkencodings/johab.txt @@ -0,0 +1,9 @@ +���w�b�a �\��ũ�a + +�����s!! �g��Ú������ �����zٯٯٯ �w�� �ѕ� ��. . +�<�w�ѓw�s . . . . �ᶉ�� �e�b�� �;�z ! ! !�A.�A +�a�a�a �A�A�A�i�A_�A �៚ ȡ���z �a�w ×✗i�� ���a�z +��z �;�z . . . . ������ �ъ� �ޟ��‹z �g�b�I����a�� +�����z ! ! �<�w�� �;�w�� �i꾉� ���w���w �<�w���z �i���z +�ޝa�A� ��Ρ���z �៚ �g�b�I���鯂��i�z!! �����sٽ �b�b* + diff --git a/lib-python/2.7/test/cjkencodings/shift_jis-utf8.txt b/lib-python/2.7/test/cjkencodings/shift_jis-utf8.txt new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/cjkencodings/shift_jis-utf8.txt @@ -0,0 +1,7 @@ +Python の開発は、1990 年ごろから開始されています。 +開発者の Guido van Rossum は教育用のプログラミング言語「ABC」の開発に参加していましたが、ABC は実用上の目的にはあまり適していませんでした。 +このため、Guido はより実用的なプログラミング言語の開発を開始し、英国 BBS 放送のコメディ番組「モンティ パイソン」のファンである Guido はこの言語を「Python」と名づけました。 +このような背景から生まれた Python の言語設計は、「シンプル」で「習得が容易」という目標に重点が置かれています。 +多くのスクリプト系言語ではユーザの目先の利便性を優先して色々な機能を言語要素として取り入れる場合が多いのですが、Python ではそういった小細工が追加されることはあまりありません。 +言語自体の機能は最小限に押さえ、必要な機能は拡張モジュールとして追加する、というのが Python のポリシーです。 + diff --git a/lib-python/2.7/test/cjkencodings/shift_jis.txt b/lib-python/2.7/test/cjkencodings/shift_jis.txt new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/cjkencodings/shift_jis.txt @@ -0,0 +1,7 @@ +Python �̊J���́A1990 �N���납��J�n����Ă��܂��B +�J���҂� Guido van Rossum �͋���p�̃v���O���~���O����uABC�v�̊J���ɎQ�����Ă��܂������AABC �͎��p��̖ړI�ɂ͂��܂�K���Ă��܂���ł����B +���̂��߁AGuido �͂����p�I�ȃv���O���~���O����̊J�����J�n���A�p�� BBS �����̃R���f�B�ԑg�u�����e�B �p�C�\���v�̃t�@���ł��� Guido �͂��̌�����uPython�v�Ɩ��Â��܂����B +���̂悤�Ȕw�i���琶�܂ꂽ Python �̌���݌v�́A�u�V���v���v�Łu�K�����e�Ձv�Ƃ����ڕW�ɏd�_���u����Ă��܂��B +�����̃X�N���v�g�n����ł̓��[�U�̖ڐ�̗��֐���D�悵�ĐF�X�ȋ@�\������v�f�Ƃ��Ď������ꍇ�������̂ł����APython �ł͂������������׍H���lj�����邱�Ƃ͂��܂肠��܂���B 
+���ꎩ�̂̋@�\�͍ŏ����ɉ������A�K�v�ȋ@�\�͊g�����W���[���Ƃ��Ēlj�����A�Ƃ����̂� Python �̃|���V�[�ł��B + diff --git a/lib-python/2.7/test/cjkencodings/shift_jisx0213-utf8.txt b/lib-python/2.7/test/cjkencodings/shift_jisx0213-utf8.txt new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/cjkencodings/shift_jisx0213-utf8.txt @@ -0,0 +1,8 @@ +Python の開発は、1990 年ごろから開始されています。 +開発者の Guido van Rossum は教育用のプログラミング言語「ABC」の開発に参加していましたが、ABC は実用上の目的にはあまり適していませんでした。 +このため、Guido はより実用的なプログラミング言語の開発を開始し、英国 BBS 放送のコメディ番組「モンティ パイソン」のファンである Guido はこの言語を「Python」と名づけました。 +このような背景から生まれた Python の言語設計は、「シンプル」で「習得が容易」という目標に重点が置かれています。 +多くのスクリプト系言語ではユーザの目先の利便性を優先して色々な機能を言語要素として取り入れる場合が多いのですが、Python ではそういった小細工が追加されることはあまりありません。 +言語自体の機能は最小限に押さえ、必要な機能は拡張モジュールとして追加する、というのが Python のポリシーです。 + +ノか゚ ト゚ トキ喝塀 𡚴𪎌 麀齁𩛰 diff --git a/lib-python/2.7/test/cjkencodings/shift_jisx0213.txt b/lib-python/2.7/test/cjkencodings/shift_jisx0213.txt new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/cjkencodings/shift_jisx0213.txt @@ -0,0 +1,8 @@ +Python �̊J���́A1990 �N���납��J�n����Ă��܂��B +�J���҂� Guido van Rossum �͋���p�̃v���O���~���O����uABC�v�̊J���ɎQ�����Ă��܂������AABC �͎��p��̖ړI�ɂ͂��܂�K���Ă��܂���ł����B +���̂��߁AGuido �͂����p�I�ȃv���O���~���O����̊J�����J�n���A�p�� BBS �����̃R���f�B�ԑg�u�����e�B �p�C�\���v�̃t�@���ł��� Guido �͂��̌�����uPython�v�Ɩ��Â��܂����B +���̂悤�Ȕw�i���琶�܂ꂽ Python �̌���݌v�́A�u�V���v���v�Łu�K�����e�Ձv�Ƃ����ڕW�ɏd�_���u����Ă��܂��B +�����̃X�N���v�g�n����ł̓��[�U�̖ڐ�̗��֐���D�悵�ĐF�X�ȋ@�\������v�f�Ƃ��Ď������ꍇ�������̂ł����APython �ł͂������������׍H���lj�����邱�Ƃ͂��܂肠��܂���B +���ꎩ�̂̋@�\�͍ŏ����ɉ������A�K�v�ȋ@�\�͊g�����W���[���Ƃ��Ēlj�����A�Ƃ����̂� Python �̃|���V�[�ł��B + +�m�� �� �g�L�K�y ���� ������ diff --git a/lib-python/2.7/test/cjkencodings_test.py b/lib-python/2.7/test/cjkencodings_test.py deleted file mode 100644 --- a/lib-python/2.7/test/cjkencodings_test.py +++ /dev/null @@ -1,1019 +0,0 @@ -teststring = { -'big5': ( -"\xa6\x70\xa6\xf3\xa6\x62\x20\x50\x79\x74\x68\x6f\x6e\x20\xa4\xa4" 
-"\xa8\xcf\xa5\xce\xac\x4a\xa6\xb3\xaa\xba\x20\x43\x20\x6c\x69\x62" -"\x72\x61\x72\x79\x3f\x0a\xa1\x40\xa6\x62\xb8\xea\xb0\x54\xac\xec" -"\xa7\xde\xa7\xd6\xb3\x74\xb5\x6f\xae\x69\xaa\xba\xa4\xb5\xa4\xd1" -"\x2c\x20\xb6\x7d\xb5\x6f\xa4\xce\xb4\xfa\xb8\xd5\xb3\x6e\xc5\xe9" -"\xaa\xba\xb3\x74\xab\xd7\xac\x4f\xa4\xa3\xae\x65\xa9\xbf\xb5\xf8" -"\xaa\xba\x0a\xbd\xd2\xc3\x44\x2e\x20\xac\xb0\xa5\x5b\xa7\xd6\xb6" -"\x7d\xb5\x6f\xa4\xce\xb4\xfa\xb8\xd5\xaa\xba\xb3\x74\xab\xd7\x2c" -"\x20\xa7\xda\xad\xcc\xab\x4b\xb1\x60\xa7\xc6\xb1\xe6\xaf\xe0\xa7" -"\x51\xa5\xce\xa4\x40\xa8\xc7\xa4\x77\xb6\x7d\xb5\x6f\xa6\x6e\xaa" -"\xba\x0a\x6c\x69\x62\x72\x61\x72\x79\x2c\x20\xa8\xc3\xa6\xb3\xa4" -"\x40\xad\xd3\x20\x66\x61\x73\x74\x20\x70\x72\x6f\x74\x6f\x74\x79" -"\x70\x69\x6e\x67\x20\xaa\xba\x20\x70\x72\x6f\x67\x72\x61\x6d\x6d" -"\x69\x6e\x67\x20\x6c\x61\x6e\x67\x75\x61\x67\x65\x20\xa5\x69\x0a" -"\xa8\xd1\xa8\xcf\xa5\xce\x2e\x20\xa5\xd8\xab\x65\xa6\xb3\xb3\x5c" -"\xb3\x5c\xa6\x68\xa6\x68\xaa\xba\x20\x6c\x69\x62\x72\x61\x72\x79" -"\x20\xac\x4f\xa5\x48\x20\x43\x20\xbc\x67\xa6\xa8\x2c\x20\xa6\xd3" -"\x20\x50\x79\x74\x68\x6f\x6e\x20\xac\x4f\xa4\x40\xad\xd3\x0a\x66" -"\x61\x73\x74\x20\x70\x72\x6f\x74\x6f\x74\x79\x70\x69\x6e\x67\x20" -"\xaa\xba\x20\x70\x72\x6f\x67\x72\x61\x6d\x6d\x69\x6e\x67\x20\x6c" -"\x61\x6e\x67\x75\x61\x67\x65\x2e\x20\xac\x47\xa7\xda\xad\xcc\xa7" -"\xc6\xb1\xe6\xaf\xe0\xb1\x4e\xac\x4a\xa6\xb3\xaa\xba\x0a\x43\x20" -"\x6c\x69\x62\x72\x61\x72\x79\x20\xae\xb3\xa8\xec\x20\x50\x79\x74" -"\x68\x6f\x6e\x20\xaa\xba\xc0\xf4\xb9\xd2\xa4\xa4\xb4\xfa\xb8\xd5" -"\xa4\xce\xbe\xe3\xa6\x58\x2e\x20\xa8\xe4\xa4\xa4\xb3\xcc\xa5\x44" -"\xad\x6e\xa4\x5d\xac\x4f\xa7\xda\xad\xcc\xa9\xd2\x0a\xad\x6e\xb0" -"\x51\xbd\xd7\xaa\xba\xb0\xdd\xc3\x44\xb4\x4e\xac\x4f\x3a\x0a\x0a", -"\xe5\xa6\x82\xe4\xbd\x95\xe5\x9c\xa8\x20\x50\x79\x74\x68\x6f\x6e" -"\x20\xe4\xb8\xad\xe4\xbd\xbf\xe7\x94\xa8\xe6\x97\xa2\xe6\x9c\x89" -"\xe7\x9a\x84\x20\x43\x20\x6c\x69\x62\x72\x61\x72\x79\x3f\x0a\xe3" 
-"\x80\x80\xe5\x9c\xa8\xe8\xb3\x87\xe8\xa8\x8a\xe7\xa7\x91\xe6\x8a" -"\x80\xe5\xbf\xab\xe9\x80\x9f\xe7\x99\xbc\xe5\xb1\x95\xe7\x9a\x84" -"\xe4\xbb\x8a\xe5\xa4\xa9\x2c\x20\xe9\x96\x8b\xe7\x99\xbc\xe5\x8f" -"\x8a\xe6\xb8\xac\xe8\xa9\xa6\xe8\xbb\x9f\xe9\xab\x94\xe7\x9a\x84" -"\xe9\x80\x9f\xe5\xba\xa6\xe6\x98\xaf\xe4\xb8\x8d\xe5\xae\xb9\xe5" -"\xbf\xbd\xe8\xa6\x96\xe7\x9a\x84\x0a\xe8\xaa\xb2\xe9\xa1\x8c\x2e" -"\x20\xe7\x82\xba\xe5\x8a\xa0\xe5\xbf\xab\xe9\x96\x8b\xe7\x99\xbc" -"\xe5\x8f\x8a\xe6\xb8\xac\xe8\xa9\xa6\xe7\x9a\x84\xe9\x80\x9f\xe5" -"\xba\xa6\x2c\x20\xe6\x88\x91\xe5\x80\x91\xe4\xbe\xbf\xe5\xb8\xb8" -"\xe5\xb8\x8c\xe6\x9c\x9b\xe8\x83\xbd\xe5\x88\xa9\xe7\x94\xa8\xe4" -"\xb8\x80\xe4\xba\x9b\xe5\xb7\xb2\xe9\x96\x8b\xe7\x99\xbc\xe5\xa5" -"\xbd\xe7\x9a\x84\x0a\x6c\x69\x62\x72\x61\x72\x79\x2c\x20\xe4\xb8" -"\xa6\xe6\x9c\x89\xe4\xb8\x80\xe5\x80\x8b\x20\x66\x61\x73\x74\x20" -"\x70\x72\x6f\x74\x6f\x74\x79\x70\x69\x6e\x67\x20\xe7\x9a\x84\x20" -"\x70\x72\x6f\x67\x72\x61\x6d\x6d\x69\x6e\x67\x20\x6c\x61\x6e\x67" -"\x75\x61\x67\x65\x20\xe5\x8f\xaf\x0a\xe4\xbe\x9b\xe4\xbd\xbf\xe7" -"\x94\xa8\x2e\x20\xe7\x9b\xae\xe5\x89\x8d\xe6\x9c\x89\xe8\xa8\xb1" -"\xe8\xa8\xb1\xe5\xa4\x9a\xe5\xa4\x9a\xe7\x9a\x84\x20\x6c\x69\x62" -"\x72\x61\x72\x79\x20\xe6\x98\xaf\xe4\xbb\xa5\x20\x43\x20\xe5\xaf" -"\xab\xe6\x88\x90\x2c\x20\xe8\x80\x8c\x20\x50\x79\x74\x68\x6f\x6e" -"\x20\xe6\x98\xaf\xe4\xb8\x80\xe5\x80\x8b\x0a\x66\x61\x73\x74\x20" -"\x70\x72\x6f\x74\x6f\x74\x79\x70\x69\x6e\x67\x20\xe7\x9a\x84\x20" -"\x70\x72\x6f\x67\x72\x61\x6d\x6d\x69\x6e\x67\x20\x6c\x61\x6e\x67" -"\x75\x61\x67\x65\x2e\x20\xe6\x95\x85\xe6\x88\x91\xe5\x80\x91\xe5" -"\xb8\x8c\xe6\x9c\x9b\xe8\x83\xbd\xe5\xb0\x87\xe6\x97\xa2\xe6\x9c" -"\x89\xe7\x9a\x84\x0a\x43\x20\x6c\x69\x62\x72\x61\x72\x79\x20\xe6" -"\x8b\xbf\xe5\x88\xb0\x20\x50\x79\x74\x68\x6f\x6e\x20\xe7\x9a\x84" -"\xe7\x92\xb0\xe5\xa2\x83\xe4\xb8\xad\xe6\xb8\xac\xe8\xa9\xa6\xe5" -"\x8f\x8a\xe6\x95\xb4\xe5\x90\x88\x2e\x20\xe5\x85\xb6\xe4\xb8\xad" 
-"\xe6\x9c\x80\xe4\xb8\xbb\xe8\xa6\x81\xe4\xb9\x9f\xe6\x98\xaf\xe6" -"\x88\x91\xe5\x80\x91\xe6\x89\x80\x0a\xe8\xa6\x81\xe8\xa8\x8e\xe8" -"\xab\x96\xe7\x9a\x84\xe5\x95\x8f\xe9\xa1\x8c\xe5\xb0\xb1\xe6\x98" -"\xaf\x3a\x0a\x0a"), -'big5hkscs': ( -"\x88\x45\x88\x5c\x8a\x73\x8b\xda\x8d\xd8\x0a\x88\x66\x88\x62\x88" -"\xa7\x20\x88\xa7\x88\xa3\x0a", -"\xf0\xa0\x84\x8c\xc4\x9a\xe9\xb5\xae\xe7\xbd\x93\xe6\xb4\x86\x0a" -"\xc3\x8a\xc3\x8a\xcc\x84\xc3\xaa\x20\xc3\xaa\xc3\xaa\xcc\x84\x0a"), -'cp949': ( -"\x8c\x63\xb9\xe6\xb0\xa2\xc7\xcf\x20\xbc\x84\xbd\xc3\xc4\xdd\xb6" -"\xf3\x0a\x0a\xa8\xc0\xa8\xc0\xb3\xb3\x21\x21\x20\xec\xd7\xce\xfa" -"\xea\xc5\xc6\xd0\x92\xe6\x90\x70\xb1\xc5\x20\xa8\xde\xa8\xd3\xc4" -"\x52\xa2\xaf\xa2\xaf\xa2\xaf\x20\xb1\xe0\x8a\x96\x20\xa8\xd1\xb5" -"\xb3\x20\xa8\xc0\x2e\x20\x2e\x0a\xe4\xac\xbf\xb5\xa8\xd1\xb4\xc9" -"\xc8\xc2\x20\x2e\x20\x2e\x20\x2e\x20\x2e\x20\xbc\xad\xbf\xef\xb7" -"\xef\x20\xb5\xaf\xc7\xd0\xeb\xe0\x20\xca\xab\xc4\x52\x20\x21\x20" -"\x21\x20\x21\xa4\xd0\x2e\xa4\xd0\x0a\xc8\xe5\xc8\xe5\xc8\xe5\x20" -"\xa4\xa1\xa4\xa1\xa4\xa1\xa1\xd9\xa4\xd0\x5f\xa4\xd0\x20\xbe\xee" -"\x90\x8a\x20\xc5\xcb\xc4\xe2\x83\x4f\x20\xb5\xae\xc0\xc0\x20\xaf" -"\x68\xce\xfa\xb5\xe9\xeb\xe0\x20\xa8\xc0\xb5\xe5\x83\x4f\x0a\xbc" -"\xb3\x90\x6a\x20\xca\xab\xc4\x52\x20\x2e\x20\x2e\x20\x2e\x20\x2e" -"\x20\xb1\xbc\xbe\xd6\x9a\x66\x20\xa8\xd1\xb1\xc5\x20\xa8\xde\x90" -"\x74\xa8\xc2\x83\x4f\x20\xec\xd7\xec\xd2\xf4\xb9\xe5\xfc\xf1\xe9" -"\xb1\xee\xa3\x8e\x0a\xbf\xcd\xbe\xac\xc4\x52\x20\x21\x20\x21\x20" -"\xe4\xac\xbf\xb5\xa8\xd1\x20\xca\xab\xb4\xc9\xb1\xc5\x20\xa1\xd9" -"\xdf\xbe\xb0\xfc\x20\xbe\xf8\xb4\xc9\xb1\xc5\xb4\xc9\x20\xe4\xac" -"\xb4\xc9\xb5\xd8\xc4\x52\x20\xb1\xdb\xbe\xd6\x8a\xdb\x0a\xa8\xde" -"\xb7\xc1\xb5\xe0\xce\xfa\x20\x9a\xc3\xc7\xb4\xbd\xa4\xc4\x52\x20" -"\xbe\xee\x90\x8a\x20\xec\xd7\xec\xd2\xf4\xb9\xe5\xfc\xf1\xe9\x9a" -"\xc4\xa8\xef\xb5\xe9\x9d\xda\x21\x21\x20\xa8\xc0\xa8\xc0\xb3\xb3" -"\xa2\xbd\x20\xa1\xd2\xa1\xd2\x2a\x0a\x0a", 
-"\xeb\x98\xa0\xeb\xb0\xa9\xea\xb0\x81\xed\x95\x98\x20\xed\x8e\xb2" -"\xec\x8b\x9c\xec\xbd\x9c\xeb\x9d\xbc\x0a\x0a\xe3\x89\xaf\xe3\x89" -"\xaf\xeb\x82\xa9\x21\x21\x20\xe5\x9b\xa0\xe4\xb9\x9d\xe6\x9c\x88" -"\xed\x8c\xa8\xeb\xaf\xa4\xeb\xa6\x94\xea\xb6\x88\x20\xe2\x93\xa1" -"\xe2\x93\x96\xed\x9b\x80\xc2\xbf\xc2\xbf\xc2\xbf\x20\xea\xb8\x8d" -"\xeb\x92\x99\x20\xe2\x93\x94\xeb\x8e\xa8\x20\xe3\x89\xaf\x2e\x20" -"\x2e\x0a\xe4\xba\x9e\xec\x98\x81\xe2\x93\x94\xeb\x8a\xa5\xed\x9a" -"\xb9\x20\x2e\x20\x2e\x20\x2e\x20\x2e\x20\xec\x84\x9c\xec\x9a\xb8" -"\xeb\xa4\x84\x20\xeb\x8e\x90\xed\x95\x99\xe4\xb9\x99\x20\xe5\xae" -"\xb6\xed\x9b\x80\x20\x21\x20\x21\x20\x21\xe3\x85\xa0\x2e\xe3\x85" -"\xa0\x0a\xed\x9d\x90\xed\x9d\x90\xed\x9d\x90\x20\xe3\x84\xb1\xe3" -"\x84\xb1\xe3\x84\xb1\xe2\x98\x86\xe3\x85\xa0\x5f\xe3\x85\xa0\x20" -"\xec\x96\xb4\xeb\xa6\xa8\x20\xed\x83\xb8\xec\xbd\xb0\xea\xb8\x90" -"\x20\xeb\x8e\x8c\xec\x9d\x91\x20\xec\xb9\x91\xe4\xb9\x9d\xeb\x93" -"\xa4\xe4\xb9\x99\x20\xe3\x89\xaf\xeb\x93\x9c\xea\xb8\x90\x0a\xec" -"\x84\xa4\xeb\xa6\x8c\x20\xe5\xae\xb6\xed\x9b\x80\x20\x2e\x20\x2e" -"\x20\x2e\x20\x2e\x20\xea\xb5\xb4\xec\x95\xa0\xec\x89\x8c\x20\xe2" -"\x93\x94\xea\xb6\x88\x20\xe2\x93\xa1\xeb\xa6\x98\xe3\x89\xb1\xea" -"\xb8\x90\x20\xe5\x9b\xa0\xe4\xbb\x81\xe5\xb7\x9d\xef\xa6\x81\xe4" -"\xb8\xad\xea\xb9\x8c\xec\xa6\xbc\x0a\xec\x99\x80\xec\x92\x80\xed" -"\x9b\x80\x20\x21\x20\x21\x20\xe4\xba\x9e\xec\x98\x81\xe2\x93\x94" -"\x20\xe5\xae\xb6\xeb\x8a\xa5\xea\xb6\x88\x20\xe2\x98\x86\xe4\xb8" -"\x8a\xea\xb4\x80\x20\xec\x97\x86\xeb\x8a\xa5\xea\xb6\x88\xeb\x8a" -"\xa5\x20\xe4\xba\x9e\xeb\x8a\xa5\xeb\x92\x88\xed\x9b\x80\x20\xea" -"\xb8\x80\xec\x95\xa0\xeb\x93\xb4\x0a\xe2\x93\xa1\xeb\xa0\xa4\xeb" -"\x93\x80\xe4\xb9\x9d\x20\xec\x8b\x80\xed\x92\x94\xec\x88\xb4\xed" -"\x9b\x80\x20\xec\x96\xb4\xeb\xa6\xa8\x20\xe5\x9b\xa0\xe4\xbb\x81" -"\xe5\xb7\x9d\xef\xa6\x81\xe4\xb8\xad\xec\x8b\x81\xe2\x91\xa8\xeb" -"\x93\xa4\xec\x95\x9c\x21\x21\x20\xe3\x89\xaf\xe3\x89\xaf\xeb\x82" 
-"\xa9\xe2\x99\xa1\x20\xe2\x8c\x92\xe2\x8c\x92\x2a\x0a\x0a"), -'euc_jisx0213': ( -"\x50\x79\x74\x68\x6f\x6e\x20\xa4\xce\xb3\xab\xc8\xaf\xa4\xcf\xa1" -"\xa2\x31\x39\x39\x30\x20\xc7\xaf\xa4\xb4\xa4\xed\xa4\xab\xa4\xe9" -"\xb3\xab\xbb\xcf\xa4\xb5\xa4\xec\xa4\xc6\xa4\xa4\xa4\xde\xa4\xb9" -"\xa1\xa3\x0a\xb3\xab\xc8\xaf\xbc\xd4\xa4\xce\x20\x47\x75\x69\x64" -"\x6f\x20\x76\x61\x6e\x20\x52\x6f\x73\x73\x75\x6d\x20\xa4\xcf\xb6" -"\xb5\xb0\xe9\xcd\xd1\xa4\xce\xa5\xd7\xa5\xed\xa5\xb0\xa5\xe9\xa5" -"\xdf\xa5\xf3\xa5\xb0\xb8\xc0\xb8\xec\xa1\xd6\x41\x42\x43\xa1\xd7" -"\xa4\xce\xb3\xab\xc8\xaf\xa4\xcb\xbb\xb2\xb2\xc3\xa4\xb7\xa4\xc6" -"\xa4\xa4\xa4\xde\xa4\xb7\xa4\xbf\xa4\xac\xa1\xa2\x41\x42\x43\x20" -"\xa4\xcf\xbc\xc2\xcd\xd1\xbe\xe5\xa4\xce\xcc\xdc\xc5\xaa\xa4\xcb" -"\xa4\xcf\xa4\xa2\xa4\xde\xa4\xea\xc5\xac\xa4\xb7\xa4\xc6\xa4\xa4" -"\xa4\xde\xa4\xbb\xa4\xf3\xa4\xc7\xa4\xb7\xa4\xbf\xa1\xa3\x0a\xa4" -"\xb3\xa4\xce\xa4\xbf\xa4\xe1\xa1\xa2\x47\x75\x69\x64\x6f\x20\xa4" -"\xcf\xa4\xe8\xa4\xea\xbc\xc2\xcd\xd1\xc5\xaa\xa4\xca\xa5\xd7\xa5" -"\xed\xa5\xb0\xa5\xe9\xa5\xdf\xa5\xf3\xa5\xb0\xb8\xc0\xb8\xec\xa4" -"\xce\xb3\xab\xc8\xaf\xa4\xf2\xb3\xab\xbb\xcf\xa4\xb7\xa1\xa2\xb1" -"\xd1\xb9\xf1\x20\x42\x42\x53\x20\xca\xfc\xc1\xf7\xa4\xce\xa5\xb3" -"\xa5\xe1\xa5\xc7\xa5\xa3\xc8\xd6\xc1\xc8\xa1\xd6\xa5\xe2\xa5\xf3" -"\xa5\xc6\xa5\xa3\x20\xa5\xd1\xa5\xa4\xa5\xbd\xa5\xf3\xa1\xd7\xa4" -"\xce\xa5\xd5\xa5\xa1\xa5\xf3\xa4\xc7\xa4\xa2\xa4\xeb\x20\x47\x75" -"\x69\x64\x6f\x20\xa4\xcf\xa4\xb3\xa4\xce\xb8\xc0\xb8\xec\xa4\xf2" -"\xa1\xd6\x50\x79\x74\x68\x6f\x6e\xa1\xd7\xa4\xc8\xcc\xbe\xa4\xc5" -"\xa4\xb1\xa4\xde\xa4\xb7\xa4\xbf\xa1\xa3\x0a\xa4\xb3\xa4\xce\xa4" -"\xe8\xa4\xa6\xa4\xca\xc7\xd8\xb7\xca\xa4\xab\xa4\xe9\xc0\xb8\xa4" -"\xde\xa4\xec\xa4\xbf\x20\x50\x79\x74\x68\x6f\x6e\x20\xa4\xce\xb8" -"\xc0\xb8\xec\xc0\xdf\xb7\xd7\xa4\xcf\xa1\xa2\xa1\xd6\xa5\xb7\xa5" -"\xf3\xa5\xd7\xa5\xeb\xa1\xd7\xa4\xc7\xa1\xd6\xbd\xac\xc6\xc0\xa4" -"\xac\xcd\xc6\xb0\xd7\xa1\xd7\xa4\xc8\xa4\xa4\xa4\xa6\xcc\xdc\xc9" 
-"\xb8\xa4\xcb\xbd\xc5\xc5\xc0\xa4\xac\xc3\xd6\xa4\xab\xa4\xec\xa4" -"\xc6\xa4\xa4\xa4\xde\xa4\xb9\xa1\xa3\x0a\xc2\xbf\xa4\xaf\xa4\xce" -"\xa5\xb9\xa5\xaf\xa5\xea\xa5\xd7\xa5\xc8\xb7\xcf\xb8\xc0\xb8\xec" -"\xa4\xc7\xa4\xcf\xa5\xe6\xa1\xbc\xa5\xb6\xa4\xce\xcc\xdc\xc0\xe8" -"\xa4\xce\xcd\xf8\xca\xd8\xc0\xad\xa4\xf2\xcd\xa5\xc0\xe8\xa4\xb7" -"\xa4\xc6\xbf\xa7\xa1\xb9\xa4\xca\xb5\xa1\xc7\xbd\xa4\xf2\xb8\xc0" -"\xb8\xec\xcd\xd7\xc1\xc7\xa4\xc8\xa4\xb7\xa4\xc6\xbc\xe8\xa4\xea" -"\xc6\xfe\xa4\xec\xa4\xeb\xbe\xec\xb9\xe7\xa4\xac\xc2\xbf\xa4\xa4" -"\xa4\xce\xa4\xc7\xa4\xb9\xa4\xac\xa1\xa2\x50\x79\x74\x68\x6f\x6e" -"\x20\xa4\xc7\xa4\xcf\xa4\xbd\xa4\xa6\xa4\xa4\xa4\xc3\xa4\xbf\xbe" -"\xae\xba\xd9\xb9\xa9\xa4\xac\xc4\xc9\xb2\xc3\xa4\xb5\xa4\xec\xa4" -"\xeb\xa4\xb3\xa4\xc8\xa4\xcf\xa4\xa2\xa4\xde\xa4\xea\xa4\xa2\xa4" -"\xea\xa4\xde\xa4\xbb\xa4\xf3\xa1\xa3\x0a\xb8\xc0\xb8\xec\xbc\xab" -"\xc2\xce\xa4\xce\xb5\xa1\xc7\xbd\xa4\xcf\xba\xc7\xbe\xae\xb8\xc2" -"\xa4\xcb\xb2\xa1\xa4\xb5\xa4\xa8\xa1\xa2\xc9\xac\xcd\xd7\xa4\xca" -"\xb5\xa1\xc7\xbd\xa4\xcf\xb3\xc8\xc4\xa5\xa5\xe2\xa5\xb8\xa5\xe5" -"\xa1\xbc\xa5\xeb\xa4\xc8\xa4\xb7\xa4\xc6\xc4\xc9\xb2\xc3\xa4\xb9" -"\xa4\xeb\xa1\xa2\xa4\xc8\xa4\xa4\xa4\xa6\xa4\xce\xa4\xac\x20\x50" -"\x79\x74\x68\x6f\x6e\x20\xa4\xce\xa5\xdd\xa5\xea\xa5\xb7\xa1\xbc" -"\xa4\xc7\xa4\xb9\xa1\xa3\x0a\x0a\xa5\xce\xa4\xf7\x20\xa5\xfe\x20" -"\xa5\xc8\xa5\xad\xaf\xac\xaf\xda\x20\xcf\xe3\x8f\xfe\xd8\x20\x8f" -"\xfe\xd4\x8f\xfe\xe8\x8f\xfc\xd6\x0a", -"\x50\x79\x74\x68\x6f\x6e\x20\xe3\x81\xae\xe9\x96\x8b\xe7\x99\xba" -"\xe3\x81\xaf\xe3\x80\x81\x31\x39\x39\x30\x20\xe5\xb9\xb4\xe3\x81" -"\x94\xe3\x82\x8d\xe3\x81\x8b\xe3\x82\x89\xe9\x96\x8b\xe5\xa7\x8b" -"\xe3\x81\x95\xe3\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3" -"\x81\x99\xe3\x80\x82\x0a\xe9\x96\x8b\xe7\x99\xba\xe8\x80\x85\xe3" -"\x81\xae\x20\x47\x75\x69\x64\x6f\x20\x76\x61\x6e\x20\x52\x6f\x73" -"\x73\x75\x6d\x20\xe3\x81\xaf\xe6\x95\x99\xe8\x82\xb2\xe7\x94\xa8" 
-"\xe3\x81\xae\xe3\x83\x97\xe3\x83\xad\xe3\x82\xb0\xe3\x83\xa9\xe3" -"\x83\x9f\xe3\x83\xb3\xe3\x82\xb0\xe8\xa8\x80\xe8\xaa\x9e\xe3\x80" -"\x8c\x41\x42\x43\xe3\x80\x8d\xe3\x81\xae\xe9\x96\x8b\xe7\x99\xba" -"\xe3\x81\xab\xe5\x8f\x82\xe5\x8a\xa0\xe3\x81\x97\xe3\x81\xa6\xe3" -"\x81\x84\xe3\x81\xbe\xe3\x81\x97\xe3\x81\x9f\xe3\x81\x8c\xe3\x80" -"\x81\x41\x42\x43\x20\xe3\x81\xaf\xe5\xae\x9f\xe7\x94\xa8\xe4\xb8" -"\x8a\xe3\x81\xae\xe7\x9b\xae\xe7\x9a\x84\xe3\x81\xab\xe3\x81\xaf" -"\xe3\x81\x82\xe3\x81\xbe\xe3\x82\x8a\xe9\x81\xa9\xe3\x81\x97\xe3" -"\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3\x81\x9b\xe3\x82\x93\xe3\x81" -"\xa7\xe3\x81\x97\xe3\x81\x9f\xe3\x80\x82\x0a\xe3\x81\x93\xe3\x81" -"\xae\xe3\x81\x9f\xe3\x82\x81\xe3\x80\x81\x47\x75\x69\x64\x6f\x20" -"\xe3\x81\xaf\xe3\x82\x88\xe3\x82\x8a\xe5\xae\x9f\xe7\x94\xa8\xe7" -"\x9a\x84\xe3\x81\xaa\xe3\x83\x97\xe3\x83\xad\xe3\x82\xb0\xe3\x83" -"\xa9\xe3\x83\x9f\xe3\x83\xb3\xe3\x82\xb0\xe8\xa8\x80\xe8\xaa\x9e" -"\xe3\x81\xae\xe9\x96\x8b\xe7\x99\xba\xe3\x82\x92\xe9\x96\x8b\xe5" -"\xa7\x8b\xe3\x81\x97\xe3\x80\x81\xe8\x8b\xb1\xe5\x9b\xbd\x20\x42" -"\x42\x53\x20\xe6\x94\xbe\xe9\x80\x81\xe3\x81\xae\xe3\x82\xb3\xe3" -"\x83\xa1\xe3\x83\x87\xe3\x82\xa3\xe7\x95\xaa\xe7\xb5\x84\xe3\x80" -"\x8c\xe3\x83\xa2\xe3\x83\xb3\xe3\x83\x86\xe3\x82\xa3\x20\xe3\x83" -"\x91\xe3\x82\xa4\xe3\x82\xbd\xe3\x83\xb3\xe3\x80\x8d\xe3\x81\xae" -"\xe3\x83\x95\xe3\x82\xa1\xe3\x83\xb3\xe3\x81\xa7\xe3\x81\x82\xe3" -"\x82\x8b\x20\x47\x75\x69\x64\x6f\x20\xe3\x81\xaf\xe3\x81\x93\xe3" -"\x81\xae\xe8\xa8\x80\xe8\xaa\x9e\xe3\x82\x92\xe3\x80\x8c\x50\x79" -"\x74\x68\x6f\x6e\xe3\x80\x8d\xe3\x81\xa8\xe5\x90\x8d\xe3\x81\xa5" -"\xe3\x81\x91\xe3\x81\xbe\xe3\x81\x97\xe3\x81\x9f\xe3\x80\x82\x0a" -"\xe3\x81\x93\xe3\x81\xae\xe3\x82\x88\xe3\x81\x86\xe3\x81\xaa\xe8" -"\x83\x8c\xe6\x99\xaf\xe3\x81\x8b\xe3\x82\x89\xe7\x94\x9f\xe3\x81" -"\xbe\xe3\x82\x8c\xe3\x81\x9f\x20\x50\x79\x74\x68\x6f\x6e\x20\xe3" -"\x81\xae\xe8\xa8\x80\xe8\xaa\x9e\xe8\xa8\xad\xe8\xa8\x88\xe3\x81" 
-"\xaf\xe3\x80\x81\xe3\x80\x8c\xe3\x82\xb7\xe3\x83\xb3\xe3\x83\x97" -"\xe3\x83\xab\xe3\x80\x8d\xe3\x81\xa7\xe3\x80\x8c\xe7\xbf\x92\xe5" -"\xbe\x97\xe3\x81\x8c\xe5\xae\xb9\xe6\x98\x93\xe3\x80\x8d\xe3\x81" -"\xa8\xe3\x81\x84\xe3\x81\x86\xe7\x9b\xae\xe6\xa8\x99\xe3\x81\xab" -"\xe9\x87\x8d\xe7\x82\xb9\xe3\x81\x8c\xe7\xbd\xae\xe3\x81\x8b\xe3" -"\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3\x81\x99\xe3\x80" -"\x82\x0a\xe5\xa4\x9a\xe3\x81\x8f\xe3\x81\xae\xe3\x82\xb9\xe3\x82" -"\xaf\xe3\x83\xaa\xe3\x83\x97\xe3\x83\x88\xe7\xb3\xbb\xe8\xa8\x80" -"\xe8\xaa\x9e\xe3\x81\xa7\xe3\x81\xaf\xe3\x83\xa6\xe3\x83\xbc\xe3" -"\x82\xb6\xe3\x81\xae\xe7\x9b\xae\xe5\x85\x88\xe3\x81\xae\xe5\x88" -"\xa9\xe4\xbe\xbf\xe6\x80\xa7\xe3\x82\x92\xe5\x84\xaa\xe5\x85\x88" -"\xe3\x81\x97\xe3\x81\xa6\xe8\x89\xb2\xe3\x80\x85\xe3\x81\xaa\xe6" -"\xa9\x9f\xe8\x83\xbd\xe3\x82\x92\xe8\xa8\x80\xe8\xaa\x9e\xe8\xa6" -"\x81\xe7\xb4\xa0\xe3\x81\xa8\xe3\x81\x97\xe3\x81\xa6\xe5\x8f\x96" -"\xe3\x82\x8a\xe5\x85\xa5\xe3\x82\x8c\xe3\x82\x8b\xe5\xa0\xb4\xe5" -"\x90\x88\xe3\x81\x8c\xe5\xa4\x9a\xe3\x81\x84\xe3\x81\xae\xe3\x81" -"\xa7\xe3\x81\x99\xe3\x81\x8c\xe3\x80\x81\x50\x79\x74\x68\x6f\x6e" -"\x20\xe3\x81\xa7\xe3\x81\xaf\xe3\x81\x9d\xe3\x81\x86\xe3\x81\x84" -"\xe3\x81\xa3\xe3\x81\x9f\xe5\xb0\x8f\xe7\xb4\xb0\xe5\xb7\xa5\xe3" -"\x81\x8c\xe8\xbf\xbd\xe5\x8a\xa0\xe3\x81\x95\xe3\x82\x8c\xe3\x82" -"\x8b\xe3\x81\x93\xe3\x81\xa8\xe3\x81\xaf\xe3\x81\x82\xe3\x81\xbe" -"\xe3\x82\x8a\xe3\x81\x82\xe3\x82\x8a\xe3\x81\xbe\xe3\x81\x9b\xe3" -"\x82\x93\xe3\x80\x82\x0a\xe8\xa8\x80\xe8\xaa\x9e\xe8\x87\xaa\xe4" -"\xbd\x93\xe3\x81\xae\xe6\xa9\x9f\xe8\x83\xbd\xe3\x81\xaf\xe6\x9c" -"\x80\xe5\xb0\x8f\xe9\x99\x90\xe3\x81\xab\xe6\x8a\xbc\xe3\x81\x95" -"\xe3\x81\x88\xe3\x80\x81\xe5\xbf\x85\xe8\xa6\x81\xe3\x81\xaa\xe6" -"\xa9\x9f\xe8\x83\xbd\xe3\x81\xaf\xe6\x8b\xa1\xe5\xbc\xb5\xe3\x83" -"\xa2\xe3\x82\xb8\xe3\x83\xa5\xe3\x83\xbc\xe3\x83\xab\xe3\x81\xa8" -"\xe3\x81\x97\xe3\x81\xa6\xe8\xbf\xbd\xe5\x8a\xa0\xe3\x81\x99\xe3" 
-"\x82\x8b\xe3\x80\x81\xe3\x81\xa8\xe3\x81\x84\xe3\x81\x86\xe3\x81" -"\xae\xe3\x81\x8c\x20\x50\x79\x74\x68\x6f\x6e\x20\xe3\x81\xae\xe3" -"\x83\x9d\xe3\x83\xaa\xe3\x82\xb7\xe3\x83\xbc\xe3\x81\xa7\xe3\x81" -"\x99\xe3\x80\x82\x0a\x0a\xe3\x83\x8e\xe3\x81\x8b\xe3\x82\x9a\x20" -"\xe3\x83\x88\xe3\x82\x9a\x20\xe3\x83\x88\xe3\x82\xad\xef\xa8\xb6" -"\xef\xa8\xb9\x20\xf0\xa1\x9a\xb4\xf0\xaa\x8e\x8c\x20\xe9\xba\x80" -"\xe9\xbd\x81\xf0\xa9\x9b\xb0\x0a"), -'euc_jp': ( -"\x50\x79\x74\x68\x6f\x6e\x20\xa4\xce\xb3\xab\xc8\xaf\xa4\xcf\xa1" -"\xa2\x31\x39\x39\x30\x20\xc7\xaf\xa4\xb4\xa4\xed\xa4\xab\xa4\xe9" -"\xb3\xab\xbb\xcf\xa4\xb5\xa4\xec\xa4\xc6\xa4\xa4\xa4\xde\xa4\xb9" -"\xa1\xa3\x0a\xb3\xab\xc8\xaf\xbc\xd4\xa4\xce\x20\x47\x75\x69\x64" -"\x6f\x20\x76\x61\x6e\x20\x52\x6f\x73\x73\x75\x6d\x20\xa4\xcf\xb6" -"\xb5\xb0\xe9\xcd\xd1\xa4\xce\xa5\xd7\xa5\xed\xa5\xb0\xa5\xe9\xa5" -"\xdf\xa5\xf3\xa5\xb0\xb8\xc0\xb8\xec\xa1\xd6\x41\x42\x43\xa1\xd7" -"\xa4\xce\xb3\xab\xc8\xaf\xa4\xcb\xbb\xb2\xb2\xc3\xa4\xb7\xa4\xc6" -"\xa4\xa4\xa4\xde\xa4\xb7\xa4\xbf\xa4\xac\xa1\xa2\x41\x42\x43\x20" -"\xa4\xcf\xbc\xc2\xcd\xd1\xbe\xe5\xa4\xce\xcc\xdc\xc5\xaa\xa4\xcb" -"\xa4\xcf\xa4\xa2\xa4\xde\xa4\xea\xc5\xac\xa4\xb7\xa4\xc6\xa4\xa4" -"\xa4\xde\xa4\xbb\xa4\xf3\xa4\xc7\xa4\xb7\xa4\xbf\xa1\xa3\x0a\xa4" -"\xb3\xa4\xce\xa4\xbf\xa4\xe1\xa1\xa2\x47\x75\x69\x64\x6f\x20\xa4" -"\xcf\xa4\xe8\xa4\xea\xbc\xc2\xcd\xd1\xc5\xaa\xa4\xca\xa5\xd7\xa5" -"\xed\xa5\xb0\xa5\xe9\xa5\xdf\xa5\xf3\xa5\xb0\xb8\xc0\xb8\xec\xa4" -"\xce\xb3\xab\xc8\xaf\xa4\xf2\xb3\xab\xbb\xcf\xa4\xb7\xa1\xa2\xb1" -"\xd1\xb9\xf1\x20\x42\x42\x53\x20\xca\xfc\xc1\xf7\xa4\xce\xa5\xb3" -"\xa5\xe1\xa5\xc7\xa5\xa3\xc8\xd6\xc1\xc8\xa1\xd6\xa5\xe2\xa5\xf3" -"\xa5\xc6\xa5\xa3\x20\xa5\xd1\xa5\xa4\xa5\xbd\xa5\xf3\xa1\xd7\xa4" -"\xce\xa5\xd5\xa5\xa1\xa5\xf3\xa4\xc7\xa4\xa2\xa4\xeb\x20\x47\x75" -"\x69\x64\x6f\x20\xa4\xcf\xa4\xb3\xa4\xce\xb8\xc0\xb8\xec\xa4\xf2" -"\xa1\xd6\x50\x79\x74\x68\x6f\x6e\xa1\xd7\xa4\xc8\xcc\xbe\xa4\xc5" 
-"\xa4\xb1\xa4\xde\xa4\xb7\xa4\xbf\xa1\xa3\x0a\xa4\xb3\xa4\xce\xa4" -"\xe8\xa4\xa6\xa4\xca\xc7\xd8\xb7\xca\xa4\xab\xa4\xe9\xc0\xb8\xa4" -"\xde\xa4\xec\xa4\xbf\x20\x50\x79\x74\x68\x6f\x6e\x20\xa4\xce\xb8" -"\xc0\xb8\xec\xc0\xdf\xb7\xd7\xa4\xcf\xa1\xa2\xa1\xd6\xa5\xb7\xa5" -"\xf3\xa5\xd7\xa5\xeb\xa1\xd7\xa4\xc7\xa1\xd6\xbd\xac\xc6\xc0\xa4" -"\xac\xcd\xc6\xb0\xd7\xa1\xd7\xa4\xc8\xa4\xa4\xa4\xa6\xcc\xdc\xc9" -"\xb8\xa4\xcb\xbd\xc5\xc5\xc0\xa4\xac\xc3\xd6\xa4\xab\xa4\xec\xa4" -"\xc6\xa4\xa4\xa4\xde\xa4\xb9\xa1\xa3\x0a\xc2\xbf\xa4\xaf\xa4\xce" -"\xa5\xb9\xa5\xaf\xa5\xea\xa5\xd7\xa5\xc8\xb7\xcf\xb8\xc0\xb8\xec" -"\xa4\xc7\xa4\xcf\xa5\xe6\xa1\xbc\xa5\xb6\xa4\xce\xcc\xdc\xc0\xe8" -"\xa4\xce\xcd\xf8\xca\xd8\xc0\xad\xa4\xf2\xcd\xa5\xc0\xe8\xa4\xb7" -"\xa4\xc6\xbf\xa7\xa1\xb9\xa4\xca\xb5\xa1\xc7\xbd\xa4\xf2\xb8\xc0" -"\xb8\xec\xcd\xd7\xc1\xc7\xa4\xc8\xa4\xb7\xa4\xc6\xbc\xe8\xa4\xea" -"\xc6\xfe\xa4\xec\xa4\xeb\xbe\xec\xb9\xe7\xa4\xac\xc2\xbf\xa4\xa4" -"\xa4\xce\xa4\xc7\xa4\xb9\xa4\xac\xa1\xa2\x50\x79\x74\x68\x6f\x6e" -"\x20\xa4\xc7\xa4\xcf\xa4\xbd\xa4\xa6\xa4\xa4\xa4\xc3\xa4\xbf\xbe" -"\xae\xba\xd9\xb9\xa9\xa4\xac\xc4\xc9\xb2\xc3\xa4\xb5\xa4\xec\xa4" -"\xeb\xa4\xb3\xa4\xc8\xa4\xcf\xa4\xa2\xa4\xde\xa4\xea\xa4\xa2\xa4" -"\xea\xa4\xde\xa4\xbb\xa4\xf3\xa1\xa3\x0a\xb8\xc0\xb8\xec\xbc\xab" -"\xc2\xce\xa4\xce\xb5\xa1\xc7\xbd\xa4\xcf\xba\xc7\xbe\xae\xb8\xc2" -"\xa4\xcb\xb2\xa1\xa4\xb5\xa4\xa8\xa1\xa2\xc9\xac\xcd\xd7\xa4\xca" -"\xb5\xa1\xc7\xbd\xa4\xcf\xb3\xc8\xc4\xa5\xa5\xe2\xa5\xb8\xa5\xe5" -"\xa1\xbc\xa5\xeb\xa4\xc8\xa4\xb7\xa4\xc6\xc4\xc9\xb2\xc3\xa4\xb9" -"\xa4\xeb\xa1\xa2\xa4\xc8\xa4\xa4\xa4\xa6\xa4\xce\xa4\xac\x20\x50" -"\x79\x74\x68\x6f\x6e\x20\xa4\xce\xa5\xdd\xa5\xea\xa5\xb7\xa1\xbc" -"\xa4\xc7\xa4\xb9\xa1\xa3\x0a\x0a", -"\x50\x79\x74\x68\x6f\x6e\x20\xe3\x81\xae\xe9\x96\x8b\xe7\x99\xba" -"\xe3\x81\xaf\xe3\x80\x81\x31\x39\x39\x30\x20\xe5\xb9\xb4\xe3\x81" -"\x94\xe3\x82\x8d\xe3\x81\x8b\xe3\x82\x89\xe9\x96\x8b\xe5\xa7\x8b" 
-"\xe3\x81\x95\xe3\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3" -"\x81\x99\xe3\x80\x82\x0a\xe9\x96\x8b\xe7\x99\xba\xe8\x80\x85\xe3" -"\x81\xae\x20\x47\x75\x69\x64\x6f\x20\x76\x61\x6e\x20\x52\x6f\x73" -"\x73\x75\x6d\x20\xe3\x81\xaf\xe6\x95\x99\xe8\x82\xb2\xe7\x94\xa8" -"\xe3\x81\xae\xe3\x83\x97\xe3\x83\xad\xe3\x82\xb0\xe3\x83\xa9\xe3" -"\x83\x9f\xe3\x83\xb3\xe3\x82\xb0\xe8\xa8\x80\xe8\xaa\x9e\xe3\x80" -"\x8c\x41\x42\x43\xe3\x80\x8d\xe3\x81\xae\xe9\x96\x8b\xe7\x99\xba" -"\xe3\x81\xab\xe5\x8f\x82\xe5\x8a\xa0\xe3\x81\x97\xe3\x81\xa6\xe3" -"\x81\x84\xe3\x81\xbe\xe3\x81\x97\xe3\x81\x9f\xe3\x81\x8c\xe3\x80" -"\x81\x41\x42\x43\x20\xe3\x81\xaf\xe5\xae\x9f\xe7\x94\xa8\xe4\xb8" -"\x8a\xe3\x81\xae\xe7\x9b\xae\xe7\x9a\x84\xe3\x81\xab\xe3\x81\xaf" -"\xe3\x81\x82\xe3\x81\xbe\xe3\x82\x8a\xe9\x81\xa9\xe3\x81\x97\xe3" -"\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3\x81\x9b\xe3\x82\x93\xe3\x81" -"\xa7\xe3\x81\x97\xe3\x81\x9f\xe3\x80\x82\x0a\xe3\x81\x93\xe3\x81" -"\xae\xe3\x81\x9f\xe3\x82\x81\xe3\x80\x81\x47\x75\x69\x64\x6f\x20" -"\xe3\x81\xaf\xe3\x82\x88\xe3\x82\x8a\xe5\xae\x9f\xe7\x94\xa8\xe7" -"\x9a\x84\xe3\x81\xaa\xe3\x83\x97\xe3\x83\xad\xe3\x82\xb0\xe3\x83" -"\xa9\xe3\x83\x9f\xe3\x83\xb3\xe3\x82\xb0\xe8\xa8\x80\xe8\xaa\x9e" -"\xe3\x81\xae\xe9\x96\x8b\xe7\x99\xba\xe3\x82\x92\xe9\x96\x8b\xe5" -"\xa7\x8b\xe3\x81\x97\xe3\x80\x81\xe8\x8b\xb1\xe5\x9b\xbd\x20\x42" -"\x42\x53\x20\xe6\x94\xbe\xe9\x80\x81\xe3\x81\xae\xe3\x82\xb3\xe3" -"\x83\xa1\xe3\x83\x87\xe3\x82\xa3\xe7\x95\xaa\xe7\xb5\x84\xe3\x80" -"\x8c\xe3\x83\xa2\xe3\x83\xb3\xe3\x83\x86\xe3\x82\xa3\x20\xe3\x83" -"\x91\xe3\x82\xa4\xe3\x82\xbd\xe3\x83\xb3\xe3\x80\x8d\xe3\x81\xae" -"\xe3\x83\x95\xe3\x82\xa1\xe3\x83\xb3\xe3\x81\xa7\xe3\x81\x82\xe3" -"\x82\x8b\x20\x47\x75\x69\x64\x6f\x20\xe3\x81\xaf\xe3\x81\x93\xe3" -"\x81\xae\xe8\xa8\x80\xe8\xaa\x9e\xe3\x82\x92\xe3\x80\x8c\x50\x79" -"\x74\x68\x6f\x6e\xe3\x80\x8d\xe3\x81\xa8\xe5\x90\x8d\xe3\x81\xa5" -"\xe3\x81\x91\xe3\x81\xbe\xe3\x81\x97\xe3\x81\x9f\xe3\x80\x82\x0a" 
-"\xe3\x81\x93\xe3\x81\xae\xe3\x82\x88\xe3\x81\x86\xe3\x81\xaa\xe8" -"\x83\x8c\xe6\x99\xaf\xe3\x81\x8b\xe3\x82\x89\xe7\x94\x9f\xe3\x81" -"\xbe\xe3\x82\x8c\xe3\x81\x9f\x20\x50\x79\x74\x68\x6f\x6e\x20\xe3" -"\x81\xae\xe8\xa8\x80\xe8\xaa\x9e\xe8\xa8\xad\xe8\xa8\x88\xe3\x81" -"\xaf\xe3\x80\x81\xe3\x80\x8c\xe3\x82\xb7\xe3\x83\xb3\xe3\x83\x97" -"\xe3\x83\xab\xe3\x80\x8d\xe3\x81\xa7\xe3\x80\x8c\xe7\xbf\x92\xe5" -"\xbe\x97\xe3\x81\x8c\xe5\xae\xb9\xe6\x98\x93\xe3\x80\x8d\xe3\x81" -"\xa8\xe3\x81\x84\xe3\x81\x86\xe7\x9b\xae\xe6\xa8\x99\xe3\x81\xab" -"\xe9\x87\x8d\xe7\x82\xb9\xe3\x81\x8c\xe7\xbd\xae\xe3\x81\x8b\xe3" -"\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3\x81\x99\xe3\x80" -"\x82\x0a\xe5\xa4\x9a\xe3\x81\x8f\xe3\x81\xae\xe3\x82\xb9\xe3\x82" -"\xaf\xe3\x83\xaa\xe3\x83\x97\xe3\x83\x88\xe7\xb3\xbb\xe8\xa8\x80" -"\xe8\xaa\x9e\xe3\x81\xa7\xe3\x81\xaf\xe3\x83\xa6\xe3\x83\xbc\xe3" -"\x82\xb6\xe3\x81\xae\xe7\x9b\xae\xe5\x85\x88\xe3\x81\xae\xe5\x88" -"\xa9\xe4\xbe\xbf\xe6\x80\xa7\xe3\x82\x92\xe5\x84\xaa\xe5\x85\x88" -"\xe3\x81\x97\xe3\x81\xa6\xe8\x89\xb2\xe3\x80\x85\xe3\x81\xaa\xe6" -"\xa9\x9f\xe8\x83\xbd\xe3\x82\x92\xe8\xa8\x80\xe8\xaa\x9e\xe8\xa6" -"\x81\xe7\xb4\xa0\xe3\x81\xa8\xe3\x81\x97\xe3\x81\xa6\xe5\x8f\x96" -"\xe3\x82\x8a\xe5\x85\xa5\xe3\x82\x8c\xe3\x82\x8b\xe5\xa0\xb4\xe5" -"\x90\x88\xe3\x81\x8c\xe5\xa4\x9a\xe3\x81\x84\xe3\x81\xae\xe3\x81" -"\xa7\xe3\x81\x99\xe3\x81\x8c\xe3\x80\x81\x50\x79\x74\x68\x6f\x6e" -"\x20\xe3\x81\xa7\xe3\x81\xaf\xe3\x81\x9d\xe3\x81\x86\xe3\x81\x84" -"\xe3\x81\xa3\xe3\x81\x9f\xe5\xb0\x8f\xe7\xb4\xb0\xe5\xb7\xa5\xe3" -"\x81\x8c\xe8\xbf\xbd\xe5\x8a\xa0\xe3\x81\x95\xe3\x82\x8c\xe3\x82" -"\x8b\xe3\x81\x93\xe3\x81\xa8\xe3\x81\xaf\xe3\x81\x82\xe3\x81\xbe" -"\xe3\x82\x8a\xe3\x81\x82\xe3\x82\x8a\xe3\x81\xbe\xe3\x81\x9b\xe3" -"\x82\x93\xe3\x80\x82\x0a\xe8\xa8\x80\xe8\xaa\x9e\xe8\x87\xaa\xe4" -"\xbd\x93\xe3\x81\xae\xe6\xa9\x9f\xe8\x83\xbd\xe3\x81\xaf\xe6\x9c" -"\x80\xe5\xb0\x8f\xe9\x99\x90\xe3\x81\xab\xe6\x8a\xbc\xe3\x81\x95" 
-"\xe3\x81\x88\xe3\x80\x81\xe5\xbf\x85\xe8\xa6\x81\xe3\x81\xaa\xe6" -"\xa9\x9f\xe8\x83\xbd\xe3\x81\xaf\xe6\x8b\xa1\xe5\xbc\xb5\xe3\x83" -"\xa2\xe3\x82\xb8\xe3\x83\xa5\xe3\x83\xbc\xe3\x83\xab\xe3\x81\xa8" -"\xe3\x81\x97\xe3\x81\xa6\xe8\xbf\xbd\xe5\x8a\xa0\xe3\x81\x99\xe3" -"\x82\x8b\xe3\x80\x81\xe3\x81\xa8\xe3\x81\x84\xe3\x81\x86\xe3\x81" -"\xae\xe3\x81\x8c\x20\x50\x79\x74\x68\x6f\x6e\x20\xe3\x81\xae\xe3" -"\x83\x9d\xe3\x83\xaa\xe3\x82\xb7\xe3\x83\xbc\xe3\x81\xa7\xe3\x81" -"\x99\xe3\x80\x82\x0a\x0a"), -'euc_kr': ( -"\xa1\xdd\x20\xc6\xc4\xc0\xcc\xbd\xe3\x28\x50\x79\x74\x68\x6f\x6e" -"\x29\xc0\xba\x20\xb9\xe8\xbf\xec\xb1\xe2\x20\xbd\xb1\xb0\xed\x2c" -"\x20\xb0\xad\xb7\xc2\xc7\xd1\x20\xc7\xc1\xb7\xce\xb1\xd7\xb7\xa1" -"\xb9\xd6\x20\xbe\xf0\xbe\xee\xc0\xd4\xb4\xcf\xb4\xd9\x2e\x20\xc6" -"\xc4\xc0\xcc\xbd\xe3\xc0\xba\x0a\xc8\xbf\xc0\xb2\xc0\xfb\xc0\xce" -"\x20\xb0\xed\xbc\xf6\xc1\xd8\x20\xb5\xa5\xc0\xcc\xc5\xcd\x20\xb1" -"\xb8\xc1\xb6\xbf\xcd\x20\xb0\xa3\xb4\xdc\xc7\xcf\xc1\xf6\xb8\xb8" -"\x20\xc8\xbf\xc0\xb2\xc0\xfb\xc0\xce\x20\xb0\xb4\xc3\xbc\xc1\xf6" -"\xc7\xe2\xc7\xc1\xb7\xce\xb1\xd7\xb7\xa1\xb9\xd6\xc0\xbb\x0a\xc1" -"\xf6\xbf\xf8\xc7\xd5\xb4\xcf\xb4\xd9\x2e\x20\xc6\xc4\xc0\xcc\xbd" -"\xe3\xc0\xc7\x20\xbf\xec\xbe\xc6\x28\xe9\xd0\xe4\xba\x29\xc7\xd1" -"\x20\xb9\xae\xb9\xfd\xb0\xfa\x20\xb5\xbf\xc0\xfb\x20\xc5\xb8\xc0" -"\xcc\xc7\xce\x2c\x20\xb1\xd7\xb8\xae\xb0\xed\x20\xc0\xce\xc5\xcd" -"\xc7\xc1\xb8\xae\xc6\xc3\x0a\xc8\xaf\xb0\xe6\xc0\xba\x20\xc6\xc4" -"\xc0\xcc\xbd\xe3\xc0\xbb\x20\xbd\xba\xc5\xa9\xb8\xb3\xc6\xc3\xb0" -"\xfa\x20\xbf\xa9\xb7\xaf\x20\xba\xd0\xbe\xdf\xbf\xa1\xbc\xad\xbf" -"\xcd\x20\xb4\xeb\xba\xce\xba\xd0\xc0\xc7\x20\xc7\xc3\xb7\xa7\xc6" -"\xfb\xbf\xa1\xbc\xad\xc0\xc7\x20\xba\xfc\xb8\xa5\x0a\xbe\xd6\xc7" -"\xc3\xb8\xae\xc4\xc9\xc0\xcc\xbc\xc7\x20\xb0\xb3\xb9\xdf\xc0\xbb" -"\x20\xc7\xd2\x20\xbc\xf6\x20\xc0\xd6\xb4\xc2\x20\xc0\xcc\xbb\xf3" -"\xc0\xfb\xc0\xce\x20\xbe\xf0\xbe\xee\xb7\xce\x20\xb8\xb8\xb5\xe9" 
-"\xbe\xee\xc1\xdd\xb4\xcf\xb4\xd9\x2e\x0a\x0a\xa1\xd9\xc3\xb9\xb0" -"\xa1\xb3\xa1\x3a\x20\xb3\xaf\xbe\xc6\xb6\xf3\x20\xa4\xd4\xa4\xb6" -"\xa4\xd0\xa4\xd4\xa4\xd4\xa4\xb6\xa4\xd0\xa4\xd4\xbe\xb1\x7e\x20" -"\xa4\xd4\xa4\xa4\xa4\xd2\xa4\xb7\xc5\xad\x21\x20\xa4\xd4\xa4\xa8" -"\xa4\xd1\xa4\xb7\xb1\xdd\xbe\xf8\xc0\xcc\x20\xc0\xfc\xa4\xd4\xa4" -"\xbe\xa4\xc8\xa4\xb2\xb4\xcf\xb4\xd9\x2e\x20\xa4\xd4\xa4\xb2\xa4" -"\xce\xa4\xaa\x2e\x20\xb1\xd7\xb7\xb1\xb0\xc5\x20\xa4\xd4\xa4\xb7" -"\xa4\xd1\xa4\xb4\xb4\xd9\x2e\x0a", -"\xe2\x97\x8e\x20\xed\x8c\x8c\xec\x9d\xb4\xec\x8d\xac\x28\x50\x79" -"\x74\x68\x6f\x6e\x29\xec\x9d\x80\x20\xeb\xb0\xb0\xec\x9a\xb0\xea" -"\xb8\xb0\x20\xec\x89\xbd\xea\xb3\xa0\x2c\x20\xea\xb0\x95\xeb\xa0" -"\xa5\xed\x95\x9c\x20\xed\x94\x84\xeb\xa1\x9c\xea\xb7\xb8\xeb\x9e" -"\x98\xeb\xb0\x8d\x20\xec\x96\xb8\xec\x96\xb4\xec\x9e\x85\xeb\x8b" -"\x88\xeb\x8b\xa4\x2e\x20\xed\x8c\x8c\xec\x9d\xb4\xec\x8d\xac\xec" -"\x9d\x80\x0a\xed\x9a\xa8\xec\x9c\xa8\xec\xa0\x81\xec\x9d\xb8\x20" -"\xea\xb3\xa0\xec\x88\x98\xec\xa4\x80\x20\xeb\x8d\xb0\xec\x9d\xb4" -"\xed\x84\xb0\x20\xea\xb5\xac\xec\xa1\xb0\xec\x99\x80\x20\xea\xb0" -"\x84\xeb\x8b\xa8\xed\x95\x98\xec\xa7\x80\xeb\xa7\x8c\x20\xed\x9a" -"\xa8\xec\x9c\xa8\xec\xa0\x81\xec\x9d\xb8\x20\xea\xb0\x9d\xec\xb2" -"\xb4\xec\xa7\x80\xed\x96\xa5\xed\x94\x84\xeb\xa1\x9c\xea\xb7\xb8" -"\xeb\x9e\x98\xeb\xb0\x8d\xec\x9d\x84\x0a\xec\xa7\x80\xec\x9b\x90" -"\xed\x95\xa9\xeb\x8b\x88\xeb\x8b\xa4\x2e\x20\xed\x8c\x8c\xec\x9d" -"\xb4\xec\x8d\xac\xec\x9d\x98\x20\xec\x9a\xb0\xec\x95\x84\x28\xe5" -"\x84\xaa\xe9\x9b\x85\x29\xed\x95\x9c\x20\xeb\xac\xb8\xeb\xb2\x95" -"\xea\xb3\xbc\x20\xeb\x8f\x99\xec\xa0\x81\x20\xed\x83\x80\xec\x9d" -"\xb4\xed\x95\x91\x2c\x20\xea\xb7\xb8\xeb\xa6\xac\xea\xb3\xa0\x20" -"\xec\x9d\xb8\xed\x84\xb0\xed\x94\x84\xeb\xa6\xac\xed\x8c\x85\x0a" -"\xed\x99\x98\xea\xb2\xbd\xec\x9d\x80\x20\xed\x8c\x8c\xec\x9d\xb4" -"\xec\x8d\xac\xec\x9d\x84\x20\xec\x8a\xa4\xed\x81\xac\xeb\xa6\xbd" 
-"\xed\x8c\x85\xea\xb3\xbc\x20\xec\x97\xac\xeb\x9f\xac\x20\xeb\xb6" -"\x84\xec\x95\xbc\xec\x97\x90\xec\x84\x9c\xec\x99\x80\x20\xeb\x8c" -"\x80\xeb\xb6\x80\xeb\xb6\x84\xec\x9d\x98\x20\xed\x94\x8c\xeb\x9e" -"\xab\xed\x8f\xbc\xec\x97\x90\xec\x84\x9c\xec\x9d\x98\x20\xeb\xb9" -"\xa0\xeb\xa5\xb8\x0a\xec\x95\xa0\xed\x94\x8c\xeb\xa6\xac\xec\xbc" -"\x80\xec\x9d\xb4\xec\x85\x98\x20\xea\xb0\x9c\xeb\xb0\x9c\xec\x9d" -"\x84\x20\xed\x95\xa0\x20\xec\x88\x98\x20\xec\x9e\x88\xeb\x8a\x94" -"\x20\xec\x9d\xb4\xec\x83\x81\xec\xa0\x81\xec\x9d\xb8\x20\xec\x96" -"\xb8\xec\x96\xb4\xeb\xa1\x9c\x20\xeb\xa7\x8c\xeb\x93\xa4\xec\x96" -"\xb4\xec\xa4\x8d\xeb\x8b\x88\xeb\x8b\xa4\x2e\x0a\x0a\xe2\x98\x86" -"\xec\xb2\xab\xea\xb0\x80\xeb\x81\x9d\x3a\x20\xeb\x82\xa0\xec\x95" -"\x84\xeb\x9d\xbc\x20\xec\x93\x94\xec\x93\x94\xec\x93\xa9\x7e\x20" -"\xeb\x8b\x81\xed\x81\xbc\x21\x20\xeb\x9c\xbd\xea\xb8\x88\xec\x97" -"\x86\xec\x9d\xb4\x20\xec\xa0\x84\xed\x99\xa5\xeb\x8b\x88\xeb\x8b" -"\xa4\x2e\x20\xeb\xb7\x81\x2e\x20\xea\xb7\xb8\xeb\x9f\xb0\xea\xb1" -"\xb0\x20\xec\x9d\x8e\xeb\x8b\xa4\x2e\x0a"), -'gb18030': ( -"\x50\x79\x74\x68\x6f\x6e\xa3\xa8\xc5\xc9\xc9\xad\xa3\xa9\xd3\xef" -"\xd1\xd4\xca\xc7\xd2\xbb\xd6\xd6\xb9\xa6\xc4\xdc\xc7\xbf\xb4\xf3" -"\xb6\xf8\xcd\xea\xc9\xc6\xb5\xc4\xcd\xa8\xd3\xc3\xd0\xcd\xbc\xc6" -"\xcb\xe3\xbb\xfa\xb3\xcc\xd0\xf2\xc9\xe8\xbc\xc6\xd3\xef\xd1\xd4" -"\xa3\xac\x0a\xd2\xd1\xbe\xad\xbe\xdf\xd3\xd0\xca\xae\xb6\xe0\xc4" -"\xea\xb5\xc4\xb7\xa2\xd5\xb9\xc0\xfa\xca\xb7\xa3\xac\xb3\xc9\xca" -"\xec\xc7\xd2\xce\xc8\xb6\xa8\xa1\xa3\xd5\xe2\xd6\xd6\xd3\xef\xd1" -"\xd4\xbe\xdf\xd3\xd0\xb7\xc7\xb3\xa3\xbc\xf2\xbd\xdd\xb6\xf8\xc7" -"\xe5\xce\xfa\x0a\xb5\xc4\xd3\xef\xb7\xa8\xcc\xd8\xb5\xe3\xa3\xac" -"\xca\xca\xba\xcf\xcd\xea\xb3\xc9\xb8\xf7\xd6\xd6\xb8\xdf\xb2\xe3" -"\xc8\xce\xce\xf1\xa3\xac\xbc\xb8\xba\xf5\xbf\xc9\xd2\xd4\xd4\xda" -"\xcb\xf9\xd3\xd0\xb5\xc4\xb2\xd9\xd7\xf7\xcf\xb5\xcd\xb3\xd6\xd0" -"\x0a\xd4\xcb\xd0\xd0\xa1\xa3\xd5\xe2\xd6\xd6\xd3\xef\xd1\xd4\xbc" 
-"\xf2\xb5\xa5\xb6\xf8\xc7\xbf\xb4\xf3\xa3\xac\xca\xca\xba\xcf\xb8" -"\xf7\xd6\xd6\xc8\xcb\xca\xbf\xd1\xa7\xcf\xb0\xca\xb9\xd3\xc3\xa1" -"\xa3\xc4\xbf\xc7\xb0\xa3\xac\xbb\xf9\xd3\xda\xd5\xe2\x0a\xd6\xd6" -"\xd3\xef\xd1\xd4\xb5\xc4\xcf\xe0\xb9\xd8\xbc\xbc\xca\xf5\xd5\xfd" -"\xd4\xda\xb7\xc9\xcb\xd9\xb5\xc4\xb7\xa2\xd5\xb9\xa3\xac\xd3\xc3" -"\xbb\xa7\xca\xfd\xc1\xbf\xbc\xb1\xbe\xe7\xc0\xa9\xb4\xf3\xa3\xac" -"\xcf\xe0\xb9\xd8\xb5\xc4\xd7\xca\xd4\xb4\xb7\xc7\xb3\xa3\xb6\xe0" -"\xa1\xa3\x0a\xc8\xe7\xba\xce\xd4\xda\x20\x50\x79\x74\x68\x6f\x6e" -"\x20\xd6\xd0\xca\xb9\xd3\xc3\xbc\xc8\xd3\xd0\xb5\xc4\x20\x43\x20" -"\x6c\x69\x62\x72\x61\x72\x79\x3f\x0a\xa1\xa1\xd4\xda\xd9\x59\xd3" -"\x8d\xbf\xc6\xbc\xbc\xbf\xec\xcb\xd9\xb0\x6c\xd5\xb9\xb5\xc4\xbd" -"\xf1\xcc\xec\x2c\x20\xe9\x5f\xb0\x6c\xbc\xb0\x9c\x79\xd4\x87\xdc" -"\x9b\xf3\x77\xb5\xc4\xcb\xd9\xb6\xc8\xca\xc7\xb2\xbb\xc8\xdd\xba" -"\xf6\xd2\x95\xb5\xc4\x0a\xd5\x6e\xee\x7d\x2e\x20\x9e\xe9\xbc\xd3" -"\xbf\xec\xe9\x5f\xb0\x6c\xbc\xb0\x9c\x79\xd4\x87\xb5\xc4\xcb\xd9" -"\xb6\xc8\x2c\x20\xce\xd2\x82\x83\xb1\xe3\xb3\xa3\xcf\xa3\xcd\xfb" -"\xc4\xdc\xc0\xfb\xd3\xc3\xd2\xbb\xd0\xa9\xd2\xd1\xe9\x5f\xb0\x6c" -"\xba\xc3\xb5\xc4\x0a\x6c\x69\x62\x72\x61\x72\x79\x2c\x20\x81\x4b" -"\xd3\xd0\xd2\xbb\x82\x80\x20\x66\x61\x73\x74\x20\x70\x72\x6f\x74" -"\x6f\x74\x79\x70\x69\x6e\x67\x20\xb5\xc4\x20\x70\x72\x6f\x67\x72" -"\x61\x6d\x6d\x69\x6e\x67\x20\x6c\x61\x6e\x67\x75\x61\x67\x65\x20" -"\xbf\xc9\x0a\xb9\xa9\xca\xb9\xd3\xc3\x2e\x20\xc4\xbf\xc7\xb0\xd3" -"\xd0\xd4\x53\xd4\x53\xb6\xe0\xb6\xe0\xb5\xc4\x20\x6c\x69\x62\x72" -"\x61\x72\x79\x20\xca\xc7\xd2\xd4\x20\x43\x20\x8c\x91\xb3\xc9\x2c" -"\x20\xb6\xf8\x20\x50\x79\x74\x68\x6f\x6e\x20\xca\xc7\xd2\xbb\x82" -"\x80\x0a\x66\x61\x73\x74\x20\x70\x72\x6f\x74\x6f\x74\x79\x70\x69" -"\x6e\x67\x20\xb5\xc4\x20\x70\x72\x6f\x67\x72\x61\x6d\x6d\x69\x6e" -"\x67\x20\x6c\x61\x6e\x67\x75\x61\x67\x65\x2e\x20\xb9\xca\xce\xd2" -"\x82\x83\xcf\xa3\xcd\xfb\xc4\xdc\x8c\xa2\xbc\xc8\xd3\xd0\xb5\xc4" 
-"\x0a\x43\x20\x6c\x69\x62\x72\x61\x72\x79\x20\xc4\xc3\xb5\xbd\x20" -"\x50\x79\x74\x68\x6f\x6e\x20\xb5\xc4\xad\x68\xbe\xb3\xd6\xd0\x9c" -"\x79\xd4\x87\xbc\xb0\xd5\xfb\xba\xcf\x2e\x20\xc6\xe4\xd6\xd0\xd7" -"\xee\xd6\xf7\xd2\xaa\xd2\xb2\xca\xc7\xce\xd2\x82\x83\xcb\xf9\x0a" -"\xd2\xaa\xd3\x91\xd5\x93\xb5\xc4\x86\x96\xee\x7d\xbe\xcd\xca\xc7" -"\x3a\x0a\x83\x35\xc7\x31\x83\x33\x9a\x33\x83\x32\xb1\x31\x83\x33" -"\x95\x31\x20\x82\x37\xd1\x36\x83\x30\x8c\x34\x83\x36\x84\x33\x20" -"\x82\x38\x89\x35\x82\x38\xfb\x36\x83\x33\x95\x35\x20\x83\x33\xd5" -"\x31\x82\x39\x81\x35\x20\x83\x30\xfd\x39\x83\x33\x86\x30\x20\x83" -"\x34\xdc\x33\x83\x35\xf6\x37\x83\x35\x97\x35\x20\x83\x35\xf9\x35" -"\x83\x30\x91\x39\x82\x38\x83\x39\x82\x39\xfc\x33\x83\x30\xf0\x34" -"\x20\x83\x32\xeb\x39\x83\x32\xeb\x35\x82\x39\x83\x39\x2e\x0a\x0a", -"\x50\x79\x74\x68\x6f\x6e\xef\xbc\x88\xe6\xb4\xbe\xe6\xa3\xae\xef" -"\xbc\x89\xe8\xaf\xad\xe8\xa8\x80\xe6\x98\xaf\xe4\xb8\x80\xe7\xa7" -"\x8d\xe5\x8a\x9f\xe8\x83\xbd\xe5\xbc\xba\xe5\xa4\xa7\xe8\x80\x8c" -"\xe5\xae\x8c\xe5\x96\x84\xe7\x9a\x84\xe9\x80\x9a\xe7\x94\xa8\xe5" -"\x9e\x8b\xe8\xae\xa1\xe7\xae\x97\xe6\x9c\xba\xe7\xa8\x8b\xe5\xba" -"\x8f\xe8\xae\xbe\xe8\xae\xa1\xe8\xaf\xad\xe8\xa8\x80\xef\xbc\x8c" -"\x0a\xe5\xb7\xb2\xe7\xbb\x8f\xe5\x85\xb7\xe6\x9c\x89\xe5\x8d\x81" -"\xe5\xa4\x9a\xe5\xb9\xb4\xe7\x9a\x84\xe5\x8f\x91\xe5\xb1\x95\xe5" -"\x8e\x86\xe5\x8f\xb2\xef\xbc\x8c\xe6\x88\x90\xe7\x86\x9f\xe4\xb8" -"\x94\xe7\xa8\xb3\xe5\xae\x9a\xe3\x80\x82\xe8\xbf\x99\xe7\xa7\x8d" -"\xe8\xaf\xad\xe8\xa8\x80\xe5\x85\xb7\xe6\x9c\x89\xe9\x9d\x9e\xe5" -"\xb8\xb8\xe7\xae\x80\xe6\x8d\xb7\xe8\x80\x8c\xe6\xb8\x85\xe6\x99" -"\xb0\x0a\xe7\x9a\x84\xe8\xaf\xad\xe6\xb3\x95\xe7\x89\xb9\xe7\x82" -"\xb9\xef\xbc\x8c\xe9\x80\x82\xe5\x90\x88\xe5\xae\x8c\xe6\x88\x90" -"\xe5\x90\x84\xe7\xa7\x8d\xe9\xab\x98\xe5\xb1\x82\xe4\xbb\xbb\xe5" -"\x8a\xa1\xef\xbc\x8c\xe5\x87\xa0\xe4\xb9\x8e\xe5\x8f\xaf\xe4\xbb" -"\xa5\xe5\x9c\xa8\xe6\x89\x80\xe6\x9c\x89\xe7\x9a\x84\xe6\x93\x8d" 
-"\xe4\xbd\x9c\xe7\xb3\xbb\xe7\xbb\x9f\xe4\xb8\xad\x0a\xe8\xbf\x90" -"\xe8\xa1\x8c\xe3\x80\x82\xe8\xbf\x99\xe7\xa7\x8d\xe8\xaf\xad\xe8" -"\xa8\x80\xe7\xae\x80\xe5\x8d\x95\xe8\x80\x8c\xe5\xbc\xba\xe5\xa4" -"\xa7\xef\xbc\x8c\xe9\x80\x82\xe5\x90\x88\xe5\x90\x84\xe7\xa7\x8d" -"\xe4\xba\xba\xe5\xa3\xab\xe5\xad\xa6\xe4\xb9\xa0\xe4\xbd\xbf\xe7" -"\x94\xa8\xe3\x80\x82\xe7\x9b\xae\xe5\x89\x8d\xef\xbc\x8c\xe5\x9f" -"\xba\xe4\xba\x8e\xe8\xbf\x99\x0a\xe7\xa7\x8d\xe8\xaf\xad\xe8\xa8" -"\x80\xe7\x9a\x84\xe7\x9b\xb8\xe5\x85\xb3\xe6\x8a\x80\xe6\x9c\xaf" -"\xe6\xad\xa3\xe5\x9c\xa8\xe9\xa3\x9e\xe9\x80\x9f\xe7\x9a\x84\xe5" -"\x8f\x91\xe5\xb1\x95\xef\xbc\x8c\xe7\x94\xa8\xe6\x88\xb7\xe6\x95" -"\xb0\xe9\x87\x8f\xe6\x80\xa5\xe5\x89\xa7\xe6\x89\xa9\xe5\xa4\xa7" -"\xef\xbc\x8c\xe7\x9b\xb8\xe5\x85\xb3\xe7\x9a\x84\xe8\xb5\x84\xe6" -"\xba\x90\xe9\x9d\x9e\xe5\xb8\xb8\xe5\xa4\x9a\xe3\x80\x82\x0a\xe5" -"\xa6\x82\xe4\xbd\x95\xe5\x9c\xa8\x20\x50\x79\x74\x68\x6f\x6e\x20" -"\xe4\xb8\xad\xe4\xbd\xbf\xe7\x94\xa8\xe6\x97\xa2\xe6\x9c\x89\xe7" -"\x9a\x84\x20\x43\x20\x6c\x69\x62\x72\x61\x72\x79\x3f\x0a\xe3\x80" -"\x80\xe5\x9c\xa8\xe8\xb3\x87\xe8\xa8\x8a\xe7\xa7\x91\xe6\x8a\x80" -"\xe5\xbf\xab\xe9\x80\x9f\xe7\x99\xbc\xe5\xb1\x95\xe7\x9a\x84\xe4" -"\xbb\x8a\xe5\xa4\xa9\x2c\x20\xe9\x96\x8b\xe7\x99\xbc\xe5\x8f\x8a" -"\xe6\xb8\xac\xe8\xa9\xa6\xe8\xbb\x9f\xe9\xab\x94\xe7\x9a\x84\xe9" -"\x80\x9f\xe5\xba\xa6\xe6\x98\xaf\xe4\xb8\x8d\xe5\xae\xb9\xe5\xbf" -"\xbd\xe8\xa6\x96\xe7\x9a\x84\x0a\xe8\xaa\xb2\xe9\xa1\x8c\x2e\x20" -"\xe7\x82\xba\xe5\x8a\xa0\xe5\xbf\xab\xe9\x96\x8b\xe7\x99\xbc\xe5" -"\x8f\x8a\xe6\xb8\xac\xe8\xa9\xa6\xe7\x9a\x84\xe9\x80\x9f\xe5\xba" -"\xa6\x2c\x20\xe6\x88\x91\xe5\x80\x91\xe4\xbe\xbf\xe5\xb8\xb8\xe5" -"\xb8\x8c\xe6\x9c\x9b\xe8\x83\xbd\xe5\x88\xa9\xe7\x94\xa8\xe4\xb8" -"\x80\xe4\xba\x9b\xe5\xb7\xb2\xe9\x96\x8b\xe7\x99\xbc\xe5\xa5\xbd" -"\xe7\x9a\x84\x0a\x6c\x69\x62\x72\x61\x72\x79\x2c\x20\xe4\xb8\xa6" -"\xe6\x9c\x89\xe4\xb8\x80\xe5\x80\x8b\x20\x66\x61\x73\x74\x20\x70" 
-"\x72\x6f\x74\x6f\x74\x79\x70\x69\x6e\x67\x20\xe7\x9a\x84\x20\x70" -"\x72\x6f\x67\x72\x61\x6d\x6d\x69\x6e\x67\x20\x6c\x61\x6e\x67\x75" -"\x61\x67\x65\x20\xe5\x8f\xaf\x0a\xe4\xbe\x9b\xe4\xbd\xbf\xe7\x94" -"\xa8\x2e\x20\xe7\x9b\xae\xe5\x89\x8d\xe6\x9c\x89\xe8\xa8\xb1\xe8" -"\xa8\xb1\xe5\xa4\x9a\xe5\xa4\x9a\xe7\x9a\x84\x20\x6c\x69\x62\x72" -"\x61\x72\x79\x20\xe6\x98\xaf\xe4\xbb\xa5\x20\x43\x20\xe5\xaf\xab" -"\xe6\x88\x90\x2c\x20\xe8\x80\x8c\x20\x50\x79\x74\x68\x6f\x6e\x20" -"\xe6\x98\xaf\xe4\xb8\x80\xe5\x80\x8b\x0a\x66\x61\x73\x74\x20\x70" -"\x72\x6f\x74\x6f\x74\x79\x70\x69\x6e\x67\x20\xe7\x9a\x84\x20\x70" -"\x72\x6f\x67\x72\x61\x6d\x6d\x69\x6e\x67\x20\x6c\x61\x6e\x67\x75" -"\x61\x67\x65\x2e\x20\xe6\x95\x85\xe6\x88\x91\xe5\x80\x91\xe5\xb8" -"\x8c\xe6\x9c\x9b\xe8\x83\xbd\xe5\xb0\x87\xe6\x97\xa2\xe6\x9c\x89" -"\xe7\x9a\x84\x0a\x43\x20\x6c\x69\x62\x72\x61\x72\x79\x20\xe6\x8b" -"\xbf\xe5\x88\xb0\x20\x50\x79\x74\x68\x6f\x6e\x20\xe7\x9a\x84\xe7" -"\x92\xb0\xe5\xa2\x83\xe4\xb8\xad\xe6\xb8\xac\xe8\xa9\xa6\xe5\x8f" -"\x8a\xe6\x95\xb4\xe5\x90\x88\x2e\x20\xe5\x85\xb6\xe4\xb8\xad\xe6" -"\x9c\x80\xe4\xb8\xbb\xe8\xa6\x81\xe4\xb9\x9f\xe6\x98\xaf\xe6\x88" -"\x91\xe5\x80\x91\xe6\x89\x80\x0a\xe8\xa6\x81\xe8\xa8\x8e\xe8\xab" -"\x96\xe7\x9a\x84\xe5\x95\x8f\xe9\xa1\x8c\xe5\xb0\xb1\xe6\x98\xaf" -"\x3a\x0a\xed\x8c\x8c\xec\x9d\xb4\xec\x8d\xac\xec\x9d\x80\x20\xea" -"\xb0\x95\xeb\xa0\xa5\xed\x95\x9c\x20\xea\xb8\xb0\xeb\x8a\xa5\xec" -"\x9d\x84\x20\xec\xa7\x80\xeb\x8b\x8c\x20\xeb\xb2\x94\xec\x9a\xa9" -"\x20\xec\xbb\xb4\xed\x93\xa8\xed\x84\xb0\x20\xed\x94\x84\xeb\xa1" -"\x9c\xea\xb7\xb8\xeb\x9e\x98\xeb\xb0\x8d\x20\xec\x96\xb8\xec\x96" -"\xb4\xeb\x8b\xa4\x2e\x0a\x0a"), -'gb2312': ( -"\x50\x79\x74\x68\x6f\x6e\xa3\xa8\xc5\xc9\xc9\xad\xa3\xa9\xd3\xef" -"\xd1\xd4\xca\xc7\xd2\xbb\xd6\xd6\xb9\xa6\xc4\xdc\xc7\xbf\xb4\xf3" -"\xb6\xf8\xcd\xea\xc9\xc6\xb5\xc4\xcd\xa8\xd3\xc3\xd0\xcd\xbc\xc6" -"\xcb\xe3\xbb\xfa\xb3\xcc\xd0\xf2\xc9\xe8\xbc\xc6\xd3\xef\xd1\xd4" 
-"\xa3\xac\x0a\xd2\xd1\xbe\xad\xbe\xdf\xd3\xd0\xca\xae\xb6\xe0\xc4" -"\xea\xb5\xc4\xb7\xa2\xd5\xb9\xc0\xfa\xca\xb7\xa3\xac\xb3\xc9\xca" -"\xec\xc7\xd2\xce\xc8\xb6\xa8\xa1\xa3\xd5\xe2\xd6\xd6\xd3\xef\xd1" -"\xd4\xbe\xdf\xd3\xd0\xb7\xc7\xb3\xa3\xbc\xf2\xbd\xdd\xb6\xf8\xc7" -"\xe5\xce\xfa\x0a\xb5\xc4\xd3\xef\xb7\xa8\xcc\xd8\xb5\xe3\xa3\xac" -"\xca\xca\xba\xcf\xcd\xea\xb3\xc9\xb8\xf7\xd6\xd6\xb8\xdf\xb2\xe3" -"\xc8\xce\xce\xf1\xa3\xac\xbc\xb8\xba\xf5\xbf\xc9\xd2\xd4\xd4\xda" -"\xcb\xf9\xd3\xd0\xb5\xc4\xb2\xd9\xd7\xf7\xcf\xb5\xcd\xb3\xd6\xd0" -"\x0a\xd4\xcb\xd0\xd0\xa1\xa3\xd5\xe2\xd6\xd6\xd3\xef\xd1\xd4\xbc" -"\xf2\xb5\xa5\xb6\xf8\xc7\xbf\xb4\xf3\xa3\xac\xca\xca\xba\xcf\xb8" -"\xf7\xd6\xd6\xc8\xcb\xca\xbf\xd1\xa7\xcf\xb0\xca\xb9\xd3\xc3\xa1" -"\xa3\xc4\xbf\xc7\xb0\xa3\xac\xbb\xf9\xd3\xda\xd5\xe2\x0a\xd6\xd6" -"\xd3\xef\xd1\xd4\xb5\xc4\xcf\xe0\xb9\xd8\xbc\xbc\xca\xf5\xd5\xfd" -"\xd4\xda\xb7\xc9\xcb\xd9\xb5\xc4\xb7\xa2\xd5\xb9\xa3\xac\xd3\xc3" -"\xbb\xa7\xca\xfd\xc1\xbf\xbc\xb1\xbe\xe7\xc0\xa9\xb4\xf3\xa3\xac" -"\xcf\xe0\xb9\xd8\xb5\xc4\xd7\xca\xd4\xb4\xb7\xc7\xb3\xa3\xb6\xe0" -"\xa1\xa3\x0a\x0a", -"\x50\x79\x74\x68\x6f\x6e\xef\xbc\x88\xe6\xb4\xbe\xe6\xa3\xae\xef" -"\xbc\x89\xe8\xaf\xad\xe8\xa8\x80\xe6\x98\xaf\xe4\xb8\x80\xe7\xa7" -"\x8d\xe5\x8a\x9f\xe8\x83\xbd\xe5\xbc\xba\xe5\xa4\xa7\xe8\x80\x8c" -"\xe5\xae\x8c\xe5\x96\x84\xe7\x9a\x84\xe9\x80\x9a\xe7\x94\xa8\xe5" -"\x9e\x8b\xe8\xae\xa1\xe7\xae\x97\xe6\x9c\xba\xe7\xa8\x8b\xe5\xba" -"\x8f\xe8\xae\xbe\xe8\xae\xa1\xe8\xaf\xad\xe8\xa8\x80\xef\xbc\x8c" -"\x0a\xe5\xb7\xb2\xe7\xbb\x8f\xe5\x85\xb7\xe6\x9c\x89\xe5\x8d\x81" -"\xe5\xa4\x9a\xe5\xb9\xb4\xe7\x9a\x84\xe5\x8f\x91\xe5\xb1\x95\xe5" -"\x8e\x86\xe5\x8f\xb2\xef\xbc\x8c\xe6\x88\x90\xe7\x86\x9f\xe4\xb8" -"\x94\xe7\xa8\xb3\xe5\xae\x9a\xe3\x80\x82\xe8\xbf\x99\xe7\xa7\x8d" -"\xe8\xaf\xad\xe8\xa8\x80\xe5\x85\xb7\xe6\x9c\x89\xe9\x9d\x9e\xe5" -"\xb8\xb8\xe7\xae\x80\xe6\x8d\xb7\xe8\x80\x8c\xe6\xb8\x85\xe6\x99" -"\xb0\x0a\xe7\x9a\x84\xe8\xaf\xad\xe6\xb3\x95\xe7\x89\xb9\xe7\x82" 
-"\xb9\xef\xbc\x8c\xe9\x80\x82\xe5\x90\x88\xe5\xae\x8c\xe6\x88\x90" -"\xe5\x90\x84\xe7\xa7\x8d\xe9\xab\x98\xe5\xb1\x82\xe4\xbb\xbb\xe5" -"\x8a\xa1\xef\xbc\x8c\xe5\x87\xa0\xe4\xb9\x8e\xe5\x8f\xaf\xe4\xbb" -"\xa5\xe5\x9c\xa8\xe6\x89\x80\xe6\x9c\x89\xe7\x9a\x84\xe6\x93\x8d" -"\xe4\xbd\x9c\xe7\xb3\xbb\xe7\xbb\x9f\xe4\xb8\xad\x0a\xe8\xbf\x90" -"\xe8\xa1\x8c\xe3\x80\x82\xe8\xbf\x99\xe7\xa7\x8d\xe8\xaf\xad\xe8" -"\xa8\x80\xe7\xae\x80\xe5\x8d\x95\xe8\x80\x8c\xe5\xbc\xba\xe5\xa4" -"\xa7\xef\xbc\x8c\xe9\x80\x82\xe5\x90\x88\xe5\x90\x84\xe7\xa7\x8d" -"\xe4\xba\xba\xe5\xa3\xab\xe5\xad\xa6\xe4\xb9\xa0\xe4\xbd\xbf\xe7" -"\x94\xa8\xe3\x80\x82\xe7\x9b\xae\xe5\x89\x8d\xef\xbc\x8c\xe5\x9f" -"\xba\xe4\xba\x8e\xe8\xbf\x99\x0a\xe7\xa7\x8d\xe8\xaf\xad\xe8\xa8" -"\x80\xe7\x9a\x84\xe7\x9b\xb8\xe5\x85\xb3\xe6\x8a\x80\xe6\x9c\xaf" -"\xe6\xad\xa3\xe5\x9c\xa8\xe9\xa3\x9e\xe9\x80\x9f\xe7\x9a\x84\xe5" -"\x8f\x91\xe5\xb1\x95\xef\xbc\x8c\xe7\x94\xa8\xe6\x88\xb7\xe6\x95" -"\xb0\xe9\x87\x8f\xe6\x80\xa5\xe5\x89\xa7\xe6\x89\xa9\xe5\xa4\xa7" -"\xef\xbc\x8c\xe7\x9b\xb8\xe5\x85\xb3\xe7\x9a\x84\xe8\xb5\x84\xe6" -"\xba\x90\xe9\x9d\x9e\xe5\xb8\xb8\xe5\xa4\x9a\xe3\x80\x82\x0a\x0a"), -'gbk': ( -"\x50\x79\x74\x68\x6f\x6e\xa3\xa8\xc5\xc9\xc9\xad\xa3\xa9\xd3\xef" -"\xd1\xd4\xca\xc7\xd2\xbb\xd6\xd6\xb9\xa6\xc4\xdc\xc7\xbf\xb4\xf3" -"\xb6\xf8\xcd\xea\xc9\xc6\xb5\xc4\xcd\xa8\xd3\xc3\xd0\xcd\xbc\xc6" -"\xcb\xe3\xbb\xfa\xb3\xcc\xd0\xf2\xc9\xe8\xbc\xc6\xd3\xef\xd1\xd4" -"\xa3\xac\x0a\xd2\xd1\xbe\xad\xbe\xdf\xd3\xd0\xca\xae\xb6\xe0\xc4" -"\xea\xb5\xc4\xb7\xa2\xd5\xb9\xc0\xfa\xca\xb7\xa3\xac\xb3\xc9\xca" -"\xec\xc7\xd2\xce\xc8\xb6\xa8\xa1\xa3\xd5\xe2\xd6\xd6\xd3\xef\xd1" -"\xd4\xbe\xdf\xd3\xd0\xb7\xc7\xb3\xa3\xbc\xf2\xbd\xdd\xb6\xf8\xc7" -"\xe5\xce\xfa\x0a\xb5\xc4\xd3\xef\xb7\xa8\xcc\xd8\xb5\xe3\xa3\xac" -"\xca\xca\xba\xcf\xcd\xea\xb3\xc9\xb8\xf7\xd6\xd6\xb8\xdf\xb2\xe3" -"\xc8\xce\xce\xf1\xa3\xac\xbc\xb8\xba\xf5\xbf\xc9\xd2\xd4\xd4\xda" -"\xcb\xf9\xd3\xd0\xb5\xc4\xb2\xd9\xd7\xf7\xcf\xb5\xcd\xb3\xd6\xd0" 
-"\x0a\xd4\xcb\xd0\xd0\xa1\xa3\xd5\xe2\xd6\xd6\xd3\xef\xd1\xd4\xbc" -"\xf2\xb5\xa5\xb6\xf8\xc7\xbf\xb4\xf3\xa3\xac\xca\xca\xba\xcf\xb8" -"\xf7\xd6\xd6\xc8\xcb\xca\xbf\xd1\xa7\xcf\xb0\xca\xb9\xd3\xc3\xa1" -"\xa3\xc4\xbf\xc7\xb0\xa3\xac\xbb\xf9\xd3\xda\xd5\xe2\x0a\xd6\xd6" -"\xd3\xef\xd1\xd4\xb5\xc4\xcf\xe0\xb9\xd8\xbc\xbc\xca\xf5\xd5\xfd" -"\xd4\xda\xb7\xc9\xcb\xd9\xb5\xc4\xb7\xa2\xd5\xb9\xa3\xac\xd3\xc3" -"\xbb\xa7\xca\xfd\xc1\xbf\xbc\xb1\xbe\xe7\xc0\xa9\xb4\xf3\xa3\xac" -"\xcf\xe0\xb9\xd8\xb5\xc4\xd7\xca\xd4\xb4\xb7\xc7\xb3\xa3\xb6\xe0" -"\xa1\xa3\x0a\xc8\xe7\xba\xce\xd4\xda\x20\x50\x79\x74\x68\x6f\x6e" -"\x20\xd6\xd0\xca\xb9\xd3\xc3\xbc\xc8\xd3\xd0\xb5\xc4\x20\x43\x20" -"\x6c\x69\x62\x72\x61\x72\x79\x3f\x0a\xa1\xa1\xd4\xda\xd9\x59\xd3" -"\x8d\xbf\xc6\xbc\xbc\xbf\xec\xcb\xd9\xb0\x6c\xd5\xb9\xb5\xc4\xbd" -"\xf1\xcc\xec\x2c\x20\xe9\x5f\xb0\x6c\xbc\xb0\x9c\x79\xd4\x87\xdc" -"\x9b\xf3\x77\xb5\xc4\xcb\xd9\xb6\xc8\xca\xc7\xb2\xbb\xc8\xdd\xba" -"\xf6\xd2\x95\xb5\xc4\x0a\xd5\x6e\xee\x7d\x2e\x20\x9e\xe9\xbc\xd3" -"\xbf\xec\xe9\x5f\xb0\x6c\xbc\xb0\x9c\x79\xd4\x87\xb5\xc4\xcb\xd9" -"\xb6\xc8\x2c\x20\xce\xd2\x82\x83\xb1\xe3\xb3\xa3\xcf\xa3\xcd\xfb" -"\xc4\xdc\xc0\xfb\xd3\xc3\xd2\xbb\xd0\xa9\xd2\xd1\xe9\x5f\xb0\x6c" -"\xba\xc3\xb5\xc4\x0a\x6c\x69\x62\x72\x61\x72\x79\x2c\x20\x81\x4b" -"\xd3\xd0\xd2\xbb\x82\x80\x20\x66\x61\x73\x74\x20\x70\x72\x6f\x74" -"\x6f\x74\x79\x70\x69\x6e\x67\x20\xb5\xc4\x20\x70\x72\x6f\x67\x72" -"\x61\x6d\x6d\x69\x6e\x67\x20\x6c\x61\x6e\x67\x75\x61\x67\x65\x20" -"\xbf\xc9\x0a\xb9\xa9\xca\xb9\xd3\xc3\x2e\x20\xc4\xbf\xc7\xb0\xd3" -"\xd0\xd4\x53\xd4\x53\xb6\xe0\xb6\xe0\xb5\xc4\x20\x6c\x69\x62\x72" -"\x61\x72\x79\x20\xca\xc7\xd2\xd4\x20\x43\x20\x8c\x91\xb3\xc9\x2c" -"\x20\xb6\xf8\x20\x50\x79\x74\x68\x6f\x6e\x20\xca\xc7\xd2\xbb\x82" -"\x80\x0a\x66\x61\x73\x74\x20\x70\x72\x6f\x74\x6f\x74\x79\x70\x69" -"\x6e\x67\x20\xb5\xc4\x20\x70\x72\x6f\x67\x72\x61\x6d\x6d\x69\x6e" -"\x67\x20\x6c\x61\x6e\x67\x75\x61\x67\x65\x2e\x20\xb9\xca\xce\xd2" 
-"\x82\x83\xcf\xa3\xcd\xfb\xc4\xdc\x8c\xa2\xbc\xc8\xd3\xd0\xb5\xc4" -"\x0a\x43\x20\x6c\x69\x62\x72\x61\x72\x79\x20\xc4\xc3\xb5\xbd\x20" -"\x50\x79\x74\x68\x6f\x6e\x20\xb5\xc4\xad\x68\xbe\xb3\xd6\xd0\x9c" -"\x79\xd4\x87\xbc\xb0\xd5\xfb\xba\xcf\x2e\x20\xc6\xe4\xd6\xd0\xd7" -"\xee\xd6\xf7\xd2\xaa\xd2\xb2\xca\xc7\xce\xd2\x82\x83\xcb\xf9\x0a" -"\xd2\xaa\xd3\x91\xd5\x93\xb5\xc4\x86\x96\xee\x7d\xbe\xcd\xca\xc7" -"\x3a\x0a\x0a", -"\x50\x79\x74\x68\x6f\x6e\xef\xbc\x88\xe6\xb4\xbe\xe6\xa3\xae\xef" -"\xbc\x89\xe8\xaf\xad\xe8\xa8\x80\xe6\x98\xaf\xe4\xb8\x80\xe7\xa7" -"\x8d\xe5\x8a\x9f\xe8\x83\xbd\xe5\xbc\xba\xe5\xa4\xa7\xe8\x80\x8c" -"\xe5\xae\x8c\xe5\x96\x84\xe7\x9a\x84\xe9\x80\x9a\xe7\x94\xa8\xe5" -"\x9e\x8b\xe8\xae\xa1\xe7\xae\x97\xe6\x9c\xba\xe7\xa8\x8b\xe5\xba" -"\x8f\xe8\xae\xbe\xe8\xae\xa1\xe8\xaf\xad\xe8\xa8\x80\xef\xbc\x8c" -"\x0a\xe5\xb7\xb2\xe7\xbb\x8f\xe5\x85\xb7\xe6\x9c\x89\xe5\x8d\x81" -"\xe5\xa4\x9a\xe5\xb9\xb4\xe7\x9a\x84\xe5\x8f\x91\xe5\xb1\x95\xe5" -"\x8e\x86\xe5\x8f\xb2\xef\xbc\x8c\xe6\x88\x90\xe7\x86\x9f\xe4\xb8" -"\x94\xe7\xa8\xb3\xe5\xae\x9a\xe3\x80\x82\xe8\xbf\x99\xe7\xa7\x8d" -"\xe8\xaf\xad\xe8\xa8\x80\xe5\x85\xb7\xe6\x9c\x89\xe9\x9d\x9e\xe5" -"\xb8\xb8\xe7\xae\x80\xe6\x8d\xb7\xe8\x80\x8c\xe6\xb8\x85\xe6\x99" -"\xb0\x0a\xe7\x9a\x84\xe8\xaf\xad\xe6\xb3\x95\xe7\x89\xb9\xe7\x82" -"\xb9\xef\xbc\x8c\xe9\x80\x82\xe5\x90\x88\xe5\xae\x8c\xe6\x88\x90" -"\xe5\x90\x84\xe7\xa7\x8d\xe9\xab\x98\xe5\xb1\x82\xe4\xbb\xbb\xe5" -"\x8a\xa1\xef\xbc\x8c\xe5\x87\xa0\xe4\xb9\x8e\xe5\x8f\xaf\xe4\xbb" -"\xa5\xe5\x9c\xa8\xe6\x89\x80\xe6\x9c\x89\xe7\x9a\x84\xe6\x93\x8d" -"\xe4\xbd\x9c\xe7\xb3\xbb\xe7\xbb\x9f\xe4\xb8\xad\x0a\xe8\xbf\x90" -"\xe8\xa1\x8c\xe3\x80\x82\xe8\xbf\x99\xe7\xa7\x8d\xe8\xaf\xad\xe8" -"\xa8\x80\xe7\xae\x80\xe5\x8d\x95\xe8\x80\x8c\xe5\xbc\xba\xe5\xa4" -"\xa7\xef\xbc\x8c\xe9\x80\x82\xe5\x90\x88\xe5\x90\x84\xe7\xa7\x8d" -"\xe4\xba\xba\xe5\xa3\xab\xe5\xad\xa6\xe4\xb9\xa0\xe4\xbd\xbf\xe7" -"\x94\xa8\xe3\x80\x82\xe7\x9b\xae\xe5\x89\x8d\xef\xbc\x8c\xe5\x9f" 
-"\xba\xe4\xba\x8e\xe8\xbf\x99\x0a\xe7\xa7\x8d\xe8\xaf\xad\xe8\xa8" -"\x80\xe7\x9a\x84\xe7\x9b\xb8\xe5\x85\xb3\xe6\x8a\x80\xe6\x9c\xaf" -"\xe6\xad\xa3\xe5\x9c\xa8\xe9\xa3\x9e\xe9\x80\x9f\xe7\x9a\x84\xe5" -"\x8f\x91\xe5\xb1\x95\xef\xbc\x8c\xe7\x94\xa8\xe6\x88\xb7\xe6\x95" -"\xb0\xe9\x87\x8f\xe6\x80\xa5\xe5\x89\xa7\xe6\x89\xa9\xe5\xa4\xa7" -"\xef\xbc\x8c\xe7\x9b\xb8\xe5\x85\xb3\xe7\x9a\x84\xe8\xb5\x84\xe6" -"\xba\x90\xe9\x9d\x9e\xe5\xb8\xb8\xe5\xa4\x9a\xe3\x80\x82\x0a\xe5" -"\xa6\x82\xe4\xbd\x95\xe5\x9c\xa8\x20\x50\x79\x74\x68\x6f\x6e\x20" -"\xe4\xb8\xad\xe4\xbd\xbf\xe7\x94\xa8\xe6\x97\xa2\xe6\x9c\x89\xe7" -"\x9a\x84\x20\x43\x20\x6c\x69\x62\x72\x61\x72\x79\x3f\x0a\xe3\x80" -"\x80\xe5\x9c\xa8\xe8\xb3\x87\xe8\xa8\x8a\xe7\xa7\x91\xe6\x8a\x80" -"\xe5\xbf\xab\xe9\x80\x9f\xe7\x99\xbc\xe5\xb1\x95\xe7\x9a\x84\xe4" -"\xbb\x8a\xe5\xa4\xa9\x2c\x20\xe9\x96\x8b\xe7\x99\xbc\xe5\x8f\x8a" -"\xe6\xb8\xac\xe8\xa9\xa6\xe8\xbb\x9f\xe9\xab\x94\xe7\x9a\x84\xe9" -"\x80\x9f\xe5\xba\xa6\xe6\x98\xaf\xe4\xb8\x8d\xe5\xae\xb9\xe5\xbf" -"\xbd\xe8\xa6\x96\xe7\x9a\x84\x0a\xe8\xaa\xb2\xe9\xa1\x8c\x2e\x20" -"\xe7\x82\xba\xe5\x8a\xa0\xe5\xbf\xab\xe9\x96\x8b\xe7\x99\xbc\xe5" -"\x8f\x8a\xe6\xb8\xac\xe8\xa9\xa6\xe7\x9a\x84\xe9\x80\x9f\xe5\xba" -"\xa6\x2c\x20\xe6\x88\x91\xe5\x80\x91\xe4\xbe\xbf\xe5\xb8\xb8\xe5" -"\xb8\x8c\xe6\x9c\x9b\xe8\x83\xbd\xe5\x88\xa9\xe7\x94\xa8\xe4\xb8" -"\x80\xe4\xba\x9b\xe5\xb7\xb2\xe9\x96\x8b\xe7\x99\xbc\xe5\xa5\xbd" -"\xe7\x9a\x84\x0a\x6c\x69\x62\x72\x61\x72\x79\x2c\x20\xe4\xb8\xa6" -"\xe6\x9c\x89\xe4\xb8\x80\xe5\x80\x8b\x20\x66\x61\x73\x74\x20\x70" -"\x72\x6f\x74\x6f\x74\x79\x70\x69\x6e\x67\x20\xe7\x9a\x84\x20\x70" -"\x72\x6f\x67\x72\x61\x6d\x6d\x69\x6e\x67\x20\x6c\x61\x6e\x67\x75" -"\x61\x67\x65\x20\xe5\x8f\xaf\x0a\xe4\xbe\x9b\xe4\xbd\xbf\xe7\x94" -"\xa8\x2e\x20\xe7\x9b\xae\xe5\x89\x8d\xe6\x9c\x89\xe8\xa8\xb1\xe8" -"\xa8\xb1\xe5\xa4\x9a\xe5\xa4\x9a\xe7\x9a\x84\x20\x6c\x69\x62\x72" -"\x61\x72\x79\x20\xe6\x98\xaf\xe4\xbb\xa5\x20\x43\x20\xe5\xaf\xab" 
-"\xe6\x88\x90\x2c\x20\xe8\x80\x8c\x20\x50\x79\x74\x68\x6f\x6e\x20" -"\xe6\x98\xaf\xe4\xb8\x80\xe5\x80\x8b\x0a\x66\x61\x73\x74\x20\x70" -"\x72\x6f\x74\x6f\x74\x79\x70\x69\x6e\x67\x20\xe7\x9a\x84\x20\x70" -"\x72\x6f\x67\x72\x61\x6d\x6d\x69\x6e\x67\x20\x6c\x61\x6e\x67\x75" -"\x61\x67\x65\x2e\x20\xe6\x95\x85\xe6\x88\x91\xe5\x80\x91\xe5\xb8" -"\x8c\xe6\x9c\x9b\xe8\x83\xbd\xe5\xb0\x87\xe6\x97\xa2\xe6\x9c\x89" -"\xe7\x9a\x84\x0a\x43\x20\x6c\x69\x62\x72\x61\x72\x79\x20\xe6\x8b" -"\xbf\xe5\x88\xb0\x20\x50\x79\x74\x68\x6f\x6e\x20\xe7\x9a\x84\xe7" -"\x92\xb0\xe5\xa2\x83\xe4\xb8\xad\xe6\xb8\xac\xe8\xa9\xa6\xe5\x8f" -"\x8a\xe6\x95\xb4\xe5\x90\x88\x2e\x20\xe5\x85\xb6\xe4\xb8\xad\xe6" -"\x9c\x80\xe4\xb8\xbb\xe8\xa6\x81\xe4\xb9\x9f\xe6\x98\xaf\xe6\x88" -"\x91\xe5\x80\x91\xe6\x89\x80\x0a\xe8\xa6\x81\xe8\xa8\x8e\xe8\xab" -"\x96\xe7\x9a\x84\xe5\x95\x8f\xe9\xa1\x8c\xe5\xb0\xb1\xe6\x98\xaf" -"\x3a\x0a\x0a"), -'johab': ( -"\x99\xb1\xa4\x77\x88\x62\xd0\x61\x20\xcd\x5c\xaf\xa1\xc5\xa9\x9c" -"\x61\x0a\x0a\xdc\xc0\xdc\xc0\x90\x73\x21\x21\x20\xf1\x67\xe2\x9c" -"\xf0\x55\xcc\x81\xa3\x89\x9f\x85\x8a\xa1\x20\xdc\xde\xdc\xd3\xd2" -"\x7a\xd9\xaf\xd9\xaf\xd9\xaf\x20\x8b\x77\x96\xd3\x20\xdc\xd1\x95" -"\x81\x20\xdc\xc0\x2e\x20\x2e\x0a\xed\x3c\xb5\x77\xdc\xd1\x93\x77" -"\xd2\x73\x20\x2e\x20\x2e\x20\x2e\x20\x2e\x20\xac\xe1\xb6\x89\x9e" -"\xa1\x20\x95\x65\xd0\x62\xf0\xe0\x20\xe0\x3b\xd2\x7a\x20\x21\x20" -"\x21\x20\x21\x87\x41\x2e\x87\x41\x0a\xd3\x61\xd3\x61\xd3\x61\x20" -"\x88\x41\x88\x41\x88\x41\xd9\x69\x87\x41\x5f\x87\x41\x20\xb4\xe1" -"\x9f\x9a\x20\xc8\xa1\xc5\xc1\x8b\x7a\x20\x95\x61\xb7\x77\x20\xc3" -"\x97\xe2\x9c\x97\x69\xf0\xe0\x20\xdc\xc0\x97\x61\x8b\x7a\x0a\xac" -"\xe9\x9f\x7a\x20\xe0\x3b\xd2\x7a\x20\x2e\x20\x2e\x20\x2e\x20\x2e" -"\x20\x8a\x89\xb4\x81\xae\xba\x20\xdc\xd1\x8a\xa1\x20\xdc\xde\x9f" -"\x89\xdc\xc2\x8b\x7a\x20\xf1\x67\xf1\x62\xf5\x49\xed\xfc\xf3\xe9" -"\x8c\x61\xbb\x9a\x0a\xb5\xc1\xb2\xa1\xd2\x7a\x20\x21\x20\x21\x20" 
-"\xed\x3c\xb5\x77\xdc\xd1\x20\xe0\x3b\x93\x77\x8a\xa1\x20\xd9\x69" -"\xea\xbe\x89\xc5\x20\xb4\xf4\x93\x77\x8a\xa1\x93\x77\x20\xed\x3c" -"\x93\x77\x96\xc1\xd2\x7a\x20\x8b\x69\xb4\x81\x97\x7a\x0a\xdc\xde" -"\x9d\x61\x97\x41\xe2\x9c\x20\xaf\x81\xce\xa1\xae\xa1\xd2\x7a\x20" -"\xb4\xe1\x9f\x9a\x20\xf1\x67\xf1\x62\xf5\x49\xed\xfc\xf3\xe9\xaf" -"\x82\xdc\xef\x97\x69\xb4\x7a\x21\x21\x20\xdc\xc0\xdc\xc0\x90\x73" -"\xd9\xbd\x20\xd9\x62\xd9\x62\x2a\x0a\x0a", -"\xeb\x98\xa0\xeb\xb0\xa9\xea\xb0\x81\xed\x95\x98\x20\xed\x8e\xb2" -"\xec\x8b\x9c\xec\xbd\x9c\xeb\x9d\xbc\x0a\x0a\xe3\x89\xaf\xe3\x89" -"\xaf\xeb\x82\xa9\x21\x21\x20\xe5\x9b\xa0\xe4\xb9\x9d\xe6\x9c\x88" -"\xed\x8c\xa8\xeb\xaf\xa4\xeb\xa6\x94\xea\xb6\x88\x20\xe2\x93\xa1" -"\xe2\x93\x96\xed\x9b\x80\xc2\xbf\xc2\xbf\xc2\xbf\x20\xea\xb8\x8d" -"\xeb\x92\x99\x20\xe2\x93\x94\xeb\x8e\xa8\x20\xe3\x89\xaf\x2e\x20" -"\x2e\x0a\xe4\xba\x9e\xec\x98\x81\xe2\x93\x94\xeb\x8a\xa5\xed\x9a" -"\xb9\x20\x2e\x20\x2e\x20\x2e\x20\x2e\x20\xec\x84\x9c\xec\x9a\xb8" -"\xeb\xa4\x84\x20\xeb\x8e\x90\xed\x95\x99\xe4\xb9\x99\x20\xe5\xae" -"\xb6\xed\x9b\x80\x20\x21\x20\x21\x20\x21\xe3\x85\xa0\x2e\xe3\x85" -"\xa0\x0a\xed\x9d\x90\xed\x9d\x90\xed\x9d\x90\x20\xe3\x84\xb1\xe3" -"\x84\xb1\xe3\x84\xb1\xe2\x98\x86\xe3\x85\xa0\x5f\xe3\x85\xa0\x20" -"\xec\x96\xb4\xeb\xa6\xa8\x20\xed\x83\xb8\xec\xbd\xb0\xea\xb8\x90" -"\x20\xeb\x8e\x8c\xec\x9d\x91\x20\xec\xb9\x91\xe4\xb9\x9d\xeb\x93" -"\xa4\xe4\xb9\x99\x20\xe3\x89\xaf\xeb\x93\x9c\xea\xb8\x90\x0a\xec" -"\x84\xa4\xeb\xa6\x8c\x20\xe5\xae\xb6\xed\x9b\x80\x20\x2e\x20\x2e" -"\x20\x2e\x20\x2e\x20\xea\xb5\xb4\xec\x95\xa0\xec\x89\x8c\x20\xe2" -"\x93\x94\xea\xb6\x88\x20\xe2\x93\xa1\xeb\xa6\x98\xe3\x89\xb1\xea" -"\xb8\x90\x20\xe5\x9b\xa0\xe4\xbb\x81\xe5\xb7\x9d\xef\xa6\x81\xe4" -"\xb8\xad\xea\xb9\x8c\xec\xa6\xbc\x0a\xec\x99\x80\xec\x92\x80\xed" -"\x9b\x80\x20\x21\x20\x21\x20\xe4\xba\x9e\xec\x98\x81\xe2\x93\x94" -"\x20\xe5\xae\xb6\xeb\x8a\xa5\xea\xb6\x88\x20\xe2\x98\x86\xe4\xb8" 
-"\x8a\xea\xb4\x80\x20\xec\x97\x86\xeb\x8a\xa5\xea\xb6\x88\xeb\x8a" -"\xa5\x20\xe4\xba\x9e\xeb\x8a\xa5\xeb\x92\x88\xed\x9b\x80\x20\xea" -"\xb8\x80\xec\x95\xa0\xeb\x93\xb4\x0a\xe2\x93\xa1\xeb\xa0\xa4\xeb" -"\x93\x80\xe4\xb9\x9d\x20\xec\x8b\x80\xed\x92\x94\xec\x88\xb4\xed" -"\x9b\x80\x20\xec\x96\xb4\xeb\xa6\xa8\x20\xe5\x9b\xa0\xe4\xbb\x81" -"\xe5\xb7\x9d\xef\xa6\x81\xe4\xb8\xad\xec\x8b\x81\xe2\x91\xa8\xeb" -"\x93\xa4\xec\x95\x9c\x21\x21\x20\xe3\x89\xaf\xe3\x89\xaf\xeb\x82" -"\xa9\xe2\x99\xa1\x20\xe2\x8c\x92\xe2\x8c\x92\x2a\x0a\x0a"), -'shift_jis': ( -"\x50\x79\x74\x68\x6f\x6e\x20\x82\xcc\x8a\x4a\x94\xad\x82\xcd\x81" -"\x41\x31\x39\x39\x30\x20\x94\x4e\x82\xb2\x82\xeb\x82\xa9\x82\xe7" -"\x8a\x4a\x8e\x6e\x82\xb3\x82\xea\x82\xc4\x82\xa2\x82\xdc\x82\xb7" -"\x81\x42\x0a\x8a\x4a\x94\xad\x8e\xd2\x82\xcc\x20\x47\x75\x69\x64" -"\x6f\x20\x76\x61\x6e\x20\x52\x6f\x73\x73\x75\x6d\x20\x82\xcd\x8b" -"\xb3\x88\xe7\x97\x70\x82\xcc\x83\x76\x83\x8d\x83\x4f\x83\x89\x83" -"\x7e\x83\x93\x83\x4f\x8c\xbe\x8c\xea\x81\x75\x41\x42\x43\x81\x76" -"\x82\xcc\x8a\x4a\x94\xad\x82\xc9\x8e\x51\x89\xc1\x82\xb5\x82\xc4" -"\x82\xa2\x82\xdc\x82\xb5\x82\xbd\x82\xaa\x81\x41\x41\x42\x43\x20" -"\x82\xcd\x8e\xc0\x97\x70\x8f\xe3\x82\xcc\x96\xda\x93\x49\x82\xc9" -"\x82\xcd\x82\xa0\x82\xdc\x82\xe8\x93\x4b\x82\xb5\x82\xc4\x82\xa2" -"\x82\xdc\x82\xb9\x82\xf1\x82\xc5\x82\xb5\x82\xbd\x81\x42\x0a\x82" -"\xb1\x82\xcc\x82\xbd\x82\xdf\x81\x41\x47\x75\x69\x64\x6f\x20\x82" -"\xcd\x82\xe6\x82\xe8\x8e\xc0\x97\x70\x93\x49\x82\xc8\x83\x76\x83" -"\x8d\x83\x4f\x83\x89\x83\x7e\x83\x93\x83\x4f\x8c\xbe\x8c\xea\x82" -"\xcc\x8a\x4a\x94\xad\x82\xf0\x8a\x4a\x8e\x6e\x82\xb5\x81\x41\x89" -"\x70\x8d\x91\x20\x42\x42\x53\x20\x95\xfa\x91\x97\x82\xcc\x83\x52" -"\x83\x81\x83\x66\x83\x42\x94\xd4\x91\x67\x81\x75\x83\x82\x83\x93" -"\x83\x65\x83\x42\x20\x83\x70\x83\x43\x83\x5c\x83\x93\x81\x76\x82" -"\xcc\x83\x74\x83\x40\x83\x93\x82\xc5\x82\xa0\x82\xe9\x20\x47\x75" -"\x69\x64\x6f\x20\x82\xcd\x82\xb1\x82\xcc\x8c\xbe\x8c\xea\x82\xf0" 
-"\x81\x75\x50\x79\x74\x68\x6f\x6e\x81\x76\x82\xc6\x96\xbc\x82\xc3" -"\x82\xaf\x82\xdc\x82\xb5\x82\xbd\x81\x42\x0a\x82\xb1\x82\xcc\x82" -"\xe6\x82\xa4\x82\xc8\x94\x77\x8c\x69\x82\xa9\x82\xe7\x90\xb6\x82" -"\xdc\x82\xea\x82\xbd\x20\x50\x79\x74\x68\x6f\x6e\x20\x82\xcc\x8c" -"\xbe\x8c\xea\x90\xdd\x8c\x76\x82\xcd\x81\x41\x81\x75\x83\x56\x83" -"\x93\x83\x76\x83\x8b\x81\x76\x82\xc5\x81\x75\x8f\x4b\x93\xbe\x82" -"\xaa\x97\x65\x88\xd5\x81\x76\x82\xc6\x82\xa2\x82\xa4\x96\xda\x95" -"\x57\x82\xc9\x8f\x64\x93\x5f\x82\xaa\x92\x75\x82\xa9\x82\xea\x82" -"\xc4\x82\xa2\x82\xdc\x82\xb7\x81\x42\x0a\x91\xbd\x82\xad\x82\xcc" -"\x83\x58\x83\x4e\x83\x8a\x83\x76\x83\x67\x8c\x6e\x8c\xbe\x8c\xea" -"\x82\xc5\x82\xcd\x83\x86\x81\x5b\x83\x55\x82\xcc\x96\xda\x90\xe6" -"\x82\xcc\x97\x98\x95\xd6\x90\xab\x82\xf0\x97\x44\x90\xe6\x82\xb5" -"\x82\xc4\x90\x46\x81\x58\x82\xc8\x8b\x40\x94\x5c\x82\xf0\x8c\xbe" -"\x8c\xea\x97\x76\x91\x66\x82\xc6\x82\xb5\x82\xc4\x8e\xe6\x82\xe8" -"\x93\xfc\x82\xea\x82\xe9\x8f\xea\x8d\x87\x82\xaa\x91\xbd\x82\xa2" -"\x82\xcc\x82\xc5\x82\xb7\x82\xaa\x81\x41\x50\x79\x74\x68\x6f\x6e" -"\x20\x82\xc5\x82\xcd\x82\xbb\x82\xa4\x82\xa2\x82\xc1\x82\xbd\x8f" -"\xac\x8d\xd7\x8d\x48\x82\xaa\x92\xc7\x89\xc1\x82\xb3\x82\xea\x82" -"\xe9\x82\xb1\x82\xc6\x82\xcd\x82\xa0\x82\xdc\x82\xe8\x82\xa0\x82" -"\xe8\x82\xdc\x82\xb9\x82\xf1\x81\x42\x0a\x8c\xbe\x8c\xea\x8e\xa9" -"\x91\xcc\x82\xcc\x8b\x40\x94\x5c\x82\xcd\x8d\xc5\x8f\xac\x8c\xc0" -"\x82\xc9\x89\x9f\x82\xb3\x82\xa6\x81\x41\x95\x4b\x97\x76\x82\xc8" -"\x8b\x40\x94\x5c\x82\xcd\x8a\x67\x92\xa3\x83\x82\x83\x57\x83\x85" -"\x81\x5b\x83\x8b\x82\xc6\x82\xb5\x82\xc4\x92\xc7\x89\xc1\x82\xb7" -"\x82\xe9\x81\x41\x82\xc6\x82\xa2\x82\xa4\x82\xcc\x82\xaa\x20\x50" -"\x79\x74\x68\x6f\x6e\x20\x82\xcc\x83\x7c\x83\x8a\x83\x56\x81\x5b" -"\x82\xc5\x82\xb7\x81\x42\x0a\x0a", -"\x50\x79\x74\x68\x6f\x6e\x20\xe3\x81\xae\xe9\x96\x8b\xe7\x99\xba" -"\xe3\x81\xaf\xe3\x80\x81\x31\x39\x39\x30\x20\xe5\xb9\xb4\xe3\x81" 
-"\x94\xe3\x82\x8d\xe3\x81\x8b\xe3\x82\x89\xe9\x96\x8b\xe5\xa7\x8b" -"\xe3\x81\x95\xe3\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3" -"\x81\x99\xe3\x80\x82\x0a\xe9\x96\x8b\xe7\x99\xba\xe8\x80\x85\xe3" -"\x81\xae\x20\x47\x75\x69\x64\x6f\x20\x76\x61\x6e\x20\x52\x6f\x73" -"\x73\x75\x6d\x20\xe3\x81\xaf\xe6\x95\x99\xe8\x82\xb2\xe7\x94\xa8" -"\xe3\x81\xae\xe3\x83\x97\xe3\x83\xad\xe3\x82\xb0\xe3\x83\xa9\xe3" -"\x83\x9f\xe3\x83\xb3\xe3\x82\xb0\xe8\xa8\x80\xe8\xaa\x9e\xe3\x80" -"\x8c\x41\x42\x43\xe3\x80\x8d\xe3\x81\xae\xe9\x96\x8b\xe7\x99\xba" -"\xe3\x81\xab\xe5\x8f\x82\xe5\x8a\xa0\xe3\x81\x97\xe3\x81\xa6\xe3" -"\x81\x84\xe3\x81\xbe\xe3\x81\x97\xe3\x81\x9f\xe3\x81\x8c\xe3\x80" -"\x81\x41\x42\x43\x20\xe3\x81\xaf\xe5\xae\x9f\xe7\x94\xa8\xe4\xb8" -"\x8a\xe3\x81\xae\xe7\x9b\xae\xe7\x9a\x84\xe3\x81\xab\xe3\x81\xaf" -"\xe3\x81\x82\xe3\x81\xbe\xe3\x82\x8a\xe9\x81\xa9\xe3\x81\x97\xe3" -"\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3\x81\x9b\xe3\x82\x93\xe3\x81" -"\xa7\xe3\x81\x97\xe3\x81\x9f\xe3\x80\x82\x0a\xe3\x81\x93\xe3\x81" -"\xae\xe3\x81\x9f\xe3\x82\x81\xe3\x80\x81\x47\x75\x69\x64\x6f\x20" -"\xe3\x81\xaf\xe3\x82\x88\xe3\x82\x8a\xe5\xae\x9f\xe7\x94\xa8\xe7" -"\x9a\x84\xe3\x81\xaa\xe3\x83\x97\xe3\x83\xad\xe3\x82\xb0\xe3\x83" -"\xa9\xe3\x83\x9f\xe3\x83\xb3\xe3\x82\xb0\xe8\xa8\x80\xe8\xaa\x9e" -"\xe3\x81\xae\xe9\x96\x8b\xe7\x99\xba\xe3\x82\x92\xe9\x96\x8b\xe5" -"\xa7\x8b\xe3\x81\x97\xe3\x80\x81\xe8\x8b\xb1\xe5\x9b\xbd\x20\x42" -"\x42\x53\x20\xe6\x94\xbe\xe9\x80\x81\xe3\x81\xae\xe3\x82\xb3\xe3" -"\x83\xa1\xe3\x83\x87\xe3\x82\xa3\xe7\x95\xaa\xe7\xb5\x84\xe3\x80" -"\x8c\xe3\x83\xa2\xe3\x83\xb3\xe3\x83\x86\xe3\x82\xa3\x20\xe3\x83" -"\x91\xe3\x82\xa4\xe3\x82\xbd\xe3\x83\xb3\xe3\x80\x8d\xe3\x81\xae" -"\xe3\x83\x95\xe3\x82\xa1\xe3\x83\xb3\xe3\x81\xa7\xe3\x81\x82\xe3" -"\x82\x8b\x20\x47\x75\x69\x64\x6f\x20\xe3\x81\xaf\xe3\x81\x93\xe3" -"\x81\xae\xe8\xa8\x80\xe8\xaa\x9e\xe3\x82\x92\xe3\x80\x8c\x50\x79" -"\x74\x68\x6f\x6e\xe3\x80\x8d\xe3\x81\xa8\xe5\x90\x8d\xe3\x81\xa5" 
-"\xe3\x81\x91\xe3\x81\xbe\xe3\x81\x97\xe3\x81\x9f\xe3\x80\x82\x0a" -"\xe3\x81\x93\xe3\x81\xae\xe3\x82\x88\xe3\x81\x86\xe3\x81\xaa\xe8" -"\x83\x8c\xe6\x99\xaf\xe3\x81\x8b\xe3\x82\x89\xe7\x94\x9f\xe3\x81" -"\xbe\xe3\x82\x8c\xe3\x81\x9f\x20\x50\x79\x74\x68\x6f\x6e\x20\xe3" -"\x81\xae\xe8\xa8\x80\xe8\xaa\x9e\xe8\xa8\xad\xe8\xa8\x88\xe3\x81" -"\xaf\xe3\x80\x81\xe3\x80\x8c\xe3\x82\xb7\xe3\x83\xb3\xe3\x83\x97" -"\xe3\x83\xab\xe3\x80\x8d\xe3\x81\xa7\xe3\x80\x8c\xe7\xbf\x92\xe5" -"\xbe\x97\xe3\x81\x8c\xe5\xae\xb9\xe6\x98\x93\xe3\x80\x8d\xe3\x81" -"\xa8\xe3\x81\x84\xe3\x81\x86\xe7\x9b\xae\xe6\xa8\x99\xe3\x81\xab" -"\xe9\x87\x8d\xe7\x82\xb9\xe3\x81\x8c\xe7\xbd\xae\xe3\x81\x8b\xe3" -"\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3\x81\x99\xe3\x80" -"\x82\x0a\xe5\xa4\x9a\xe3\x81\x8f\xe3\x81\xae\xe3\x82\xb9\xe3\x82" -"\xaf\xe3\x83\xaa\xe3\x83\x97\xe3\x83\x88\xe7\xb3\xbb\xe8\xa8\x80" -"\xe8\xaa\x9e\xe3\x81\xa7\xe3\x81\xaf\xe3\x83\xa6\xe3\x83\xbc\xe3" -"\x82\xb6\xe3\x81\xae\xe7\x9b\xae\xe5\x85\x88\xe3\x81\xae\xe5\x88" -"\xa9\xe4\xbe\xbf\xe6\x80\xa7\xe3\x82\x92\xe5\x84\xaa\xe5\x85\x88" -"\xe3\x81\x97\xe3\x81\xa6\xe8\x89\xb2\xe3\x80\x85\xe3\x81\xaa\xe6" -"\xa9\x9f\xe8\x83\xbd\xe3\x82\x92\xe8\xa8\x80\xe8\xaa\x9e\xe8\xa6" -"\x81\xe7\xb4\xa0\xe3\x81\xa8\xe3\x81\x97\xe3\x81\xa6\xe5\x8f\x96" -"\xe3\x82\x8a\xe5\x85\xa5\xe3\x82\x8c\xe3\x82\x8b\xe5\xa0\xb4\xe5" -"\x90\x88\xe3\x81\x8c\xe5\xa4\x9a\xe3\x81\x84\xe3\x81\xae\xe3\x81" -"\xa7\xe3\x81\x99\xe3\x81\x8c\xe3\x80\x81\x50\x79\x74\x68\x6f\x6e" -"\x20\xe3\x81\xa7\xe3\x81\xaf\xe3\x81\x9d\xe3\x81\x86\xe3\x81\x84" -"\xe3\x81\xa3\xe3\x81\x9f\xe5\xb0\x8f\xe7\xb4\xb0\xe5\xb7\xa5\xe3" -"\x81\x8c\xe8\xbf\xbd\xe5\x8a\xa0\xe3\x81\x95\xe3\x82\x8c\xe3\x82" -"\x8b\xe3\x81\x93\xe3\x81\xa8\xe3\x81\xaf\xe3\x81\x82\xe3\x81\xbe" -"\xe3\x82\x8a\xe3\x81\x82\xe3\x82\x8a\xe3\x81\xbe\xe3\x81\x9b\xe3" -"\x82\x93\xe3\x80\x82\x0a\xe8\xa8\x80\xe8\xaa\x9e\xe8\x87\xaa\xe4" -"\xbd\x93\xe3\x81\xae\xe6\xa9\x9f\xe8\x83\xbd\xe3\x81\xaf\xe6\x9c" 
-"\x80\xe5\xb0\x8f\xe9\x99\x90\xe3\x81\xab\xe6\x8a\xbc\xe3\x81\x95" -"\xe3\x81\x88\xe3\x80\x81\xe5\xbf\x85\xe8\xa6\x81\xe3\x81\xaa\xe6" -"\xa9\x9f\xe8\x83\xbd\xe3\x81\xaf\xe6\x8b\xa1\xe5\xbc\xb5\xe3\x83" -"\xa2\xe3\x82\xb8\xe3\x83\xa5\xe3\x83\xbc\xe3\x83\xab\xe3\x81\xa8" -"\xe3\x81\x97\xe3\x81\xa6\xe8\xbf\xbd\xe5\x8a\xa0\xe3\x81\x99\xe3" -"\x82\x8b\xe3\x80\x81\xe3\x81\xa8\xe3\x81\x84\xe3\x81\x86\xe3\x81" -"\xae\xe3\x81\x8c\x20\x50\x79\x74\x68\x6f\x6e\x20\xe3\x81\xae\xe3" -"\x83\x9d\xe3\x83\xaa\xe3\x82\xb7\xe3\x83\xbc\xe3\x81\xa7\xe3\x81" -"\x99\xe3\x80\x82\x0a\x0a"), -'shift_jisx0213': ( -"\x50\x79\x74\x68\x6f\x6e\x20\x82\xcc\x8a\x4a\x94\xad\x82\xcd\x81" -"\x41\x31\x39\x39\x30\x20\x94\x4e\x82\xb2\x82\xeb\x82\xa9\x82\xe7" -"\x8a\x4a\x8e\x6e\x82\xb3\x82\xea\x82\xc4\x82\xa2\x82\xdc\x82\xb7" -"\x81\x42\x0a\x8a\x4a\x94\xad\x8e\xd2\x82\xcc\x20\x47\x75\x69\x64" -"\x6f\x20\x76\x61\x6e\x20\x52\x6f\x73\x73\x75\x6d\x20\x82\xcd\x8b" -"\xb3\x88\xe7\x97\x70\x82\xcc\x83\x76\x83\x8d\x83\x4f\x83\x89\x83" -"\x7e\x83\x93\x83\x4f\x8c\xbe\x8c\xea\x81\x75\x41\x42\x43\x81\x76" -"\x82\xcc\x8a\x4a\x94\xad\x82\xc9\x8e\x51\x89\xc1\x82\xb5\x82\xc4" -"\x82\xa2\x82\xdc\x82\xb5\x82\xbd\x82\xaa\x81\x41\x41\x42\x43\x20" -"\x82\xcd\x8e\xc0\x97\x70\x8f\xe3\x82\xcc\x96\xda\x93\x49\x82\xc9" -"\x82\xcd\x82\xa0\x82\xdc\x82\xe8\x93\x4b\x82\xb5\x82\xc4\x82\xa2" -"\x82\xdc\x82\xb9\x82\xf1\x82\xc5\x82\xb5\x82\xbd\x81\x42\x0a\x82" -"\xb1\x82\xcc\x82\xbd\x82\xdf\x81\x41\x47\x75\x69\x64\x6f\x20\x82" -"\xcd\x82\xe6\x82\xe8\x8e\xc0\x97\x70\x93\x49\x82\xc8\x83\x76\x83" -"\x8d\x83\x4f\x83\x89\x83\x7e\x83\x93\x83\x4f\x8c\xbe\x8c\xea\x82" -"\xcc\x8a\x4a\x94\xad\x82\xf0\x8a\x4a\x8e\x6e\x82\xb5\x81\x41\x89" -"\x70\x8d\x91\x20\x42\x42\x53\x20\x95\xfa\x91\x97\x82\xcc\x83\x52" -"\x83\x81\x83\x66\x83\x42\x94\xd4\x91\x67\x81\x75\x83\x82\x83\x93" -"\x83\x65\x83\x42\x20\x83\x70\x83\x43\x83\x5c\x83\x93\x81\x76\x82" -"\xcc\x83\x74\x83\x40\x83\x93\x82\xc5\x82\xa0\x82\xe9\x20\x47\x75" 
-"\x69\x64\x6f\x20\x82\xcd\x82\xb1\x82\xcc\x8c\xbe\x8c\xea\x82\xf0" -"\x81\x75\x50\x79\x74\x68\x6f\x6e\x81\x76\x82\xc6\x96\xbc\x82\xc3" -"\x82\xaf\x82\xdc\x82\xb5\x82\xbd\x81\x42\x0a\x82\xb1\x82\xcc\x82" -"\xe6\x82\xa4\x82\xc8\x94\x77\x8c\x69\x82\xa9\x82\xe7\x90\xb6\x82" -"\xdc\x82\xea\x82\xbd\x20\x50\x79\x74\x68\x6f\x6e\x20\x82\xcc\x8c" -"\xbe\x8c\xea\x90\xdd\x8c\x76\x82\xcd\x81\x41\x81\x75\x83\x56\x83" -"\x93\x83\x76\x83\x8b\x81\x76\x82\xc5\x81\x75\x8f\x4b\x93\xbe\x82" -"\xaa\x97\x65\x88\xd5\x81\x76\x82\xc6\x82\xa2\x82\xa4\x96\xda\x95" -"\x57\x82\xc9\x8f\x64\x93\x5f\x82\xaa\x92\x75\x82\xa9\x82\xea\x82" -"\xc4\x82\xa2\x82\xdc\x82\xb7\x81\x42\x0a\x91\xbd\x82\xad\x82\xcc" -"\x83\x58\x83\x4e\x83\x8a\x83\x76\x83\x67\x8c\x6e\x8c\xbe\x8c\xea" -"\x82\xc5\x82\xcd\x83\x86\x81\x5b\x83\x55\x82\xcc\x96\xda\x90\xe6" -"\x82\xcc\x97\x98\x95\xd6\x90\xab\x82\xf0\x97\x44\x90\xe6\x82\xb5" -"\x82\xc4\x90\x46\x81\x58\x82\xc8\x8b\x40\x94\x5c\x82\xf0\x8c\xbe" -"\x8c\xea\x97\x76\x91\x66\x82\xc6\x82\xb5\x82\xc4\x8e\xe6\x82\xe8" -"\x93\xfc\x82\xea\x82\xe9\x8f\xea\x8d\x87\x82\xaa\x91\xbd\x82\xa2" -"\x82\xcc\x82\xc5\x82\xb7\x82\xaa\x81\x41\x50\x79\x74\x68\x6f\x6e" -"\x20\x82\xc5\x82\xcd\x82\xbb\x82\xa4\x82\xa2\x82\xc1\x82\xbd\x8f" -"\xac\x8d\xd7\x8d\x48\x82\xaa\x92\xc7\x89\xc1\x82\xb3\x82\xea\x82" -"\xe9\x82\xb1\x82\xc6\x82\xcd\x82\xa0\x82\xdc\x82\xe8\x82\xa0\x82" -"\xe8\x82\xdc\x82\xb9\x82\xf1\x81\x42\x0a\x8c\xbe\x8c\xea\x8e\xa9" -"\x91\xcc\x82\xcc\x8b\x40\x94\x5c\x82\xcd\x8d\xc5\x8f\xac\x8c\xc0" -"\x82\xc9\x89\x9f\x82\xb3\x82\xa6\x81\x41\x95\x4b\x97\x76\x82\xc8" -"\x8b\x40\x94\x5c\x82\xcd\x8a\x67\x92\xa3\x83\x82\x83\x57\x83\x85" -"\x81\x5b\x83\x8b\x82\xc6\x82\xb5\x82\xc4\x92\xc7\x89\xc1\x82\xb7" -"\x82\xe9\x81\x41\x82\xc6\x82\xa2\x82\xa4\x82\xcc\x82\xaa\x20\x50" -"\x79\x74\x68\x6f\x6e\x20\x82\xcc\x83\x7c\x83\x8a\x83\x56\x81\x5b" -"\x82\xc5\x82\xb7\x81\x42\x0a\x0a\x83\x6d\x82\xf5\x20\x83\x9e\x20" -"\x83\x67\x83\x4c\x88\x4b\x88\x79\x20\x98\x83\xfc\xd6\x20\xfc\xd2" -"\xfc\xe6\xfb\xd4\x0a", 
-"\x50\x79\x74\x68\x6f\x6e\x20\xe3\x81\xae\xe9\x96\x8b\xe7\x99\xba" -"\xe3\x81\xaf\xe3\x80\x81\x31\x39\x39\x30\x20\xe5\xb9\xb4\xe3\x81" -"\x94\xe3\x82\x8d\xe3\x81\x8b\xe3\x82\x89\xe9\x96\x8b\xe5\xa7\x8b" -"\xe3\x81\x95\xe3\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3" -"\x81\x99\xe3\x80\x82\x0a\xe9\x96\x8b\xe7\x99\xba\xe8\x80\x85\xe3" -"\x81\xae\x20\x47\x75\x69\x64\x6f\x20\x76\x61\x6e\x20\x52\x6f\x73" -"\x73\x75\x6d\x20\xe3\x81\xaf\xe6\x95\x99\xe8\x82\xb2\xe7\x94\xa8" -"\xe3\x81\xae\xe3\x83\x97\xe3\x83\xad\xe3\x82\xb0\xe3\x83\xa9\xe3" -"\x83\x9f\xe3\x83\xb3\xe3\x82\xb0\xe8\xa8\x80\xe8\xaa\x9e\xe3\x80" -"\x8c\x41\x42\x43\xe3\x80\x8d\xe3\x81\xae\xe9\x96\x8b\xe7\x99\xba" -"\xe3\x81\xab\xe5\x8f\x82\xe5\x8a\xa0\xe3\x81\x97\xe3\x81\xa6\xe3" -"\x81\x84\xe3\x81\xbe\xe3\x81\x97\xe3\x81\x9f\xe3\x81\x8c\xe3\x80" -"\x81\x41\x42\x43\x20\xe3\x81\xaf\xe5\xae\x9f\xe7\x94\xa8\xe4\xb8" -"\x8a\xe3\x81\xae\xe7\x9b\xae\xe7\x9a\x84\xe3\x81\xab\xe3\x81\xaf" -"\xe3\x81\x82\xe3\x81\xbe\xe3\x82\x8a\xe9\x81\xa9\xe3\x81\x97\xe3" -"\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3\x81\x9b\xe3\x82\x93\xe3\x81" -"\xa7\xe3\x81\x97\xe3\x81\x9f\xe3\x80\x82\x0a\xe3\x81\x93\xe3\x81" -"\xae\xe3\x81\x9f\xe3\x82\x81\xe3\x80\x81\x47\x75\x69\x64\x6f\x20" -"\xe3\x81\xaf\xe3\x82\x88\xe3\x82\x8a\xe5\xae\x9f\xe7\x94\xa8\xe7" -"\x9a\x84\xe3\x81\xaa\xe3\x83\x97\xe3\x83\xad\xe3\x82\xb0\xe3\x83" -"\xa9\xe3\x83\x9f\xe3\x83\xb3\xe3\x82\xb0\xe8\xa8\x80\xe8\xaa\x9e" -"\xe3\x81\xae\xe9\x96\x8b\xe7\x99\xba\xe3\x82\x92\xe9\x96\x8b\xe5" -"\xa7\x8b\xe3\x81\x97\xe3\x80\x81\xe8\x8b\xb1\xe5\x9b\xbd\x20\x42" -"\x42\x53\x20\xe6\x94\xbe\xe9\x80\x81\xe3\x81\xae\xe3\x82\xb3\xe3" -"\x83\xa1\xe3\x83\x87\xe3\x82\xa3\xe7\x95\xaa\xe7\xb5\x84\xe3\x80" -"\x8c\xe3\x83\xa2\xe3\x83\xb3\xe3\x83\x86\xe3\x82\xa3\x20\xe3\x83" -"\x91\xe3\x82\xa4\xe3\x82\xbd\xe3\x83\xb3\xe3\x80\x8d\xe3\x81\xae" -"\xe3\x83\x95\xe3\x82\xa1\xe3\x83\xb3\xe3\x81\xa7\xe3\x81\x82\xe3" -"\x82\x8b\x20\x47\x75\x69\x64\x6f\x20\xe3\x81\xaf\xe3\x81\x93\xe3" 
-"\x81\xae\xe8\xa8\x80\xe8\xaa\x9e\xe3\x82\x92\xe3\x80\x8c\x50\x79" -"\x74\x68\x6f\x6e\xe3\x80\x8d\xe3\x81\xa8\xe5\x90\x8d\xe3\x81\xa5" -"\xe3\x81\x91\xe3\x81\xbe\xe3\x81\x97\xe3\x81\x9f\xe3\x80\x82\x0a" -"\xe3\x81\x93\xe3\x81\xae\xe3\x82\x88\xe3\x81\x86\xe3\x81\xaa\xe8" -"\x83\x8c\xe6\x99\xaf\xe3\x81\x8b\xe3\x82\x89\xe7\x94\x9f\xe3\x81" -"\xbe\xe3\x82\x8c\xe3\x81\x9f\x20\x50\x79\x74\x68\x6f\x6e\x20\xe3" -"\x81\xae\xe8\xa8\x80\xe8\xaa\x9e\xe8\xa8\xad\xe8\xa8\x88\xe3\x81" -"\xaf\xe3\x80\x81\xe3\x80\x8c\xe3\x82\xb7\xe3\x83\xb3\xe3\x83\x97" -"\xe3\x83\xab\xe3\x80\x8d\xe3\x81\xa7\xe3\x80\x8c\xe7\xbf\x92\xe5" -"\xbe\x97\xe3\x81\x8c\xe5\xae\xb9\xe6\x98\x93\xe3\x80\x8d\xe3\x81" -"\xa8\xe3\x81\x84\xe3\x81\x86\xe7\x9b\xae\xe6\xa8\x99\xe3\x81\xab" -"\xe9\x87\x8d\xe7\x82\xb9\xe3\x81\x8c\xe7\xbd\xae\xe3\x81\x8b\xe3" -"\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3\x81\x99\xe3\x80" -"\x82\x0a\xe5\xa4\x9a\xe3\x81\x8f\xe3\x81\xae\xe3\x82\xb9\xe3\x82" -"\xaf\xe3\x83\xaa\xe3\x83\x97\xe3\x83\x88\xe7\xb3\xbb\xe8\xa8\x80" -"\xe8\xaa\x9e\xe3\x81\xa7\xe3\x81\xaf\xe3\x83\xa6\xe3\x83\xbc\xe3" -"\x82\xb6\xe3\x81\xae\xe7\x9b\xae\xe5\x85\x88\xe3\x81\xae\xe5\x88" -"\xa9\xe4\xbe\xbf\xe6\x80\xa7\xe3\x82\x92\xe5\x84\xaa\xe5\x85\x88" -"\xe3\x81\x97\xe3\x81\xa6\xe8\x89\xb2\xe3\x80\x85\xe3\x81\xaa\xe6" -"\xa9\x9f\xe8\x83\xbd\xe3\x82\x92\xe8\xa8\x80\xe8\xaa\x9e\xe8\xa6" -"\x81\xe7\xb4\xa0\xe3\x81\xa8\xe3\x81\x97\xe3\x81\xa6\xe5\x8f\x96" -"\xe3\x82\x8a\xe5\x85\xa5\xe3\x82\x8c\xe3\x82\x8b\xe5\xa0\xb4\xe5" -"\x90\x88\xe3\x81\x8c\xe5\xa4\x9a\xe3\x81\x84\xe3\x81\xae\xe3\x81" -"\xa7\xe3\x81\x99\xe3\x81\x8c\xe3\x80\x81\x50\x79\x74\x68\x6f\x6e" -"\x20\xe3\x81\xa7\xe3\x81\xaf\xe3\x81\x9d\xe3\x81\x86\xe3\x81\x84" -"\xe3\x81\xa3\xe3\x81\x9f\xe5\xb0\x8f\xe7\xb4\xb0\xe5\xb7\xa5\xe3" -"\x81\x8c\xe8\xbf\xbd\xe5\x8a\xa0\xe3\x81\x95\xe3\x82\x8c\xe3\x82" -"\x8b\xe3\x81\x93\xe3\x81\xa8\xe3\x81\xaf\xe3\x81\x82\xe3\x81\xbe" -"\xe3\x82\x8a\xe3\x81\x82\xe3\x82\x8a\xe3\x81\xbe\xe3\x81\x9b\xe3" 
-"\x82\x93\xe3\x80\x82\x0a\xe8\xa8\x80\xe8\xaa\x9e\xe8\x87\xaa\xe4" -"\xbd\x93\xe3\x81\xae\xe6\xa9\x9f\xe8\x83\xbd\xe3\x81\xaf\xe6\x9c" -"\x80\xe5\xb0\x8f\xe9\x99\x90\xe3\x81\xab\xe6\x8a\xbc\xe3\x81\x95" -"\xe3\x81\x88\xe3\x80\x81\xe5\xbf\x85\xe8\xa6\x81\xe3\x81\xaa\xe6" -"\xa9\x9f\xe8\x83\xbd\xe3\x81\xaf\xe6\x8b\xa1\xe5\xbc\xb5\xe3\x83" -"\xa2\xe3\x82\xb8\xe3\x83\xa5\xe3\x83\xbc\xe3\x83\xab\xe3\x81\xa8" -"\xe3\x81\x97\xe3\x81\xa6\xe8\xbf\xbd\xe5\x8a\xa0\xe3\x81\x99\xe3" -"\x82\x8b\xe3\x80\x81\xe3\x81\xa8\xe3\x81\x84\xe3\x81\x86\xe3\x81" -"\xae\xe3\x81\x8c\x20\x50\x79\x74\x68\x6f\x6e\x20\xe3\x81\xae\xe3" -"\x83\x9d\xe3\x83\xaa\xe3\x82\xb7\xe3\x83\xbc\xe3\x81\xa7\xe3\x81" -"\x99\xe3\x80\x82\x0a\x0a\xe3\x83\x8e\xe3\x81\x8b\xe3\x82\x9a\x20" -"\xe3\x83\x88\xe3\x82\x9a\x20\xe3\x83\x88\xe3\x82\xad\xef\xa8\xb6" -"\xef\xa8\xb9\x20\xf0\xa1\x9a\xb4\xf0\xaa\x8e\x8c\x20\xe9\xba\x80" -"\xe9\xbd\x81\xf0\xa9\x9b\xb0\x0a"), -} diff --git a/lib-python/2.7/test/crashers/README b/lib-python/2.7/test/crashers/README --- a/lib-python/2.7/test/crashers/README +++ b/lib-python/2.7/test/crashers/README @@ -1,20 +1,16 @@ -This directory only contains tests for outstanding bugs that cause -the interpreter to segfault. Ideally this directory should always -be empty. Sometimes it may not be easy to fix the underlying cause. +This directory only contains tests for outstanding bugs that cause the +interpreter to segfault. Ideally this directory should always be empty, but +sometimes it may not be easy to fix the underlying cause and the bug is deemed +too obscure to invest the effort. Each test should fail when run from the command line: ./python Lib/test/crashers/weakref_in_del.py -Each test should have a link to the bug report: +Put as much info into a docstring or comments to help determine the cause of the +failure, as well as a bugs.python.org issue number if it exists. Particularly +note if the cause is system or environment dependent and what the variables are. 
- # http://python.org/sf/BUG# - -Put as much info into a docstring or comments to help determine -the cause of the failure. Particularly note if the cause is -system or environment dependent and what the variables are. - -Once the crash is fixed, the test case should be moved into an appropriate -test (even if it was originally from the test suite). This ensures the -regression doesn't happen again. And if it does, it should be easier -to track down. +Once the crash is fixed, the test case should be moved into an appropriate test +(even if it was originally from the test suite). This ensures the regression +doesn't happen again. And if it does, it should be easier to track down. diff --git a/lib-python/2.7/test/crashers/recursion_limit_too_high.py b/lib-python/2.7/test/crashers/recursion_limit_too_high.py --- a/lib-python/2.7/test/crashers/recursion_limit_too_high.py +++ b/lib-python/2.7/test/crashers/recursion_limit_too_high.py @@ -5,7 +5,7 @@ # file handles. # The point of this example is to show that sys.setrecursionlimit() is a -# hack, and not a robust solution. This example simply exercices a path +# hack, and not a robust solution. This example simply exercises a path # where it takes many C-level recursions, consuming a lot of stack # space, for each Python-level recursion. So 1000 times this amount of # stack space may be too much for standard platforms already. diff --git a/lib-python/2.7/test/decimaltestdata/and.decTest b/lib-python/2.7/test/decimaltestdata/and.decTest --- a/lib-python/2.7/test/decimaltestdata/and.decTest +++ b/lib-python/2.7/test/decimaltestdata/and.decTest @@ -1,338 +1,338 @@ ------------------------------------------------------------------------- --- and.decTest -- digitwise logical AND -- --- Copyright (c) IBM Corporation, 1981, 2008. All rights reserved. 
-- ------------------------------------------------------------------------- --- Please see the document "General Decimal Arithmetic Testcases" -- --- at http://www2.hursley.ibm.com/decimal for the description of -- --- these testcases. -- --- -- --- These testcases are experimental ('beta' versions), and they -- --- may contain errors. They are offered on an as-is basis. In -- --- particular, achieving the same results as the tests here is not -- --- a guarantee that an implementation complies with any Standard -- --- or specification. The tests are not exhaustive. -- --- -- --- Please send comments, suggestions, and corrections to the author: -- --- Mike Cowlishaw, IBM Fellow -- --- IBM UK, PO Box 31, Birmingham Road, Warwick CV34 5JL, UK -- --- mfc at uk.ibm.com -- ------------------------------------------------------------------------- -version: 2.59 - -extended: 1 -precision: 9 -rounding: half_up -maxExponent: 999 -minExponent: -999 - --- Sanity check (truth table) -andx001 and 0 0 -> 0 -andx002 and 0 1 -> 0 -andx003 and 1 0 -> 0 -andx004 and 1 1 -> 1 -andx005 and 1100 1010 -> 1000 -andx006 and 1111 10 -> 10 -andx007 and 1111 1010 -> 1010 - --- and at msd and msd-1 -andx010 and 000000000 000000000 -> 0 -andx011 and 000000000 100000000 -> 0 -andx012 and 100000000 000000000 -> 0 -andx013 and 100000000 100000000 -> 100000000 -andx014 and 000000000 000000000 -> 0 -andx015 and 000000000 010000000 -> 0 -andx016 and 010000000 000000000 -> 0 -andx017 and 010000000 010000000 -> 10000000 - --- Various lengths --- 123456789 123456789 123456789 -andx021 and 111111111 111111111 -> 111111111 -andx022 and 111111111111 111111111 -> 111111111 -andx023 and 111111111111 11111111 -> 11111111 -andx024 and 111111111 11111111 -> 11111111 -andx025 and 111111111 1111111 -> 1111111 -andx026 and 111111111111 111111 -> 111111 -andx027 and 111111111111 11111 -> 11111 -andx028 and 111111111111 1111 -> 1111 -andx029 and 111111111111 111 -> 111 -andx031 and 111111111111 11 -> 11 -andx032 
and 111111111111 1 -> 1 -andx033 and 111111111111 1111111111 -> 111111111 -andx034 and 11111111111 11111111111 -> 111111111 -andx035 and 1111111111 111111111111 -> 111111111 -andx036 and 111111111 1111111111111 -> 111111111 - -andx040 and 111111111 111111111111 -> 111111111 -andx041 and 11111111 111111111111 -> 11111111 -andx042 and 11111111 111111111 -> 11111111 -andx043 and 1111111 111111111 -> 1111111 -andx044 and 111111 111111111 -> 111111 -andx045 and 11111 111111111 -> 11111 -andx046 and 1111 111111111 -> 1111 -andx047 and 111 111111111 -> 111 -andx048 and 11 111111111 -> 11 -andx049 and 1 111111111 -> 1 - -andx050 and 1111111111 1 -> 1 -andx051 and 111111111 1 -> 1 -andx052 and 11111111 1 -> 1 -andx053 and 1111111 1 -> 1 -andx054 and 111111 1 -> 1 -andx055 and 11111 1 -> 1 -andx056 and 1111 1 -> 1 -andx057 and 111 1 -> 1 -andx058 and 11 1 -> 1 -andx059 and 1 1 -> 1 - -andx060 and 1111111111 0 -> 0 -andx061 and 111111111 0 -> 0 -andx062 and 11111111 0 -> 0 -andx063 and 1111111 0 -> 0 -andx064 and 111111 0 -> 0 -andx065 and 11111 0 -> 0 -andx066 and 1111 0 -> 0 -andx067 and 111 0 -> 0 -andx068 and 11 0 -> 0 -andx069 and 1 0 -> 0 - -andx070 and 1 1111111111 -> 1 -andx071 and 1 111111111 -> 1 -andx072 and 1 11111111 -> 1 -andx073 and 1 1111111 -> 1 -andx074 and 1 111111 -> 1 -andx075 and 1 11111 -> 1 -andx076 and 1 1111 -> 1 -andx077 and 1 111 -> 1 -andx078 and 1 11 -> 1 -andx079 and 1 1 -> 1 - -andx080 and 0 1111111111 -> 0 -andx081 and 0 111111111 -> 0 -andx082 and 0 11111111 -> 0 -andx083 and 0 1111111 -> 0 -andx084 and 0 111111 -> 0 -andx085 and 0 11111 -> 0 -andx086 and 0 1111 -> 0 -andx087 and 0 111 -> 0 -andx088 and 0 11 -> 0 -andx089 and 0 1 -> 0 - -andx090 and 011111111 111111111 -> 11111111 -andx091 and 101111111 111111111 -> 101111111 -andx092 and 110111111 111111111 -> 110111111 -andx093 and 111011111 111111111 -> 111011111 -andx094 and 111101111 111111111 -> 111101111 -andx095 and 111110111 111111111 -> 111110111 -andx096 and 111111011 111111111 -> 
111111011 -andx097 and 111111101 111111111 -> 111111101 -andx098 and 111111110 111111111 -> 111111110 - -andx100 and 111111111 011111111 -> 11111111 -andx101 and 111111111 101111111 -> 101111111 -andx102 and 111111111 110111111 -> 110111111 -andx103 and 111111111 111011111 -> 111011111 -andx104 and 111111111 111101111 -> 111101111 -andx105 and 111111111 111110111 -> 111110111 -andx106 and 111111111 111111011 -> 111111011 -andx107 and 111111111 111111101 -> 111111101 -andx108 and 111111111 111111110 -> 111111110 - --- non-0/1 should not be accepted, nor should signs -andx220 and 111111112 111111111 -> NaN Invalid_operation -andx221 and 333333333 333333333 -> NaN Invalid_operation -andx222 and 555555555 555555555 -> NaN Invalid_operation -andx223 and 777777777 777777777 -> NaN Invalid_operation -andx224 and 999999999 999999999 -> NaN Invalid_operation -andx225 and 222222222 999999999 -> NaN Invalid_operation -andx226 and 444444444 999999999 -> NaN Invalid_operation -andx227 and 666666666 999999999 -> NaN Invalid_operation -andx228 and 888888888 999999999 -> NaN Invalid_operation -andx229 and 999999999 222222222 -> NaN Invalid_operation -andx230 and 999999999 444444444 -> NaN Invalid_operation -andx231 and 999999999 666666666 -> NaN Invalid_operation -andx232 and 999999999 888888888 -> NaN Invalid_operation --- a few randoms -andx240 and 567468689 -934981942 -> NaN Invalid_operation -andx241 and 567367689 934981942 -> NaN Invalid_operation -andx242 and -631917772 -706014634 -> NaN Invalid_operation -andx243 and -756253257 138579234 -> NaN Invalid_operation -andx244 and 835590149 567435400 -> NaN Invalid_operation --- test MSD -andx250 and 200000000 100000000 -> NaN Invalid_operation -andx251 and 700000000 100000000 -> NaN Invalid_operation -andx252 and 800000000 100000000 -> NaN Invalid_operation -andx253 and 900000000 100000000 -> NaN Invalid_operation -andx254 and 200000000 000000000 -> NaN Invalid_operation -andx255 and 700000000 000000000 -> NaN Invalid_operation 
-andx256 and 800000000 000000000 -> NaN Invalid_operation -andx257 and 900000000 000000000 -> NaN Invalid_operation -andx258 and 100000000 200000000 -> NaN Invalid_operation -andx259 and 100000000 700000000 -> NaN Invalid_operation -andx260 and 100000000 800000000 -> NaN Invalid_operation -andx261 and 100000000 900000000 -> NaN Invalid_operation -andx262 and 000000000 200000000 -> NaN Invalid_operation -andx263 and 000000000 700000000 -> NaN Invalid_operation -andx264 and 000000000 800000000 -> NaN Invalid_operation -andx265 and 000000000 900000000 -> NaN Invalid_operation --- test MSD-1 -andx270 and 020000000 100000000 -> NaN Invalid_operation -andx271 and 070100000 100000000 -> NaN Invalid_operation -andx272 and 080010000 100000001 -> NaN Invalid_operation -andx273 and 090001000 100000010 -> NaN Invalid_operation -andx274 and 100000100 020010100 -> NaN Invalid_operation -andx275 and 100000000 070001000 -> NaN Invalid_operation -andx276 and 100000010 080010100 -> NaN Invalid_operation -andx277 and 100000000 090000010 -> NaN Invalid_operation --- test LSD -andx280 and 001000002 100000000 -> NaN Invalid_operation -andx281 and 000000007 100000000 -> NaN Invalid_operation -andx282 and 000000008 100000000 -> NaN Invalid_operation -andx283 and 000000009 100000000 -> NaN Invalid_operation -andx284 and 100000000 000100002 -> NaN Invalid_operation -andx285 and 100100000 001000007 -> NaN Invalid_operation -andx286 and 100010000 010000008 -> NaN Invalid_operation -andx287 and 100001000 100000009 -> NaN Invalid_operation --- test Middie -andx288 and 001020000 100000000 -> NaN Invalid_operation -andx289 and 000070001 100000000 -> NaN Invalid_operation -andx290 and 000080000 100010000 -> NaN Invalid_operation -andx291 and 000090000 100001000 -> NaN Invalid_operation -andx292 and 100000010 000020100 -> NaN Invalid_operation -andx293 and 100100000 000070010 -> NaN Invalid_operation -andx294 and 100010100 000080001 -> NaN Invalid_operation -andx295 and 100001000 000090000 -> NaN 
Invalid_operation --- signs -andx296 and -100001000 -000000000 -> NaN Invalid_operation -andx297 and -100001000 000010000 -> NaN Invalid_operation -andx298 and 100001000 -000000000 -> NaN Invalid_operation -andx299 and 100001000 000011000 -> 1000 - --- Nmax, Nmin, Ntiny -andx331 and 2 9.99999999E+999 -> NaN Invalid_operation -andx332 and 3 1E-999 -> NaN Invalid_operation -andx333 and 4 1.00000000E-999 -> NaN Invalid_operation -andx334 and 5 1E-1007 -> NaN Invalid_operation -andx335 and 6 -1E-1007 -> NaN Invalid_operation -andx336 and 7 -1.00000000E-999 -> NaN Invalid_operation -andx337 and 8 -1E-999 -> NaN Invalid_operation -andx338 and 9 -9.99999999E+999 -> NaN Invalid_operation -andx341 and 9.99999999E+999 -18 -> NaN Invalid_operation -andx342 and 1E-999 01 -> NaN Invalid_operation -andx343 and 1.00000000E-999 -18 -> NaN Invalid_operation -andx344 and 1E-1007 18 -> NaN Invalid_operation -andx345 and -1E-1007 -10 -> NaN Invalid_operation -andx346 and -1.00000000E-999 18 -> NaN Invalid_operation -andx347 and -1E-999 10 -> NaN Invalid_operation -andx348 and -9.99999999E+999 -18 -> NaN Invalid_operation - --- A few other non-integers -andx361 and 1.0 1 -> NaN Invalid_operation -andx362 and 1E+1 1 -> NaN Invalid_operation -andx363 and 0.0 1 -> NaN Invalid_operation -andx364 and 0E+1 1 -> NaN Invalid_operation -andx365 and 9.9 1 -> NaN Invalid_operation -andx366 and 9E+1 1 -> NaN Invalid_operation -andx371 and 0 1.0 -> NaN Invalid_operation -andx372 and 0 1E+1 -> NaN Invalid_operation -andx373 and 0 0.0 -> NaN Invalid_operation -andx374 and 0 0E+1 -> NaN Invalid_operation -andx375 and 0 9.9 -> NaN Invalid_operation -andx376 and 0 9E+1 -> NaN Invalid_operation - --- All Specials are in error -andx780 and -Inf -Inf -> NaN Invalid_operation -andx781 and -Inf -1000 -> NaN Invalid_operation -andx782 and -Inf -1 -> NaN Invalid_operation -andx783 and -Inf -0 -> NaN Invalid_operation -andx784 and -Inf 0 -> NaN Invalid_operation -andx785 and -Inf 1 -> NaN Invalid_operation 
-andx786 and -Inf 1000 -> NaN Invalid_operation -andx787 and -1000 -Inf -> NaN Invalid_operation -andx788 and -Inf -Inf -> NaN Invalid_operation -andx789 and -1 -Inf -> NaN Invalid_operation -andx790 and -0 -Inf -> NaN Invalid_operation -andx791 and 0 -Inf -> NaN Invalid_operation -andx792 and 1 -Inf -> NaN Invalid_operation -andx793 and 1000 -Inf -> NaN Invalid_operation -andx794 and Inf -Inf -> NaN Invalid_operation - -andx800 and Inf -Inf -> NaN Invalid_operation -andx801 and Inf -1000 -> NaN Invalid_operation -andx802 and Inf -1 -> NaN Invalid_operation -andx803 and Inf -0 -> NaN Invalid_operation -andx804 and Inf 0 -> NaN Invalid_operation -andx805 and Inf 1 -> NaN Invalid_operation -andx806 and Inf 1000 -> NaN Invalid_operation -andx807 and Inf Inf -> NaN Invalid_operation -andx808 and -1000 Inf -> NaN Invalid_operation -andx809 and -Inf Inf -> NaN Invalid_operation -andx810 and -1 Inf -> NaN Invalid_operation -andx811 and -0 Inf -> NaN Invalid_operation -andx812 and 0 Inf -> NaN Invalid_operation -andx813 and 1 Inf -> NaN Invalid_operation -andx814 and 1000 Inf -> NaN Invalid_operation -andx815 and Inf Inf -> NaN Invalid_operation - -andx821 and NaN -Inf -> NaN Invalid_operation -andx822 and NaN -1000 -> NaN Invalid_operation -andx823 and NaN -1 -> NaN Invalid_operation -andx824 and NaN -0 -> NaN Invalid_operation -andx825 and NaN 0 -> NaN Invalid_operation -andx826 and NaN 1 -> NaN Invalid_operation -andx827 and NaN 1000 -> NaN Invalid_operation -andx828 and NaN Inf -> NaN Invalid_operation -andx829 and NaN NaN -> NaN Invalid_operation -andx830 and -Inf NaN -> NaN Invalid_operation -andx831 and -1000 NaN -> NaN Invalid_operation -andx832 and -1 NaN -> NaN Invalid_operation -andx833 and -0 NaN -> NaN Invalid_operation -andx834 and 0 NaN -> NaN Invalid_operation -andx835 and 1 NaN -> NaN Invalid_operation -andx836 and 1000 NaN -> NaN Invalid_operation -andx837 and Inf NaN -> NaN Invalid_operation - -andx841 and sNaN -Inf -> NaN Invalid_operation -andx842 and 
sNaN -1000 -> NaN Invalid_operation -andx843 and sNaN -1 -> NaN Invalid_operation -andx844 and sNaN -0 -> NaN Invalid_operation -andx845 and sNaN 0 -> NaN Invalid_operation -andx846 and sNaN 1 -> NaN Invalid_operation -andx847 and sNaN 1000 -> NaN Invalid_operation -andx848 and sNaN NaN -> NaN Invalid_operation -andx849 and sNaN sNaN -> NaN Invalid_operation -andx850 and NaN sNaN -> NaN Invalid_operation -andx851 and -Inf sNaN -> NaN Invalid_operation -andx852 and -1000 sNaN -> NaN Invalid_operation -andx853 and -1 sNaN -> NaN Invalid_operation -andx854 and -0 sNaN -> NaN Invalid_operation -andx855 and 0 sNaN -> NaN Invalid_operation -andx856 and 1 sNaN -> NaN Invalid_operation -andx857 and 1000 sNaN -> NaN Invalid_operation -andx858 and Inf sNaN -> NaN Invalid_operation -andx859 and NaN sNaN -> NaN Invalid_operation - --- propagating NaNs -andx861 and NaN1 -Inf -> NaN Invalid_operation -andx862 and +NaN2 -1000 -> NaN Invalid_operation -andx863 and NaN3 1000 -> NaN Invalid_operation -andx864 and NaN4 Inf -> NaN Invalid_operation -andx865 and NaN5 +NaN6 -> NaN Invalid_operation -andx866 and -Inf NaN7 -> NaN Invalid_operation -andx867 and -1000 NaN8 -> NaN Invalid_operation -andx868 and 1000 NaN9 -> NaN Invalid_operation -andx869 and Inf +NaN10 -> NaN Invalid_operation -andx871 and sNaN11 -Inf -> NaN Invalid_operation -andx872 and sNaN12 -1000 -> NaN Invalid_operation -andx873 and sNaN13 1000 -> NaN Invalid_operation -andx874 and sNaN14 NaN17 -> NaN Invalid_operation -andx875 and sNaN15 sNaN18 -> NaN Invalid_operation -andx876 and NaN16 sNaN19 -> NaN Invalid_operation -andx877 and -Inf +sNaN20 -> NaN Invalid_operation -andx878 and -1000 sNaN21 -> NaN Invalid_operation -andx879 and 1000 sNaN22 -> NaN Invalid_operation -andx880 and Inf sNaN23 -> NaN Invalid_operation -andx881 and +NaN25 +sNaN24 -> NaN Invalid_operation -andx882 and -NaN26 NaN28 -> NaN Invalid_operation -andx883 and -sNaN27 sNaN29 -> NaN Invalid_operation -andx884 and 1000 -NaN30 -> NaN 
Invalid_operation -andx885 and 1000 -sNaN31 -> NaN Invalid_operation +------------------------------------------------------------------------ +-- and.decTest -- digitwise logical AND -- +-- Copyright (c) IBM Corporation, 1981, 2008. All rights reserved. -- +------------------------------------------------------------------------ +-- Please see the document "General Decimal Arithmetic Testcases" -- +-- at http://www2.hursley.ibm.com/decimal for the description of -- +-- these testcases. -- +-- -- +-- These testcases are experimental ('beta' versions), and they -- +-- may contain errors. They are offered on an as-is basis. In -- +-- particular, achieving the same results as the tests here is not -- +-- a guarantee that an implementation complies with any Standard -- +-- or specification. The tests are not exhaustive. -- +-- -- +-- Please send comments, suggestions, and corrections to the author: -- +-- Mike Cowlishaw, IBM Fellow -- +-- IBM UK, PO Box 31, Birmingham Road, Warwick CV34 5JL, UK -- +-- mfc at uk.ibm.com -- +------------------------------------------------------------------------ +version: 2.59 + +extended: 1 +precision: 9 +rounding: half_up +maxExponent: 999 +minExponent: -999 + +-- Sanity check (truth table) +andx001 and 0 0 -> 0 +andx002 and 0 1 -> 0 +andx003 and 1 0 -> 0 +andx004 and 1 1 -> 1 +andx005 and 1100 1010 -> 1000 +andx006 and 1111 10 -> 10 +andx007 and 1111 1010 -> 1010 + +-- and at msd and msd-1 +andx010 and 000000000 000000000 -> 0 +andx011 and 000000000 100000000 -> 0 +andx012 and 100000000 000000000 -> 0 +andx013 and 100000000 100000000 -> 100000000 +andx014 and 000000000 000000000 -> 0 +andx015 and 000000000 010000000 -> 0 +andx016 and 010000000 000000000 -> 0 +andx017 and 010000000 010000000 -> 10000000 + +-- Various lengths +-- 123456789 123456789 123456789 +andx021 and 111111111 111111111 -> 111111111 +andx022 and 111111111111 111111111 -> 111111111 +andx023 and 111111111111 11111111 -> 11111111 +andx024 and 111111111 11111111 -> 
11111111 +andx025 and 111111111 1111111 -> 1111111 +andx026 and 111111111111 111111 -> 111111 +andx027 and 111111111111 11111 -> 11111 +andx028 and 111111111111 1111 -> 1111 +andx029 and 111111111111 111 -> 111 +andx031 and 111111111111 11 -> 11 +andx032 and 111111111111 1 -> 1 +andx033 and 111111111111 1111111111 -> 111111111 +andx034 and 11111111111 11111111111 -> 111111111 +andx035 and 1111111111 111111111111 -> 111111111 +andx036 and 111111111 1111111111111 -> 111111111 + +andx040 and 111111111 111111111111 -> 111111111 +andx041 and 11111111 111111111111 -> 11111111 +andx042 and 11111111 111111111 -> 11111111 +andx043 and 1111111 111111111 -> 1111111 +andx044 and 111111 111111111 -> 111111 +andx045 and 11111 111111111 -> 11111 +andx046 and 1111 111111111 -> 1111 +andx047 and 111 111111111 -> 111 +andx048 and 11 111111111 -> 11 +andx049 and 1 111111111 -> 1 + +andx050 and 1111111111 1 -> 1 +andx051 and 111111111 1 -> 1 +andx052 and 11111111 1 -> 1 +andx053 and 1111111 1 -> 1 +andx054 and 111111 1 -> 1 +andx055 and 11111 1 -> 1 +andx056 and 1111 1 -> 1 +andx057 and 111 1 -> 1 +andx058 and 11 1 -> 1 +andx059 and 1 1 -> 1 + +andx060 and 1111111111 0 -> 0 +andx061 and 111111111 0 -> 0 +andx062 and 11111111 0 -> 0 +andx063 and 1111111 0 -> 0 +andx064 and 111111 0 -> 0 +andx065 and 11111 0 -> 0 +andx066 and 1111 0 -> 0 +andx067 and 111 0 -> 0 +andx068 and 11 0 -> 0 +andx069 and 1 0 -> 0 + +andx070 and 1 1111111111 -> 1 +andx071 and 1 111111111 -> 1 +andx072 and 1 11111111 -> 1 +andx073 and 1 1111111 -> 1 +andx074 and 1 111111 -> 1 +andx075 and 1 11111 -> 1 +andx076 and 1 1111 -> 1 +andx077 and 1 111 -> 1 +andx078 and 1 11 -> 1 +andx079 and 1 1 -> 1 + +andx080 and 0 1111111111 -> 0 +andx081 and 0 111111111 -> 0 +andx082 and 0 11111111 -> 0 +andx083 and 0 1111111 -> 0 +andx084 and 0 111111 -> 0 +andx085 and 0 11111 -> 0 +andx086 and 0 1111 -> 0 +andx087 and 0 111 -> 0 +andx088 and 0 11 -> 0 +andx089 and 0 1 -> 0 + +andx090 and 011111111 111111111 -> 11111111 +andx091 
and 101111111 111111111 -> 101111111 +andx092 and 110111111 111111111 -> 110111111 +andx093 and 111011111 111111111 -> 111011111 +andx094 and 111101111 111111111 -> 111101111 +andx095 and 111110111 111111111 -> 111110111 +andx096 and 111111011 111111111 -> 111111011 +andx097 and 111111101 111111111 -> 111111101 +andx098 and 111111110 111111111 -> 111111110 + +andx100 and 111111111 011111111 -> 11111111 +andx101 and 111111111 101111111 -> 101111111 +andx102 and 111111111 110111111 -> 110111111 +andx103 and 111111111 111011111 -> 111011111 +andx104 and 111111111 111101111 -> 111101111 +andx105 and 111111111 111110111 -> 111110111 +andx106 and 111111111 111111011 -> 111111011 +andx107 and 111111111 111111101 -> 111111101 +andx108 and 111111111 111111110 -> 111111110 + +-- non-0/1 should not be accepted, nor should signs +andx220 and 111111112 111111111 -> NaN Invalid_operation +andx221 and 333333333 333333333 -> NaN Invalid_operation +andx222 and 555555555 555555555 -> NaN Invalid_operation +andx223 and 777777777 777777777 -> NaN Invalid_operation +andx224 and 999999999 999999999 -> NaN Invalid_operation +andx225 and 222222222 999999999 -> NaN Invalid_operation +andx226 and 444444444 999999999 -> NaN Invalid_operation +andx227 and 666666666 999999999 -> NaN Invalid_operation +andx228 and 888888888 999999999 -> NaN Invalid_operation +andx229 and 999999999 222222222 -> NaN Invalid_operation +andx230 and 999999999 444444444 -> NaN Invalid_operation +andx231 and 999999999 666666666 -> NaN Invalid_operation +andx232 and 999999999 888888888 -> NaN Invalid_operation +-- a few randoms +andx240 and 567468689 -934981942 -> NaN Invalid_operation +andx241 and 567367689 934981942 -> NaN Invalid_operation +andx242 and -631917772 -706014634 -> NaN Invalid_operation +andx243 and -756253257 138579234 -> NaN Invalid_operation +andx244 and 835590149 567435400 -> NaN Invalid_operation +-- test MSD +andx250 and 200000000 100000000 -> NaN Invalid_operation +andx251 and 700000000 100000000 
-> NaN Invalid_operation +andx252 and 800000000 100000000 -> NaN Invalid_operation +andx253 and 900000000 100000000 -> NaN Invalid_operation +andx254 and 200000000 000000000 -> NaN Invalid_operation +andx255 and 700000000 000000000 -> NaN Invalid_operation +andx256 and 800000000 000000000 -> NaN Invalid_operation +andx257 and 900000000 000000000 -> NaN Invalid_operation +andx258 and 100000000 200000000 -> NaN Invalid_operation +andx259 and 100000000 700000000 -> NaN Invalid_operation +andx260 and 100000000 800000000 -> NaN Invalid_operation +andx261 and 100000000 900000000 -> NaN Invalid_operation +andx262 and 000000000 200000000 -> NaN Invalid_operation +andx263 and 000000000 700000000 -> NaN Invalid_operation +andx264 and 000000000 800000000 -> NaN Invalid_operation +andx265 and 000000000 900000000 -> NaN Invalid_operation +-- test MSD-1 +andx270 and 020000000 100000000 -> NaN Invalid_operation +andx271 and 070100000 100000000 -> NaN Invalid_operation +andx272 and 080010000 100000001 -> NaN Invalid_operation +andx273 and 090001000 100000010 -> NaN Invalid_operation +andx274 and 100000100 020010100 -> NaN Invalid_operation +andx275 and 100000000 070001000 -> NaN Invalid_operation +andx276 and 100000010 080010100 -> NaN Invalid_operation +andx277 and 100000000 090000010 -> NaN Invalid_operation +-- test LSD +andx280 and 001000002 100000000 -> NaN Invalid_operation +andx281 and 000000007 100000000 -> NaN Invalid_operation +andx282 and 000000008 100000000 -> NaN Invalid_operation +andx283 and 000000009 100000000 -> NaN Invalid_operation +andx284 and 100000000 000100002 -> NaN Invalid_operation +andx285 and 100100000 001000007 -> NaN Invalid_operation +andx286 and 100010000 010000008 -> NaN Invalid_operation +andx287 and 100001000 100000009 -> NaN Invalid_operation +-- test Middie +andx288 and 001020000 100000000 -> NaN Invalid_operation +andx289 and 000070001 100000000 -> NaN Invalid_operation +andx290 and 000080000 100010000 -> NaN Invalid_operation +andx291 and 
000090000 100001000 -> NaN Invalid_operation +andx292 and 100000010 000020100 -> NaN Invalid_operation +andx293 and 100100000 000070010 -> NaN Invalid_operation +andx294 and 100010100 000080001 -> NaN Invalid_operation +andx295 and 100001000 000090000 -> NaN Invalid_operation +-- signs +andx296 and -100001000 -000000000 -> NaN Invalid_operation +andx297 and -100001000 000010000 -> NaN Invalid_operation +andx298 and 100001000 -000000000 -> NaN Invalid_operation +andx299 and 100001000 000011000 -> 1000 + +-- Nmax, Nmin, Ntiny +andx331 and 2 9.99999999E+999 -> NaN Invalid_operation +andx332 and 3 1E-999 -> NaN Invalid_operation +andx333 and 4 1.00000000E-999 -> NaN Invalid_operation +andx334 and 5 1E-1007 -> NaN Invalid_operation +andx335 and 6 -1E-1007 -> NaN Invalid_operation +andx336 and 7 -1.00000000E-999 -> NaN Invalid_operation +andx337 and 8 -1E-999 -> NaN Invalid_operation +andx338 and 9 -9.99999999E+999 -> NaN Invalid_operation +andx341 and 9.99999999E+999 -18 -> NaN Invalid_operation +andx342 and 1E-999 01 -> NaN Invalid_operation +andx343 and 1.00000000E-999 -18 -> NaN Invalid_operation +andx344 and 1E-1007 18 -> NaN Invalid_operation +andx345 and -1E-1007 -10 -> NaN Invalid_operation +andx346 and -1.00000000E-999 18 -> NaN Invalid_operation +andx347 and -1E-999 10 -> NaN Invalid_operation +andx348 and -9.99999999E+999 -18 -> NaN Invalid_operation + +-- A few other non-integers +andx361 and 1.0 1 -> NaN Invalid_operation +andx362 and 1E+1 1 -> NaN Invalid_operation +andx363 and 0.0 1 -> NaN Invalid_operation +andx364 and 0E+1 1 -> NaN Invalid_operation +andx365 and 9.9 1 -> NaN Invalid_operation +andx366 and 9E+1 1 -> NaN Invalid_operation +andx371 and 0 1.0 -> NaN Invalid_operation +andx372 and 0 1E+1 -> NaN Invalid_operation +andx373 and 0 0.0 -> NaN Invalid_operation +andx374 and 0 0E+1 -> NaN Invalid_operation +andx375 and 0 9.9 -> NaN Invalid_operation +andx376 and 0 9E+1 -> NaN Invalid_operation + +-- All Specials are in error +andx780 and -Inf -Inf 
-> NaN Invalid_operation +andx781 and -Inf -1000 -> NaN Invalid_operation +andx782 and -Inf -1 -> NaN Invalid_operation +andx783 and -Inf -0 -> NaN Invalid_operation +andx784 and -Inf 0 -> NaN Invalid_operation +andx785 and -Inf 1 -> NaN Invalid_operation +andx786 and -Inf 1000 -> NaN Invalid_operation +andx787 and -1000 -Inf -> NaN Invalid_operation +andx788 and -Inf -Inf -> NaN Invalid_operation +andx789 and -1 -Inf -> NaN Invalid_operation +andx790 and -0 -Inf -> NaN Invalid_operation +andx791 and 0 -Inf -> NaN Invalid_operation +andx792 and 1 -Inf -> NaN Invalid_operation +andx793 and 1000 -Inf -> NaN Invalid_operation +andx794 and Inf -Inf -> NaN Invalid_operation + +andx800 and Inf -Inf -> NaN Invalid_operation +andx801 and Inf -1000 -> NaN Invalid_operation +andx802 and Inf -1 -> NaN Invalid_operation +andx803 and Inf -0 -> NaN Invalid_operation +andx804 and Inf 0 -> NaN Invalid_operation +andx805 and Inf 1 -> NaN Invalid_operation +andx806 and Inf 1000 -> NaN Invalid_operation +andx807 and Inf Inf -> NaN Invalid_operation +andx808 and -1000 Inf -> NaN Invalid_operation +andx809 and -Inf Inf -> NaN Invalid_operation +andx810 and -1 Inf -> NaN Invalid_operation +andx811 and -0 Inf -> NaN Invalid_operation +andx812 and 0 Inf -> NaN Invalid_operation +andx813 and 1 Inf -> NaN Invalid_operation +andx814 and 1000 Inf -> NaN Invalid_operation +andx815 and Inf Inf -> NaN Invalid_operation + +andx821 and NaN -Inf -> NaN Invalid_operation +andx822 and NaN -1000 -> NaN Invalid_operation +andx823 and NaN -1 -> NaN Invalid_operation +andx824 and NaN -0 -> NaN Invalid_operation +andx825 and NaN 0 -> NaN Invalid_operation +andx826 and NaN 1 -> NaN Invalid_operation +andx827 and NaN 1000 -> NaN Invalid_operation +andx828 and NaN Inf -> NaN Invalid_operation +andx829 and NaN NaN -> NaN Invalid_operation +andx830 and -Inf NaN -> NaN Invalid_operation +andx831 and -1000 NaN -> NaN Invalid_operation +andx832 and -1 NaN -> NaN Invalid_operation +andx833 and -0 NaN -> NaN 
Invalid_operation +andx834 and 0 NaN -> NaN Invalid_operation +andx835 and 1 NaN -> NaN Invalid_operation +andx836 and 1000 NaN -> NaN Invalid_operation +andx837 and Inf NaN -> NaN Invalid_operation + +andx841 and sNaN -Inf -> NaN Invalid_operation +andx842 and sNaN -1000 -> NaN Invalid_operation +andx843 and sNaN -1 -> NaN Invalid_operation +andx844 and sNaN -0 -> NaN Invalid_operation +andx845 and sNaN 0 -> NaN Invalid_operation +andx846 and sNaN 1 -> NaN Invalid_operation +andx847 and sNaN 1000 -> NaN Invalid_operation +andx848 and sNaN NaN -> NaN Invalid_operation +andx849 and sNaN sNaN -> NaN Invalid_operation +andx850 and NaN sNaN -> NaN Invalid_operation +andx851 and -Inf sNaN -> NaN Invalid_operation +andx852 and -1000 sNaN -> NaN Invalid_operation +andx853 and -1 sNaN -> NaN Invalid_operation +andx854 and -0 sNaN -> NaN Invalid_operation +andx855 and 0 sNaN -> NaN Invalid_operation +andx856 and 1 sNaN -> NaN Invalid_operation +andx857 and 1000 sNaN -> NaN Invalid_operation +andx858 and Inf sNaN -> NaN Invalid_operation +andx859 and NaN sNaN -> NaN Invalid_operation + +-- propagating NaNs +andx861 and NaN1 -Inf -> NaN Invalid_operation +andx862 and +NaN2 -1000 -> NaN Invalid_operation +andx863 and NaN3 1000 -> NaN Invalid_operation +andx864 and NaN4 Inf -> NaN Invalid_operation +andx865 and NaN5 +NaN6 -> NaN Invalid_operation +andx866 and -Inf NaN7 -> NaN Invalid_operation +andx867 and -1000 NaN8 -> NaN Invalid_operation +andx868 and 1000 NaN9 -> NaN Invalid_operation +andx869 and Inf +NaN10 -> NaN Invalid_operation +andx871 and sNaN11 -Inf -> NaN Invalid_operation +andx872 and sNaN12 -1000 -> NaN Invalid_operation +andx873 and sNaN13 1000 -> NaN Invalid_operation +andx874 and sNaN14 NaN17 -> NaN Invalid_operation +andx875 and sNaN15 sNaN18 -> NaN Invalid_operation +andx876 and NaN16 sNaN19 -> NaN Invalid_operation +andx877 and -Inf +sNaN20 -> NaN Invalid_operation +andx878 and -1000 sNaN21 -> NaN Invalid_operation +andx879 and 1000 sNaN22 -> NaN 
Invalid_operation +andx880 and Inf sNaN23 -> NaN Invalid_operation +andx881 and +NaN25 +sNaN24 -> NaN Invalid_operation +andx882 and -NaN26 NaN28 -> NaN Invalid_operation +andx883 and -sNaN27 sNaN29 -> NaN Invalid_operation +andx884 and 1000 -NaN30 -> NaN Invalid_operation +andx885 and 1000 -sNaN31 -> NaN Invalid_operation diff --git a/lib-python/2.7/test/decimaltestdata/class.decTest b/lib-python/2.7/test/decimaltestdata/class.decTest --- a/lib-python/2.7/test/decimaltestdata/class.decTest +++ b/lib-python/2.7/test/decimaltestdata/class.decTest @@ -1,131 +1,131 @@ ------------------------------------------------------------------------- --- class.decTest -- Class operations -- --- Copyright (c) IBM Corporation, 1981, 2008. All rights reserved. -- ------------------------------------------------------------------------- --- Please see the document "General Decimal Arithmetic Testcases" -- --- at http://www2.hursley.ibm.com/decimal for the description of -- --- these testcases. -- --- -- --- These testcases are experimental ('beta' versions), and they -- --- may contain errors. They are offered on an as-is basis. In -- --- particular, achieving the same results as the tests here is not -- --- a guarantee that an implementation complies with any Standard -- --- or specification. The tests are not exhaustive. 
-- --- -- --- Please send comments, suggestions, and corrections to the author: -- --- Mike Cowlishaw, IBM Fellow -- --- IBM UK, PO Box 31, Birmingham Road, Warwick CV34 5JL, UK -- --- mfc at uk.ibm.com -- ------------------------------------------------------------------------- -version: 2.59 - --- [New 2006.11.27] - -precision: 9 -maxExponent: 999 -minExponent: -999 -extended: 1 -clamp: 1 -rounding: half_even - -clasx001 class 0 -> +Zero -clasx002 class 0.00 -> +Zero -clasx003 class 0E+5 -> +Zero -clasx004 class 1E-1007 -> +Subnormal -clasx005 class 0.1E-999 -> +Subnormal -clasx006 class 0.99999999E-999 -> +Subnormal -clasx007 class 1.00000000E-999 -> +Normal -clasx008 class 1E-999 -> +Normal -clasx009 class 1E-100 -> +Normal -clasx010 class 1E-10 -> +Normal -clasx012 class 1E-1 -> +Normal -clasx013 class 1 -> +Normal -clasx014 class 2.50 -> +Normal -clasx015 class 100.100 -> +Normal -clasx016 class 1E+30 -> +Normal -clasx017 class 1E+999 -> +Normal -clasx018 class 9.99999999E+999 -> +Normal -clasx019 class Inf -> +Infinity - -clasx021 class -0 -> -Zero -clasx022 class -0.00 -> -Zero -clasx023 class -0E+5 -> -Zero -clasx024 class -1E-1007 -> -Subnormal -clasx025 class -0.1E-999 -> -Subnormal -clasx026 class -0.99999999E-999 -> -Subnormal -clasx027 class -1.00000000E-999 -> -Normal -clasx028 class -1E-999 -> -Normal -clasx029 class -1E-100 -> -Normal -clasx030 class -1E-10 -> -Normal -clasx032 class -1E-1 -> -Normal -clasx033 class -1 -> -Normal -clasx034 class -2.50 -> -Normal -clasx035 class -100.100 -> -Normal -clasx036 class -1E+30 -> -Normal -clasx037 class -1E+999 -> -Normal -clasx038 class -9.99999999E+999 -> -Normal -clasx039 class -Inf -> -Infinity - -clasx041 class NaN -> NaN -clasx042 class -NaN -> NaN -clasx043 class +NaN12345 -> NaN -clasx044 class sNaN -> sNaN -clasx045 class -sNaN -> sNaN -clasx046 class +sNaN12345 -> sNaN - - --- decimal64 bounds - -precision: 16 -maxExponent: 384 -minExponent: -383 -clamp: 1 -rounding: half_even - -clasx201 class 
0 -> +Zero -clasx202 class 0.00 -> +Zero -clasx203 class 0E+5 -> +Zero -clasx204 class 1E-396 -> +Subnormal -clasx205 class 0.1E-383 -> +Subnormal -clasx206 class 0.999999999999999E-383 -> +Subnormal -clasx207 class 1.000000000000000E-383 -> +Normal -clasx208 class 1E-383 -> +Normal -clasx209 class 1E-100 -> +Normal -clasx210 class 1E-10 -> +Normal -clasx212 class 1E-1 -> +Normal -clasx213 class 1 -> +Normal -clasx214 class 2.50 -> +Normal -clasx215 class 100.100 -> +Normal -clasx216 class 1E+30 -> +Normal -clasx217 class 1E+384 -> +Normal -clasx218 class 9.999999999999999E+384 -> +Normal -clasx219 class Inf -> +Infinity - -clasx221 class -0 -> -Zero -clasx222 class -0.00 -> -Zero -clasx223 class -0E+5 -> -Zero -clasx224 class -1E-396 -> -Subnormal -clasx225 class -0.1E-383 -> -Subnormal -clasx226 class -0.999999999999999E-383 -> -Subnormal -clasx227 class -1.000000000000000E-383 -> -Normal -clasx228 class -1E-383 -> -Normal -clasx229 class -1E-100 -> -Normal -clasx230 class -1E-10 -> -Normal -clasx232 class -1E-1 -> -Normal -clasx233 class -1 -> -Normal -clasx234 class -2.50 -> -Normal -clasx235 class -100.100 -> -Normal -clasx236 class -1E+30 -> -Normal -clasx237 class -1E+384 -> -Normal -clasx238 class -9.999999999999999E+384 -> -Normal -clasx239 class -Inf -> -Infinity - -clasx241 class NaN -> NaN -clasx242 class -NaN -> NaN -clasx243 class +NaN12345 -> NaN -clasx244 class sNaN -> sNaN -clasx245 class -sNaN -> sNaN -clasx246 class +sNaN12345 -> sNaN - - - +------------------------------------------------------------------------ +-- class.decTest -- Class operations -- +-- Copyright (c) IBM Corporation, 1981, 2008. All rights reserved. -- +------------------------------------------------------------------------ +-- Please see the document "General Decimal Arithmetic Testcases" -- +-- at http://www2.hursley.ibm.com/decimal for the description of -- +-- these testcases. 
-- +-- -- +-- These testcases are experimental ('beta' versions), and they -- +-- may contain errors. They are offered on an as-is basis. In -- +-- particular, achieving the same results as the tests here is not -- +-- a guarantee that an implementation complies with any Standard -- +-- or specification. The tests are not exhaustive. -- +-- -- +-- Please send comments, suggestions, and corrections to the author: -- +-- Mike Cowlishaw, IBM Fellow -- +-- IBM UK, PO Box 31, Birmingham Road, Warwick CV34 5JL, UK -- +-- mfc at uk.ibm.com -- +------------------------------------------------------------------------ +version: 2.59 + +-- [New 2006.11.27] + +precision: 9 +maxExponent: 999 +minExponent: -999 +extended: 1 +clamp: 1 +rounding: half_even + +clasx001 class 0 -> +Zero +clasx002 class 0.00 -> +Zero +clasx003 class 0E+5 -> +Zero +clasx004 class 1E-1007 -> +Subnormal +clasx005 class 0.1E-999 -> +Subnormal +clasx006 class 0.99999999E-999 -> +Subnormal +clasx007 class 1.00000000E-999 -> +Normal +clasx008 class 1E-999 -> +Normal +clasx009 class 1E-100 -> +Normal +clasx010 class 1E-10 -> +Normal +clasx012 class 1E-1 -> +Normal +clasx013 class 1 -> +Normal +clasx014 class 2.50 -> +Normal +clasx015 class 100.100 -> +Normal +clasx016 class 1E+30 -> +Normal +clasx017 class 1E+999 -> +Normal +clasx018 class 9.99999999E+999 -> +Normal +clasx019 class Inf -> +Infinity + +clasx021 class -0 -> -Zero +clasx022 class -0.00 -> -Zero +clasx023 class -0E+5 -> -Zero +clasx024 class -1E-1007 -> -Subnormal +clasx025 class -0.1E-999 -> -Subnormal +clasx026 class -0.99999999E-999 -> -Subnormal +clasx027 class -1.00000000E-999 -> -Normal +clasx028 class -1E-999 -> -Normal +clasx029 class -1E-100 -> -Normal +clasx030 class -1E-10 -> -Normal +clasx032 class -1E-1 -> -Normal +clasx033 class -1 -> -Normal +clasx034 class -2.50 -> -Normal +clasx035 class -100.100 -> -Normal +clasx036 class -1E+30 -> -Normal +clasx037 class -1E+999 -> -Normal +clasx038 class -9.99999999E+999 -> -Normal +clasx039 
class -Inf -> -Infinity + +clasx041 class NaN -> NaN +clasx042 class -NaN -> NaN +clasx043 class +NaN12345 -> NaN +clasx044 class sNaN -> sNaN +clasx045 class -sNaN -> sNaN +clasx046 class +sNaN12345 -> sNaN + + +-- decimal64 bounds + +precision: 16 +maxExponent: 384 +minExponent: -383 +clamp: 1 +rounding: half_even + +clasx201 class 0 -> +Zero +clasx202 class 0.00 -> +Zero +clasx203 class 0E+5 -> +Zero +clasx204 class 1E-396 -> +Subnormal +clasx205 class 0.1E-383 -> +Subnormal +clasx206 class 0.999999999999999E-383 -> +Subnormal +clasx207 class 1.000000000000000E-383 -> +Normal +clasx208 class 1E-383 -> +Normal +clasx209 class 1E-100 -> +Normal +clasx210 class 1E-10 -> +Normal +clasx212 class 1E-1 -> +Normal +clasx213 class 1 -> +Normal +clasx214 class 2.50 -> +Normal +clasx215 class 100.100 -> +Normal +clasx216 class 1E+30 -> +Normal +clasx217 class 1E+384 -> +Normal +clasx218 class 9.999999999999999E+384 -> +Normal +clasx219 class Inf -> +Infinity + +clasx221 class -0 -> -Zero +clasx222 class -0.00 -> -Zero +clasx223 class -0E+5 -> -Zero +clasx224 class -1E-396 -> -Subnormal +clasx225 class -0.1E-383 -> -Subnormal +clasx226 class -0.999999999999999E-383 -> -Subnormal +clasx227 class -1.000000000000000E-383 -> -Normal +clasx228 class -1E-383 -> -Normal +clasx229 class -1E-100 -> -Normal +clasx230 class -1E-10 -> -Normal +clasx232 class -1E-1 -> -Normal +clasx233 class -1 -> -Normal +clasx234 class -2.50 -> -Normal +clasx235 class -100.100 -> -Normal +clasx236 class -1E+30 -> -Normal +clasx237 class -1E+384 -> -Normal +clasx238 class -9.999999999999999E+384 -> -Normal +clasx239 class -Inf -> -Infinity + +clasx241 class NaN -> NaN +clasx242 class -NaN -> NaN +clasx243 class +NaN12345 -> NaN +clasx244 class sNaN -> sNaN +clasx245 class -sNaN -> sNaN +clasx246 class +sNaN12345 -> sNaN + + + diff --git a/lib-python/2.7/test/decimaltestdata/comparetotal.decTest b/lib-python/2.7/test/decimaltestdata/comparetotal.decTest --- 
a/lib-python/2.7/test/decimaltestdata/comparetotal.decTest +++ b/lib-python/2.7/test/decimaltestdata/comparetotal.decTest @@ -1,798 +1,798 @@ ------------------------------------------------------------------------- --- comparetotal.decTest -- decimal comparison using total ordering -- --- Copyright (c) IBM Corporation, 1981, 2008. All rights reserved. -- ------------------------------------------------------------------------- --- Please see the document "General Decimal Arithmetic Testcases" -- --- at http://www2.hursley.ibm.com/decimal for the description of -- --- these testcases. -- --- -- --- These testcases are experimental ('beta' versions), and they -- --- may contain errors. They are offered on an as-is basis. In -- --- particular, achieving the same results as the tests here is not -- --- a guarantee that an implementation complies with any Standard -- --- or specification. The tests are not exhaustive. -- --- -- --- Please send comments, suggestions, and corrections to the author: -- --- Mike Cowlishaw, IBM Fellow -- --- IBM UK, PO Box 31, Birmingham Road, Warwick CV34 5JL, UK -- --- mfc at uk.ibm.com -- ------------------------------------------------------------------------- -version: 2.59 - --- Note that we cannot assume add/subtract tests cover paths adequately, --- here, because the code might be quite different (comparison cannot --- overflow or underflow, so actual subtractions are not necessary). --- Similarly, comparetotal will have some radically different paths --- than compare. 
- -extended: 1 -precision: 16 -rounding: half_up -maxExponent: 384 -minExponent: -383 - --- sanity checks -cotx001 comparetotal -2 -2 -> 0 -cotx002 comparetotal -2 -1 -> -1 -cotx003 comparetotal -2 0 -> -1 -cotx004 comparetotal -2 1 -> -1 -cotx005 comparetotal -2 2 -> -1 -cotx006 comparetotal -1 -2 -> 1 -cotx007 comparetotal -1 -1 -> 0 -cotx008 comparetotal -1 0 -> -1 -cotx009 comparetotal -1 1 -> -1 -cotx010 comparetotal -1 2 -> -1 -cotx011 comparetotal 0 -2 -> 1 -cotx012 comparetotal 0 -1 -> 1 -cotx013 comparetotal 0 0 -> 0 -cotx014 comparetotal 0 1 -> -1 -cotx015 comparetotal 0 2 -> -1 -cotx016 comparetotal 1 -2 -> 1 -cotx017 comparetotal 1 -1 -> 1 -cotx018 comparetotal 1 0 -> 1 -cotx019 comparetotal 1 1 -> 0 -cotx020 comparetotal 1 2 -> -1 -cotx021 comparetotal 2 -2 -> 1 -cotx022 comparetotal 2 -1 -> 1 -cotx023 comparetotal 2 0 -> 1 -cotx025 comparetotal 2 1 -> 1 -cotx026 comparetotal 2 2 -> 0 - -cotx031 comparetotal -20 -20 -> 0 -cotx032 comparetotal -20 -10 -> -1 -cotx033 comparetotal -20 00 -> -1 -cotx034 comparetotal -20 10 -> -1 -cotx035 comparetotal -20 20 -> -1 -cotx036 comparetotal -10 -20 -> 1 -cotx037 comparetotal -10 -10 -> 0 -cotx038 comparetotal -10 00 -> -1 -cotx039 comparetotal -10 10 -> -1 -cotx040 comparetotal -10 20 -> -1 -cotx041 comparetotal 00 -20 -> 1 -cotx042 comparetotal 00 -10 -> 1 -cotx043 comparetotal 00 00 -> 0 -cotx044 comparetotal 00 10 -> -1 -cotx045 comparetotal 00 20 -> -1 -cotx046 comparetotal 10 -20 -> 1 -cotx047 comparetotal 10 -10 -> 1 -cotx048 comparetotal 10 00 -> 1 -cotx049 comparetotal 10 10 -> 0 -cotx050 comparetotal 10 20 -> -1 -cotx051 comparetotal 20 -20 -> 1 -cotx052 comparetotal 20 -10 -> 1 -cotx053 comparetotal 20 00 -> 1 -cotx055 comparetotal 20 10 -> 1 -cotx056 comparetotal 20 20 -> 0 - -cotx061 comparetotal -2.0 -2.0 -> 0 -cotx062 comparetotal -2.0 -1.0 -> -1 -cotx063 comparetotal -2.0 0.0 -> -1 -cotx064 comparetotal -2.0 1.0 -> -1 -cotx065 comparetotal -2.0 2.0 -> -1 -cotx066 comparetotal -1.0 -2.0 -> 1 
-cotx067 comparetotal -1.0 -1.0 -> 0 -cotx068 comparetotal -1.0 0.0 -> -1 -cotx069 comparetotal -1.0 1.0 -> -1 -cotx070 comparetotal -1.0 2.0 -> -1 -cotx071 comparetotal 0.0 -2.0 -> 1 -cotx072 comparetotal 0.0 -1.0 -> 1 -cotx073 comparetotal 0.0 0.0 -> 0 -cotx074 comparetotal 0.0 1.0 -> -1 -cotx075 comparetotal 0.0 2.0 -> -1 -cotx076 comparetotal 1.0 -2.0 -> 1 -cotx077 comparetotal 1.0 -1.0 -> 1 -cotx078 comparetotal 1.0 0.0 -> 1 -cotx079 comparetotal 1.0 1.0 -> 0 -cotx080 comparetotal 1.0 2.0 -> -1 -cotx081 comparetotal 2.0 -2.0 -> 1 -cotx082 comparetotal 2.0 -1.0 -> 1 -cotx083 comparetotal 2.0 0.0 -> 1 -cotx085 comparetotal 2.0 1.0 -> 1 -cotx086 comparetotal 2.0 2.0 -> 0 - --- now some cases which might overflow if subtract were used -maxexponent: 999999999 -minexponent: -999999999 -cotx090 comparetotal 9.99999999E+999999999 9.99999999E+999999999 -> 0 -cotx091 comparetotal -9.99999999E+999999999 9.99999999E+999999999 -> -1 -cotx092 comparetotal 9.99999999E+999999999 -9.99999999E+999999999 -> 1 -cotx093 comparetotal -9.99999999E+999999999 -9.99999999E+999999999 -> 0 - --- Examples -cotx094 comparetotal 12.73 127.9 -> -1 -cotx095 comparetotal -127 12 -> -1 -cotx096 comparetotal 12.30 12.3 -> -1 -cotx097 comparetotal 12.30 12.30 -> 0 -cotx098 comparetotal 12.3 12.300 -> 1 -cotx099 comparetotal 12.3 NaN -> -1 - --- some differing length/exponent cases --- in this first group, compare would compare all equal -cotx100 comparetotal 7.0 7.0 -> 0 -cotx101 comparetotal 7.0 7 -> -1 -cotx102 comparetotal 7 7.0 -> 1 -cotx103 comparetotal 7E+0 7.0 -> 1 -cotx104 comparetotal 70E-1 7.0 -> 0 -cotx105 comparetotal 0.7E+1 7 -> 0 -cotx106 comparetotal 70E-1 7 -> -1 -cotx107 comparetotal 7.0 7E+0 -> -1 -cotx108 comparetotal 7.0 70E-1 -> 0 -cotx109 comparetotal 7 0.7E+1 -> 0 -cotx110 comparetotal 7 70E-1 -> 1 - -cotx120 comparetotal 8.0 7.0 -> 1 -cotx121 comparetotal 8.0 7 -> 1 -cotx122 comparetotal 8 7.0 -> 1 -cotx123 comparetotal 8E+0 7.0 -> 1 -cotx124 comparetotal 80E-1 7.0 -> 1 
-cotx125 comparetotal 0.8E+1 7 -> 1 -cotx126 comparetotal 80E-1 7 -> 1 -cotx127 comparetotal 8.0 7E+0 -> 1 -cotx128 comparetotal 8.0 70E-1 -> 1 -cotx129 comparetotal 8 0.7E+1 -> 1 -cotx130 comparetotal 8 70E-1 -> 1 - -cotx140 comparetotal 8.0 9.0 -> -1 -cotx141 comparetotal 8.0 9 -> -1 -cotx142 comparetotal 8 9.0 -> -1 -cotx143 comparetotal 8E+0 9.0 -> -1 -cotx144 comparetotal 80E-1 9.0 -> -1 -cotx145 comparetotal 0.8E+1 9 -> -1 -cotx146 comparetotal 80E-1 9 -> -1 -cotx147 comparetotal 8.0 9E+0 -> -1 -cotx148 comparetotal 8.0 90E-1 -> -1 -cotx149 comparetotal 8 0.9E+1 -> -1 -cotx150 comparetotal 8 90E-1 -> -1 - --- and again, with sign changes -+ .. -cotx200 comparetotal -7.0 7.0 -> -1 -cotx201 comparetotal -7.0 7 -> -1 -cotx202 comparetotal -7 7.0 -> -1 -cotx203 comparetotal -7E+0 7.0 -> -1 -cotx204 comparetotal -70E-1 7.0 -> -1 -cotx205 comparetotal -0.7E+1 7 -> -1 -cotx206 comparetotal -70E-1 7 -> -1 -cotx207 comparetotal -7.0 7E+0 -> -1 -cotx208 comparetotal -7.0 70E-1 -> -1 -cotx209 comparetotal -7 0.7E+1 -> -1 -cotx210 comparetotal -7 70E-1 -> -1 - -cotx220 comparetotal -8.0 7.0 -> -1 -cotx221 comparetotal -8.0 7 -> -1 -cotx222 comparetotal -8 7.0 -> -1 From noreply at buildbot.pypy.org Mon Mar 26 14:10:14 2012 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 26 Mar 2012 14:10:14 +0200 (CEST) Subject: [pypy-commit] pypy default: remove msvcXX.dll, users should install it independently Message-ID: <20120326121014.C203A820D9@wyvern.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r53984:27550fd62027 Date: 2012-03-26 14:09 +0200 http://bitbucket.org/pypy/pypy/changeset/27550fd62027/ Log: remove msvcXX.dll, users should install it independently diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -58,33 +58,13 @@ binaries = [(pypy_c, rename_pypy_c)] # if sys.platform == 'win32': - #What runtime do we need? 
- msvc_runtime = 'msvcr80.dll' #default is studio 2005 vc8 - try: - import subprocess - out,err = subprocess.Popen([str(pypy_c), '-c', - 'import sys; print sys.version'], - stdout=subprocess.PIPE).communicate() - indx=out.find('MSC v.') + 6 - if indx> 10: - if out[indx:].startswith('1600'): - msvc_runtime = 'msvcr100.dll' #studio 2010 vc10 - elif out[indx:].startwith('1500'): - msvc_runtime = 'msvcr90.dll' #studio 2009 vc9 - elif out[indx:].startswith('1400'): - msvc_runtime = 'msvcr80.dll' #studio 2005 vc8 - else: - print 'Cannot determine runtime dll for pypy' \ - ' version "%s"'%out - else: - print 'Cannot determine runtime dll for pypy' \ - ' version "%s"'%out - except : - pass + #Don't include a mscvrXX.dll, users should get their own. + #Instructions are provided on the website. + # Can't rename a DLL: it is always called 'libpypy-c.dll' for extra in ['libpypy-c.dll', - 'libexpat.dll', 'sqlite3.dll', msvc_runtime, + 'libexpat.dll', 'sqlite3.dll', 'libeay32.dll', 'ssleay32.dll']: p = pypy_c.dirpath().join(extra) if not p.check(): From noreply at buildbot.pypy.org Mon Mar 26 14:18:54 2012 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 26 Mar 2012 14:18:54 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: Add a note about I/O and inevitable transactions. Message-ID: <20120326121854.73AFF820D9@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r350:de1d652d1462 Date: 2012-03-26 14:18 +0200 http://bitbucket.org/pypy/pypy.org/changeset/de1d652d1462/ Log: Add a note about I/O and inevitable transactions. diff --git a/source/tmdonate.txt b/source/tmdonate.txt --- a/source/tmdonate.txt +++ b/source/tmdonate.txt @@ -343,6 +343,11 @@ to conflict in the simple case). This part is not included in the estimates. + Note: by default, any I/O can be done, but turns the transaction + "inevitable". An inevitable transaction must not abort, so it must be + the next one to commit. 
This introduces delays at the end of the other + CPUs' transactions. + Total: 5 months for the initial version; at least 8 additional months for the fast version. We will go with a total estimate of 15 months, corresponding to USD$151200. The amount sought by this fundraising diff --git a/tmdonate.html b/tmdonate.html --- a/tmdonate.html +++ b/tmdonate.html @@ -304,6 +304,10 @@ (e.g. two transactions that each do samelist.append() do not need to conflict in the simple case). This part is not included in the estimates.

    +

    Note: by default, any I/O can be done, but turns the transaction +“inevitable”. An inevitable transaction must not abort, so it must be +the next one to commit. This introduces delays at the end of the other +CPUs' transactions.

    Total: 5 months for the initial version; at least 8 additional months From noreply at buildbot.pypy.org Mon Mar 26 14:40:10 2012 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 26 Mar 2012 14:40:10 +0200 (CEST) Subject: [pypy-commit] pypy set-strategies: Typo. Message-ID: <20120326124010.97B8F820D9@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: set-strategies Changeset: r53985:c8b3230070f1 Date: 2012-03-26 14:35 +0200 http://bitbucket.org/pypy/pypy/changeset/c8b3230070f1/ Log: Typo. diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -202,7 +202,7 @@ return self.strategy.getitems_str(self) def getitems_int(self): - """ Return the items in the list as unwrapped strings. If the list does + """ Return the items in the list as unwrapped ints. If the list does not use the list strategy, return None. """ return self.strategy.getitems_int(self) # ___________________________________________________ From noreply at buildbot.pypy.org Mon Mar 26 14:40:11 2012 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 26 Mar 2012 14:40:11 +0200 (CEST) Subject: [pypy-commit] pypy set-strategies: Add more tests that don't pass. Message-ID: <20120326124011.D2037820D9@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: set-strategies Changeset: r53986:31898406b59e Date: 2012-03-26 14:35 +0200 http://bitbucket.org/pypy/pypy/changeset/31898406b59e/ Log: Add more tests that don't pass. 
diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -486,6 +486,14 @@ list.__init__(l, ['a', 'b', 'c']) assert l is l0 assert l == ['a', 'b', 'c'] + list.__init__(l) + assert l == [] + + def test_explicit_new_init_more_cases(self): + for assignment in [[], (), [3], ["foo"]]: + l = [1, 2] + l.__init__(assignment) + assert l == list(assignment) def test_extend_list(self): l = l0 = [1] From noreply at buildbot.pypy.org Mon Mar 26 16:13:24 2012 From: noreply at buildbot.pypy.org (ctismer) Date: Mon, 26 Mar 2012 16:13:24 +0200 (CEST) Subject: [pypy-commit] pypy win64-stage1: Merge with default Message-ID: <20120326141324.22549820D9@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r53987:5e2fda838296 Date: 2012-03-26 16:12 +0200 http://bitbucket.org/pypy/pypy/changeset/5e2fda838296/ Log: Merge with default diff --git a/pypy/doc/you-want-to-help.rst b/pypy/doc/you-want-to-help.rst --- a/pypy/doc/you-want-to-help.rst +++ b/pypy/doc/you-want-to-help.rst @@ -56,12 +56,23 @@ xxx -* JIT +* Just-in-Time Compiler (JIT): `we have a tracing JIT`_ that traces the + interpreter written in RPython, rather than the user program that it + interprets. As a result it applies to any interpreter, i.e. any + language. But getting it to work correctly is not trivial: it + requires a small number of precise "hints" and possibly some small + refactorings of the interpreter. The JIT itself also has several + almost-independent parts: the tracer itself in ``jit/metainterp``, the + optimizer in ``jit/metainterp/optimizer`` that optimizes a list of + residual operations, and the backend in ``jit/backend/`` + that turns it into machine code. Writing a new backend is a + traditional way to get into the project. - xxx +.. 
_`we have a tracing JIT`: jit/index.html -* Garbage Collectors: as you can notice, there are no ``Py_INCREF/Py_DECREF`` - equivalents in RPython code. `Garbage collection in PyPy`_ is inserted +* Garbage Collectors (GC): as you can notice if you are used to CPython's + C code, there are no ``Py_INCREF/Py_DECREF`` equivalents in RPython code. + `Garbage collection in PyPy`_ is inserted during translation. Moreover, this is not reference counting; it is a real GC written as more RPython code. The best one we have so far is in ``rpython/memory/gc/minimark.py``. diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -16,13 +16,15 @@ appleveldefs = {} interpleveldefs = {} if sys.platform.startswith("linux"): + from pypy.module.__pypy__ import interp_time interpleveldefs["clock_gettime"] = "interp_time.clock_gettime" interpleveldefs["clock_getres"] = "interp_time.clock_getres" for name in [ "CLOCK_REALTIME", "CLOCK_MONOTONIC", "CLOCK_MONOTONIC_RAW", "CLOCK_PROCESS_CPUTIME_ID", "CLOCK_THREAD_CPUTIME_ID" ]: - interpleveldefs[name] = "space.wrap(interp_time.%s)" % name + if getattr(interp_time, name) is not None: + interpleveldefs[name] = "space.wrap(interp_time.%s)" % name class Module(MixedModule): diff --git a/pypy/module/__pypy__/interp_time.py b/pypy/module/__pypy__/interp_time.py --- a/pypy/module/__pypy__/interp_time.py +++ b/pypy/module/__pypy__/interp_time.py @@ -1,3 +1,4 @@ +from __future__ import with_statement import sys from pypy.interpreter.error import exception_from_errno diff --git a/pypy/rlib/longlong2float.py b/pypy/rlib/longlong2float.py --- a/pypy/rlib/longlong2float.py +++ b/pypy/rlib/longlong2float.py @@ -6,6 +6,7 @@ in which it does not work. 
""" +from __future__ import with_statement from pypy.annotation import model as annmodel from pypy.rlib.rarithmetic import r_int64 from pypy.rpython.lltypesystem import lltype, rffi diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -58,9 +58,13 @@ binaries = [(pypy_c, rename_pypy_c)] # if sys.platform == 'win32': + #Don't include a mscvrXX.dll, users should get their own. + #Instructions are provided on the website. + # Can't rename a DLL: it is always called 'libpypy-c.dll' + for extra in ['libpypy-c.dll', - 'libexpat.dll', 'sqlite3.dll', 'msvcr100.dll', + 'libexpat.dll', 'sqlite3.dll', 'libeay32.dll', 'ssleay32.dll']: p = pypy_c.dirpath().join(extra) if not p.check(): From noreply at buildbot.pypy.org Mon Mar 26 16:21:49 2012 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 26 Mar 2012 16:21:49 +0200 (CEST) Subject: [pypy-commit] pypy set-strategies: Regularize the interface by not having the *_update() method return Message-ID: <20120326142149.0D97E820D9@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: set-strategies Changeset: r53988:69592b18ba35 Date: 2012-03-26 15:06 +0200 http://bitbucket.org/pypy/pypy/changeset/69592b18ba35/ Log: Regularize the interface by not having the *_update() method return anything. Add comments that each w_other is supposed to be a set too. Fix a typo. Kill apparently unneeded check_for_unhashable_objects(). diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -110,35 +110,35 @@ return self.strategy.difference(self, w_other) def difference_update(self, w_other): - """ As difference but overwrites the sets content with the result. """ - return self.strategy.difference_update(self, w_other) + """ As difference but overwrites the sets content with the result. 
W_other must be a set.""" + self.strategy.difference_update(self, w_other) def symmetric_difference(self, w_other): """ Returns a set with all items that are either in this set or in w_other, but not in both. W_other must be a set. """ return self.strategy.symmetric_difference(self, w_other) def symmetric_difference_update(self, w_other): - """ As symmetric_difference but overwrites the content of the set with the result. """ - return self.strategy.symmetric_difference_update(self, w_other) + """ As symmetric_difference but overwrites the content of the set with the result. W_other must be a set.""" + self.strategy.symmetric_difference_update(self, w_other) def intersect(self, w_other): """ Returns a set with all items that exists in both sets, this set and in w_other. W_other must be a set. """ return self.strategy.intersect(self, w_other) def intersect_update(self, w_other): - """ Keeps only those elements found in both sets, removing all other elements. """ - return self.strategy.intersect_update(self, w_other) + """ Keeps only those elements found in both sets, removing all other elements. W_other must be a set.""" + self.strategy.intersect_update(self, w_other) def issubset(self, w_other): """ Checks wether this set is a subset of w_other. W_other must be a set. """ return self.strategy.issubset(self, w_other) def isdisjoint(self, w_other): - """ Checks wether this set and the w_other are completly different, i.e. have no equal elements. """ + """ Checks wether this set and the w_other are completly different, i.e. have no equal elements. W_other must be a set.""" return self.strategy.isdisjoint(self, w_other) def update(self, w_other): - """ Appends all elements from the given set to this set. """ + """ Appends all elements from the given set to this set. 
W_other must be a set.""" self.strategy.update(self, w_other) def has_key(self, w_key): @@ -146,7 +146,7 @@ return self.strategy.has_key(self, w_key) def equals(self, w_other): - """ Checks wether this set and the given set are equal, i.e. contain the same elements. """ + """ Checks wether this set and the given set are equal, i.e. contain the same elements. W_other must be a set.""" return self.strategy.equals(self, w_other) def iter(self): @@ -281,17 +281,6 @@ erase = staticmethod(erase) unerase = staticmethod(unerase) - def check_for_unhashable_objects(self, w_iterable): - w_iterator = self.space.iter(w_iterable) - while True: - try: - elem = self.space.next(w_iterator) - self.space.hash(elem) - except OperationError, e: - if not e.match(self.space, self.space.w_StopIteration): - raise - break - def get_empty_storage(self): return self.erase(None) @@ -344,15 +333,13 @@ return w_set.copy_real() def difference_update(self, w_set, w_other): - self.check_for_unhashable_objects(w_other) + pass def intersect(self, w_set, w_other): - self.check_for_unhashable_objects(w_other) return w_set.copy_real() def intersect_update(self, w_set, w_other): - self.check_for_unhashable_objects(w_other) - return w_set.copy_real() + pass def isdisjoint(self, w_set, w_other): return True @@ -622,7 +609,6 @@ storage, strategy = self._intersect_base(w_set, w_other) w_set.strategy = strategy w_set.sstorage = storage - return w_set def _issubset_unwrapped(self, w_set, w_other): d_other = self.unerase(w_other.sstorage) @@ -1061,7 +1047,7 @@ # tested in test_buildinshortcut.py #XXX do not make new setobject here w_other_as_set = w_left._newobj(space, w_other) - return space.wrap(w_left.equals(w_other)) + return space.wrap(w_left.equals(w_other_as_set)) eq__Set_frozensettypedef = eq__Set_settypedef eq__Frozenset_settypedef = eq__Set_settypedef @@ -1084,7 +1070,7 @@ def ne__Set_settypedef(space, w_left, w_other): #XXX this is not tested w_other_as_set = w_left._newobj(space, w_other) - return 
space.wrap(not w_left.equals(w_other)) + return space.wrap(not w_left.equals(w_other_as_set)) ne__Set_frozensettypedef = ne__Set_settypedef ne__Frozenset_settypedef = ne__Set_settypedef @@ -1298,7 +1284,8 @@ return def inplace_and__Set_Set(space, w_left, w_other): - return w_left.intersect_update(w_other) + w_left.intersect_update(w_other) + return w_left inplace_and__Set_Frozenset = inplace_and__Set_Set From noreply at buildbot.pypy.org Mon Mar 26 16:21:50 2012 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 26 Mar 2012 16:21:50 +0200 (CEST) Subject: [pypy-commit] pypy set-strategies: Optimize and fix _convert_set_to_frozenset(). Message-ID: <20120326142150.4A553820D9@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: set-strategies Changeset: r53989:55952e613207 Date: 2012-03-26 15:47 +0200 http://bitbucket.org/pypy/pypy/changeset/55952e613207/ Log: Optimize and fix _convert_set_to_frozenset(). The two cases in copy_real() contain the same code. Kill a useless special case. diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -399,16 +399,11 @@ w_set.switch_to_empty_strategy() def copy_real(self, w_set): + # may be used internally on frozen sets, although frozenset().copy() + # returns self in frozenset_copy__Frozenset. 
strategy = w_set.strategy - if isinstance(w_set, W_FrozensetObject): - # only used internally since frozenset().copy() - # returns self in frozenset_copy__Frozenset - d = self.unerase(w_set.sstorage) - storage = self.erase(d.copy()) - #storage = w_set.sstorage - else: - d = self.unerase(w_set.sstorage) - storage = self.erase(d.copy()) + d = self.unerase(w_set.sstorage) + storage = self.erase(d.copy()) clone = w_set.from_storage_and_strategy(storage, strategy) return clone @@ -958,14 +953,15 @@ set_strategy_and_setdata(space, w_obj, w_iterable) def _convert_set_to_frozenset(space, w_obj): - #XXX can be optimized - if space.is_true(space.isinstance(w_obj, space.w_set)): - assert isinstance(w_obj, W_SetObject) - #XXX better instantiate? + if isinstance(w_obj, W_SetObject): w_frozen = W_FrozensetObject(space, None) w_frozen.strategy = w_obj.strategy w_frozen.sstorage = w_obj.sstorage return w_frozen + elif space.isinstance_w(w_obj, space.w_set): + w_frz = space.allocate_instance(W_FrozensetObject, space.w_frozenset) + W_FrozensetObject.__init__(w_frz, space, w_obj) + return w_frz else: return None @@ -1011,8 +1007,6 @@ sub__Frozenset_Frozenset = sub__Set_Set def set_difference__Set(space, w_left, others_w): - if len(others_w) == 0: - return w_left.copy_real() result = w_left.copy_real() set_difference_update__Set(space, result, others_w) return result From noreply at buildbot.pypy.org Mon Mar 26 16:21:51 2012 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 26 Mar 2012 16:21:51 +0200 (CEST) Subject: [pypy-commit] pypy set-strategies: Simplifications. Message-ID: <20120326142151.8C77E820D9@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: set-strategies Changeset: r53990:b87139b22ee8 Date: 2012-03-26 16:03 +0200 http://bitbucket.org/pypy/pypy/changeset/b87139b22ee8/ Log: Simplifications. 
diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -160,26 +160,26 @@ class W_SetObject(W_BaseSetObject): from pypy.objspace.std.settype import set_typedef as typedef - def _newobj(w_self, space, rdict_w): - """Make a new set by taking ownership of 'rdict_w'.""" + def _newobj(w_self, space, w_iterable): + """Make a new set by taking ownership of 'w_iterable'.""" if type(w_self) is W_SetObject: - return W_SetObject(space, rdict_w) + return W_SetObject(space, w_iterable) w_type = space.type(w_self) w_obj = space.allocate_instance(W_SetObject, w_type) - W_SetObject.__init__(w_obj, space, rdict_w) + W_SetObject.__init__(w_obj, space, w_iterable) return w_obj class W_FrozensetObject(W_BaseSetObject): from pypy.objspace.std.frozensettype import frozenset_typedef as typedef hash = 0 - def _newobj(w_self, space, rdict_w): - """Make a new frozenset by taking ownership of 'rdict_w'.""" + def _newobj(w_self, space, w_iterable): + """Make a new frozenset by taking ownership of 'w_iterable'.""" if type(w_self) is W_FrozensetObject: - return W_FrozensetObject(space, rdict_w) + return W_FrozensetObject(space, w_iterable) w_type = space.type(w_self) w_obj = space.allocate_instance(W_FrozensetObject, w_type) - W_FrozensetObject.__init__(w_obj, space, rdict_w) + W_FrozensetObject.__init__(w_obj, space, w_iterable) return w_obj registerimplementation(W_BaseSetObject) @@ -1230,10 +1230,9 @@ and__Frozenset_Set = and__Set_Set and__Frozenset_Frozenset = and__Set_Set -def _intersection_multiple(space, w_left, others_w): +def set_intersection__Set(space, w_left, others_w): #XXX find smarter implementations - others_w = others_w[:] # original others_w can't be resized - others_w.append(w_left) + others_w = [w_left] + others_w # find smallest set in others_w to reduce comparisons startindex, startlength = 0, -1 @@ -1263,12 +1262,6 @@ result.intersect_update(w_other_as_set) return result 
-def set_intersection__Set(space, w_left, others_w): - if len(others_w) == 0: - return w_left.copy_real() - else: - return _intersection_multiple(space, w_left, others_w) - frozenset_intersection__Frozenset = set_intersection__Set def set_intersection_update__Set(space, w_left, others_w): From noreply at buildbot.pypy.org Mon Mar 26 16:21:52 2012 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 26 Mar 2012 16:21:52 +0200 (CEST) Subject: [pypy-commit] pypy set-strategies: Note Message-ID: <20120326142152.CD15E820D9@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: set-strategies Changeset: r53991:1c2906d64ffe Date: 2012-03-26 16:05 +0200 http://bitbucket.org/pypy/pypy/changeset/1c2906d64ffe/ Log: Note diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -862,6 +862,8 @@ class W_SetIterObject(W_Object): from pypy.objspace.std.settype import setiter_typedef as typedef + # XXX this class should be killed, and the various + # iterimplementations should be W_Objects directly. def __init__(w_self, space, iterimplementation): w_self.space = space From noreply at buildbot.pypy.org Mon Mar 26 16:21:54 2012 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 26 Mar 2012 16:21:54 +0200 (CEST) Subject: [pypy-commit] pypy set-strategies: Fix test_listobject by moving 'w_list.__init__(space, [])' to the start Message-ID: <20120326142154.149F0820D9@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: set-strategies Changeset: r53992:7127deeed841 Date: 2012-03-26 16:16 +0200 http://bitbucket.org/pypy/pypy/changeset/7127deeed841/ Log: Fix test_listobject by moving 'w_list.__init__(space, [])' to the start of the function again, and optimizing it to 'w_list.clear()', which does not perform any allocation if liststrategies are enabled. 
diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -139,6 +139,15 @@ new erased object as storage""" self.strategy.init_from_list_w(self, list_w) + def clear(self): + """Make the listobject empty.""" + if self.space.config.objspace.std.withliststrategies: + strategy = self.space.fromcache(EmptyListStrategy) + else: + strategy = self.space.fromcache(ObjectListStrategy) + self.strategy = strategy + strategy.clear(self) + def clone(self): """Returns a clone by creating a new listobject with the same strategy and a copy of the storage""" @@ -366,6 +375,9 @@ assert len(list_w) == 0 w_list.lstorage = self.erase(None) + def clear(self, w_list): + w_list.lstorage = self.erase(None) + erase, unerase = rerased.new_erasing_pair("empty") erase = staticmethod(erase) unerase = staticmethod(unerase) @@ -949,6 +961,9 @@ def init_from_list_w(self, w_list, list_w): w_list.lstorage = self.erase(list_w) + def clear(self, w_list): + w_list.lstorage = self.erase([]) + def contains(self, w_list, w_obj): return ListStrategy.contains(self, w_list, w_obj) @@ -1052,6 +1067,7 @@ # this is on the silly side w_iterable, = __args__.parse_obj( None, 'list', init_signature, init_defaults) + w_list.clear() if w_iterable is not None: if isinstance(w_iterable, W_ListObject): w_iterable.copy_into(w_list) @@ -1074,7 +1090,6 @@ w_list.lstorage = strategy.erase(strlist[:]) return - w_list.__init__(space, []) # xxx special hack for speed from pypy.interpreter.generator import GeneratorIterator if isinstance(w_iterable, GeneratorIterator): @@ -1082,8 +1097,6 @@ return # /xxx _init_from_iterable(space, w_list, w_iterable) - else: - w_list.__init__(space, []) def _init_from_iterable(space, w_list, w_iterable): # in its own function to make the JIT look into init__List From noreply at buildbot.pypy.org Mon Mar 26 16:30:12 2012 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 26 Mar 2012 16:30:12 
+0200 (CEST) Subject: [pypy-commit] pypy set-strategies: Performance improvement. Message-ID: <20120326143012.26E34820D9@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: set-strategies Changeset: r53993:c49198dda243 Date: 2012-03-26 16:29 +0200 http://bitbucket.org/pypy/pypy/changeset/c49198dda243/ Log: Performance improvement. diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -139,12 +139,13 @@ new erased object as storage""" self.strategy.init_from_list_w(self, list_w) - def clear(self): - """Make the listobject empty.""" - if self.space.config.objspace.std.withliststrategies: - strategy = self.space.fromcache(EmptyListStrategy) + def clear(self, space): + """Initializes (or overrides) the listobject as empty.""" + self.space = space + if space.config.objspace.std.withliststrategies: + strategy = space.fromcache(EmptyListStrategy) else: - strategy = self.space.fromcache(ObjectListStrategy) + strategy = space.fromcache(ObjectListStrategy) self.strategy = strategy strategy.clear(self) @@ -1067,7 +1068,7 @@ # this is on the silly side w_iterable, = __args__.parse_obj( None, 'list', init_signature, init_defaults) - w_list.clear() + w_list.clear(space) if w_iterable is not None: if isinstance(w_iterable, W_ListObject): w_iterable.copy_into(w_list) diff --git a/pypy/objspace/std/listtype.py b/pypy/objspace/std/listtype.py --- a/pypy/objspace/std/listtype.py +++ b/pypy/objspace/std/listtype.py @@ -43,7 +43,7 @@ def descr__new__(space, w_listtype, __args__): from pypy.objspace.std.listobject import W_ListObject w_obj = space.allocate_instance(W_ListObject, w_listtype) - W_ListObject.__init__(w_obj, space, []) + w_obj.clear(space) return w_obj # ____________________________________________________________ From noreply at buildbot.pypy.org Mon Mar 26 18:52:31 2012 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 26 Mar 2012 18:52:31 +0200 (CEST) Subject: 
[pypy-commit] pypy default: skip this test on pythn <2.7 Message-ID: <20120326165231.60E28822B0@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r53995:488d8188363e Date: 2012-03-26 18:49 +0200 http://bitbucket.org/pypy/pypy/changeset/488d8188363e/ Log: skip this test on pythn <2.7 diff --git a/pypy/objspace/flow/test/test_objspace.py b/pypy/objspace/flow/test/test_objspace.py --- a/pypy/objspace/flow/test/test_objspace.py +++ b/pypy/objspace/flow/test/test_objspace.py @@ -1,6 +1,6 @@ from __future__ import with_statement import new -import py +import py, sys from pypy.objspace.flow.model import Constant, Block, Link, Variable from pypy.objspace.flow.model import mkentrymap, c_last_exception from pypy.interpreter.argument import Arguments @@ -893,6 +893,8 @@ """ Tests code generated by pypy-c compiled with BUILD_LIST_FROM_ARG bytecode """ + if sys.version_info < (2, 7): + py.test.skip("2.7 only test") self.patch_opcodes('BUILD_LIST_FROM_ARG') try: def f(): From noreply at buildbot.pypy.org Mon Mar 26 18:52:30 2012 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 26 Mar 2012 18:52:30 +0200 (CEST) Subject: [pypy-commit] pypy default: I'm sure this is no longer necessary Message-ID: <20120326165230.22F81820D9@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r53994:17e5fb6b2717 Date: 2012-03-25 18:44 +0200 http://bitbucket.org/pypy/pypy/changeset/17e5fb6b2717/ Log: I'm sure this is no longer necessary diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py --- a/pypy/interpreter/astcompiler/test/test_astbuilder.py +++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py @@ -10,16 +10,6 @@ from pypy.interpreter.astcompiler import ast, consts -try: - all -except NameError: - def all(iterable): - for x in iterable: - if not x: - return False - return True - - class TestAstBuilder: def setup_class(cls): From noreply at buildbot.pypy.org Mon Mar 26 
18:52:32 2012 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 26 Mar 2012 18:52:32 +0200 (CEST) Subject: [pypy-commit] pypy default: merge; Message-ID: <20120326165232.99C9F820D9@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r53996:a663a613379a Date: 2012-03-26 18:51 +0200 http://bitbucket.org/pypy/pypy/changeset/a663a613379a/ Log: merge; diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py --- a/pypy/interpreter/astcompiler/test/test_astbuilder.py +++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py @@ -10,16 +10,6 @@ from pypy.interpreter.astcompiler import ast, consts -try: - all -except NameError: - def all(iterable): - for x in iterable: - if not x: - return False - return True - - class TestAstBuilder: def setup_class(cls): From noreply at buildbot.pypy.org Mon Mar 26 18:54:21 2012 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 26 Mar 2012 18:54:21 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-out: merge default Message-ID: <20120326165421.E1188820D9@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: numpypy-out Changeset: r53997:e43ad1eda2c5 Date: 2012-03-26 18:53 +0200 http://bitbucket.org/pypy/pypy/changeset/e43ad1eda2c5/ Log: merge default diff --git a/pypy/doc/discussion/win64_todo.txt b/pypy/doc/discussion/win64_todo.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/discussion/win64_todo.txt @@ -0,0 +1,9 @@ +2011-11-04 +ll_os.py has a problem with the file rwin32.py. +Temporarily disabled for the win64_gborg branch. This needs to be +investigated and re-enabled. +Resolved, enabled. + +2011-11-05 +test_typed.py needs explicit tests to ensure that we +handle word sizes right. \ No newline at end of file diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -18,7 +18,8 @@ Edition. Other configurations may work as well. 
The translation scripts will set up the appropriate environment variables -for the compiler. They will attempt to locate the same compiler version that +for the compiler, so you do not need to run vcvars before translation. +They will attempt to locate the same compiler version that was used to build the Python interpreter doing the translation. Failing that, they will pick the most recent Visual Studio compiler they can find. In addition, the target architecture @@ -26,7 +27,7 @@ using a 32 bit Python and vice versa. **Note:** PyPy is currently not supported for 64 bit Windows, and translation -will be aborted in this case. +will fail in this case. The compiler is all you need to build pypy-c, but it will miss some modules that relies on third-party libraries. See below how to get @@ -57,7 +58,8 @@ install third-party libraries. We chose to install them in the parent directory of the pypy checkout. For example, if you installed pypy in ``d:\pypy\trunk\`` (This directory contains a README file), the base -directory is ``d:\pypy``. +directory is ``d:\pypy``. You may choose different values by setting the +INCLUDE, LIB and PATH (for DLLs) The Boehm garbage collector ~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -126,18 +128,54 @@ ------------------------ You can compile pypy with the mingw compiler, using the --cc=mingw32 option; -mingw.exe must be on the PATH. +gcc.exe must be on the PATH. If the -cc flag does not begin with "ming", it should be +the name of a valid gcc-derivative compiler, i.e. x86_64-w64-mingw32-gcc for the 64 bit +compiler creating a 64 bit target. -libffi for the mingw32 compiler +libffi for the mingw compiler ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -To enable the _rawffi (and ctypes) module, you need to compile a mingw32 -version of libffi. I downloaded the `libffi source files`_, and extracted -them in the base directory. Then run:: +To enable the _rawffi (and ctypes) module, you need to compile a mingw +version of libffi. 
Here is one way to do this, wich should allow you to try +to build for win64 or win32: + +#. Download and unzip a `mingw32 build`_ or `mingw64 build`_, say into c:\mingw +#. If you do not use cygwin, you will need msys to provide make, + autoconf tools and other goodies. + + #. Download and unzip a `msys for mingw`_, say into c:\msys + #. Edit the c:\msys\etc\fstab file to mount c:\mingw + +#. Download and unzip the `libffi source files`_, and extract + them in the base directory. +#. Run c:\msys\msys.bat or a cygwin shell which should make you + feel better since it is a shell prompt with shell tools. +#. From inside the shell, cd to the libffi directory and do:: sh ./configure make cp .libs/libffi-5.dll +If you can't find the dll, and the libtool issued a warning about +"undefined symbols not allowed", you will need to edit the libffi +Makefile in the toplevel directory. Add the flag -no-undefined to +the definition of libffi_la_LDFLAGS + +If you wish to experiment with win64, you must run configure with flags:: + + sh ./configure --build=x86_64-w64-mingw32 --host=x86_64-w64-mingw32 + +or such, depending on your mingw64 download. + +hacking on Pypy with the mingw compiler +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Since hacking on Pypy means running tests, you will need a way to specify +the mingw compiler when hacking (as opposed to translating). As of +March 2012, --cc is not a valid option for pytest.py. However if you set an +environment variable CC it will allow you to choose a compiler. + +.. _'mingw32 build': http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win32/Automated%20Builds +.. _`mingw64 build`: http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win64/Automated%20Builds +.. _`msys for mingw`: http://sourceforge.net/projects/mingw-w64/files/External%20binary%20packages%20%28Win64%20hosted%29/MSYS%20%2832-bit%29 .. _`libffi source files`: http://sourceware.org/libffi/ .. 
_`RPython translation toolchain`: translation.html diff --git a/pypy/doc/you-want-to-help.rst b/pypy/doc/you-want-to-help.rst --- a/pypy/doc/you-want-to-help.rst +++ b/pypy/doc/you-want-to-help.rst @@ -56,12 +56,23 @@ xxx -* JIT +* Just-in-Time Compiler (JIT): `we have a tracing JIT`_ that traces the + interpreter written in RPython, rather than the user program that it + interprets. As a result it applies to any interpreter, i.e. any + language. But getting it to work correctly is not trivial: it + requires a small number of precise "hints" and possibly some small + refactorings of the interpreter. The JIT itself also has several + almost-independent parts: the tracer itself in ``jit/metainterp``, the + optimizer in ``jit/metainterp/optimizer`` that optimizes a list of + residual operations, and the backend in ``jit/backend/`` + that turns it into machine code. Writing a new backend is a + traditional way to get into the project. - xxx +.. _`we have a tracing JIT`: jit/index.html -* Garbage Collectors: as you can notice, there are no ``Py_INCREF/Py_DECREF`` - equivalents in RPython code. `Garbage collection in PyPy`_ is inserted +* Garbage Collectors (GC): as you can notice if you are used to CPython's + C code, there are no ``Py_INCREF/Py_DECREF`` equivalents in RPython code. + `Garbage collection in PyPy`_ is inserted during translation. Moreover, this is not reference counting; it is a real GC written as more RPython code. The best one we have so far is in ``rpython/memory/gc/minimark.py``. 
diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py --- a/pypy/interpreter/astcompiler/test/test_astbuilder.py +++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py @@ -10,16 +10,6 @@ from pypy.interpreter.astcompiler import ast, consts -try: - all -except NameError: - def all(iterable): - for x in iterable: - if not x: - return False - return True - - class TestAstBuilder: def setup_class(cls): diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -16,13 +16,15 @@ appleveldefs = {} interpleveldefs = {} if sys.platform.startswith("linux"): + from pypy.module.__pypy__ import interp_time interpleveldefs["clock_gettime"] = "interp_time.clock_gettime" interpleveldefs["clock_getres"] = "interp_time.clock_getres" for name in [ "CLOCK_REALTIME", "CLOCK_MONOTONIC", "CLOCK_MONOTONIC_RAW", "CLOCK_PROCESS_CPUTIME_ID", "CLOCK_THREAD_CPUTIME_ID" ]: - interpleveldefs[name] = "space.wrap(interp_time.%s)" % name + if getattr(interp_time, name) is not None: + interpleveldefs[name] = "space.wrap(interp_time.%s)" % name class Module(MixedModule): diff --git a/pypy/module/__pypy__/interp_time.py b/pypy/module/__pypy__/interp_time.py --- a/pypy/module/__pypy__/interp_time.py +++ b/pypy/module/__pypy__/interp_time.py @@ -1,3 +1,4 @@ +from __future__ import with_statement import sys from pypy.interpreter.error import exception_from_errno diff --git a/pypy/objspace/flow/test/test_objspace.py b/pypy/objspace/flow/test/test_objspace.py --- a/pypy/objspace/flow/test/test_objspace.py +++ b/pypy/objspace/flow/test/test_objspace.py @@ -1,6 +1,6 @@ from __future__ import with_statement import new -import py +import py, sys from pypy.objspace.flow.model import Constant, Block, Link, Variable from pypy.objspace.flow.model import mkentrymap, c_last_exception from pypy.interpreter.argument import Arguments @@ -893,6 
+893,8 @@ """ Tests code generated by pypy-c compiled with BUILD_LIST_FROM_ARG bytecode """ + if sys.version_info < (2, 7): + py.test.skip("2.7 only test") self.patch_opcodes('BUILD_LIST_FROM_ARG') try: def f(): diff --git a/pypy/rlib/_rffi_stacklet.py b/pypy/rlib/_rffi_stacklet.py --- a/pypy/rlib/_rffi_stacklet.py +++ b/pypy/rlib/_rffi_stacklet.py @@ -14,7 +14,7 @@ includes = ['src/stacklet/stacklet.h'], separate_module_sources = ['#include "src/stacklet/stacklet.c"\n'], ) -if sys.platform == 'win32': +if 'masm' in dir(eci.platform): # Microsoft compiler if is_emulated_long: asmsrc = 'switch_x64_msvc.asm' else: diff --git a/pypy/rlib/_rsocket_rffi.py b/pypy/rlib/_rsocket_rffi.py --- a/pypy/rlib/_rsocket_rffi.py +++ b/pypy/rlib/_rsocket_rffi.py @@ -58,12 +58,12 @@ header_lines = [ '#include ', '#include ', + '#include ', # winsock2 defines AF_UNIX, but not sockaddr_un '#undef AF_UNIX', ] if _MSVC: header_lines.extend([ - '#include ', # these types do not exist on microsoft compilers 'typedef int ssize_t;', 'typedef unsigned __int16 uint16_t;', @@ -71,6 +71,7 @@ ]) else: # MINGW includes = ('stdint.h',) + """ header_lines.extend([ '''\ #ifndef _WIN32_WINNT @@ -88,6 +89,7 @@ u_long keepaliveinterval; };''' ]) + """ HEADER = '\n'.join(header_lines) COND_HEADER = '' constants = {} diff --git a/pypy/rlib/clibffi.py b/pypy/rlib/clibffi.py --- a/pypy/rlib/clibffi.py +++ b/pypy/rlib/clibffi.py @@ -114,9 +114,10 @@ ) eci = rffi_platform.configure_external_library( - 'libffi', eci, + 'libffi-5', eci, [dict(prefix='libffi-', include_dir='include', library_dir='.libs'), + dict(prefix=r'c:\mingw64', include_dir='include', library_dir='lib'), ]) else: libffidir = py.path.local(pypydir).join('translator', 'c', 'src', 'libffi_msvc') diff --git a/pypy/rlib/longlong2float.py b/pypy/rlib/longlong2float.py --- a/pypy/rlib/longlong2float.py +++ b/pypy/rlib/longlong2float.py @@ -6,6 +6,7 @@ in which it does not work. 
""" +from __future__ import with_statement from pypy.annotation import model as annmodel from pypy.rlib.rarithmetic import r_int64 from pypy.rpython.lltypesystem import lltype, rffi diff --git a/pypy/rlib/rmmap.py b/pypy/rlib/rmmap.py --- a/pypy/rlib/rmmap.py +++ b/pypy/rlib/rmmap.py @@ -711,9 +711,9 @@ free = c_munmap_safe elif _MS_WINDOWS: - def mmap(fileno, length, flags=0, tagname="", access=_ACCESS_DEFAULT, offset=0): + def mmap(fileno, length, tagname="", access=_ACCESS_DEFAULT, offset=0): # XXX flags is or-ed into access by now. - + flags = 0 # check size boundaries _check_map_size(length) map_size = length diff --git a/pypy/rlib/rwin32.py b/pypy/rlib/rwin32.py --- a/pypy/rlib/rwin32.py +++ b/pypy/rlib/rwin32.py @@ -141,6 +141,10 @@ cfile = udir.join('dosmaperr.c') cfile.write(r''' #include + #include + #ifdef __GNUC__ + #define _dosmaperr mingw_dosmaperr + #endif int main() { int i; diff --git a/pypy/rlib/test/autopath.py b/pypy/rlib/test/autopath.py new file mode 100644 --- /dev/null +++ b/pypy/rlib/test/autopath.py @@ -0,0 +1,131 @@ +""" +self cloning, automatic path configuration + +copy this into any subdirectory of pypy from which scripts need +to be run, typically all of the test subdirs. +The idea is that any such script simply issues + + import autopath + +and this will make sure that the parent directory containing "pypy" +is in sys.path. + +If you modify the master "autopath.py" version (in pypy/tool/autopath.py) +you can directly run it which will copy itself on all autopath.py files +it finds under the pypy root directory. + +This module always provides these attributes: + + pypydir pypy root directory path + this_dir directory where this autopath.py resides + +""" + +def __dirinfo(part): + """ return (partdir, this_dir) and insert parent of partdir + into sys.path. 
If the parent directories don't have the part + an EnvironmentError is raised.""" + + import sys, os + try: + head = this_dir = os.path.realpath(os.path.dirname(__file__)) + except NameError: + head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0])) + + error = None + while head: + partdir = head + head, tail = os.path.split(head) + if tail == part: + checkfile = os.path.join(partdir, os.pardir, 'pypy', '__init__.py') + if not os.path.exists(checkfile): + error = "Cannot find %r" % (os.path.normpath(checkfile),) + break + else: + error = "Cannot find the parent directory %r of the path %r" % ( + partdir, this_dir) + if not error: + # check for bogus end-of-line style (e.g. files checked out on + # Windows and moved to Unix) + f = open(__file__.replace('.pyc', '.py'), 'r') + data = f.read() + f.close() + if data.endswith('\r\n') or data.endswith('\r'): + error = ("Bad end-of-line style in the .py files. Typically " + "caused by a zip file or a checkout done on Windows and " + "moved to Unix or vice-versa.") + if error: + raise EnvironmentError("Invalid source tree - bogus checkout! " + + error) + + pypy_root = os.path.join(head, '') + try: + sys.path.remove(head) + except ValueError: + pass + sys.path.insert(0, head) + + munged = {} + for name, mod in sys.modules.items(): + if '.' in name: + continue + fn = getattr(mod, '__file__', None) + if not isinstance(fn, str): + continue + newname = os.path.splitext(os.path.basename(fn))[0] + if not newname.startswith(part + '.'): + continue + path = os.path.join(os.path.dirname(os.path.realpath(fn)), '') + if path.startswith(pypy_root) and newname != part: + modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep) + if newname != '__init__': + modpaths.append(newname) + modpath = '.'.join(modpaths) + if modpath not in sys.modules: + munged[modpath] = mod + + for name, mod in munged.iteritems(): + if name not in sys.modules: + sys.modules[name] = mod + if '.' 
in name: + prename = name[:name.rfind('.')] + postname = name[len(prename)+1:] + if prename not in sys.modules: + __import__(prename) + if not hasattr(sys.modules[prename], postname): + setattr(sys.modules[prename], postname, mod) + + return partdir, this_dir + +def __clone(): + """ clone master version of autopath.py into all subdirs """ + from os.path import join, walk + if not this_dir.endswith(join('pypy','tool')): + raise EnvironmentError("can only clone master version " + "'%s'" % join(pypydir, 'tool',_myname)) + + + def sync_walker(arg, dirname, fnames): + if _myname in fnames: + fn = join(dirname, _myname) + f = open(fn, 'rwb+') + try: + if f.read() == arg: + print "checkok", fn + else: + print "syncing", fn + f = open(fn, 'w') + f.write(arg) + finally: + f.close() + s = open(join(pypydir, 'tool', _myname), 'rb').read() + walk(pypydir, sync_walker, s) + +_myname = 'autopath.py' + +# set guaranteed attributes + +pypydir, this_dir = __dirinfo('pypy') + +if __name__ == '__main__': + __clone() diff --git a/pypy/rpython/tool/rffi_platform.py b/pypy/rpython/tool/rffi_platform.py --- a/pypy/rpython/tool/rffi_platform.py +++ b/pypy/rpython/tool/rffi_platform.py @@ -660,8 +660,8 @@ if isinstance(fieldtype, lltype.FixedSizeArray): size, _ = expected_size_and_sign return lltype.FixedSizeArray(fieldtype.OF, size/_sizeof(fieldtype.OF)) - raise TypeError("conflicting field type %r for %r" % (fieldtype, - fieldname)) + raise TypeError("conflict between translating python and compiler field" + " type %r for %r" % (fieldtype, fieldname)) def expose_value_as_rpython(value): if intmask(value) == value: diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -58,9 +58,13 @@ binaries = [(pypy_c, rename_pypy_c)] # if sys.platform == 'win32': + #Don't include a mscvrXX.dll, users should get their own. + #Instructions are provided on the website. 
+ # Can't rename a DLL: it is always called 'libpypy-c.dll' + for extra in ['libpypy-c.dll', - 'libexpat.dll', 'sqlite3.dll', 'msvcr100.dll', + 'libexpat.dll', 'sqlite3.dll', 'libeay32.dll', 'ssleay32.dll']: p = pypy_c.dirpath().join(extra) if not p.check(): diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -484,7 +484,9 @@ 'shl', 'shr', 'sal', 'sar', 'rol', 'ror', 'mul', 'imul', 'div', 'idiv', 'bswap', 'bt', 'rdtsc', 'punpck', 'pshufd', 'pcmp', 'pand', 'psllw', 'pslld', 'psllq', - 'paddq', 'pinsr', 'pmul', 'psrl', 'vmul', + 'paddq', 'pinsr', 'pmul', 'psrl', + # all vectors don't produce pointers + 'v', # sign-extending moves should not produce GC pointers 'cbtw', 'cwtl', 'cwtd', 'cltd', 'cltq', 'cqto', # zero-extending moves should not produce GC pointers diff --git a/pypy/translator/c/src/libffi_msvc/win64.asm b/pypy/translator/c/src/libffi_msvc/win64.asm new file mode 100644 --- /dev/null +++ b/pypy/translator/c/src/libffi_msvc/win64.asm @@ -0,0 +1,156 @@ +PUBLIC ffi_call_AMD64 + +EXTRN __chkstk:NEAR +EXTRN ffi_closure_SYSV:NEAR + +_TEXT SEGMENT + +;;; ffi_closure_OUTER will be called with these registers set: +;;; rax points to 'closure' +;;; r11 contains a bit mask that specifies which of the +;;; first four parameters are float or double +;;; +;;; It must move the parameters passed in registers to their stack location, +;;; call ffi_closure_SYSV for the actual work, then return the result. +;;; +ffi_closure_OUTER PROC FRAME + ;; save actual arguments to their stack space. 
+ test r11, 1 + jne first_is_float + mov QWORD PTR [rsp+8], rcx + jmp second +first_is_float: + movlpd QWORD PTR [rsp+8], xmm0 + +second: + test r11, 2 + jne second_is_float + mov QWORD PTR [rsp+16], rdx + jmp third +second_is_float: + movlpd QWORD PTR [rsp+16], xmm1 + +third: + test r11, 4 + jne third_is_float + mov QWORD PTR [rsp+24], r8 + jmp forth +third_is_float: + movlpd QWORD PTR [rsp+24], xmm2 + +forth: + test r11, 8 + jne forth_is_float + mov QWORD PTR [rsp+32], r9 + jmp done +forth_is_float: + movlpd QWORD PTR [rsp+32], xmm3 + +done: +.ALLOCSTACK 40 + sub rsp, 40 +.ENDPROLOG + mov rcx, rax ; context is first parameter + mov rdx, rsp ; stack is second parameter + add rdx, 40 ; correct our own area + mov rax, ffi_closure_SYSV + call rax ; call the real closure function + ;; Here, code is missing that handles float return values + add rsp, 40 + movd xmm0, rax ; In case the closure returned a float. + ret 0 +ffi_closure_OUTER ENDP + + +;;; ffi_call_AMD64 + +stack$ = 0 +prepfunc$ = 32 +ecif$ = 40 +bytes$ = 48 +flags$ = 56 +rvalue$ = 64 +fn$ = 72 + +ffi_call_AMD64 PROC FRAME + + mov QWORD PTR [rsp+32], r9 + mov QWORD PTR [rsp+24], r8 + mov QWORD PTR [rsp+16], rdx + mov QWORD PTR [rsp+8], rcx +.PUSHREG rbp + push rbp +.ALLOCSTACK 48 + sub rsp, 48 ; 00000030H +.SETFRAME rbp, 32 + lea rbp, QWORD PTR [rsp+32] +.ENDPROLOG + + mov eax, DWORD PTR bytes$[rbp] + add rax, 15 + and rax, -16 + call __chkstk + sub rsp, rax + lea rax, QWORD PTR [rsp+32] + mov QWORD PTR stack$[rbp], rax + + mov rdx, QWORD PTR ecif$[rbp] + mov rcx, QWORD PTR stack$[rbp] + call QWORD PTR prepfunc$[rbp] + + mov rsp, QWORD PTR stack$[rbp] + + movlpd xmm3, QWORD PTR [rsp+24] + movd r9, xmm3 + + movlpd xmm2, QWORD PTR [rsp+16] + movd r8, xmm2 + + movlpd xmm1, QWORD PTR [rsp+8] + movd rdx, xmm1 + + movlpd xmm0, QWORD PTR [rsp] + movd rcx, xmm0 + + call QWORD PTR fn$[rbp] +ret_int$: + cmp DWORD PTR flags$[rbp], 1 ; FFI_TYPE_INT + jne ret_float$ + + mov rcx, QWORD PTR rvalue$[rbp] + mov DWORD PTR 
[rcx], eax + jmp SHORT ret_nothing$ + +ret_float$: + cmp DWORD PTR flags$[rbp], 2 ; FFI_TYPE_FLOAT + jne SHORT ret_double$ + + mov rax, QWORD PTR rvalue$[rbp] + movlpd QWORD PTR [rax], xmm0 + jmp SHORT ret_nothing$ + +ret_double$: + cmp DWORD PTR flags$[rbp], 3 ; FFI_TYPE_DOUBLE + jne SHORT ret_int64$ + + mov rax, QWORD PTR rvalue$[rbp] + movlpd QWORD PTR [rax], xmm0 + jmp SHORT ret_nothing$ + +ret_int64$: + cmp DWORD PTR flags$[rbp], 12 ; FFI_TYPE_SINT64 + jne ret_nothing$ + + mov rcx, QWORD PTR rvalue$[rbp] + mov QWORD PTR [rcx], rax + jmp SHORT ret_nothing$ + +ret_nothing$: + xor eax, eax + + lea rsp, QWORD PTR [rbp+16] + pop rbp + ret 0 +ffi_call_AMD64 ENDP +_TEXT ENDS +END diff --git a/pypy/translator/platform/__init__.py b/pypy/translator/platform/__init__.py --- a/pypy/translator/platform/__init__.py +++ b/pypy/translator/platform/__init__.py @@ -301,6 +301,8 @@ global platform log.msg("Setting platform to %r cc=%s" % (new_platform,cc)) platform = pick_platform(new_platform, cc) + if not platform: + raise ValueError("pick_platform failed") if new_platform == 'host': global host diff --git a/pypy/translator/platform/windows.py b/pypy/translator/platform/windows.py --- a/pypy/translator/platform/windows.py +++ b/pypy/translator/platform/windows.py @@ -7,15 +7,27 @@ from pypy.translator.platform import log, _run_subprocess from pypy.translator.platform import Platform, posix +def _get_compiler_type(cc, x64_flag): + import subprocess + if not cc: + cc = os.environ.get('CC','') + if not cc: + return MsvcPlatform(cc=cc, x64=x64_flag) + elif cc.startswith('mingw'): + return MingwPlatform(cc) + try: + subprocess.check_output([cc, '--version']) + except: + raise ValueError,"Could not find compiler specified by cc option" + \ + " '%s', it must be a valid exe file on your path"%cc + return MingwPlatform(cc) + def Windows(cc=None): - if cc == 'mingw32': - return MingwPlatform(cc) - else: - return MsvcPlatform(cc, False) + return _get_compiler_type(cc, False) + +def 
Windows_x64(cc=None): + return _get_compiler_type(cc, True) -def Windows_x64(cc=None): - return MsvcPlatform(cc, True) - def _get_msvc_env(vsver, x64flag): try: toolsdir = os.environ['VS%sCOMNTOOLS' % vsver] @@ -31,14 +43,16 @@ vcvars = os.path.join(toolsdir, 'vsvars32.bat') import subprocess - popen = subprocess.Popen('"%s" & set' % (vcvars,), + try: + popen = subprocess.Popen('"%s" & set' % (vcvars,), stdout=subprocess.PIPE, stderr=subprocess.PIPE) - stdout, stderr = popen.communicate() - if popen.wait() != 0: - return - + stdout, stderr = popen.communicate() + if popen.wait() != 0: + return None + except: + return None env = {} stdout = stdout.replace("\r\n", "\n") @@ -395,7 +409,9 @@ so_ext = 'dll' def __init__(self, cc=None): - Platform.__init__(self, 'gcc') + if not cc: + cc = 'gcc' + Platform.__init__(self, cc) def _args_for_shared(self, args): return ['-shared'] + args From noreply at buildbot.pypy.org Mon Mar 26 18:59:11 2012 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 26 Mar 2012 18:59:11 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-out: dot does not accept out on new numpy Message-ID: <20120326165911.1DB9D820D9@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: numpypy-out Changeset: r53998:473b4e25f4c1 Date: 2012-03-26 18:58 +0200 http://bitbucket.org/pypy/pypy/changeset/473b4e25f4c1/ Log: dot does not accept out on new numpy diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -167,7 +167,8 @@ else: out = w_out return getattr(interp_ufuncs.get(space), ufunc_name).reduce(space, - self, True, promote_to_largest, axis, False, out) + self, True, promote_to_largest, axis, + False, out) return func_with_new_name(impl, "reduce_%s_impl" % ufunc_name) descr_sum = _reduce_ufunc_impl("add") @@ -219,15 +220,15 @@ descr_argmax = _reduce_argmax_argmin_impl("max") descr_argmin = 
_reduce_argmax_argmin_impl("min") - def descr_dot(self, space, w_other, w_out=None): + def descr_dot(self, space, w_other): other = convert_to_array(space, w_other) if isinstance(other, Scalar): #Note: w_out is not modified, this is numpy compliant. return self.descr_mul(space, other) elif len(self.shape) < 2 and len(other.shape) < 2: - w_res = self.descr_mul(space, other, w_out) + w_res = self.descr_mul(space, other) assert isinstance(w_res, BaseArray) - return w_res.descr_sum(space, space.wrap(-1), w_out) + return w_res.descr_sum(space, space.wrap(-1)) dtype = interp_ufuncs.find_binop_result_dtype(space, self.find_dtype(), other.find_dtype()) if self.size < 1 and other.size < 1: From noreply at buildbot.pypy.org Mon Mar 26 19:04:39 2012 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 26 Mar 2012 19:04:39 +0200 (CEST) Subject: [pypy-commit] pypy default: merge numpypy-out. adds out=xxx parameter to most things. still missing some out= parameters on stuff imported from numpy Message-ID: <20120326170439.07D11820D9@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r53999:78e10bad6694 Date: 2012-03-26 19:04 +0200 http://bitbucket.org/pypy/pypy/changeset/78e10bad6694/ Log: merge numpypy-out. adds out=xxx parameter to most things. still missing some out= parameters on stuff imported from numpy diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -16,7 +16,7 @@ a[i][i] = 1 return a -def sum(a,axis=None): +def sum(a,axis=None, out=None): '''sum(a, axis=None) Sum of array elements over a given axis. @@ -43,17 +43,17 @@ # TODO: add to doc (once it's implemented): cumsum : Cumulative sum of array elements. 
if not hasattr(a, "sum"): a = _numpypy.array(a) - return a.sum(axis) + return a.sum(axis=axis, out=out) -def min(a, axis=None): +def min(a, axis=None, out=None): if not hasattr(a, "min"): a = _numpypy.array(a) - return a.min(axis) + return a.min(axis=axis, out=out) -def max(a, axis=None): +def max(a, axis=None, out=None): if not hasattr(a, "max"): a = _numpypy.array(a) - return a.max(axis) + return a.max(axis=axis, out=out) def arange(start, stop=None, step=1, dtype=None): '''arange([start], stop[, step], dtype=None) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -62,21 +62,24 @@ return space.wrap(dtype.itemtype.bool(self)) def _binop_impl(ufunc_name): - def impl(self, space, w_other): + def impl(self, space, w_other, w_out=None): from pypy.module.micronumpy import interp_ufuncs - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self, w_other]) + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [self, w_other, w_out]) return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) def _binop_right_impl(ufunc_name): - def impl(self, space, w_other): + def impl(self, space, w_other, w_out=None): from pypy.module.micronumpy import interp_ufuncs - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self]) + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [w_other, self, w_out]) return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name) def _unaryop_impl(ufunc_name): - def impl(self, space): + def impl(self, space, w_out=None): from pypy.module.micronumpy import interp_ufuncs - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self]) + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [self, w_out]) return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) descr_add = _binop_impl("add") diff --git 
a/pypy/module/micronumpy/interp_iter.py b/pypy/module/micronumpy/interp_iter.py --- a/pypy/module/micronumpy/interp_iter.py +++ b/pypy/module/micronumpy/interp_iter.py @@ -269,7 +269,7 @@ def apply_transformations(self, arr, transformations): v = BaseIterator.apply_transformations(self, arr, transformations) - if len(arr.shape) == 1: + if len(arr.shape) == 1 and len(v.res_shape) == 1: return OneDimIterator(self.offset, self.strides[0], self.res_shape[0]) return v diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -83,8 +83,9 @@ return space.wrap(W_NDimArray(shape[:], dtype=dtype)) def _unaryop_impl(ufunc_name): - def impl(self, space): - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self]) + def impl(self, space, w_out=None): + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [self, w_out]) return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) descr_pos = _unaryop_impl("positive") @@ -93,8 +94,9 @@ descr_invert = _unaryop_impl("invert") def _binop_impl(ufunc_name): - def impl(self, space, w_other): - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self, w_other]) + def impl(self, space, w_other, w_out=None): + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [self, w_other, w_out]) return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) descr_add = _binop_impl("add") @@ -124,12 +126,12 @@ return space.newtuple([w_quotient, w_remainder]) def _binop_right_impl(ufunc_name): - def impl(self, space, w_other): + def impl(self, space, w_other, w_out=None): w_other = scalar_w(space, interp_ufuncs.find_dtype_for_scalar(space, w_other, self.find_dtype()), w_other ) - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self]) + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self, w_out]) 
return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name) descr_radd = _binop_right_impl("add") @@ -152,13 +154,21 @@ return space.newtuple([w_quotient, w_remainder]) def _reduce_ufunc_impl(ufunc_name, promote_to_largest=False): - def impl(self, space, w_axis=None): + def impl(self, space, w_axis=None, w_out=None): if space.is_w(w_axis, space.w_None): axis = -1 else: axis = space.int_w(w_axis) + if space.is_w(w_out, space.w_None) or not w_out: + out = None + elif not isinstance(w_out, BaseArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) + else: + out = w_out return getattr(interp_ufuncs.get(space), ufunc_name).reduce(space, - self, True, promote_to_largest, axis) + self, True, promote_to_largest, axis, + False, out) return func_with_new_name(impl, "reduce_%s_impl" % ufunc_name) descr_sum = _reduce_ufunc_impl("add") @@ -213,6 +223,7 @@ def descr_dot(self, space, w_other): other = convert_to_array(space, w_other) if isinstance(other, Scalar): + #Note: w_out is not modified, this is numpy compliant. 
return self.descr_mul(space, other) elif len(self.shape) < 2 and len(other.shape) < 2: w_res = self.descr_mul(space, other) @@ -514,14 +525,14 @@ ) return w_result - def descr_mean(self, space, w_axis=None): + def descr_mean(self, space, w_axis=None, w_out=None): if space.is_w(w_axis, space.w_None): w_axis = space.wrap(-1) w_denom = space.wrap(support.product(self.shape)) else: dim = space.int_w(w_axis) w_denom = space.wrap(self.shape[dim]) - return space.div(self.descr_sum_promote(space, w_axis), w_denom) + return space.div(self.descr_sum_promote(space, w_axis, w_out), w_denom) def descr_var(self, space, w_axis=None): return get_appbridge_cache(space).call_method(space, '_var', self, @@ -714,11 +725,12 @@ """ Class for representing virtual arrays, such as binary ops or ufuncs """ - def __init__(self, name, shape, res_dtype): + def __init__(self, name, shape, res_dtype, out_arg=None): BaseArray.__init__(self, shape) self.forced_result = None self.res_dtype = res_dtype self.name = name + self.res = out_arg self.size = support.product(self.shape) * res_dtype.get_size() def _del_sources(self): @@ -727,13 +739,18 @@ raise NotImplementedError def compute(self): - ra = ResultArray(self, self.shape, self.res_dtype) + ra = ResultArray(self, self.shape, self.res_dtype, self.res) loop.compute(ra) + if self.res: + broadcast_dims = len(self.res.shape) - len(self.shape) + chunks = [Chunk(0,0,0,0)] * broadcast_dims + \ + [Chunk(0, i, 1, i) for i in self.shape] + return Chunks(chunks).apply(self.res) return ra.left def force_if_needed(self): if self.forced_result is None: - self.forced_result = self.compute() + self.forced_result = self.compute().get_concrete() self._del_sources() def get_concrete(self): @@ -773,8 +790,9 @@ class Call1(VirtualArray): - def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, values): - VirtualArray.__init__(self, name, shape, res_dtype) + def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, values, + out_arg=None): + 
VirtualArray.__init__(self, name, shape, res_dtype, out_arg) self.values = values self.size = values.size self.ufunc = ufunc @@ -786,6 +804,12 @@ def create_sig(self): if self.forced_result is not None: return self.forced_result.create_sig() + if self.shape != self.values.shape: + #This happens if out arg is used + return signature.BroadcastUfunc(self.ufunc, self.name, + self.calc_dtype, + self.values.create_sig(), + self.res.create_sig()) return signature.Call1(self.ufunc, self.name, self.calc_dtype, self.values.create_sig()) @@ -793,8 +817,9 @@ """ Intermediate class for performing binary operations. """ - def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, left, right): - VirtualArray.__init__(self, name, shape, res_dtype) + def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, left, right, + out_arg=None): + VirtualArray.__init__(self, name, shape, res_dtype, out_arg) self.ufunc = ufunc self.left = left self.right = right @@ -832,8 +857,13 @@ Call2.__init__(self, None, 'assign', shape, dtype, dtype, res, child) def create_sig(self): - return signature.ResultSignature(self.res_dtype, self.left.create_sig(), - self.right.create_sig()) + if self.left.shape != self.right.shape: + sig = signature.BroadcastResultSignature(self.res_dtype, + self.left.create_sig(), self.right.create_sig()) + else: + sig = signature.ResultSignature(self.res_dtype, + self.left.create_sig(), self.right.create_sig()) + return sig class ToStringArray(Call1): def __init__(self, child): @@ -842,9 +872,9 @@ self.s = StringBuilder(child.size * self.item_size) Call1.__init__(self, None, 'tostring', child.shape, dtype, dtype, child) - self.res = W_NDimArray([1], dtype, 'C') - self.res_casted = rffi.cast(rffi.CArrayPtr(lltype.Char), - self.res.storage) + self.res_str = W_NDimArray([1], dtype, order='C') + self.res_str_casted = rffi.cast(rffi.CArrayPtr(lltype.Char), + self.res_str.storage) def create_sig(self): return signature.ToStringSignature(self.calc_dtype, @@ -950,7 +980,7 
@@ def setitem(self, item, value): self.invalidated() - self.dtype.setitem(self, item, value) + self.dtype.setitem(self, item, value.convert_to(self.dtype)) def calc_strides(self, shape): dtype = self.find_dtype() diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -28,26 +28,38 @@ return self.identity def descr_call(self, space, __args__): + from interp_numarray import BaseArray args_w, kwds_w = __args__.unpack() # it occurs to me that we don't support any datatypes that # require casting, change it later when we do kwds_w.pop('casting', None) w_subok = kwds_w.pop('subok', None) w_out = kwds_w.pop('out', space.w_None) - if ((w_subok is not None and space.is_true(w_subok)) or - not space.is_w(w_out, space.w_None)): + # Setup a default value for out + if space.is_w(w_out, space.w_None): + out = None + else: + out = w_out + if (w_subok is not None and space.is_true(w_subok)): raise OperationError(space.w_NotImplementedError, space.wrap("parameters unsupported")) if kwds_w or len(args_w) < self.argcount: raise OperationError(space.w_ValueError, space.wrap("invalid number of arguments") ) - elif len(args_w) > self.argcount: - # The extra arguments should actually be the output array, but we - # don't support that yet. 
+ elif (len(args_w) > self.argcount and out is not None) or \ + (len(args_w) > self.argcount + 1): raise OperationError(space.w_TypeError, space.wrap("invalid number of arguments") ) + # Override the default out value, if it has been provided in w_wargs + if len(args_w) > self.argcount: + out = args_w[-1] + else: + args_w = args_w[:] + [out] + if out is not None and not isinstance(out, BaseArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) return self.call(space, args_w) @unwrap_spec(skipna=bool, keepdims=bool) @@ -105,28 +117,33 @@ array([[ 1, 5], [ 9, 13]]) """ - if not space.is_w(w_out, space.w_None): - raise OperationError(space.w_NotImplementedError, space.wrap( - "out not supported")) + from pypy.module.micronumpy.interp_numarray import BaseArray if w_axis is None: axis = 0 elif space.is_w(w_axis, space.w_None): axis = -1 else: axis = space.int_w(w_axis) - return self.reduce(space, w_obj, False, False, axis, keepdims) + if space.is_w(w_out, space.w_None): + out = None + elif not isinstance(w_out, BaseArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) + else: + out = w_out + return self.reduce(space, w_obj, False, False, axis, keepdims, out) - def reduce(self, space, w_obj, multidim, promote_to_largest, dim, - keepdims=False): + def reduce(self, space, w_obj, multidim, promote_to_largest, axis, + keepdims=False, out=None): from pypy.module.micronumpy.interp_numarray import convert_to_array, \ - Scalar, ReduceArray + Scalar, ReduceArray, W_NDimArray if self.argcount != 2: raise OperationError(space.w_ValueError, space.wrap("reduce only " "supported for binary functions")) assert isinstance(self, W_Ufunc2) obj = convert_to_array(space, w_obj) - if dim >= len(obj.shape): - raise OperationError(space.w_ValueError, space.wrap("axis(=%d) out of bounds" % dim)) + if axis >= len(obj.shape): + raise OperationError(space.w_ValueError, space.wrap("axis(=%d) out of bounds" % axis)) if 
isinstance(obj, Scalar): raise OperationError(space.w_TypeError, space.wrap("cannot reduce " "on a scalar")) @@ -144,21 +161,55 @@ if self.identity is None and size == 0: raise operationerrfmt(space.w_ValueError, "zero-size array to " "%s.reduce without identity", self.name) - if shapelen > 1 and dim >= 0: - return self.do_axis_reduce(obj, dtype, dim, keepdims) - arr = ReduceArray(self.func, self.name, self.identity, obj, dtype) - return loop.compute(arr) + if shapelen > 1 and axis >= 0: + if keepdims: + shape = obj.shape[:axis] + [1] + obj.shape[axis + 1:] + else: + shape = obj.shape[:axis] + obj.shape[axis + 1:] + if out: + #Test for shape agreement + if len(out.shape) > len(shape): + raise operationerrfmt(space.w_ValueError, + 'output parameter for reduction operation %s' + + ' has too many dimensions', self.name) + elif len(out.shape) < len(shape): + raise operationerrfmt(space.w_ValueError, + 'output parameter for reduction operation %s' + + ' does not have enough dimensions', self.name) + elif out.shape != shape: + raise operationerrfmt(space.w_ValueError, + 'output parameter shape mismatch, expecting [%s]' + + ' , got [%s]', + ",".join([str(x) for x in shape]), + ",".join([str(x) for x in out.shape]), + ) + #Test for dtype agreement, perhaps create an itermediate + #if out.dtype != dtype: + # raise OperationError(space.w_TypeError, space.wrap( + # "mismatched dtypes")) + return self.do_axis_reduce(obj, out.find_dtype(), axis, out) + else: + result = W_NDimArray(shape, dtype) + return self.do_axis_reduce(obj, dtype, axis, result) + if out: + if len(out.shape)>0: + raise operationerrfmt(space.w_ValueError, "output parameter " + "for reduction operation %s has too many" + " dimensions",self.name) + arr = ReduceArray(self.func, self.name, self.identity, obj, + out.find_dtype()) + val = loop.compute(arr) + assert isinstance(out, Scalar) + out.value = val + else: + arr = ReduceArray(self.func, self.name, self.identity, obj, dtype) + val = loop.compute(arr) + 
return val - def do_axis_reduce(self, obj, dtype, dim, keepdims): - from pypy.module.micronumpy.interp_numarray import AxisReduce,\ - W_NDimArray - if keepdims: - shape = obj.shape[:dim] + [1] + obj.shape[dim + 1:] - else: - shape = obj.shape[:dim] + obj.shape[dim + 1:] - result = W_NDimArray(shape, dtype) + def do_axis_reduce(self, obj, dtype, axis, result): + from pypy.module.micronumpy.interp_numarray import AxisReduce arr = AxisReduce(self.func, self.name, self.identity, obj.shape, dtype, - result, obj, dim) + result, obj, axis) loop.compute(arr) return arr.left @@ -176,24 +227,55 @@ self.bool_result = bool_result def call(self, space, args_w): - from pypy.module.micronumpy.interp_numarray import (Call1, - convert_to_array, Scalar) - - [w_obj] = args_w + from pypy.module.micronumpy.interp_numarray import (Call1, BaseArray, + convert_to_array, Scalar, shape_agreement) + if len(args_w)<2: + [w_obj] = args_w + out = None + else: + [w_obj, out] = args_w + if space.is_w(out, space.w_None): + out = None w_obj = convert_to_array(space, w_obj) calc_dtype = find_unaryop_result_dtype(space, w_obj.find_dtype(), promote_to_float=self.promote_to_float, promote_bools=self.promote_bools) - if self.bool_result: + if out: + if not isinstance(out, BaseArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) + res_dtype = out.find_dtype() + elif self.bool_result: res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype else: res_dtype = calc_dtype if isinstance(w_obj, Scalar): - return space.wrap(self.func(calc_dtype, w_obj.value.convert_to(calc_dtype))) - - w_res = Call1(self.func, self.name, w_obj.shape, calc_dtype, res_dtype, - w_obj) + arr = self.func(calc_dtype, w_obj.value.convert_to(calc_dtype)) + if isinstance(out,Scalar): + out.value=arr + elif isinstance(out, BaseArray): + out.fill(space, arr) + else: + out = arr + return space.wrap(out) + if out: + assert isinstance(out, BaseArray) # For translation + broadcast_shape = 
shape_agreement(space, w_obj.shape, out.shape) + if not broadcast_shape or broadcast_shape != out.shape: + raise operationerrfmt(space.w_ValueError, + 'output parameter shape mismatch, could not broadcast [%s]' + + ' to [%s]', + ",".join([str(x) for x in w_obj.shape]), + ",".join([str(x) for x in out.shape]), + ) + w_res = Call1(self.func, self.name, out.shape, calc_dtype, + res_dtype, w_obj, out) + #Force it immediately + w_res.get_concrete() + else: + w_res = Call1(self.func, self.name, w_obj.shape, calc_dtype, + res_dtype, w_obj) w_obj.add_invalidates(w_res) return w_res @@ -212,32 +294,61 @@ def call(self, space, args_w): from pypy.module.micronumpy.interp_numarray import (Call2, - convert_to_array, Scalar, shape_agreement) - - [w_lhs, w_rhs] = args_w + convert_to_array, Scalar, shape_agreement, BaseArray) + if len(args_w)>2: + [w_lhs, w_rhs, w_out] = args_w + else: + [w_lhs, w_rhs] = args_w + w_out = None w_lhs = convert_to_array(space, w_lhs) w_rhs = convert_to_array(space, w_rhs) - calc_dtype = find_binop_result_dtype(space, - w_lhs.find_dtype(), w_rhs.find_dtype(), - int_only=self.int_only, - promote_to_float=self.promote_to_float, - promote_bools=self.promote_bools, - ) + if space.is_w(w_out, space.w_None) or w_out is None: + out = None + calc_dtype = find_binop_result_dtype(space, + w_lhs.find_dtype(), w_rhs.find_dtype(), + int_only=self.int_only, + promote_to_float=self.promote_to_float, + promote_bools=self.promote_bools, + ) + elif not isinstance(w_out, BaseArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) + else: + out = w_out + calc_dtype = out.find_dtype() if self.comparison_func: res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype else: res_dtype = calc_dtype if isinstance(w_lhs, Scalar) and isinstance(w_rhs, Scalar): - return space.wrap(self.func(calc_dtype, + arr = self.func(calc_dtype, w_lhs.value.convert_to(calc_dtype), w_rhs.value.convert_to(calc_dtype) - )) + ) + if isinstance(out,Scalar): + 
out.value=arr + elif isinstance(out, BaseArray): + out.fill(space, arr) + else: + out = arr + return space.wrap(out) new_shape = shape_agreement(space, w_lhs.shape, w_rhs.shape) + # Test correctness of out.shape + if out and out.shape != shape_agreement(space, new_shape, out.shape): + raise operationerrfmt(space.w_ValueError, + 'output parameter shape mismatch, could not broadcast [%s]' + + ' to [%s]', + ",".join([str(x) for x in new_shape]), + ",".join([str(x) for x in out.shape]), + ) w_res = Call2(self.func, self.name, new_shape, calc_dtype, - res_dtype, w_lhs, w_rhs) + res_dtype, w_lhs, w_rhs, out) w_lhs.add_invalidates(w_res) w_rhs.add_invalidates(w_res) + if out: + #out.add_invalidates(w_res) #causes a recursion loop + w_res.get_concrete() return w_res diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -216,13 +216,14 @@ return self.child.eval(frame, arr.child) class Call1(Signature): - _immutable_fields_ = ['unfunc', 'name', 'child', 'dtype'] + _immutable_fields_ = ['unfunc', 'name', 'child', 'res', 'dtype'] - def __init__(self, func, name, dtype, child): + def __init__(self, func, name, dtype, child, res=None): self.unfunc = func self.child = child self.name = name self.dtype = dtype + self.res = res def hash(self): return compute_hash(self.name) ^ intmask(self.child.hash() << 1) @@ -256,6 +257,29 @@ v = self.child.eval(frame, arr.values).convert_to(arr.calc_dtype) return self.unfunc(arr.calc_dtype, v) + +class BroadcastUfunc(Call1): + def _invent_numbering(self, cache, allnumbers): + self.res._invent_numbering(cache, allnumbers) + self.child._invent_numbering(new_cache(), allnumbers) + + def debug_repr(self): + return 'BroadcastUfunc(%s, %s)' % (self.name, self.child.debug_repr()) + + def _create_iter(self, iterlist, arraylist, arr, transforms): + from pypy.module.micronumpy.interp_numarray import Call1 + + assert isinstance(arr, 
Call1) + vtransforms = transforms + [BroadcastTransform(arr.values.shape)] + self.child._create_iter(iterlist, arraylist, arr.values, vtransforms) + self.res._create_iter(iterlist, arraylist, arr.res, transforms) + + def eval(self, frame, arr): + from pypy.module.micronumpy.interp_numarray import Call1 + assert isinstance(arr, Call1) + v = self.child.eval(frame, arr.values).convert_to(arr.calc_dtype) + return self.unfunc(arr.calc_dtype, v) + class Call2(Signature): _immutable_fields_ = ['binfunc', 'name', 'calc_dtype', 'left', 'right'] @@ -316,7 +340,17 @@ assert isinstance(arr, ResultArray) offset = frame.get_final_iter().offset - arr.left.setitem(offset, self.right.eval(frame, arr.right)) + val = self.right.eval(frame, arr.right) + arr.left.setitem(offset, val) + +class BroadcastResultSignature(ResultSignature): + def _create_iter(self, iterlist, arraylist, arr, transforms): + from pypy.module.micronumpy.interp_numarray import ResultArray + + assert isinstance(arr, ResultArray) + rtransforms = transforms + [BroadcastTransform(arr.left.shape)] + self.left._create_iter(iterlist, arraylist, arr.left, transforms) + self.right._create_iter(iterlist, arraylist, arr.right, rtransforms) class ToStringSignature(Call1): def __init__(self, dtype, child): @@ -327,10 +361,10 @@ from pypy.module.micronumpy.interp_numarray import ToStringArray assert isinstance(arr, ToStringArray) - arr.res.setitem(0, self.child.eval(frame, arr.values).convert_to( + arr.res_str.setitem(0, self.child.eval(frame, arr.values).convert_to( self.dtype)) for i in range(arr.item_size): - arr.s.append(arr.res_casted[i]) + arr.s.append(arr.res_str_casted[i]) class BroadcastLeft(Call2): def _invent_numbering(self, cache, allnumbers): @@ -455,6 +489,5 @@ cur = arr.left.getitem(iterator.offset) value = self.binfunc(self.calc_dtype, cur, v) arr.left.setitem(iterator.offset, value) - def debug_repr(self): return 'AxisReduceSig(%s, %s)' % (self.name, self.right.debug_repr()) diff --git 
a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -995,6 +995,10 @@ assert a.sum() == 5 raises(TypeError, 'a.sum(2, 3)') + d = array(0.) + b = a.sum(out=d) + assert b == d + assert isinstance(b, float) def test_reduce_nd(self): from numpypy import arange, array, multiply @@ -1495,8 +1499,6 @@ a = array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14]]) b = a[::2] - print a - print b assert (b == [[1, 2], [5, 6], [9, 10], [13, 14]]).all() c = b + b assert c[1][1] == 12 diff --git a/pypy/module/micronumpy/test/test_outarg.py b/pypy/module/micronumpy/test/test_outarg.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/test/test_outarg.py @@ -0,0 +1,126 @@ +import py +from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest + +class AppTestOutArg(BaseNumpyAppTest): + def test_reduce_out(self): + from numpypy import arange, zeros, array + a = arange(15).reshape(5, 3) + b = arange(12).reshape(4,3) + c = a.sum(0, out=b[1]) + assert (c == [30, 35, 40]).all() + assert (c == b[1]).all() + raises(ValueError, 'a.prod(0, out=arange(10))') + a=arange(12).reshape(3,2,2) + raises(ValueError, 'a.sum(0, out=arange(12).reshape(3,2,2))') + raises(ValueError, 'a.sum(0, out=arange(3))') + c = array([-1, 0, 1]).sum(out=zeros([], dtype=bool)) + #You could argue that this should product False, but + # that would require an itermediate result. Cpython numpy + # gives True. 
+ assert c == True + a = array([[-1, 0, 1], [1, 0, -1]]) + c = a.sum(0, out=zeros((3,), dtype=bool)) + assert (c == [True, False, True]).all() + c = a.sum(1, out=zeros((2,), dtype=bool)) + assert (c == [True, True]).all() + + def test_reduce_intermediary(self): + from numpypy import arange, array + a = arange(15).reshape(5, 3) + b = array(range(3), dtype=bool) + c = a.prod(0, out=b) + assert(b == [False, True, True]).all() + + def test_ufunc_out(self): + from _numpypy import array, negative, zeros, sin + from math import sin as msin + a = array([[1, 2], [3, 4]]) + c = zeros((2,2,2)) + b = negative(a + a, out=c[1]) + #test for view, and also test that forcing out also forces b + assert (c[:, :, 1] == [[0, 0], [-4, -8]]).all() + assert (b == [[-2, -4], [-6, -8]]).all() + #Test broadcast, type promotion + b = negative(3, out=a) + assert (a == -3).all() + c = zeros((2, 2), dtype=float) + b = negative(3, out=c) + assert b.dtype.kind == c.dtype.kind + assert b.shape == c.shape + a = array([1, 2]) + b = sin(a, out=c) + assert(c == [[msin(1), msin(2)]] * 2).all() + b = sin(a, out=c+c) + assert (c == b).all() + + #Test shape agreement + a = zeros((3,4)) + b = zeros((3,5)) + raises(ValueError, 'negative(a, out=b)') + b = zeros((1,4)) + raises(ValueError, 'negative(a, out=b)') + + def test_binfunc_out(self): + from _numpypy import array, add + a = array([[1, 2], [3, 4]]) + out = array([[1, 2], [3, 4]]) + c = add(a, a, out=out) + assert (c == out).all() + assert c.shape == a.shape + assert c.dtype is a.dtype + c[0,0] = 100 + assert out[0, 0] == 100 + out[:] = 100 + raises(ValueError, 'c = add(a, a, out=out[1])') + c = add(a[0], a[1], out=out[1]) + assert (c == out[1]).all() + assert (c == [4, 6]).all() + assert (out[0] == 100).all() + c = add(a[0], a[1], out=out) + assert (c == out[1]).all() + assert (c == out[0]).all() + out = array(16, dtype=int) + b = add(10, 10, out=out) + assert b==out + assert b.dtype == out.dtype + + def test_applevel(self): + from _numpypy import 
array, sum, max, min + a = array([[1, 2], [3, 4]]) + out = array([[0, 0], [0, 0]]) + c = sum(a, axis=0, out=out[0]) + assert (c == [4, 6]).all() + assert (c == out[0]).all() + assert (c != out[1]).all() + c = max(a, axis=1, out=out[0]) + assert (c == [2, 4]).all() + assert (c == out[0]).all() + assert (c != out[1]).all() + + def test_ufunc_cast(self): + from _numpypy import array, negative, add, sum + a = array(16, dtype = int) + c = array(0, dtype = float) + b = negative(a, out=c) + assert b == c + b = add(a, a, out=c) + assert b == c + d = array([16, 16], dtype=int) + b = sum(d, out=c) + assert b == c + try: + from _numpypy import version + v = version.version.split('.') + except: + v = ['1', '6', '0'] # numpypy is api compatable to what version? + if v[0]<'2': + b = negative(c, out=a) + assert b == a + b = add(c, c, out=a) + assert b == a + b = sum(array([16, 16], dtype=float), out=a) + assert b == a + else: + cast_error = raises(TypeError, negative, c, a) + assert str(cast_error.value) == \ + "Cannot cast ufunc negative output from dtype('float64') to dtype('int64') with casting rule 'same_kind'" diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -131,7 +131,7 @@ # bogus. We need to improve the situation somehow. self.check_simple_loop({'getinteriorfield_raw': 2, 'setinteriorfield_raw': 1, - 'arraylen_gc': 1, + 'arraylen_gc': 2, 'guard_true': 1, 'int_lt': 1, 'jump': 1, From noreply at buildbot.pypy.org Mon Mar 26 19:40:28 2012 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 26 Mar 2012 19:40:28 +0200 (CEST) Subject: [pypy-commit] pypy set-strategies: Test and fix. Message-ID: <20120326174028.7EBF1820D9@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: set-strategies Changeset: r54000:605b7d796555 Date: 2012-03-26 19:35 +0200 http://bitbucket.org/pypy/pypy/changeset/605b7d796555/ Log: Test and fix. 
diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -1243,9 +1243,10 @@ try: length = space.int_w(space.len(w_other)) except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - continue + if (e.match(space, space.w_TypeError) or + e.match(space, space.w_AttributeError)): + continue + raise if startlength == -1 or length < startlength: startindex = i diff --git a/pypy/objspace/std/test/test_setobject.py b/pypy/objspace/std/test/test_setobject.py --- a/pypy/objspace/std/test/test_setobject.py +++ b/pypy/objspace/std/test/test_setobject.py @@ -900,3 +900,10 @@ raises(AttributeError, "frozenset().difference_update()") raises(AttributeError, "frozenset().symmetric_difference_update()") raises(AttributeError, "frozenset().intersection_update()") + + def test_intersection_obj(self): + class Obj: + def __getitem__(self, i): + return [5, 3, 4][i] + s = set([10,3,2]).intersection(Obj()) + assert list(s) == [3] From noreply at buildbot.pypy.org Mon Mar 26 19:40:29 2012 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 26 Mar 2012 19:40:29 +0200 (CEST) Subject: [pypy-commit] pypy default: Horrible typo in horrible code. Message-ID: <20120326174029.BC6B9820D9@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r54001:eab13da2556c Date: 2012-03-26 19:36 +0200 http://bitbucket.org/pypy/pypy/changeset/eab13da2556c/ Log: Horrible typo in horrible code. 
diff --git a/lib-python/modified-2.7/test/test_set.py b/lib-python/modified-2.7/test/test_set.py --- a/lib-python/modified-2.7/test/test_set.py +++ b/lib-python/modified-2.7/test/test_set.py @@ -1568,7 +1568,7 @@ for meth in (s.union, s.intersection, s.difference, s.symmetric_difference, s.isdisjoint): for g in (G, I, Ig, L, R): expected = meth(data) - actual = meth(G(data)) + actual = meth(g(data)) if isinstance(expected, bool): self.assertEqual(actual, expected) else: From noreply at buildbot.pypy.org Mon Mar 26 19:40:31 2012 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 26 Mar 2012 19:40:31 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20120326174031.196E9820D9@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r54002:9e2168564ef3 Date: 2012-03-26 19:39 +0200 http://bitbucket.org/pypy/pypy/changeset/9e2168564ef3/ Log: merge heads diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py --- a/pypy/interpreter/astcompiler/test/test_astbuilder.py +++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py @@ -10,16 +10,6 @@ from pypy.interpreter.astcompiler import ast, consts -try: - all -except NameError: - def all(iterable): - for x in iterable: - if not x: - return False - return True - - class TestAstBuilder: def setup_class(cls): diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -16,7 +16,7 @@ a[i][i] = 1 return a -def sum(a,axis=None): +def sum(a,axis=None, out=None): '''sum(a, axis=None) Sum of array elements over a given axis. @@ -43,17 +43,17 @@ # TODO: add to doc (once it's implemented): cumsum : Cumulative sum of array elements. 
if not hasattr(a, "sum"): a = _numpypy.array(a) - return a.sum(axis) + return a.sum(axis=axis, out=out) -def min(a, axis=None): +def min(a, axis=None, out=None): if not hasattr(a, "min"): a = _numpypy.array(a) - return a.min(axis) + return a.min(axis=axis, out=out) -def max(a, axis=None): +def max(a, axis=None, out=None): if not hasattr(a, "max"): a = _numpypy.array(a) - return a.max(axis) + return a.max(axis=axis, out=out) def arange(start, stop=None, step=1, dtype=None): '''arange([start], stop[, step], dtype=None) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -62,21 +62,24 @@ return space.wrap(dtype.itemtype.bool(self)) def _binop_impl(ufunc_name): - def impl(self, space, w_other): + def impl(self, space, w_other, w_out=None): from pypy.module.micronumpy import interp_ufuncs - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self, w_other]) + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [self, w_other, w_out]) return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) def _binop_right_impl(ufunc_name): - def impl(self, space, w_other): + def impl(self, space, w_other, w_out=None): from pypy.module.micronumpy import interp_ufuncs - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self]) + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [w_other, self, w_out]) return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name) def _unaryop_impl(ufunc_name): - def impl(self, space): + def impl(self, space, w_out=None): from pypy.module.micronumpy import interp_ufuncs - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self]) + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [self, w_out]) return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) descr_add = _binop_impl("add") diff --git 
a/pypy/module/micronumpy/interp_iter.py b/pypy/module/micronumpy/interp_iter.py --- a/pypy/module/micronumpy/interp_iter.py +++ b/pypy/module/micronumpy/interp_iter.py @@ -269,7 +269,7 @@ def apply_transformations(self, arr, transformations): v = BaseIterator.apply_transformations(self, arr, transformations) - if len(arr.shape) == 1: + if len(arr.shape) == 1 and len(v.res_shape) == 1: return OneDimIterator(self.offset, self.strides[0], self.res_shape[0]) return v diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -83,8 +83,9 @@ return space.wrap(W_NDimArray(shape[:], dtype=dtype)) def _unaryop_impl(ufunc_name): - def impl(self, space): - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self]) + def impl(self, space, w_out=None): + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [self, w_out]) return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) descr_pos = _unaryop_impl("positive") @@ -93,8 +94,9 @@ descr_invert = _unaryop_impl("invert") def _binop_impl(ufunc_name): - def impl(self, space, w_other): - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self, w_other]) + def impl(self, space, w_other, w_out=None): + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [self, w_other, w_out]) return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) descr_add = _binop_impl("add") @@ -124,12 +126,12 @@ return space.newtuple([w_quotient, w_remainder]) def _binop_right_impl(ufunc_name): - def impl(self, space, w_other): + def impl(self, space, w_other, w_out=None): w_other = scalar_w(space, interp_ufuncs.find_dtype_for_scalar(space, w_other, self.find_dtype()), w_other ) - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self]) + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self, w_out]) 
return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name) descr_radd = _binop_right_impl("add") @@ -152,13 +154,21 @@ return space.newtuple([w_quotient, w_remainder]) def _reduce_ufunc_impl(ufunc_name, promote_to_largest=False): - def impl(self, space, w_axis=None): + def impl(self, space, w_axis=None, w_out=None): if space.is_w(w_axis, space.w_None): axis = -1 else: axis = space.int_w(w_axis) + if space.is_w(w_out, space.w_None) or not w_out: + out = None + elif not isinstance(w_out, BaseArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) + else: + out = w_out return getattr(interp_ufuncs.get(space), ufunc_name).reduce(space, - self, True, promote_to_largest, axis) + self, True, promote_to_largest, axis, + False, out) return func_with_new_name(impl, "reduce_%s_impl" % ufunc_name) descr_sum = _reduce_ufunc_impl("add") @@ -213,6 +223,7 @@ def descr_dot(self, space, w_other): other = convert_to_array(space, w_other) if isinstance(other, Scalar): + #Note: w_out is not modified, this is numpy compliant. 
return self.descr_mul(space, other) elif len(self.shape) < 2 and len(other.shape) < 2: w_res = self.descr_mul(space, other) @@ -514,14 +525,14 @@ ) return w_result - def descr_mean(self, space, w_axis=None): + def descr_mean(self, space, w_axis=None, w_out=None): if space.is_w(w_axis, space.w_None): w_axis = space.wrap(-1) w_denom = space.wrap(support.product(self.shape)) else: dim = space.int_w(w_axis) w_denom = space.wrap(self.shape[dim]) - return space.div(self.descr_sum_promote(space, w_axis), w_denom) + return space.div(self.descr_sum_promote(space, w_axis, w_out), w_denom) def descr_var(self, space, w_axis=None): return get_appbridge_cache(space).call_method(space, '_var', self, @@ -714,11 +725,12 @@ """ Class for representing virtual arrays, such as binary ops or ufuncs """ - def __init__(self, name, shape, res_dtype): + def __init__(self, name, shape, res_dtype, out_arg=None): BaseArray.__init__(self, shape) self.forced_result = None self.res_dtype = res_dtype self.name = name + self.res = out_arg self.size = support.product(self.shape) * res_dtype.get_size() def _del_sources(self): @@ -727,13 +739,18 @@ raise NotImplementedError def compute(self): - ra = ResultArray(self, self.shape, self.res_dtype) + ra = ResultArray(self, self.shape, self.res_dtype, self.res) loop.compute(ra) + if self.res: + broadcast_dims = len(self.res.shape) - len(self.shape) + chunks = [Chunk(0,0,0,0)] * broadcast_dims + \ + [Chunk(0, i, 1, i) for i in self.shape] + return Chunks(chunks).apply(self.res) return ra.left def force_if_needed(self): if self.forced_result is None: - self.forced_result = self.compute() + self.forced_result = self.compute().get_concrete() self._del_sources() def get_concrete(self): @@ -773,8 +790,9 @@ class Call1(VirtualArray): - def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, values): - VirtualArray.__init__(self, name, shape, res_dtype) + def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, values, + out_arg=None): + 
VirtualArray.__init__(self, name, shape, res_dtype, out_arg) self.values = values self.size = values.size self.ufunc = ufunc @@ -786,6 +804,12 @@ def create_sig(self): if self.forced_result is not None: return self.forced_result.create_sig() + if self.shape != self.values.shape: + #This happens if out arg is used + return signature.BroadcastUfunc(self.ufunc, self.name, + self.calc_dtype, + self.values.create_sig(), + self.res.create_sig()) return signature.Call1(self.ufunc, self.name, self.calc_dtype, self.values.create_sig()) @@ -793,8 +817,9 @@ """ Intermediate class for performing binary operations. """ - def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, left, right): - VirtualArray.__init__(self, name, shape, res_dtype) + def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, left, right, + out_arg=None): + VirtualArray.__init__(self, name, shape, res_dtype, out_arg) self.ufunc = ufunc self.left = left self.right = right @@ -832,8 +857,13 @@ Call2.__init__(self, None, 'assign', shape, dtype, dtype, res, child) def create_sig(self): - return signature.ResultSignature(self.res_dtype, self.left.create_sig(), - self.right.create_sig()) + if self.left.shape != self.right.shape: + sig = signature.BroadcastResultSignature(self.res_dtype, + self.left.create_sig(), self.right.create_sig()) + else: + sig = signature.ResultSignature(self.res_dtype, + self.left.create_sig(), self.right.create_sig()) + return sig class ToStringArray(Call1): def __init__(self, child): @@ -842,9 +872,9 @@ self.s = StringBuilder(child.size * self.item_size) Call1.__init__(self, None, 'tostring', child.shape, dtype, dtype, child) - self.res = W_NDimArray([1], dtype, 'C') - self.res_casted = rffi.cast(rffi.CArrayPtr(lltype.Char), - self.res.storage) + self.res_str = W_NDimArray([1], dtype, order='C') + self.res_str_casted = rffi.cast(rffi.CArrayPtr(lltype.Char), + self.res_str.storage) def create_sig(self): return signature.ToStringSignature(self.calc_dtype, @@ -950,7 +980,7 
@@ def setitem(self, item, value): self.invalidated() - self.dtype.setitem(self, item, value) + self.dtype.setitem(self, item, value.convert_to(self.dtype)) def calc_strides(self, shape): dtype = self.find_dtype() diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -28,26 +28,38 @@ return self.identity def descr_call(self, space, __args__): + from interp_numarray import BaseArray args_w, kwds_w = __args__.unpack() # it occurs to me that we don't support any datatypes that # require casting, change it later when we do kwds_w.pop('casting', None) w_subok = kwds_w.pop('subok', None) w_out = kwds_w.pop('out', space.w_None) - if ((w_subok is not None and space.is_true(w_subok)) or - not space.is_w(w_out, space.w_None)): + # Setup a default value for out + if space.is_w(w_out, space.w_None): + out = None + else: + out = w_out + if (w_subok is not None and space.is_true(w_subok)): raise OperationError(space.w_NotImplementedError, space.wrap("parameters unsupported")) if kwds_w or len(args_w) < self.argcount: raise OperationError(space.w_ValueError, space.wrap("invalid number of arguments") ) - elif len(args_w) > self.argcount: - # The extra arguments should actually be the output array, but we - # don't support that yet. 
+ elif (len(args_w) > self.argcount and out is not None) or \ + (len(args_w) > self.argcount + 1): raise OperationError(space.w_TypeError, space.wrap("invalid number of arguments") ) + # Override the default out value, if it has been provided in w_wargs + if len(args_w) > self.argcount: + out = args_w[-1] + else: + args_w = args_w[:] + [out] + if out is not None and not isinstance(out, BaseArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) return self.call(space, args_w) @unwrap_spec(skipna=bool, keepdims=bool) @@ -105,28 +117,33 @@ array([[ 1, 5], [ 9, 13]]) """ - if not space.is_w(w_out, space.w_None): - raise OperationError(space.w_NotImplementedError, space.wrap( - "out not supported")) + from pypy.module.micronumpy.interp_numarray import BaseArray if w_axis is None: axis = 0 elif space.is_w(w_axis, space.w_None): axis = -1 else: axis = space.int_w(w_axis) - return self.reduce(space, w_obj, False, False, axis, keepdims) + if space.is_w(w_out, space.w_None): + out = None + elif not isinstance(w_out, BaseArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) + else: + out = w_out + return self.reduce(space, w_obj, False, False, axis, keepdims, out) - def reduce(self, space, w_obj, multidim, promote_to_largest, dim, - keepdims=False): + def reduce(self, space, w_obj, multidim, promote_to_largest, axis, + keepdims=False, out=None): from pypy.module.micronumpy.interp_numarray import convert_to_array, \ - Scalar, ReduceArray + Scalar, ReduceArray, W_NDimArray if self.argcount != 2: raise OperationError(space.w_ValueError, space.wrap("reduce only " "supported for binary functions")) assert isinstance(self, W_Ufunc2) obj = convert_to_array(space, w_obj) - if dim >= len(obj.shape): - raise OperationError(space.w_ValueError, space.wrap("axis(=%d) out of bounds" % dim)) + if axis >= len(obj.shape): + raise OperationError(space.w_ValueError, space.wrap("axis(=%d) out of bounds" % axis)) if 
isinstance(obj, Scalar): raise OperationError(space.w_TypeError, space.wrap("cannot reduce " "on a scalar")) @@ -144,21 +161,55 @@ if self.identity is None and size == 0: raise operationerrfmt(space.w_ValueError, "zero-size array to " "%s.reduce without identity", self.name) - if shapelen > 1 and dim >= 0: - return self.do_axis_reduce(obj, dtype, dim, keepdims) - arr = ReduceArray(self.func, self.name, self.identity, obj, dtype) - return loop.compute(arr) + if shapelen > 1 and axis >= 0: + if keepdims: + shape = obj.shape[:axis] + [1] + obj.shape[axis + 1:] + else: + shape = obj.shape[:axis] + obj.shape[axis + 1:] + if out: + #Test for shape agreement + if len(out.shape) > len(shape): + raise operationerrfmt(space.w_ValueError, + 'output parameter for reduction operation %s' + + ' has too many dimensions', self.name) + elif len(out.shape) < len(shape): + raise operationerrfmt(space.w_ValueError, + 'output parameter for reduction operation %s' + + ' does not have enough dimensions', self.name) + elif out.shape != shape: + raise operationerrfmt(space.w_ValueError, + 'output parameter shape mismatch, expecting [%s]' + + ' , got [%s]', + ",".join([str(x) for x in shape]), + ",".join([str(x) for x in out.shape]), + ) + #Test for dtype agreement, perhaps create an itermediate + #if out.dtype != dtype: + # raise OperationError(space.w_TypeError, space.wrap( + # "mismatched dtypes")) + return self.do_axis_reduce(obj, out.find_dtype(), axis, out) + else: + result = W_NDimArray(shape, dtype) + return self.do_axis_reduce(obj, dtype, axis, result) + if out: + if len(out.shape)>0: + raise operationerrfmt(space.w_ValueError, "output parameter " + "for reduction operation %s has too many" + " dimensions",self.name) + arr = ReduceArray(self.func, self.name, self.identity, obj, + out.find_dtype()) + val = loop.compute(arr) + assert isinstance(out, Scalar) + out.value = val + else: + arr = ReduceArray(self.func, self.name, self.identity, obj, dtype) + val = loop.compute(arr) + 
return val - def do_axis_reduce(self, obj, dtype, dim, keepdims): - from pypy.module.micronumpy.interp_numarray import AxisReduce,\ - W_NDimArray - if keepdims: - shape = obj.shape[:dim] + [1] + obj.shape[dim + 1:] - else: - shape = obj.shape[:dim] + obj.shape[dim + 1:] - result = W_NDimArray(shape, dtype) + def do_axis_reduce(self, obj, dtype, axis, result): + from pypy.module.micronumpy.interp_numarray import AxisReduce arr = AxisReduce(self.func, self.name, self.identity, obj.shape, dtype, - result, obj, dim) + result, obj, axis) loop.compute(arr) return arr.left @@ -176,24 +227,55 @@ self.bool_result = bool_result def call(self, space, args_w): - from pypy.module.micronumpy.interp_numarray import (Call1, - convert_to_array, Scalar) - - [w_obj] = args_w + from pypy.module.micronumpy.interp_numarray import (Call1, BaseArray, + convert_to_array, Scalar, shape_agreement) + if len(args_w)<2: + [w_obj] = args_w + out = None + else: + [w_obj, out] = args_w + if space.is_w(out, space.w_None): + out = None w_obj = convert_to_array(space, w_obj) calc_dtype = find_unaryop_result_dtype(space, w_obj.find_dtype(), promote_to_float=self.promote_to_float, promote_bools=self.promote_bools) - if self.bool_result: + if out: + if not isinstance(out, BaseArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) + res_dtype = out.find_dtype() + elif self.bool_result: res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype else: res_dtype = calc_dtype if isinstance(w_obj, Scalar): - return space.wrap(self.func(calc_dtype, w_obj.value.convert_to(calc_dtype))) - - w_res = Call1(self.func, self.name, w_obj.shape, calc_dtype, res_dtype, - w_obj) + arr = self.func(calc_dtype, w_obj.value.convert_to(calc_dtype)) + if isinstance(out,Scalar): + out.value=arr + elif isinstance(out, BaseArray): + out.fill(space, arr) + else: + out = arr + return space.wrap(out) + if out: + assert isinstance(out, BaseArray) # For translation + broadcast_shape = 
shape_agreement(space, w_obj.shape, out.shape) + if not broadcast_shape or broadcast_shape != out.shape: + raise operationerrfmt(space.w_ValueError, + 'output parameter shape mismatch, could not broadcast [%s]' + + ' to [%s]', + ",".join([str(x) for x in w_obj.shape]), + ",".join([str(x) for x in out.shape]), + ) + w_res = Call1(self.func, self.name, out.shape, calc_dtype, + res_dtype, w_obj, out) + #Force it immediately + w_res.get_concrete() + else: + w_res = Call1(self.func, self.name, w_obj.shape, calc_dtype, + res_dtype, w_obj) w_obj.add_invalidates(w_res) return w_res @@ -212,32 +294,61 @@ def call(self, space, args_w): from pypy.module.micronumpy.interp_numarray import (Call2, - convert_to_array, Scalar, shape_agreement) - - [w_lhs, w_rhs] = args_w + convert_to_array, Scalar, shape_agreement, BaseArray) + if len(args_w)>2: + [w_lhs, w_rhs, w_out] = args_w + else: + [w_lhs, w_rhs] = args_w + w_out = None w_lhs = convert_to_array(space, w_lhs) w_rhs = convert_to_array(space, w_rhs) - calc_dtype = find_binop_result_dtype(space, - w_lhs.find_dtype(), w_rhs.find_dtype(), - int_only=self.int_only, - promote_to_float=self.promote_to_float, - promote_bools=self.promote_bools, - ) + if space.is_w(w_out, space.w_None) or w_out is None: + out = None + calc_dtype = find_binop_result_dtype(space, + w_lhs.find_dtype(), w_rhs.find_dtype(), + int_only=self.int_only, + promote_to_float=self.promote_to_float, + promote_bools=self.promote_bools, + ) + elif not isinstance(w_out, BaseArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) + else: + out = w_out + calc_dtype = out.find_dtype() if self.comparison_func: res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype else: res_dtype = calc_dtype if isinstance(w_lhs, Scalar) and isinstance(w_rhs, Scalar): - return space.wrap(self.func(calc_dtype, + arr = self.func(calc_dtype, w_lhs.value.convert_to(calc_dtype), w_rhs.value.convert_to(calc_dtype) - )) + ) + if isinstance(out,Scalar): + 
out.value=arr + elif isinstance(out, BaseArray): + out.fill(space, arr) + else: + out = arr + return space.wrap(out) new_shape = shape_agreement(space, w_lhs.shape, w_rhs.shape) + # Test correctness of out.shape + if out and out.shape != shape_agreement(space, new_shape, out.shape): + raise operationerrfmt(space.w_ValueError, + 'output parameter shape mismatch, could not broadcast [%s]' + + ' to [%s]', + ",".join([str(x) for x in new_shape]), + ",".join([str(x) for x in out.shape]), + ) w_res = Call2(self.func, self.name, new_shape, calc_dtype, - res_dtype, w_lhs, w_rhs) + res_dtype, w_lhs, w_rhs, out) w_lhs.add_invalidates(w_res) w_rhs.add_invalidates(w_res) + if out: + #out.add_invalidates(w_res) #causes a recursion loop + w_res.get_concrete() return w_res diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -216,13 +216,14 @@ return self.child.eval(frame, arr.child) class Call1(Signature): - _immutable_fields_ = ['unfunc', 'name', 'child', 'dtype'] + _immutable_fields_ = ['unfunc', 'name', 'child', 'res', 'dtype'] - def __init__(self, func, name, dtype, child): + def __init__(self, func, name, dtype, child, res=None): self.unfunc = func self.child = child self.name = name self.dtype = dtype + self.res = res def hash(self): return compute_hash(self.name) ^ intmask(self.child.hash() << 1) @@ -256,6 +257,29 @@ v = self.child.eval(frame, arr.values).convert_to(arr.calc_dtype) return self.unfunc(arr.calc_dtype, v) + +class BroadcastUfunc(Call1): + def _invent_numbering(self, cache, allnumbers): + self.res._invent_numbering(cache, allnumbers) + self.child._invent_numbering(new_cache(), allnumbers) + + def debug_repr(self): + return 'BroadcastUfunc(%s, %s)' % (self.name, self.child.debug_repr()) + + def _create_iter(self, iterlist, arraylist, arr, transforms): + from pypy.module.micronumpy.interp_numarray import Call1 + + assert isinstance(arr, 
Call1) + vtransforms = transforms + [BroadcastTransform(arr.values.shape)] + self.child._create_iter(iterlist, arraylist, arr.values, vtransforms) + self.res._create_iter(iterlist, arraylist, arr.res, transforms) + + def eval(self, frame, arr): + from pypy.module.micronumpy.interp_numarray import Call1 + assert isinstance(arr, Call1) + v = self.child.eval(frame, arr.values).convert_to(arr.calc_dtype) + return self.unfunc(arr.calc_dtype, v) + class Call2(Signature): _immutable_fields_ = ['binfunc', 'name', 'calc_dtype', 'left', 'right'] @@ -316,7 +340,17 @@ assert isinstance(arr, ResultArray) offset = frame.get_final_iter().offset - arr.left.setitem(offset, self.right.eval(frame, arr.right)) + val = self.right.eval(frame, arr.right) + arr.left.setitem(offset, val) + +class BroadcastResultSignature(ResultSignature): + def _create_iter(self, iterlist, arraylist, arr, transforms): + from pypy.module.micronumpy.interp_numarray import ResultArray + + assert isinstance(arr, ResultArray) + rtransforms = transforms + [BroadcastTransform(arr.left.shape)] + self.left._create_iter(iterlist, arraylist, arr.left, transforms) + self.right._create_iter(iterlist, arraylist, arr.right, rtransforms) class ToStringSignature(Call1): def __init__(self, dtype, child): @@ -327,10 +361,10 @@ from pypy.module.micronumpy.interp_numarray import ToStringArray assert isinstance(arr, ToStringArray) - arr.res.setitem(0, self.child.eval(frame, arr.values).convert_to( + arr.res_str.setitem(0, self.child.eval(frame, arr.values).convert_to( self.dtype)) for i in range(arr.item_size): - arr.s.append(arr.res_casted[i]) + arr.s.append(arr.res_str_casted[i]) class BroadcastLeft(Call2): def _invent_numbering(self, cache, allnumbers): @@ -455,6 +489,5 @@ cur = arr.left.getitem(iterator.offset) value = self.binfunc(self.calc_dtype, cur, v) arr.left.setitem(iterator.offset, value) - def debug_repr(self): return 'AxisReduceSig(%s, %s)' % (self.name, self.right.debug_repr()) diff --git 
a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -995,6 +995,10 @@ assert a.sum() == 5 raises(TypeError, 'a.sum(2, 3)') + d = array(0.) + b = a.sum(out=d) + assert b == d + assert isinstance(b, float) def test_reduce_nd(self): from numpypy import arange, array, multiply @@ -1495,8 +1499,6 @@ a = array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14]]) b = a[::2] - print a - print b assert (b == [[1, 2], [5, 6], [9, 10], [13, 14]]).all() c = b + b assert c[1][1] == 12 diff --git a/pypy/module/micronumpy/test/test_outarg.py b/pypy/module/micronumpy/test/test_outarg.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/test/test_outarg.py @@ -0,0 +1,126 @@ +import py +from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest + +class AppTestOutArg(BaseNumpyAppTest): + def test_reduce_out(self): + from numpypy import arange, zeros, array + a = arange(15).reshape(5, 3) + b = arange(12).reshape(4,3) + c = a.sum(0, out=b[1]) + assert (c == [30, 35, 40]).all() + assert (c == b[1]).all() + raises(ValueError, 'a.prod(0, out=arange(10))') + a=arange(12).reshape(3,2,2) + raises(ValueError, 'a.sum(0, out=arange(12).reshape(3,2,2))') + raises(ValueError, 'a.sum(0, out=arange(3))') + c = array([-1, 0, 1]).sum(out=zeros([], dtype=bool)) + #You could argue that this should product False, but + # that would require an itermediate result. Cpython numpy + # gives True. 
+ assert c == True + a = array([[-1, 0, 1], [1, 0, -1]]) + c = a.sum(0, out=zeros((3,), dtype=bool)) + assert (c == [True, False, True]).all() + c = a.sum(1, out=zeros((2,), dtype=bool)) + assert (c == [True, True]).all() + + def test_reduce_intermediary(self): + from numpypy import arange, array + a = arange(15).reshape(5, 3) + b = array(range(3), dtype=bool) + c = a.prod(0, out=b) + assert(b == [False, True, True]).all() + + def test_ufunc_out(self): + from _numpypy import array, negative, zeros, sin + from math import sin as msin + a = array([[1, 2], [3, 4]]) + c = zeros((2,2,2)) + b = negative(a + a, out=c[1]) + #test for view, and also test that forcing out also forces b + assert (c[:, :, 1] == [[0, 0], [-4, -8]]).all() + assert (b == [[-2, -4], [-6, -8]]).all() + #Test broadcast, type promotion + b = negative(3, out=a) + assert (a == -3).all() + c = zeros((2, 2), dtype=float) + b = negative(3, out=c) + assert b.dtype.kind == c.dtype.kind + assert b.shape == c.shape + a = array([1, 2]) + b = sin(a, out=c) + assert(c == [[msin(1), msin(2)]] * 2).all() + b = sin(a, out=c+c) + assert (c == b).all() + + #Test shape agreement + a = zeros((3,4)) + b = zeros((3,5)) + raises(ValueError, 'negative(a, out=b)') + b = zeros((1,4)) + raises(ValueError, 'negative(a, out=b)') + + def test_binfunc_out(self): + from _numpypy import array, add + a = array([[1, 2], [3, 4]]) + out = array([[1, 2], [3, 4]]) + c = add(a, a, out=out) + assert (c == out).all() + assert c.shape == a.shape + assert c.dtype is a.dtype + c[0,0] = 100 + assert out[0, 0] == 100 + out[:] = 100 + raises(ValueError, 'c = add(a, a, out=out[1])') + c = add(a[0], a[1], out=out[1]) + assert (c == out[1]).all() + assert (c == [4, 6]).all() + assert (out[0] == 100).all() + c = add(a[0], a[1], out=out) + assert (c == out[1]).all() + assert (c == out[0]).all() + out = array(16, dtype=int) + b = add(10, 10, out=out) + assert b==out + assert b.dtype == out.dtype + + def test_applevel(self): + from _numpypy import 
array, sum, max, min + a = array([[1, 2], [3, 4]]) + out = array([[0, 0], [0, 0]]) + c = sum(a, axis=0, out=out[0]) + assert (c == [4, 6]).all() + assert (c == out[0]).all() + assert (c != out[1]).all() + c = max(a, axis=1, out=out[0]) + assert (c == [2, 4]).all() + assert (c == out[0]).all() + assert (c != out[1]).all() + + def test_ufunc_cast(self): + from _numpypy import array, negative, add, sum + a = array(16, dtype = int) + c = array(0, dtype = float) + b = negative(a, out=c) + assert b == c + b = add(a, a, out=c) + assert b == c + d = array([16, 16], dtype=int) + b = sum(d, out=c) + assert b == c + try: + from _numpypy import version + v = version.version.split('.') + except: + v = ['1', '6', '0'] # numpypy is api compatable to what version? + if v[0]<'2': + b = negative(c, out=a) + assert b == a + b = add(c, c, out=a) + assert b == a + b = sum(array([16, 16], dtype=float), out=a) + assert b == a + else: + cast_error = raises(TypeError, negative, c, a) + assert str(cast_error.value) == \ + "Cannot cast ufunc negative output from dtype('float64') to dtype('int64') with casting rule 'same_kind'" diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -131,7 +131,7 @@ # bogus. We need to improve the situation somehow. 
self.check_simple_loop({'getinteriorfield_raw': 2, 'setinteriorfield_raw': 1, - 'arraylen_gc': 1, + 'arraylen_gc': 2, 'guard_true': 1, 'int_lt': 1, 'jump': 1, diff --git a/pypy/objspace/flow/test/test_objspace.py b/pypy/objspace/flow/test/test_objspace.py --- a/pypy/objspace/flow/test/test_objspace.py +++ b/pypy/objspace/flow/test/test_objspace.py @@ -1,6 +1,6 @@ from __future__ import with_statement import new -import py +import py, sys from pypy.objspace.flow.model import Constant, Block, Link, Variable from pypy.objspace.flow.model import mkentrymap, c_last_exception from pypy.interpreter.argument import Arguments @@ -893,6 +893,8 @@ """ Tests code generated by pypy-c compiled with BUILD_LIST_FROM_ARG bytecode """ + if sys.version_info < (2, 7): + py.test.skip("2.7 only test") self.patch_opcodes('BUILD_LIST_FROM_ARG') try: def f(): diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -58,33 +58,13 @@ binaries = [(pypy_c, rename_pypy_c)] # if sys.platform == 'win32': - #What runtime do we need? - msvc_runtime = 'msvcr80.dll' #default is studio 2005 vc8 - try: - import subprocess - out,err = subprocess.Popen([str(pypy_c), '-c', - 'import sys; print sys.version'], - stdout=subprocess.PIPE).communicate() - indx=out.find('MSC v.') + 6 - if indx> 10: - if out[indx:].startswith('1600'): - msvc_runtime = 'msvcr100.dll' #studio 2010 vc10 - elif out[indx:].startwith('1500'): - msvc_runtime = 'msvcr90.dll' #studio 2009 vc9 - elif out[indx:].startswith('1400'): - msvc_runtime = 'msvcr80.dll' #studio 2005 vc8 - else: - print 'Cannot determine runtime dll for pypy' \ - ' version "%s"'%out - else: - print 'Cannot determine runtime dll for pypy' \ - ' version "%s"'%out - except : - pass + #Don't include a mscvrXX.dll, users should get their own. + #Instructions are provided on the website. 
+ # Can't rename a DLL: it is always called 'libpypy-c.dll' for extra in ['libpypy-c.dll', - 'libexpat.dll', 'sqlite3.dll', msvc_runtime, + 'libexpat.dll', 'sqlite3.dll', 'libeay32.dll', 'ssleay32.dll']: p = pypy_c.dirpath().join(extra) if not p.check(): From noreply at buildbot.pypy.org Mon Mar 26 19:40:32 2012 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 26 Mar 2012 19:40:32 +0200 (CEST) Subject: [pypy-commit] pypy set-strategies: hg merge default Message-ID: <20120326174032.6D922820D9@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: set-strategies Changeset: r54003:83af076f86a8 Date: 2012-03-26 19:39 +0200 http://bitbucket.org/pypy/pypy/changeset/83af076f86a8/ Log: hg merge default diff --git a/lib-python/modified-2.7/test/test_set.py b/lib-python/modified-2.7/test/test_set.py --- a/lib-python/modified-2.7/test/test_set.py +++ b/lib-python/modified-2.7/test/test_set.py @@ -1568,7 +1568,7 @@ for meth in (s.union, s.intersection, s.difference, s.symmetric_difference, s.isdisjoint): for g in (G, I, Ig, L, R): expected = meth(data) - actual = meth(G(data)) + actual = meth(g(data)) if isinstance(expected, bool): self.assertEqual(actual, expected) else: diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py --- a/pypy/interpreter/astcompiler/test/test_astbuilder.py +++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py @@ -10,16 +10,6 @@ from pypy.interpreter.astcompiler import ast, consts -try: - all -except NameError: - def all(iterable): - for x in iterable: - if not x: - return False - return True - - class TestAstBuilder: def setup_class(cls): diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -16,7 +16,7 @@ a[i][i] = 1 return a -def sum(a,axis=None): +def sum(a,axis=None, out=None): '''sum(a, axis=None) Sum of array elements over a given axis. 
@@ -43,17 +43,17 @@ # TODO: add to doc (once it's implemented): cumsum : Cumulative sum of array elements. if not hasattr(a, "sum"): a = _numpypy.array(a) - return a.sum(axis) + return a.sum(axis=axis, out=out) -def min(a, axis=None): +def min(a, axis=None, out=None): if not hasattr(a, "min"): a = _numpypy.array(a) - return a.min(axis) + return a.min(axis=axis, out=out) -def max(a, axis=None): +def max(a, axis=None, out=None): if not hasattr(a, "max"): a = _numpypy.array(a) - return a.max(axis) + return a.max(axis=axis, out=out) def arange(start, stop=None, step=1, dtype=None): '''arange([start], stop[, step], dtype=None) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -62,21 +62,24 @@ return space.wrap(dtype.itemtype.bool(self)) def _binop_impl(ufunc_name): - def impl(self, space, w_other): + def impl(self, space, w_other, w_out=None): from pypy.module.micronumpy import interp_ufuncs - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self, w_other]) + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [self, w_other, w_out]) return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) def _binop_right_impl(ufunc_name): - def impl(self, space, w_other): + def impl(self, space, w_other, w_out=None): from pypy.module.micronumpy import interp_ufuncs - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self]) + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [w_other, self, w_out]) return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name) def _unaryop_impl(ufunc_name): - def impl(self, space): + def impl(self, space, w_out=None): from pypy.module.micronumpy import interp_ufuncs - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self]) + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [self, w_out]) return 
func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) descr_add = _binop_impl("add") diff --git a/pypy/module/micronumpy/interp_iter.py b/pypy/module/micronumpy/interp_iter.py --- a/pypy/module/micronumpy/interp_iter.py +++ b/pypy/module/micronumpy/interp_iter.py @@ -269,7 +269,7 @@ def apply_transformations(self, arr, transformations): v = BaseIterator.apply_transformations(self, arr, transformations) - if len(arr.shape) == 1: + if len(arr.shape) == 1 and len(v.res_shape) == 1: return OneDimIterator(self.offset, self.strides[0], self.res_shape[0]) return v diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -83,8 +83,9 @@ return space.wrap(W_NDimArray(shape[:], dtype=dtype)) def _unaryop_impl(ufunc_name): - def impl(self, space): - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self]) + def impl(self, space, w_out=None): + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [self, w_out]) return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) descr_pos = _unaryop_impl("positive") @@ -93,8 +94,9 @@ descr_invert = _unaryop_impl("invert") def _binop_impl(ufunc_name): - def impl(self, space, w_other): - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self, w_other]) + def impl(self, space, w_other, w_out=None): + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [self, w_other, w_out]) return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) descr_add = _binop_impl("add") @@ -124,12 +126,12 @@ return space.newtuple([w_quotient, w_remainder]) def _binop_right_impl(ufunc_name): - def impl(self, space, w_other): + def impl(self, space, w_other, w_out=None): w_other = scalar_w(space, interp_ufuncs.find_dtype_for_scalar(space, w_other, self.find_dtype()), w_other ) - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, 
self]) + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self, w_out]) return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name) descr_radd = _binop_right_impl("add") @@ -152,13 +154,21 @@ return space.newtuple([w_quotient, w_remainder]) def _reduce_ufunc_impl(ufunc_name, promote_to_largest=False): - def impl(self, space, w_axis=None): + def impl(self, space, w_axis=None, w_out=None): if space.is_w(w_axis, space.w_None): axis = -1 else: axis = space.int_w(w_axis) + if space.is_w(w_out, space.w_None) or not w_out: + out = None + elif not isinstance(w_out, BaseArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) + else: + out = w_out return getattr(interp_ufuncs.get(space), ufunc_name).reduce(space, - self, True, promote_to_largest, axis) + self, True, promote_to_largest, axis, + False, out) return func_with_new_name(impl, "reduce_%s_impl" % ufunc_name) descr_sum = _reduce_ufunc_impl("add") @@ -213,6 +223,7 @@ def descr_dot(self, space, w_other): other = convert_to_array(space, w_other) if isinstance(other, Scalar): + #Note: w_out is not modified, this is numpy compliant. 
return self.descr_mul(space, other) elif len(self.shape) < 2 and len(other.shape) < 2: w_res = self.descr_mul(space, other) @@ -514,14 +525,14 @@ ) return w_result - def descr_mean(self, space, w_axis=None): + def descr_mean(self, space, w_axis=None, w_out=None): if space.is_w(w_axis, space.w_None): w_axis = space.wrap(-1) w_denom = space.wrap(support.product(self.shape)) else: dim = space.int_w(w_axis) w_denom = space.wrap(self.shape[dim]) - return space.div(self.descr_sum_promote(space, w_axis), w_denom) + return space.div(self.descr_sum_promote(space, w_axis, w_out), w_denom) def descr_var(self, space, w_axis=None): return get_appbridge_cache(space).call_method(space, '_var', self, @@ -714,11 +725,12 @@ """ Class for representing virtual arrays, such as binary ops or ufuncs """ - def __init__(self, name, shape, res_dtype): + def __init__(self, name, shape, res_dtype, out_arg=None): BaseArray.__init__(self, shape) self.forced_result = None self.res_dtype = res_dtype self.name = name + self.res = out_arg self.size = support.product(self.shape) * res_dtype.get_size() def _del_sources(self): @@ -727,13 +739,18 @@ raise NotImplementedError def compute(self): - ra = ResultArray(self, self.shape, self.res_dtype) + ra = ResultArray(self, self.shape, self.res_dtype, self.res) loop.compute(ra) + if self.res: + broadcast_dims = len(self.res.shape) - len(self.shape) + chunks = [Chunk(0,0,0,0)] * broadcast_dims + \ + [Chunk(0, i, 1, i) for i in self.shape] + return Chunks(chunks).apply(self.res) return ra.left def force_if_needed(self): if self.forced_result is None: - self.forced_result = self.compute() + self.forced_result = self.compute().get_concrete() self._del_sources() def get_concrete(self): @@ -773,8 +790,9 @@ class Call1(VirtualArray): - def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, values): - VirtualArray.__init__(self, name, shape, res_dtype) + def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, values, + out_arg=None): + 
VirtualArray.__init__(self, name, shape, res_dtype, out_arg) self.values = values self.size = values.size self.ufunc = ufunc @@ -786,6 +804,12 @@ def create_sig(self): if self.forced_result is not None: return self.forced_result.create_sig() + if self.shape != self.values.shape: + #This happens if out arg is used + return signature.BroadcastUfunc(self.ufunc, self.name, + self.calc_dtype, + self.values.create_sig(), + self.res.create_sig()) return signature.Call1(self.ufunc, self.name, self.calc_dtype, self.values.create_sig()) @@ -793,8 +817,9 @@ """ Intermediate class for performing binary operations. """ - def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, left, right): - VirtualArray.__init__(self, name, shape, res_dtype) + def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, left, right, + out_arg=None): + VirtualArray.__init__(self, name, shape, res_dtype, out_arg) self.ufunc = ufunc self.left = left self.right = right @@ -832,8 +857,13 @@ Call2.__init__(self, None, 'assign', shape, dtype, dtype, res, child) def create_sig(self): - return signature.ResultSignature(self.res_dtype, self.left.create_sig(), - self.right.create_sig()) + if self.left.shape != self.right.shape: + sig = signature.BroadcastResultSignature(self.res_dtype, + self.left.create_sig(), self.right.create_sig()) + else: + sig = signature.ResultSignature(self.res_dtype, + self.left.create_sig(), self.right.create_sig()) + return sig class ToStringArray(Call1): def __init__(self, child): @@ -842,9 +872,9 @@ self.s = StringBuilder(child.size * self.item_size) Call1.__init__(self, None, 'tostring', child.shape, dtype, dtype, child) - self.res = W_NDimArray([1], dtype, 'C') - self.res_casted = rffi.cast(rffi.CArrayPtr(lltype.Char), - self.res.storage) + self.res_str = W_NDimArray([1], dtype, order='C') + self.res_str_casted = rffi.cast(rffi.CArrayPtr(lltype.Char), + self.res_str.storage) def create_sig(self): return signature.ToStringSignature(self.calc_dtype, @@ -950,7 +980,7 
@@ def setitem(self, item, value): self.invalidated() - self.dtype.setitem(self, item, value) + self.dtype.setitem(self, item, value.convert_to(self.dtype)) def calc_strides(self, shape): dtype = self.find_dtype() diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -28,26 +28,38 @@ return self.identity def descr_call(self, space, __args__): + from interp_numarray import BaseArray args_w, kwds_w = __args__.unpack() # it occurs to me that we don't support any datatypes that # require casting, change it later when we do kwds_w.pop('casting', None) w_subok = kwds_w.pop('subok', None) w_out = kwds_w.pop('out', space.w_None) - if ((w_subok is not None and space.is_true(w_subok)) or - not space.is_w(w_out, space.w_None)): + # Setup a default value for out + if space.is_w(w_out, space.w_None): + out = None + else: + out = w_out + if (w_subok is not None and space.is_true(w_subok)): raise OperationError(space.w_NotImplementedError, space.wrap("parameters unsupported")) if kwds_w or len(args_w) < self.argcount: raise OperationError(space.w_ValueError, space.wrap("invalid number of arguments") ) - elif len(args_w) > self.argcount: - # The extra arguments should actually be the output array, but we - # don't support that yet. 
+ elif (len(args_w) > self.argcount and out is not None) or \ + (len(args_w) > self.argcount + 1): raise OperationError(space.w_TypeError, space.wrap("invalid number of arguments") ) + # Override the default out value, if it has been provided in w_wargs + if len(args_w) > self.argcount: + out = args_w[-1] + else: + args_w = args_w[:] + [out] + if out is not None and not isinstance(out, BaseArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) return self.call(space, args_w) @unwrap_spec(skipna=bool, keepdims=bool) @@ -105,28 +117,33 @@ array([[ 1, 5], [ 9, 13]]) """ - if not space.is_w(w_out, space.w_None): - raise OperationError(space.w_NotImplementedError, space.wrap( - "out not supported")) + from pypy.module.micronumpy.interp_numarray import BaseArray if w_axis is None: axis = 0 elif space.is_w(w_axis, space.w_None): axis = -1 else: axis = space.int_w(w_axis) - return self.reduce(space, w_obj, False, False, axis, keepdims) + if space.is_w(w_out, space.w_None): + out = None + elif not isinstance(w_out, BaseArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) + else: + out = w_out + return self.reduce(space, w_obj, False, False, axis, keepdims, out) - def reduce(self, space, w_obj, multidim, promote_to_largest, dim, - keepdims=False): + def reduce(self, space, w_obj, multidim, promote_to_largest, axis, + keepdims=False, out=None): from pypy.module.micronumpy.interp_numarray import convert_to_array, \ - Scalar, ReduceArray + Scalar, ReduceArray, W_NDimArray if self.argcount != 2: raise OperationError(space.w_ValueError, space.wrap("reduce only " "supported for binary functions")) assert isinstance(self, W_Ufunc2) obj = convert_to_array(space, w_obj) - if dim >= len(obj.shape): - raise OperationError(space.w_ValueError, space.wrap("axis(=%d) out of bounds" % dim)) + if axis >= len(obj.shape): + raise OperationError(space.w_ValueError, space.wrap("axis(=%d) out of bounds" % axis)) if 
isinstance(obj, Scalar): raise OperationError(space.w_TypeError, space.wrap("cannot reduce " "on a scalar")) @@ -144,21 +161,55 @@ if self.identity is None and size == 0: raise operationerrfmt(space.w_ValueError, "zero-size array to " "%s.reduce without identity", self.name) - if shapelen > 1 and dim >= 0: - return self.do_axis_reduce(obj, dtype, dim, keepdims) - arr = ReduceArray(self.func, self.name, self.identity, obj, dtype) - return loop.compute(arr) + if shapelen > 1 and axis >= 0: + if keepdims: + shape = obj.shape[:axis] + [1] + obj.shape[axis + 1:] + else: + shape = obj.shape[:axis] + obj.shape[axis + 1:] + if out: + #Test for shape agreement + if len(out.shape) > len(shape): + raise operationerrfmt(space.w_ValueError, + 'output parameter for reduction operation %s' + + ' has too many dimensions', self.name) + elif len(out.shape) < len(shape): + raise operationerrfmt(space.w_ValueError, + 'output parameter for reduction operation %s' + + ' does not have enough dimensions', self.name) + elif out.shape != shape: + raise operationerrfmt(space.w_ValueError, + 'output parameter shape mismatch, expecting [%s]' + + ' , got [%s]', + ",".join([str(x) for x in shape]), + ",".join([str(x) for x in out.shape]), + ) + #Test for dtype agreement, perhaps create an itermediate + #if out.dtype != dtype: + # raise OperationError(space.w_TypeError, space.wrap( + # "mismatched dtypes")) + return self.do_axis_reduce(obj, out.find_dtype(), axis, out) + else: + result = W_NDimArray(shape, dtype) + return self.do_axis_reduce(obj, dtype, axis, result) + if out: + if len(out.shape)>0: + raise operationerrfmt(space.w_ValueError, "output parameter " + "for reduction operation %s has too many" + " dimensions",self.name) + arr = ReduceArray(self.func, self.name, self.identity, obj, + out.find_dtype()) + val = loop.compute(arr) + assert isinstance(out, Scalar) + out.value = val + else: + arr = ReduceArray(self.func, self.name, self.identity, obj, dtype) + val = loop.compute(arr) + 
return val - def do_axis_reduce(self, obj, dtype, dim, keepdims): - from pypy.module.micronumpy.interp_numarray import AxisReduce,\ - W_NDimArray - if keepdims: - shape = obj.shape[:dim] + [1] + obj.shape[dim + 1:] - else: - shape = obj.shape[:dim] + obj.shape[dim + 1:] - result = W_NDimArray(shape, dtype) + def do_axis_reduce(self, obj, dtype, axis, result): + from pypy.module.micronumpy.interp_numarray import AxisReduce arr = AxisReduce(self.func, self.name, self.identity, obj.shape, dtype, - result, obj, dim) + result, obj, axis) loop.compute(arr) return arr.left @@ -176,24 +227,55 @@ self.bool_result = bool_result def call(self, space, args_w): - from pypy.module.micronumpy.interp_numarray import (Call1, - convert_to_array, Scalar) - - [w_obj] = args_w + from pypy.module.micronumpy.interp_numarray import (Call1, BaseArray, + convert_to_array, Scalar, shape_agreement) + if len(args_w)<2: + [w_obj] = args_w + out = None + else: + [w_obj, out] = args_w + if space.is_w(out, space.w_None): + out = None w_obj = convert_to_array(space, w_obj) calc_dtype = find_unaryop_result_dtype(space, w_obj.find_dtype(), promote_to_float=self.promote_to_float, promote_bools=self.promote_bools) - if self.bool_result: + if out: + if not isinstance(out, BaseArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) + res_dtype = out.find_dtype() + elif self.bool_result: res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype else: res_dtype = calc_dtype if isinstance(w_obj, Scalar): - return space.wrap(self.func(calc_dtype, w_obj.value.convert_to(calc_dtype))) - - w_res = Call1(self.func, self.name, w_obj.shape, calc_dtype, res_dtype, - w_obj) + arr = self.func(calc_dtype, w_obj.value.convert_to(calc_dtype)) + if isinstance(out,Scalar): + out.value=arr + elif isinstance(out, BaseArray): + out.fill(space, arr) + else: + out = arr + return space.wrap(out) + if out: + assert isinstance(out, BaseArray) # For translation + broadcast_shape = 
shape_agreement(space, w_obj.shape, out.shape) + if not broadcast_shape or broadcast_shape != out.shape: + raise operationerrfmt(space.w_ValueError, + 'output parameter shape mismatch, could not broadcast [%s]' + + ' to [%s]', + ",".join([str(x) for x in w_obj.shape]), + ",".join([str(x) for x in out.shape]), + ) + w_res = Call1(self.func, self.name, out.shape, calc_dtype, + res_dtype, w_obj, out) + #Force it immediately + w_res.get_concrete() + else: + w_res = Call1(self.func, self.name, w_obj.shape, calc_dtype, + res_dtype, w_obj) w_obj.add_invalidates(w_res) return w_res @@ -212,32 +294,61 @@ def call(self, space, args_w): from pypy.module.micronumpy.interp_numarray import (Call2, - convert_to_array, Scalar, shape_agreement) - - [w_lhs, w_rhs] = args_w + convert_to_array, Scalar, shape_agreement, BaseArray) + if len(args_w)>2: + [w_lhs, w_rhs, w_out] = args_w + else: + [w_lhs, w_rhs] = args_w + w_out = None w_lhs = convert_to_array(space, w_lhs) w_rhs = convert_to_array(space, w_rhs) - calc_dtype = find_binop_result_dtype(space, - w_lhs.find_dtype(), w_rhs.find_dtype(), - int_only=self.int_only, - promote_to_float=self.promote_to_float, - promote_bools=self.promote_bools, - ) + if space.is_w(w_out, space.w_None) or w_out is None: + out = None + calc_dtype = find_binop_result_dtype(space, + w_lhs.find_dtype(), w_rhs.find_dtype(), + int_only=self.int_only, + promote_to_float=self.promote_to_float, + promote_bools=self.promote_bools, + ) + elif not isinstance(w_out, BaseArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) + else: + out = w_out + calc_dtype = out.find_dtype() if self.comparison_func: res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype else: res_dtype = calc_dtype if isinstance(w_lhs, Scalar) and isinstance(w_rhs, Scalar): - return space.wrap(self.func(calc_dtype, + arr = self.func(calc_dtype, w_lhs.value.convert_to(calc_dtype), w_rhs.value.convert_to(calc_dtype) - )) + ) + if isinstance(out,Scalar): + 
out.value=arr + elif isinstance(out, BaseArray): + out.fill(space, arr) + else: + out = arr + return space.wrap(out) new_shape = shape_agreement(space, w_lhs.shape, w_rhs.shape) + # Test correctness of out.shape + if out and out.shape != shape_agreement(space, new_shape, out.shape): + raise operationerrfmt(space.w_ValueError, + 'output parameter shape mismatch, could not broadcast [%s]' + + ' to [%s]', + ",".join([str(x) for x in new_shape]), + ",".join([str(x) for x in out.shape]), + ) w_res = Call2(self.func, self.name, new_shape, calc_dtype, - res_dtype, w_lhs, w_rhs) + res_dtype, w_lhs, w_rhs, out) w_lhs.add_invalidates(w_res) w_rhs.add_invalidates(w_res) + if out: + #out.add_invalidates(w_res) #causes a recursion loop + w_res.get_concrete() return w_res diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -216,13 +216,14 @@ return self.child.eval(frame, arr.child) class Call1(Signature): - _immutable_fields_ = ['unfunc', 'name', 'child', 'dtype'] + _immutable_fields_ = ['unfunc', 'name', 'child', 'res', 'dtype'] - def __init__(self, func, name, dtype, child): + def __init__(self, func, name, dtype, child, res=None): self.unfunc = func self.child = child self.name = name self.dtype = dtype + self.res = res def hash(self): return compute_hash(self.name) ^ intmask(self.child.hash() << 1) @@ -256,6 +257,29 @@ v = self.child.eval(frame, arr.values).convert_to(arr.calc_dtype) return self.unfunc(arr.calc_dtype, v) + +class BroadcastUfunc(Call1): + def _invent_numbering(self, cache, allnumbers): + self.res._invent_numbering(cache, allnumbers) + self.child._invent_numbering(new_cache(), allnumbers) + + def debug_repr(self): + return 'BroadcastUfunc(%s, %s)' % (self.name, self.child.debug_repr()) + + def _create_iter(self, iterlist, arraylist, arr, transforms): + from pypy.module.micronumpy.interp_numarray import Call1 + + assert isinstance(arr, 
Call1) + vtransforms = transforms + [BroadcastTransform(arr.values.shape)] + self.child._create_iter(iterlist, arraylist, arr.values, vtransforms) + self.res._create_iter(iterlist, arraylist, arr.res, transforms) + + def eval(self, frame, arr): + from pypy.module.micronumpy.interp_numarray import Call1 + assert isinstance(arr, Call1) + v = self.child.eval(frame, arr.values).convert_to(arr.calc_dtype) + return self.unfunc(arr.calc_dtype, v) + class Call2(Signature): _immutable_fields_ = ['binfunc', 'name', 'calc_dtype', 'left', 'right'] @@ -316,7 +340,17 @@ assert isinstance(arr, ResultArray) offset = frame.get_final_iter().offset - arr.left.setitem(offset, self.right.eval(frame, arr.right)) + val = self.right.eval(frame, arr.right) + arr.left.setitem(offset, val) + +class BroadcastResultSignature(ResultSignature): + def _create_iter(self, iterlist, arraylist, arr, transforms): + from pypy.module.micronumpy.interp_numarray import ResultArray + + assert isinstance(arr, ResultArray) + rtransforms = transforms + [BroadcastTransform(arr.left.shape)] + self.left._create_iter(iterlist, arraylist, arr.left, transforms) + self.right._create_iter(iterlist, arraylist, arr.right, rtransforms) class ToStringSignature(Call1): def __init__(self, dtype, child): @@ -327,10 +361,10 @@ from pypy.module.micronumpy.interp_numarray import ToStringArray assert isinstance(arr, ToStringArray) - arr.res.setitem(0, self.child.eval(frame, arr.values).convert_to( + arr.res_str.setitem(0, self.child.eval(frame, arr.values).convert_to( self.dtype)) for i in range(arr.item_size): - arr.s.append(arr.res_casted[i]) + arr.s.append(arr.res_str_casted[i]) class BroadcastLeft(Call2): def _invent_numbering(self, cache, allnumbers): @@ -455,6 +489,5 @@ cur = arr.left.getitem(iterator.offset) value = self.binfunc(self.calc_dtype, cur, v) arr.left.setitem(iterator.offset, value) - def debug_repr(self): return 'AxisReduceSig(%s, %s)' % (self.name, self.right.debug_repr()) diff --git 
a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -995,6 +995,10 @@ assert a.sum() == 5 raises(TypeError, 'a.sum(2, 3)') + d = array(0.) + b = a.sum(out=d) + assert b == d + assert isinstance(b, float) def test_reduce_nd(self): from numpypy import arange, array, multiply @@ -1495,8 +1499,6 @@ a = array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14]]) b = a[::2] - print a - print b assert (b == [[1, 2], [5, 6], [9, 10], [13, 14]]).all() c = b + b assert c[1][1] == 12 diff --git a/pypy/module/micronumpy/test/test_outarg.py b/pypy/module/micronumpy/test/test_outarg.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/test/test_outarg.py @@ -0,0 +1,126 @@ +import py +from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest + +class AppTestOutArg(BaseNumpyAppTest): + def test_reduce_out(self): + from numpypy import arange, zeros, array + a = arange(15).reshape(5, 3) + b = arange(12).reshape(4,3) + c = a.sum(0, out=b[1]) + assert (c == [30, 35, 40]).all() + assert (c == b[1]).all() + raises(ValueError, 'a.prod(0, out=arange(10))') + a=arange(12).reshape(3,2,2) + raises(ValueError, 'a.sum(0, out=arange(12).reshape(3,2,2))') + raises(ValueError, 'a.sum(0, out=arange(3))') + c = array([-1, 0, 1]).sum(out=zeros([], dtype=bool)) + #You could argue that this should product False, but + # that would require an itermediate result. Cpython numpy + # gives True. 
+ assert c == True + a = array([[-1, 0, 1], [1, 0, -1]]) + c = a.sum(0, out=zeros((3,), dtype=bool)) + assert (c == [True, False, True]).all() + c = a.sum(1, out=zeros((2,), dtype=bool)) + assert (c == [True, True]).all() + + def test_reduce_intermediary(self): + from numpypy import arange, array + a = arange(15).reshape(5, 3) + b = array(range(3), dtype=bool) + c = a.prod(0, out=b) + assert(b == [False, True, True]).all() + + def test_ufunc_out(self): + from _numpypy import array, negative, zeros, sin + from math import sin as msin + a = array([[1, 2], [3, 4]]) + c = zeros((2,2,2)) + b = negative(a + a, out=c[1]) + #test for view, and also test that forcing out also forces b + assert (c[:, :, 1] == [[0, 0], [-4, -8]]).all() + assert (b == [[-2, -4], [-6, -8]]).all() + #Test broadcast, type promotion + b = negative(3, out=a) + assert (a == -3).all() + c = zeros((2, 2), dtype=float) + b = negative(3, out=c) + assert b.dtype.kind == c.dtype.kind + assert b.shape == c.shape + a = array([1, 2]) + b = sin(a, out=c) + assert(c == [[msin(1), msin(2)]] * 2).all() + b = sin(a, out=c+c) + assert (c == b).all() + + #Test shape agreement + a = zeros((3,4)) + b = zeros((3,5)) + raises(ValueError, 'negative(a, out=b)') + b = zeros((1,4)) + raises(ValueError, 'negative(a, out=b)') + + def test_binfunc_out(self): + from _numpypy import array, add + a = array([[1, 2], [3, 4]]) + out = array([[1, 2], [3, 4]]) + c = add(a, a, out=out) + assert (c == out).all() + assert c.shape == a.shape + assert c.dtype is a.dtype + c[0,0] = 100 + assert out[0, 0] == 100 + out[:] = 100 + raises(ValueError, 'c = add(a, a, out=out[1])') + c = add(a[0], a[1], out=out[1]) + assert (c == out[1]).all() + assert (c == [4, 6]).all() + assert (out[0] == 100).all() + c = add(a[0], a[1], out=out) + assert (c == out[1]).all() + assert (c == out[0]).all() + out = array(16, dtype=int) + b = add(10, 10, out=out) + assert b==out + assert b.dtype == out.dtype + + def test_applevel(self): + from _numpypy import 
array, sum, max, min + a = array([[1, 2], [3, 4]]) + out = array([[0, 0], [0, 0]]) + c = sum(a, axis=0, out=out[0]) + assert (c == [4, 6]).all() + assert (c == out[0]).all() + assert (c != out[1]).all() + c = max(a, axis=1, out=out[0]) + assert (c == [2, 4]).all() + assert (c == out[0]).all() + assert (c != out[1]).all() + + def test_ufunc_cast(self): + from _numpypy import array, negative, add, sum + a = array(16, dtype = int) + c = array(0, dtype = float) + b = negative(a, out=c) + assert b == c + b = add(a, a, out=c) + assert b == c + d = array([16, 16], dtype=int) + b = sum(d, out=c) + assert b == c + try: + from _numpypy import version + v = version.version.split('.') + except: + v = ['1', '6', '0'] # numpypy is api compatable to what version? + if v[0]<'2': + b = negative(c, out=a) + assert b == a + b = add(c, c, out=a) + assert b == a + b = sum(array([16, 16], dtype=float), out=a) + assert b == a + else: + cast_error = raises(TypeError, negative, c, a) + assert str(cast_error.value) == \ + "Cannot cast ufunc negative output from dtype('float64') to dtype('int64') with casting rule 'same_kind'" diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -131,7 +131,7 @@ # bogus. We need to improve the situation somehow. 
self.check_simple_loop({'getinteriorfield_raw': 2, 'setinteriorfield_raw': 1, - 'arraylen_gc': 1, + 'arraylen_gc': 2, 'guard_true': 1, 'int_lt': 1, 'jump': 1, diff --git a/pypy/objspace/flow/test/test_objspace.py b/pypy/objspace/flow/test/test_objspace.py --- a/pypy/objspace/flow/test/test_objspace.py +++ b/pypy/objspace/flow/test/test_objspace.py @@ -1,6 +1,6 @@ from __future__ import with_statement import new -import py +import py, sys from pypy.objspace.flow.model import Constant, Block, Link, Variable from pypy.objspace.flow.model import mkentrymap, c_last_exception from pypy.interpreter.argument import Arguments @@ -893,6 +893,8 @@ """ Tests code generated by pypy-c compiled with BUILD_LIST_FROM_ARG bytecode """ + if sys.version_info < (2, 7): + py.test.skip("2.7 only test") self.patch_opcodes('BUILD_LIST_FROM_ARG') try: def f(): diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -58,33 +58,13 @@ binaries = [(pypy_c, rename_pypy_c)] # if sys.platform == 'win32': - #What runtime do we need? - msvc_runtime = 'msvcr80.dll' #default is studio 2005 vc8 - try: - import subprocess - out,err = subprocess.Popen([str(pypy_c), '-c', - 'import sys; print sys.version'], - stdout=subprocess.PIPE).communicate() - indx=out.find('MSC v.') + 6 - if indx> 10: - if out[indx:].startswith('1600'): - msvc_runtime = 'msvcr100.dll' #studio 2010 vc10 - elif out[indx:].startwith('1500'): - msvc_runtime = 'msvcr90.dll' #studio 2009 vc9 - elif out[indx:].startswith('1400'): - msvc_runtime = 'msvcr80.dll' #studio 2005 vc8 - else: - print 'Cannot determine runtime dll for pypy' \ - ' version "%s"'%out - else: - print 'Cannot determine runtime dll for pypy' \ - ' version "%s"'%out - except : - pass + #Don't include a mscvrXX.dll, users should get their own. + #Instructions are provided on the website. 
+ # Can't rename a DLL: it is always called 'libpypy-c.dll' for extra in ['libpypy-c.dll', - 'libexpat.dll', 'sqlite3.dll', msvc_runtime, + 'libexpat.dll', 'sqlite3.dll', 'libeay32.dll', 'ssleay32.dll']: p = pypy_c.dirpath().join(extra) if not p.check(): From noreply at buildbot.pypy.org Mon Mar 26 20:08:16 2012 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 26 Mar 2012 20:08:16 +0200 (CEST) Subject: [pypy-commit] pypy set-strategies: Failing test. Message-ID: <20120326180816.1F211820D9@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: set-strategies Changeset: r54004:84913cf56f5e Date: 2012-03-26 19:54 +0200 http://bitbucket.org/pypy/pypy/changeset/84913cf56f5e/ Log: Failing test. diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -1181,6 +1181,16 @@ assert l == [] assert list(g) == [] + def test_uses_custom_iterator(self): + for base, arg in [(list, []), (list, [5]), (list, ['x']), + (dict, []), (dict, [(5,6)]), (dict, [('x',7)]), + (tuple, []), (tuple, [5]), (tuple, ['x']), + (str, 'hello'), (unicode, 'hello')]: + class SubClass(base): + def __iter__(self): + return iter("foobar") + assert list(SubClass(arg)) == ['f', 'o', 'o', 'b', 'a', 'r'] + class AppTestForRangeLists(AppTestW_ListObject): def setup_class(cls): From noreply at buildbot.pypy.org Mon Mar 26 20:08:18 2012 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 26 Mar 2012 20:08:18 +0200 (CEST) Subject: [pypy-commit] pypy set-strategies: - weaken the test as needed for OrderedDict and fix it Message-ID: <20120326180818.98A94820D9@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: set-strategies Changeset: r54005:eef2e1c3a6d1 Date: 2012-03-26 20:07 +0200 http://bitbucket.org/pypy/pypy/changeset/eef2e1c3a6d1/ Log: - weaken the test as needed for OrderedDict and fix it - add frozenset support to space.listview*(). 
diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -25,6 +25,7 @@ from pypy.objspace.std.objectobject import W_ObjectObject from pypy.objspace.std.ropeobject import W_RopeObject from pypy.objspace.std.iterobject import W_SeqIterObject +from pypy.objspace.std.setobject import W_BaseSetObject from pypy.objspace.std.setobject import W_SetObject, W_FrozensetObject from pypy.objspace.std.sliceobject import W_SliceObject from pypy.objspace.std.smallintobject import W_SmallIntObject @@ -448,9 +449,9 @@ def listview_str(self, w_obj): if isinstance(w_obj, W_ListObject): return w_obj.getitems_str() - if isinstance(w_obj, W_SetObject): + if isinstance(w_obj, W_BaseSetObject): return w_obj.listview_str() - if isinstance(w_obj, W_DictMultiObject): + if type(w_obj) is W_DictMultiObject: # test_listobject.test_uses_cus. return w_obj.listview_str() if isinstance(w_obj, W_StringObject): return w_obj.listview_str() @@ -459,9 +460,9 @@ def listview_int(self, w_obj): if isinstance(w_obj, W_ListObject): return w_obj.getitems_int() - if isinstance(w_obj, W_SetObject): + if isinstance(w_obj, W_BaseSetObject): return w_obj.listview_int() - if isinstance(w_obj, W_DictMultiObject): + if type(w_obj) is W_DictMultiObject: # test_listobject.test_uses_cus. 
return w_obj.listview_int() return None diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -1182,13 +1182,12 @@ assert list(g) == [] def test_uses_custom_iterator(self): - for base, arg in [(list, []), (list, [5]), (list, ['x']), - (dict, []), (dict, [(5,6)]), (dict, [('x',7)]), - (tuple, []), (tuple, [5]), (tuple, ['x']), - (str, 'hello'), (unicode, 'hello')]: - class SubClass(base): - def __iter__(self): - return iter("foobar") + # obscure corner case: space.listview*() must not shortcut subclasses + # of dicts, because the OrderedDict in the stdlib relies on this. + class SubClass(dict): + def __iter__(self): + return iter("foobar") + for arg in [[], [(5,6)], [('x',7)]]: assert list(SubClass(arg)) == ['f', 'o', 'o', 'b', 'a', 'r'] class AppTestForRangeLists(AppTestW_ListObject): From noreply at buildbot.pypy.org Mon Mar 26 20:38:47 2012 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 26 Mar 2012 20:38:47 +0200 (CEST) Subject: [pypy-commit] pypy set-strategies: Extend the test to all types with strategies. Message-ID: <20120326183847.A15BA820D9@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: set-strategies Changeset: r54006:f56f0aee6c50 Date: 2012-03-26 20:36 +0200 http://bitbucket.org/pypy/pypy/changeset/f56f0aee6c50/ Log: Extend the test to all types with strategies. 
diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -1064,17 +1064,17 @@ init_defaults = [None] def init__List(space, w_list, __args__): - from pypy.objspace.std.tupleobject import W_TupleObject + from pypy.objspace.std.tupleobject import W_AbstractTupleObject # this is on the silly side w_iterable, = __args__.parse_obj( None, 'list', init_signature, init_defaults) w_list.clear(space) if w_iterable is not None: - if isinstance(w_iterable, W_ListObject): + if type(w_iterable) is W_ListObject: w_iterable.copy_into(w_list) return - elif isinstance(w_iterable, W_TupleObject): - W_ListObject(space, w_iterable.wrappeditems[:]).copy_into(w_list) + elif isinstance(w_iterable, W_AbstractTupleObject): + w_list.__init__(space, w_iterable.getitems_copy()) return intlist = space.listview_int(w_iterable) diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -25,7 +25,6 @@ from pypy.objspace.std.objectobject import W_ObjectObject from pypy.objspace.std.ropeobject import W_RopeObject from pypy.objspace.std.iterobject import W_SeqIterObject -from pypy.objspace.std.setobject import W_BaseSetObject from pypy.objspace.std.setobject import W_SetObject, W_FrozensetObject from pypy.objspace.std.sliceobject import W_SliceObject from pypy.objspace.std.smallintobject import W_SmallIntObject @@ -401,7 +400,7 @@ def unpackiterable(self, w_obj, expected_length=-1): if isinstance(w_obj, W_AbstractTupleObject): t = w_obj.getitems_copy() - elif isinstance(w_obj, W_ListObject): + elif type(w_obj) is W_ListObject: t = w_obj.getitems_copy() else: return ObjSpace.unpackiterable(self, w_obj, expected_length) @@ -415,7 +414,7 @@ """ if isinstance(w_obj, W_AbstractTupleObject): t = w_obj.tolist() - elif isinstance(w_obj, W_ListObject): + elif type(w_obj) is W_ListObject: if unroll: t = 
w_obj.getitems_unroll() else: @@ -436,7 +435,7 @@ return self.fixedview(w_obj, expected_length, unroll=True) def listview(self, w_obj, expected_length=-1): - if isinstance(w_obj, W_ListObject): + if type(w_obj) is W_ListObject: t = w_obj.getitems() elif isinstance(w_obj, W_AbstractTupleObject): t = w_obj.getitems_copy() @@ -447,22 +446,24 @@ return t def listview_str(self, w_obj): - if isinstance(w_obj, W_ListObject): + # note: uses exact type checking for objects with strategies, + # and isinstance() for others. See test_listobject.test_uses_custom... + if type(w_obj) is W_ListObject: return w_obj.getitems_str() - if isinstance(w_obj, W_BaseSetObject): + if type(w_obj) is W_DictMultiObject: return w_obj.listview_str() - if type(w_obj) is W_DictMultiObject: # test_listobject.test_uses_cus. + if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: return w_obj.listview_str() if isinstance(w_obj, W_StringObject): return w_obj.listview_str() return None def listview_int(self, w_obj): - if isinstance(w_obj, W_ListObject): + if type(w_obj) is W_ListObject: return w_obj.getitems_int() - if isinstance(w_obj, W_BaseSetObject): + if type(w_obj) is W_DictMultiObject: return w_obj.listview_int() - if type(w_obj) is W_DictMultiObject: # test_listobject.test_uses_cus. + if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: return w_obj.listview_int() return None diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -1184,10 +1184,15 @@ def test_uses_custom_iterator(self): # obscure corner case: space.listview*() must not shortcut subclasses # of dicts, because the OrderedDict in the stdlib relies on this. - class SubClass(dict): - def __iter__(self): - return iter("foobar") - for arg in [[], [(5,6)], [('x',7)]]: + # we extend the use case to lists and sets, i.e. 
all types that have + # strategies, to avoid surprizes depending on the strategy. + for base, arg in [(list, []), (list, [5]), (list, ['x']), + (set, []), (set, [5]), (set, ['x']), + (dict, []), (dict, [(5,6)]), (dict, [('x',7)])]: + print base, arg + class SubClass(base): + def __iter__(self): + return iter("foobar") assert list(SubClass(arg)) == ['f', 'o', 'o', 'b', 'a', 'r'] class AppTestForRangeLists(AppTestW_ListObject): From noreply at buildbot.pypy.org Mon Mar 26 20:41:22 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 26 Mar 2012 20:41:22 +0200 (CEST) Subject: [pypy-commit] pypy default: Have bytearray.__getitem__(slice) go through the ll_arraycopy fast path when possible. Message-ID: <20120326184122.2AB43820D9@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r54007:9a9eb5e9b67e Date: 2012-03-26 14:41 -0400 http://bitbucket.org/pypy/pypy/changeset/9a9eb5e9b67e/ Log: Have bytearray.__getitem__(slice) go through the ll_arraycopy fast path when possible. 
diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -111,7 +111,10 @@ length = len(data) start, stop, step, slicelength = w_slice.indices4(space, length) assert slicelength >= 0 - newdata = [data[start + i*step] for i in range(slicelength)] + if step == 1 and 0 <= start <= stop: + newdata = data[start:stop] + else: + newdata = [data[start + i*step] for i in range(slicelength)] return W_BytearrayObject(newdata) def contains__Bytearray_Int(space, w_bytearray, w_char): From noreply at buildbot.pypy.org Mon Mar 26 21:28:56 2012 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 26 Mar 2012 21:28:56 +0200 (CEST) Subject: [pypy-commit] pypy set-strategies: Close branch about to be merged Message-ID: <20120326192856.290E0820D9@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: set-strategies Changeset: r54008:1f903d2edaee Date: 2012-03-26 21:25 +0200 http://bitbucket.org/pypy/pypy/changeset/1f903d2edaee/ Log: Close branch about to be merged From noreply at buildbot.pypy.org Mon Mar 26 21:28:59 2012 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 26 Mar 2012 21:28:59 +0200 (CEST) Subject: [pypy-commit] pypy default: merge set-strategies: Message-ID: <20120326192859.795E4820D9@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r54009:43f380907aa7 Date: 2012-03-26 21:27 +0200 http://bitbucket.org/pypy/pypy/changeset/43f380907aa7/ Log: merge set-strategies: Add strategies to lists and sets. Some clean-ups in dicts, too. (See the branch for details.) 
diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -128,3 +128,82 @@ loop, = log.loops_by_filename(self.filepath) ops = loop.ops_by_id('look') assert 'call' not in log.opnames(ops) + + #XXX the following tests only work with strategies enabled + + def test_should_not_create_intobject_with_sets(self): + def main(n): + i = 0 + s = set() + while i < n: + s.add(i) + i += 1 + log = self.run(main, [1000]) + assert log.result == main(1000) + loop, = log.loops_by_filename(self.filepath) + opnames = log.opnames(loop.allops()) + assert opnames.count('new_with_vtable') == 0 + + def test_should_not_create_stringobject_with_sets(self): + def main(n): + i = 0 + s = set() + while i < n: + s.add(str(i)) + i += 1 + log = self.run(main, [1000]) + assert log.result == main(1000) + loop, = log.loops_by_filename(self.filepath) + opnames = log.opnames(loop.allops()) + assert opnames.count('new_with_vtable') == 0 + + def test_should_not_create_intobject_with_lists(self): + def main(n): + i = 0 + l = [] + while i < n: + l.append(i) + i += 1 + log = self.run(main, [1000]) + assert log.result == main(1000) + loop, = log.loops_by_filename(self.filepath) + opnames = log.opnames(loop.allops()) + assert opnames.count('new_with_vtable') == 0 + + def test_should_not_create_stringobject_with_lists(self): + def main(n): + i = 0 + l = [] + while i < n: + l.append(str(i)) + i += 1 + log = self.run(main, [1000]) + assert log.result == main(1000) + loop, = log.loops_by_filename(self.filepath) + opnames = log.opnames(loop.allops()) + assert opnames.count('new_with_vtable') == 0 + + def test_optimized_create_list_from_string(self): + def main(n): + i = 0 + l = [] + while i < n: + l = list("abc" * i) + i += 1 + log = self.run(main, [1000]) + assert log.result == main(1000) + loop, = log.loops_by_filename(self.filepath) + 
opnames = log.opnames(loop.allops()) + assert opnames.count('new_with_vtable') == 0 + + def test_optimized_create_set_from_list(self): + def main(n): + i = 0 + while i < n: + s = set([1,2,3]) + i += 1 + log = self.run(main, [1000]) + assert log.result == main(1000) + loop, = log.loops_by_filename(self.filepath) + opnames = log.opnames(loop.allops()) + assert opnames.count('new_with_vtable') == 0 diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -127,10 +127,10 @@ def iter(self, w_dict): return ModuleDictIteratorImplementation(self.space, self, w_dict) - def keys(self, w_dict): + def w_keys(self, w_dict): space = self.space - iterator = self.unerase(w_dict.dstorage).iteritems - return [space.wrap(key) for key, cell in iterator()] + l = self.unerase(w_dict.dstorage).keys() + return space.newlist_str(l) def values(self, w_dict): iterator = self.unerase(w_dict.dstorage).itervalues diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -90,9 +90,9 @@ def _add_indirections(): dict_methods = "setitem setitem_str getitem \ getitem_str delitem length \ - clear keys values \ + clear w_keys values \ items iter setdefault \ - popitem".split() + popitem listview_str listview_int".split() def make_method(method): def f(self, *args): @@ -113,7 +113,7 @@ def get_empty_storage(self): raise NotImplementedError - def keys(self, w_dict): + def w_keys(self, w_dict): iterator = self.iter(w_dict) result = [] while 1: @@ -121,7 +121,7 @@ if w_key is not None: result.append(w_key) else: - return result + return self.space.newlist(result) def values(self, w_dict): iterator = self.iter(w_dict) @@ -160,6 +160,11 @@ w_dict.strategy = strategy w_dict.dstorage = storage + def listview_str(self, w_dict): + return None + + def listview_int(self, w_dict): + return None class 
EmptyDictStrategy(DictStrategy): @@ -371,8 +376,9 @@ self.switch_to_object_strategy(w_dict) return w_dict.getitem(w_key) - def keys(self, w_dict): - return [self.wrap(key) for key in self.unerase(w_dict.dstorage).iterkeys()] + def w_keys(self, w_dict): + l = [self.wrap(key) for key in self.unerase(w_dict.dstorage).iterkeys()] + return self.space.newlist(l) def values(self, w_dict): return self.unerase(w_dict.dstorage).values() @@ -425,8 +431,8 @@ def iter(self, w_dict): return ObjectIteratorImplementation(self.space, self, w_dict) - def keys(self, w_dict): - return self.unerase(w_dict.dstorage).keys() + def w_keys(self, w_dict): + return self.space.newlist(self.unerase(w_dict.dstorage).keys()) class StringDictStrategy(AbstractTypedStrategy, DictStrategy): @@ -469,9 +475,15 @@ assert key is not None return self.unerase(w_dict.dstorage).get(key, None) + def listview_str(self, w_dict): + return self.unerase(w_dict.dstorage).keys() + def iter(self, w_dict): return StrIteratorImplementation(self.space, self, w_dict) + def w_keys(self, w_dict): + return self.space.newlist_str(self.listview_str(w_dict)) + class _WrappedIteratorMixin(object): _mixin_ = True @@ -534,6 +546,14 @@ def iter(self, w_dict): return IntIteratorImplementation(self.space, self, w_dict) + def listview_int(self, w_dict): + return self.unerase(w_dict.dstorage).keys() + + def w_keys(self, w_dict): + # XXX there is no space.newlist_int yet + space = self.space + return space.call_function(space.w_list, w_dict) + class IntIteratorImplementation(_WrappedIteratorMixin, IteratorImplementation): pass @@ -688,7 +708,7 @@ return space.newlist(w_self.items()) def dict_keys__DictMulti(space, w_self): - return space.newlist(w_self.keys()) + return w_self.w_keys() def dict_values__DictMulti(space, w_self): return space.newlist(w_self.values()) diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py --- a/pypy/objspace/std/dictproxyobject.py +++ b/pypy/objspace/std/dictproxyobject.py 
@@ -76,7 +76,7 @@ def keys(self, w_dict): space = self.space - return [space.wrap(key) for key in self.unerase(w_dict.dstorage).dict_w.iterkeys()] + return space.newlist_str(self.unerase(w_dict.dstorage).dict_w.keys()) def values(self, w_dict): return [unwrap_cell(self.space, w_value) for w_value in self.unerase(w_dict.dstorage).dict_w.itervalues()] diff --git a/pypy/objspace/std/dicttype.py b/pypy/objspace/std/dicttype.py --- a/pypy/objspace/std/dicttype.py +++ b/pypy/objspace/std/dicttype.py @@ -62,8 +62,14 @@ w_fill = space.w_None if space.is_w(w_type, space.w_dict): w_dict = W_DictMultiObject.allocate_and_init_instance(space, w_type) - for w_key in space.listview(w_keys): - w_dict.setitem(w_key, w_fill) + + strlist = space.listview_str(w_keys) + if strlist is not None: + for key in strlist: + w_dict.setitem_str(key, w_fill) + else: + for w_key in space.listview(w_keys): + w_dict.setitem(w_key, w_fill) else: w_dict = space.call_function(w_type) for w_key in space.listview(w_keys): diff --git a/pypy/objspace/std/frozensettype.py b/pypy/objspace/std/frozensettype.py --- a/pypy/objspace/std/frozensettype.py +++ b/pypy/objspace/std/frozensettype.py @@ -39,13 +39,11 @@ def descr__frozenset__new__(space, w_frozensettype, w_iterable=gateway.NoneNotWrapped): from pypy.objspace.std.setobject import W_FrozensetObject - from pypy.objspace.std.setobject import make_setdata_from_w_iterable if (space.is_w(w_frozensettype, space.w_frozenset) and w_iterable is not None and type(w_iterable) is W_FrozensetObject): return w_iterable w_obj = space.allocate_instance(W_FrozensetObject, w_frozensettype) - data = make_setdata_from_w_iterable(space, w_iterable) - W_FrozensetObject.__init__(w_obj, space, data) + W_FrozensetObject.__init__(w_obj, space, w_iterable) return w_obj frozenset_typedef = StdTypeDef("frozenset", diff --git a/pypy/objspace/std/iterobject.py b/pypy/objspace/std/iterobject.py --- a/pypy/objspace/std/iterobject.py +++ b/pypy/objspace/std/iterobject.py @@ -29,9 +29,8 
@@ class W_SeqIterObject(W_AbstractSeqIterObject): """Sequence iterator implementation for general sequences.""" -class W_FastListIterObject(W_AbstractSeqIterObject): - """Sequence iterator specialized for lists, accessing directly their - RPython-level list of wrapped objects. +class W_FastListIterObject(W_AbstractSeqIterObject): # XXX still needed + """Sequence iterator specialized for lists. """ class W_FastTupleIterObject(W_AbstractSeqIterObject): diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -139,6 +139,16 @@ new erased object as storage""" self.strategy.init_from_list_w(self, list_w) + def clear(self, space): + """Initializes (or overrides) the listobject as empty.""" + self.space = space + if space.config.objspace.std.withliststrategies: + strategy = space.fromcache(EmptyListStrategy) + else: + strategy = space.fromcache(ObjectListStrategy) + self.strategy = strategy + strategy.clear(self) + def clone(self): """Returns a clone by creating a new listobject with the same strategy and a copy of the storage""" @@ -200,6 +210,11 @@ """ Return the items in the list as unwrapped strings. If the list does not use the list strategy, return None. """ return self.strategy.getitems_str(self) + + def getitems_int(self): + """ Return the items in the list as unwrapped ints. If the list does + not use the list strategy, return None. 
""" + return self.strategy.getitems_int(self) # ___________________________________________________ @@ -300,6 +315,9 @@ def getitems_str(self, w_list): return None + def getitems_int(self, w_list): + return None + def getstorage_copy(self, w_list): raise NotImplementedError @@ -358,6 +376,9 @@ assert len(list_w) == 0 w_list.lstorage = self.erase(None) + def clear(self, w_list): + w_list.lstorage = self.erase(None) + erase, unerase = rerased.new_erasing_pair("empty") erase = staticmethod(erase) unerase = staticmethod(unerase) @@ -516,6 +537,9 @@ raise IndexError return start + i * step + def getitems_int(self, w_list): + return self._getitems_range(w_list, False) + def getitem(self, w_list, i): return self.wrap(self._getitem_unwrapped(w_list, i)) @@ -696,6 +720,7 @@ for i in l: if i == obj: return True + return False return ListStrategy.contains(self, w_list, w_obj) def length(self, w_list): @@ -937,6 +962,9 @@ def init_from_list_w(self, w_list, list_w): w_list.lstorage = self.erase(list_w) + def clear(self, w_list): + w_list.lstorage = self.erase([]) + def contains(self, w_list, w_obj): return ListStrategy.contains(self, w_list, w_obj) @@ -970,6 +998,9 @@ if reverse: l.reverse() + def getitems_int(self, w_list): + return self.unerase(w_list.lstorage) + class FloatListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = 0.0 _applevel_repr = "float" @@ -1027,37 +1058,49 @@ def getitems_str(self, w_list): return self.unerase(w_list.lstorage) - # _______________________________________________________ init_signature = Signature(['sequence'], None, None) init_defaults = [None] def init__List(space, w_list, __args__): - from pypy.objspace.std.tupleobject import W_TupleObject + from pypy.objspace.std.tupleobject import W_AbstractTupleObject # this is on the silly side w_iterable, = __args__.parse_obj( None, 'list', init_signature, init_defaults) - w_list.__init__(space, []) + w_list.clear(space) if w_iterable is not None: - # unfortunately this is duplicating 
space.unpackiterable to avoid - # assigning a new RPython list to 'wrappeditems', which defeats the - # W_FastListIterObject optimization. - if isinstance(w_iterable, W_ListObject): - w_list.extend(w_iterable) - elif isinstance(w_iterable, W_TupleObject): - w_list.extend(W_ListObject(space, w_iterable.wrappeditems[:])) - else: - _init_from_iterable(space, w_list, w_iterable) + if type(w_iterable) is W_ListObject: + w_iterable.copy_into(w_list) + return + elif isinstance(w_iterable, W_AbstractTupleObject): + w_list.__init__(space, w_iterable.getitems_copy()) + return + + intlist = space.listview_int(w_iterable) + if intlist is not None: + w_list.strategy = strategy = space.fromcache(IntegerListStrategy) + # need to copy because intlist can share with w_iterable + w_list.lstorage = strategy.erase(intlist[:]) + return + + strlist = space.listview_str(w_iterable) + if strlist is not None: + w_list.strategy = strategy = space.fromcache(StringListStrategy) + # need to copy because intlist can share with w_iterable + w_list.lstorage = strategy.erase(strlist[:]) + return + + # xxx special hack for speed + from pypy.interpreter.generator import GeneratorIterator + if isinstance(w_iterable, GeneratorIterator): + w_iterable.unpack_into_w(w_list) + return + # /xxx + _init_from_iterable(space, w_list, w_iterable) def _init_from_iterable(space, w_list, w_iterable): # in its own function to make the JIT look into init__List - # xxx special hack for speed - from pypy.interpreter.generator import GeneratorIterator - if isinstance(w_iterable, GeneratorIterator): - w_iterable.unpack_into_w(w_list) - return - # /xxx w_iterator = space.iter(w_iterable) while True: try: diff --git a/pypy/objspace/std/listtype.py b/pypy/objspace/std/listtype.py --- a/pypy/objspace/std/listtype.py +++ b/pypy/objspace/std/listtype.py @@ -43,7 +43,7 @@ def descr__new__(space, w_listtype, __args__): from pypy.objspace.std.listobject import W_ListObject w_obj = space.allocate_instance(W_ListObject, 
w_listtype) - W_ListObject.__init__(w_obj, space, []) + w_obj.clear(space) return w_obj # ____________________________________________________________ diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -694,6 +694,8 @@ self.delitem(w_dict, w_key) return (w_key, w_value) + # XXX could implement a more efficient w_keys based on space.newlist_str + def materialize_r_dict(space, obj, dict_w): map = obj._get_mapdict_map() new_obj = map.materialize_r_dict(space, obj, dict_w) diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -227,10 +227,7 @@ return W_ComplexObject(x.real, x.imag) if isinstance(x, set): - rdict_w = r_dict(self.eq_w, self.hash_w) - for item in x: - rdict_w[self.wrap(item)] = None - res = W_SetObject(self, rdict_w) + res = W_SetObject(self, self.newlist([self.wrap(item) for item in x])) return res if isinstance(x, frozenset): @@ -325,7 +322,7 @@ def newset(self): from pypy.objspace.std.setobject import newset - return W_SetObject(self, newset(self)) + return W_SetObject(self, None) def newslice(self, w_start, w_end, w_step): return W_SliceObject(w_start, w_end, w_step) @@ -403,7 +400,7 @@ def unpackiterable(self, w_obj, expected_length=-1): if isinstance(w_obj, W_AbstractTupleObject): t = w_obj.getitems_copy() - elif isinstance(w_obj, W_ListObject): + elif type(w_obj) is W_ListObject: t = w_obj.getitems_copy() else: return ObjSpace.unpackiterable(self, w_obj, expected_length) @@ -417,7 +414,7 @@ """ if isinstance(w_obj, W_AbstractTupleObject): t = w_obj.tolist() - elif isinstance(w_obj, W_ListObject): + elif type(w_obj) is W_ListObject: if unroll: t = w_obj.getitems_unroll() else: @@ -438,7 +435,7 @@ return self.fixedview(w_obj, expected_length, unroll=True) def listview(self, w_obj, expected_length=-1): - if isinstance(w_obj, W_ListObject): + if type(w_obj) is 
W_ListObject: t = w_obj.getitems() elif isinstance(w_obj, W_AbstractTupleObject): t = w_obj.getitems_copy() @@ -449,8 +446,25 @@ return t def listview_str(self, w_obj): - if isinstance(w_obj, W_ListObject): + # note: uses exact type checking for objects with strategies, + # and isinstance() for others. See test_listobject.test_uses_custom... + if type(w_obj) is W_ListObject: return w_obj.getitems_str() + if type(w_obj) is W_DictMultiObject: + return w_obj.listview_str() + if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: + return w_obj.listview_str() + if isinstance(w_obj, W_StringObject): + return w_obj.listview_str() + return None + + def listview_int(self, w_obj): + if type(w_obj) is W_ListObject: + return w_obj.getitems_int() + if type(w_obj) is W_DictMultiObject: + return w_obj.listview_int() + if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: + return w_obj.listview_int() return None def sliceindices(self, w_slice, w_length): diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -7,6 +7,12 @@ from pypy.interpreter.argument import Signature from pypy.objspace.std.settype import set_typedef as settypedef from pypy.objspace.std.frozensettype import frozenset_typedef as frozensettypedef +from pypy.rlib import rerased +from pypy.rlib.objectmodel import instantiate +from pypy.interpreter.generator import GeneratorIterator +from pypy.objspace.std.listobject import W_ListObject +from pypy.objspace.std.intobject import W_IntObject +from pypy.objspace.std.stringobject import W_StringObject class W_BaseSetObject(W_Object): typedef = None @@ -20,88 +26,859 @@ return True return False - - def __init__(w_self, space, setdata): + def __init__(w_self, space, w_iterable=None): """Initialize the set by taking ownership of 'setdata'.""" - assert setdata is not None - w_self.setdata = setdata + w_self.space = space + 
set_strategy_and_setdata(space, w_self, w_iterable) def __repr__(w_self): """representation for debugging purposes""" - reprlist = [repr(w_item) for w_item in w_self.setdata.keys()] + reprlist = [repr(w_item) for w_item in w_self.getkeys()] return "<%s(%s)>" % (w_self.__class__.__name__, ', '.join(reprlist)) + def from_storage_and_strategy(w_self, storage, strategy): + obj = w_self._newobj(w_self.space, None) + assert isinstance(obj, W_BaseSetObject) + obj.strategy = strategy + obj.sstorage = storage + return obj + _lifeline_ = None def getweakref(self): return self._lifeline_ + def setweakref(self, space, weakreflifeline): self._lifeline_ = weakreflifeline def delweakref(self): self._lifeline_ = None + def switch_to_object_strategy(self, space): + d = self.strategy.getdict_w(self) + self.strategy = strategy = space.fromcache(ObjectSetStrategy) + self.sstorage = strategy.erase(d) + + def switch_to_empty_strategy(self): + self.strategy = strategy = self.space.fromcache(EmptySetStrategy) + self.sstorage = strategy.get_empty_storage() + + # _____________ strategy methods ________________ + + + def clear(self): + """ Removes all elements from the set. """ + self.strategy.clear(self) + + def copy_real(self): + """ Returns a clone of the set. Frozensets storages are also copied.""" + return self.strategy.copy_real(self) + + def length(self): + """ Returns the number of items inside the set. """ + return self.strategy.length(self) + + def add(self, w_key): + """ Adds an element to the set. The element must be wrapped. """ + self.strategy.add(self, w_key) + + def remove(self, w_item): + """ Removes the given element from the set. Element must be wrapped. """ + return self.strategy.remove(self, w_item) + + def getdict_w(self): + """ Returns a dict with all elements of the set. Needed only for switching to ObjectSetStrategy. """ + return self.strategy.getdict_w(self) + + def listview_str(self): + """ If this is a string set return its contents as a list of uwnrapped strings. 
Otherwise return None. """ + return self.strategy.listview_str(self) + + def listview_int(self): + """ If this is an int set return its contents as a list of uwnrapped ints. Otherwise return None. """ + return self.strategy.listview_int(self) + + def get_storage_copy(self): + """ Returns a copy of the storage. Needed when we want to clone all elements from one set and + put them into another. """ + return self.strategy.get_storage_copy(self) + + def getkeys(self): + """ Returns a list of all elements inside the set. Only used in __repr__. Use as less as possible.""" + return self.strategy.getkeys(self) + + def difference(self, w_other): + """ Returns a set with all items that are in this set, but not in w_other. W_other must be a set.""" + return self.strategy.difference(self, w_other) + + def difference_update(self, w_other): + """ As difference but overwrites the sets content with the result. W_other must be a set.""" + self.strategy.difference_update(self, w_other) + + def symmetric_difference(self, w_other): + """ Returns a set with all items that are either in this set or in w_other, but not in both. W_other must be a set. """ + return self.strategy.symmetric_difference(self, w_other) + + def symmetric_difference_update(self, w_other): + """ As symmetric_difference but overwrites the content of the set with the result. W_other must be a set.""" + self.strategy.symmetric_difference_update(self, w_other) + + def intersect(self, w_other): + """ Returns a set with all items that exists in both sets, this set and in w_other. W_other must be a set. """ + return self.strategy.intersect(self, w_other) + + def intersect_update(self, w_other): + """ Keeps only those elements found in both sets, removing all other elements. W_other must be a set.""" + self.strategy.intersect_update(self, w_other) + + def issubset(self, w_other): + """ Checks wether this set is a subset of w_other. W_other must be a set. 
""" + return self.strategy.issubset(self, w_other) + + def isdisjoint(self, w_other): + """ Checks wether this set and the w_other are completly different, i.e. have no equal elements. W_other must be a set.""" + return self.strategy.isdisjoint(self, w_other) + + def update(self, w_other): + """ Appends all elements from the given set to this set. W_other must be a set.""" + self.strategy.update(self, w_other) + + def has_key(self, w_key): + """ Checks wether this set contains the given wrapped key.""" + return self.strategy.has_key(self, w_key) + + def equals(self, w_other): + """ Checks wether this set and the given set are equal, i.e. contain the same elements. W_other must be a set.""" + return self.strategy.equals(self, w_other) + + def iter(self): + """ Returns an iterator of the elements from this set. """ + return self.strategy.iter(self) + + def popitem(self): + """ Removes an arbitrary element from the set. May raise KeyError if set is empty.""" + return self.strategy.popitem(self) + class W_SetObject(W_BaseSetObject): from pypy.objspace.std.settype import set_typedef as typedef - def _newobj(w_self, space, rdict_w): - """Make a new set by taking ownership of 'rdict_w'.""" + def _newobj(w_self, space, w_iterable): + """Make a new set by taking ownership of 'w_iterable'.""" if type(w_self) is W_SetObject: - return W_SetObject(space, rdict_w) + return W_SetObject(space, w_iterable) w_type = space.type(w_self) w_obj = space.allocate_instance(W_SetObject, w_type) - W_SetObject.__init__(w_obj, space, rdict_w) + W_SetObject.__init__(w_obj, space, w_iterable) return w_obj class W_FrozensetObject(W_BaseSetObject): from pypy.objspace.std.frozensettype import frozenset_typedef as typedef hash = 0 - def _newobj(w_self, space, rdict_w): - """Make a new frozenset by taking ownership of 'rdict_w'.""" + def _newobj(w_self, space, w_iterable): + """Make a new frozenset by taking ownership of 'w_iterable'.""" if type(w_self) is W_FrozensetObject: - return 
W_FrozensetObject(space, rdict_w) + return W_FrozensetObject(space, w_iterable) w_type = space.type(w_self) w_obj = space.allocate_instance(W_FrozensetObject, w_type) - W_FrozensetObject.__init__(w_obj, space, rdict_w) + W_FrozensetObject.__init__(w_obj, space, w_iterable) return w_obj registerimplementation(W_BaseSetObject) registerimplementation(W_SetObject) registerimplementation(W_FrozensetObject) -class W_SetIterObject(W_Object): - from pypy.objspace.std.settype import setiter_typedef as typedef +class SetStrategy(object): + def __init__(self, space): + self.space = space - def __init__(w_self, setdata): - w_self.content = content = setdata - w_self.len = len(content) - w_self.pos = 0 - w_self.iterator = w_self.content.iterkeys() + def get_empty_dict(self): + """ Returns an empty dictionary depending on the strategy. Used to initalize a new storage. """ + raise NotImplementedError - def next_entry(w_self): - for w_key in w_self.iterator: + def get_empty_storage(self): + """ Returns an empty storage (erased) object. 
Used to initialize an empty set.""" + raise NotImplementedError + + def listview_str(self, w_set): + return None + + def listview_int(self, w_set): + return None + + #def erase(self, storage): + # raise NotImplementedError + + #def unerase(self, storage): + # raise NotImplementedError + + # __________________ methods called on W_SetObject _________________ + + def clear(self, w_set): + raise NotImplementedError + + def copy_real(self, w_set): + raise NotImplementedError + + def length(self, w_set): + raise NotImplementedError + + def add(self, w_set, w_key): + raise NotImplementedError + + def remove(self, w_set, w_item): + raise NotImplementedError + + def getdict_w(self, w_set): + raise NotImplementedError + + def get_storage_copy(self, w_set): + raise NotImplementedError + + def getkeys(self, w_set): + raise NotImplementedError + + def difference(self, w_set, w_other): + raise NotImplementedError + + def difference_update(self, w_set, w_other): + raise NotImplementedError + + def symmetric_difference(self, w_set, w_other): + raise NotImplementedError + + def symmetric_difference_update(self, w_set, w_other): + raise NotImplementedError + + def intersect(self, w_set, w_other): + raise NotImplementedError + + def intersect_update(self, w_set, w_other): + raise NotImplementedError + + def issubset(self, w_set, w_other): + raise NotImplementedError + + def isdisjoint(self, w_set, w_other): + raise NotImplementedError + + def update(self, w_set, w_other): + raise NotImplementedError + + def has_key(self, w_set, w_key): + raise NotImplementedError + + def equals(self, w_set, w_other): + raise NotImplementedError + + def iter(self, w_set): + raise NotImplementedError + + def popitem(self, w_set): + raise NotImplementedError + +class EmptySetStrategy(SetStrategy): + + erase, unerase = rerased.new_erasing_pair("empty") + erase = staticmethod(erase) + unerase = staticmethod(unerase) + + def get_empty_storage(self): + return self.erase(None) + + def is_correct_type(self, 
w_key): + return False + + def length(self, w_set): + return 0 + + def clear(self, w_set): + pass + + def copy_real(self, w_set): + storage = self.erase(None) + clone = w_set.from_storage_and_strategy(storage, self) + return clone + + def add(self, w_set, w_key): + if type(w_key) is W_IntObject: + strategy = self.space.fromcache(IntegerSetStrategy) + elif type(w_key) is W_StringObject: + strategy = self.space.fromcache(StringSetStrategy) + else: + strategy = self.space.fromcache(ObjectSetStrategy) + w_set.strategy = strategy + w_set.sstorage = strategy.get_empty_storage() + w_set.add(w_key) + + def remove(self, w_set, w_item): + return False + + def getdict_w(self, w_set): + return newset(self.space) + + def get_storage_copy(self, w_set): + return w_set.sstorage + + def getkeys(self, w_set): + return [] + + def has_key(self, w_set, w_key): + return False + + def equals(self, w_set, w_other): + if w_other.strategy is self or w_other.length() == 0: + return True + return False + + def difference(self, w_set, w_other): + return w_set.copy_real() + + def difference_update(self, w_set, w_other): + pass + + def intersect(self, w_set, w_other): + return w_set.copy_real() + + def intersect_update(self, w_set, w_other): + pass + + def isdisjoint(self, w_set, w_other): + return True + + def issubset(self, w_set, w_other): + return True + + def symmetric_difference(self, w_set, w_other): + return w_other.copy_real() + + def symmetric_difference_update(self, w_set, w_other): + w_set.strategy = w_other.strategy + w_set.sstorage = w_other.get_storage_copy() + + def update(self, w_set, w_other): + w_set.strategy = w_other.strategy + w_set.sstorage = w_other.get_storage_copy() + + def iter(self, w_set): + return EmptyIteratorImplementation(self.space, w_set) + + def popitem(self, w_set): + raise OperationError(self.space.w_KeyError, + self.space.wrap('pop from an empty set')) + +class AbstractUnwrappedSetStrategy(object): + _mixin_ = True + + def is_correct_type(self, w_key): + 
""" Checks wether the given wrapped key fits this strategy.""" + raise NotImplementedError + + def unwrap(self, w_item): + """ Returns the unwrapped value of the given wrapped item.""" + raise NotImplementedError + + def wrap(self, item): + """ Returns a wrapped version of the given unwrapped item. """ + raise NotImplementedError + + def get_storage_from_list(self, list_w): + setdata = self.get_empty_dict() + for w_item in list_w: + setdata[self.unwrap(w_item)] = None + return self.erase(setdata) + + def get_storage_from_unwrapped_list(self, items): + setdata = self.get_empty_dict() + for item in items: + setdata[item] = None + return self.erase(setdata) + + def length(self, w_set): + return len(self.unerase(w_set.sstorage)) + + def clear(self, w_set): + w_set.switch_to_empty_strategy() + + def copy_real(self, w_set): + # may be used internally on frozen sets, although frozenset().copy() + # returns self in frozenset_copy__Frozenset. + strategy = w_set.strategy + d = self.unerase(w_set.sstorage) + storage = self.erase(d.copy()) + clone = w_set.from_storage_and_strategy(storage, strategy) + return clone + + def add(self, w_set, w_key): + if self.is_correct_type(w_key): + d = self.unerase(w_set.sstorage) + d[self.unwrap(w_key)] = None + else: + w_set.switch_to_object_strategy(self.space) + w_set.add(w_key) + + def remove(self, w_set, w_item): + from pypy.objspace.std.dictmultiobject import _never_equal_to_string + d = self.unerase(w_set.sstorage) + if not self.is_correct_type(w_item): + #XXX check type of w_item and immediately return False in some cases + w_set.switch_to_object_strategy(self.space) + return w_set.remove(w_item) + + key = self.unwrap(w_item) + try: + del d[key] + return True + except KeyError: + return False + + def getdict_w(self, w_set): + result = newset(self.space) + keys = self.unerase(w_set.sstorage).keys() + for key in keys: + result[self.wrap(key)] = None + return result + + def get_storage_copy(self, w_set): + d = 
self.unerase(w_set.sstorage) + copy = self.erase(d.copy()) + return copy + + def getkeys(self, w_set): + keys = self.unerase(w_set.sstorage).keys() + keys_w = [self.wrap(key) for key in keys] + return keys_w + + def has_key(self, w_set, w_key): + from pypy.objspace.std.dictmultiobject import _never_equal_to_string + if not self.is_correct_type(w_key): + #XXX check type of w_item and immediately return False in some cases + w_set.switch_to_object_strategy(self.space) + return w_set.has_key(w_key) + d = self.unerase(w_set.sstorage) + return self.unwrap(w_key) in d + + def equals(self, w_set, w_other): + if w_set.length() != w_other.length(): + return False + items = self.unerase(w_set.sstorage).keys() + for key in items: + if not w_other.has_key(self.wrap(key)): + return False + return True + + def _difference_wrapped(self, w_set, w_other): + strategy = self.space.fromcache(ObjectSetStrategy) + + d_new = strategy.get_empty_dict() + for obj in self.unerase(w_set.sstorage): + w_item = self.wrap(obj) + if not w_other.has_key(w_item): + d_new[w_item] = None + + return strategy.erase(d_new) + + def _difference_unwrapped(self, w_set, w_other): + iterator = self.unerase(w_set.sstorage).iterkeys() + other_dict = self.unerase(w_other.sstorage) + result_dict = self.get_empty_dict() + for key in iterator: + if key not in other_dict: + result_dict[key] = None + return self.erase(result_dict) + + def _difference_base(self, w_set, w_other): + if self is w_other.strategy: + strategy = w_set.strategy + storage = self._difference_unwrapped(w_set, w_other) + elif not w_set.strategy.may_contain_equal_elements(w_other.strategy): + strategy = w_set.strategy + storage = w_set.sstorage + else: + strategy = self.space.fromcache(ObjectSetStrategy) + storage = self._difference_wrapped(w_set, w_other) + return storage, strategy + + def difference(self, w_set, w_other): + storage, strategy = self._difference_base(w_set, w_other) + w_newset = w_set.from_storage_and_strategy(storage, strategy) + 
return w_newset + + def difference_update(self, w_set, w_other): + storage, strategy = self._difference_base(w_set, w_other) + w_set.strategy = strategy + w_set.sstorage = storage + + def _symmetric_difference_unwrapped(self, w_set, w_other): + d_new = self.get_empty_dict() + d_this = self.unerase(w_set.sstorage) + d_other = self.unerase(w_other.sstorage) + for key in d_other.keys(): + if not key in d_this: + d_new[key] = None + for key in d_this.keys(): + if not key in d_other: + d_new[key] = None + + storage = self.erase(d_new) + return storage + + def _symmetric_difference_wrapped(self, w_set, w_other): + newsetdata = newset(self.space) + for obj in self.unerase(w_set.sstorage): + w_item = self.wrap(obj) + if not w_other.has_key(w_item): + newsetdata[w_item] = None + + w_iterator = w_other.iter() + while True: + w_item = w_iterator.next_entry() + if w_item is None: + break + if not w_set.has_key(w_item): + newsetdata[w_item] = None + + strategy = self.space.fromcache(ObjectSetStrategy) + return strategy.erase(newsetdata) + + def _symmetric_difference_base(self, w_set, w_other): + if self is w_other.strategy: + strategy = w_set.strategy + storage = self._symmetric_difference_unwrapped(w_set, w_other) + else: + strategy = self.space.fromcache(ObjectSetStrategy) + storage = self._symmetric_difference_wrapped(w_set, w_other) + return storage, strategy + + def symmetric_difference(self, w_set, w_other): + storage, strategy = self._symmetric_difference_base(w_set, w_other) + return w_set.from_storage_and_strategy(storage, strategy) + + def symmetric_difference_update(self, w_set, w_other): + storage, strategy = self._symmetric_difference_base(w_set, w_other) + w_set.strategy = strategy + w_set.sstorage = storage + + def _intersect_base(self, w_set, w_other): + if self is w_other.strategy: + strategy = w_set.strategy + storage = strategy._intersect_unwrapped(w_set, w_other) + elif not w_set.strategy.may_contain_equal_elements(w_other.strategy): + strategy = 
self.space.fromcache(EmptySetStrategy) + storage = strategy.get_empty_storage() + else: + strategy = self.space.fromcache(ObjectSetStrategy) + storage = self._intersect_wrapped(w_set, w_other) + return storage, strategy + + def _intersect_wrapped(self, w_set, w_other): + result = newset(self.space) + for key in self.unerase(w_set.sstorage): + w_key = self.wrap(key) + if w_other.has_key(w_key): + result[w_key] = None + + strategy = self.space.fromcache(ObjectSetStrategy) + return strategy.erase(result) + + def _intersect_unwrapped(self, w_set, w_other): + result = self.get_empty_dict() + d_this = self.unerase(w_set.sstorage) + d_other = self.unerase(w_other.sstorage) + for key in d_this: + if key in d_other: + result[key] = None + return self.erase(result) + + def intersect(self, w_set, w_other): + if w_set.length() > w_other.length(): + return w_other.intersect(w_set) + + storage, strategy = self._intersect_base(w_set, w_other) + return w_set.from_storage_and_strategy(storage, strategy) + + def intersect_update(self, w_set, w_other): + if w_set.length() > w_other.length(): + w_intersection = w_other.intersect(w_set) + strategy = w_intersection.strategy + storage = w_intersection.sstorage + else: + storage, strategy = self._intersect_base(w_set, w_other) + w_set.strategy = strategy + w_set.sstorage = storage + + def _issubset_unwrapped(self, w_set, w_other): + d_other = self.unerase(w_other.sstorage) + for item in self.unerase(w_set.sstorage): + if not item in d_other: + return False + return True + + def _issubset_wrapped(self, w_set, w_other): + for obj in self.unerase(w_set.sstorage): + w_item = self.wrap(obj) + if not w_other.has_key(w_item): + return False + return True + + def issubset(self, w_set, w_other): + if w_set.length() == 0: + return True + + if w_set.strategy is w_other.strategy: + return self._issubset_unwrapped(w_set, w_other) + elif not w_set.strategy.may_contain_equal_elements(w_other.strategy): + return False + else: + return 
self._issubset_wrapped(w_set, w_other) + + def _isdisjoint_unwrapped(self, w_set, w_other): + d_set = self.unerase(w_set.sstorage) + d_other = self.unerase(w_other.sstorage) + for key in d_set: + if key in d_other: + return False + return True + + def _isdisjoint_wrapped(self, w_set, w_other): + d = self.unerase(w_set.sstorage) + for key in d: + if w_other.has_key(self.wrap(key)): + return False + return True + + def isdisjoint(self, w_set, w_other): + if w_other.length() == 0: + return True + if w_set.length() > w_other.length(): + return w_other.isdisjoint(w_set) + + if w_set.strategy is w_other.strategy: + return self._isdisjoint_unwrapped(w_set, w_other) + elif not w_set.strategy.may_contain_equal_elements(w_other.strategy): + return True + else: + return self._isdisjoint_wrapped(w_set, w_other) + + def update(self, w_set, w_other): + if self is w_other.strategy: + d_set = self.unerase(w_set.sstorage) + d_other = self.unerase(w_other.sstorage) + d_set.update(d_other) + return + + w_set.switch_to_object_strategy(self.space) + w_set.update(w_other) + + def popitem(self, w_set): + storage = self.unerase(w_set.sstorage) + try: + # this returns a tuple because internally sets are dicts + result = storage.popitem() + except KeyError: + # strategy may still be the same even if dict is empty + raise OperationError(self.space.w_KeyError, + self.space.wrap('pop from an empty set')) + return self.wrap(result[0]) + +class StringSetStrategy(AbstractUnwrappedSetStrategy, SetStrategy): + erase, unerase = rerased.new_erasing_pair("string") + erase = staticmethod(erase) + unerase = staticmethod(unerase) + + def get_empty_storage(self): + return self.erase({}) + + def get_empty_dict(self): + return {} + + def listview_str(self, w_set): + return self.unerase(w_set.sstorage).keys() + + def is_correct_type(self, w_key): + return type(w_key) is W_StringObject + + def may_contain_equal_elements(self, strategy): + if strategy is self.space.fromcache(IntegerSetStrategy): + return False 
+ if strategy is self.space.fromcache(EmptySetStrategy): + return False + return True + + def unwrap(self, w_item): + return self.space.str_w(w_item) + + def wrap(self, item): + return self.space.wrap(item) + + def iter(self, w_set): + return StringIteratorImplementation(self.space, self, w_set) + +class IntegerSetStrategy(AbstractUnwrappedSetStrategy, SetStrategy): + erase, unerase = rerased.new_erasing_pair("integer") + erase = staticmethod(erase) + unerase = staticmethod(unerase) + + def get_empty_storage(self): + return self.erase({}) + + def get_empty_dict(self): + return {} + + def listview_int(self, w_set): + return self.unerase(w_set.sstorage).keys() + + def is_correct_type(self, w_key): + from pypy.objspace.std.intobject import W_IntObject + return type(w_key) is W_IntObject + + def may_contain_equal_elements(self, strategy): + if strategy is self.space.fromcache(StringSetStrategy): + return False + if strategy is self.space.fromcache(EmptySetStrategy): + return False + return True + + def unwrap(self, w_item): + return self.space.int_w(w_item) + + def wrap(self, item): + return self.space.wrap(item) + + def iter(self, w_set): + return IntegerIteratorImplementation(self.space, self, w_set) + +class ObjectSetStrategy(AbstractUnwrappedSetStrategy, SetStrategy): + erase, unerase = rerased.new_erasing_pair("object") + erase = staticmethod(erase) + unerase = staticmethod(unerase) + + def get_empty_storage(self): + return self.erase(self.get_empty_dict()) + + def get_empty_dict(self): + return newset(self.space) + + def is_correct_type(self, w_key): + return True + + def may_contain_equal_elements(self, strategy): + if strategy is self.space.fromcache(EmptySetStrategy): + return False + return True + + def unwrap(self, w_item): + return w_item + + def wrap(self, item): + return item + + def iter(self, w_set): + return RDictIteratorImplementation(self.space, self, w_set) + + def update(self, w_set, w_other): + d_obj = self.unerase(w_set.sstorage) + w_iterator = 
w_other.iter() + while True: + w_item = w_iterator.next_entry() + if w_item is None: + break + d_obj[w_item] = None + +class IteratorImplementation(object): + def __init__(self, space, implementation): + self.space = space + self.setimplementation = implementation + self.len = implementation.length() + self.pos = 0 + + def next(self): + if self.setimplementation is None: + return None + if self.len != self.setimplementation.length(): + self.len = -1 # Make this error state sticky + raise OperationError(self.space.w_RuntimeError, + self.space.wrap("set changed size during iteration")) + # look for the next entry + if self.pos < self.len: + result = self.next_entry() + self.pos += 1 + return result + # no more entries + self.setimplementation = None + return None + + def next_entry(self): + """ Purely abstract method + """ + raise NotImplementedError + + def length(self): + if self.setimplementation is not None: + return self.len - self.pos + return 0 + +class EmptyIteratorImplementation(IteratorImplementation): + def next_entry(self): + return None + + +class StringIteratorImplementation(IteratorImplementation): + def __init__(self, space, strategy, w_set): + IteratorImplementation.__init__(self, space, w_set) + d = strategy.unerase(w_set.sstorage) + self.iterator = d.iterkeys() + + def next_entry(self): + for key in self.iterator: + return self.space.wrap(key) + else: + return None + +class IntegerIteratorImplementation(IteratorImplementation): + #XXX same implementation in dictmultiobject on dictstrategy-branch + def __init__(self, space, strategy, dictimplementation): + IteratorImplementation.__init__(self, space, dictimplementation) + d = strategy.unerase(dictimplementation.sstorage) + self.iterator = d.iterkeys() + + def next_entry(self): + # note that this 'for' loop only runs once, at most + for key in self.iterator: + return self.space.wrap(key) + else: + return None + +class RDictIteratorImplementation(IteratorImplementation): + def __init__(self, space, 
strategy, dictimplementation): + IteratorImplementation.__init__(self, space, dictimplementation) + d = strategy.unerase(dictimplementation.sstorage) + self.iterator = d.iterkeys() + + def next_entry(self): + # note that this 'for' loop only runs once, at most + for w_key in self.iterator: return w_key else: return None +class W_SetIterObject(W_Object): + from pypy.objspace.std.settype import setiter_typedef as typedef + # XXX this class should be killed, and the various + # iterimplementations should be W_Objects directly. + + def __init__(w_self, space, iterimplementation): + w_self.space = space + w_self.iterimplementation = iterimplementation + registerimplementation(W_SetIterObject) def iter__SetIterObject(space, w_setiter): return w_setiter def next__SetIterObject(space, w_setiter): - content = w_setiter.content - if content is not None: - if w_setiter.len != len(content): - w_setiter.len = -1 # Make this error state sticky - raise OperationError(space.w_RuntimeError, - space.wrap("Set changed size during iteration")) - # look for the next entry - w_result = w_setiter.next_entry() - if w_result is not None: - w_setiter.pos += 1 - return w_result - # no more entries - w_setiter.content = None + iterimplementation = w_setiter.iterimplementation + w_key = iterimplementation.next() + if w_key is not None: + return w_key raise OperationError(space.w_StopIteration, space.w_None) # XXX __length_hint__() @@ -116,107 +893,91 @@ def newset(space): return r_dict(space.eq_w, space.hash_w, force_non_null=True) -def make_setdata_from_w_iterable(space, w_iterable=None): - """Return a new r_dict with the content of w_iterable.""" +def set_strategy_and_setdata(space, w_set, w_iterable): + from pypy.objspace.std.intobject import W_IntObject + if w_iterable is None : + w_set.strategy = strategy = space.fromcache(EmptySetStrategy) + w_set.sstorage = strategy.get_empty_storage() + return + if isinstance(w_iterable, W_BaseSetObject): - return w_iterable.setdata.copy() - data = 
newset(space) - if w_iterable is not None: - for w_item in space.listview(w_iterable): - data[w_item] = None - return data + w_set.strategy = w_iterable.strategy + w_set.sstorage = w_iterable.get_storage_copy() + return + + stringlist = space.listview_str(w_iterable) + if stringlist is not None: + strategy = space.fromcache(StringSetStrategy) + w_set.strategy = strategy + w_set.sstorage = strategy.get_storage_from_unwrapped_list(stringlist) + return + + intlist = space.listview_int(w_iterable) + if intlist is not None: + strategy = space.fromcache(IntegerSetStrategy) + w_set.strategy = strategy + w_set.sstorage = strategy.get_storage_from_unwrapped_list(intlist) + return + + iterable_w = space.listview(w_iterable) + + if len(iterable_w) == 0: + w_set.strategy = strategy = space.fromcache(EmptySetStrategy) + w_set.sstorage = strategy.get_empty_storage() + return + + _pick_correct_strategy(space, w_set, iterable_w) + +def _pick_correct_strategy(space, w_set, iterable_w): + # check for integers + for w_item in iterable_w: + if type(w_item) is not W_IntObject: + break + else: + w_set.strategy = space.fromcache(IntegerSetStrategy) + w_set.sstorage = w_set.strategy.get_storage_from_list(iterable_w) + return + + # check for strings + for w_item in iterable_w: + if type(w_item) is not W_StringObject: + break + else: + w_set.strategy = space.fromcache(StringSetStrategy) + w_set.sstorage = w_set.strategy.get_storage_from_list(iterable_w) + return + + w_set.strategy = space.fromcache(ObjectSetStrategy) + w_set.sstorage = w_set.strategy.get_storage_from_list(iterable_w) def _initialize_set(space, w_obj, w_iterable=None): - w_obj.setdata.clear() - if w_iterable is not None: - w_obj.setdata = make_setdata_from_w_iterable(space, w_iterable) + w_obj.clear() + set_strategy_and_setdata(space, w_obj, w_iterable) def _convert_set_to_frozenset(space, w_obj): - if space.isinstance_w(w_obj, space.w_set): - return W_FrozensetObject(space, - make_setdata_from_w_iterable(space, w_obj)) + if 
isinstance(w_obj, W_SetObject): + w_frozen = W_FrozensetObject(space, None) + w_frozen.strategy = w_obj.strategy + w_frozen.sstorage = w_obj.sstorage + return w_frozen + elif space.isinstance_w(w_obj, space.w_set): + w_frz = space.allocate_instance(W_FrozensetObject, space.w_frozenset) + W_FrozensetObject.__init__(w_frz, space, w_obj) + return w_frz else: return None -# helper functions for set operation on dicts - -def _is_eq(ld, rd): - if len(ld) != len(rd): - return False - for w_key in ld: - if w_key not in rd: - return False - return True - -def _difference_dict(space, ld, rd): - result = newset(space) - for w_key in ld: - if w_key not in rd: - result[w_key] = None - return result - -def _difference_dict_update(space, ld, rd): - if ld is rd: - ld.clear() # for the case 'a.difference_update(a)' - else: - for w_key in rd: - try: - del ld[w_key] - except KeyError: - pass - -def _intersection_dict(space, ld, rd): - result = newset(space) - if len(ld) > len(rd): - ld, rd = rd, ld # loop over the smaller dict - for w_key in ld: - if w_key in rd: - result[w_key] = None - return result - -def _isdisjoint_dict(ld, rd): - if len(ld) > len(rd): - ld, rd = rd, ld # loop over the smaller dict - for w_key in ld: - if w_key in rd: - return False - return True - -def _symmetric_difference_dict(space, ld, rd): - result = newset(space) - for w_key in ld: - if w_key not in rd: - result[w_key] = None - for w_key in rd: - if w_key not in ld: - result[w_key] = None - return result - -def _issubset_dict(ldict, rdict): - if len(ldict) > len(rdict): - return False - - for w_key in ldict: - if w_key not in rdict: - return False - return True - - -#end helper functions - def set_update__Set(space, w_left, others_w): """Update a set with the union of itself and another.""" - ld = w_left.setdata for w_other in others_w: if isinstance(w_other, W_BaseSetObject): - ld.update(w_other.setdata) # optimization only + w_left.update(w_other) # optimization only else: for w_key in 
space.listview(w_other): - ld[w_key] = None + w_left.add(w_key) def inplace_or__Set_Set(space, w_left, w_other): - ld, rd = w_left.setdata, w_other.setdata - ld.update(rd) + w_left.update(w_other) return w_left inplace_or__Set_Frozenset = inplace_or__Set_Set @@ -226,10 +987,10 @@ This has no effect if the element is already present. """ - w_left.setdata[w_other] = None + w_left.add(w_other) def set_copy__Set(space, w_set): - return w_set._newobj(space, w_set.setdata.copy()) + return w_set.copy_real() def frozenset_copy__Frozenset(space, w_left): if type(w_left) is W_FrozensetObject: @@ -238,63 +999,51 @@ return set_copy__Set(space, w_left) def set_clear__Set(space, w_left): - w_left.setdata.clear() + w_left.clear() def sub__Set_Set(space, w_left, w_other): - ld, rd = w_left.setdata, w_other.setdata - new_ld = _difference_dict(space, ld, rd) - return w_left._newobj(space, new_ld) + return w_left.difference(w_other) sub__Set_Frozenset = sub__Set_Set sub__Frozenset_Set = sub__Set_Set sub__Frozenset_Frozenset = sub__Set_Set def set_difference__Set(space, w_left, others_w): - result = w_left.setdata - if len(others_w) == 0: - result = result.copy() - for w_other in others_w: - if isinstance(w_other, W_BaseSetObject): - rd = w_other.setdata # optimization only - else: - rd = make_setdata_from_w_iterable(space, w_other) - result = _difference_dict(space, result, rd) - return w_left._newobj(space, result) + result = w_left.copy_real() + set_difference_update__Set(space, result, others_w) + return result frozenset_difference__Frozenset = set_difference__Set def set_difference_update__Set(space, w_left, others_w): - ld = w_left.setdata for w_other in others_w: if isinstance(w_other, W_BaseSetObject): # optimization only - _difference_dict_update(space, ld, w_other.setdata) + w_left.difference_update(w_other) else: - for w_key in space.listview(w_other): - try: - del ld[w_key] - except KeyError: - pass + w_other_as_set = w_left._newobj(space, w_other) + 
w_left.difference_update(w_other_as_set) def inplace_sub__Set_Set(space, w_left, w_other): - ld, rd = w_left.setdata, w_other.setdata - _difference_dict_update(space, ld, rd) + w_left.difference_update(w_other) return w_left inplace_sub__Set_Frozenset = inplace_sub__Set_Set def eq__Set_Set(space, w_left, w_other): # optimization only (the general case is eq__Set_settypedef) - return space.wrap(_is_eq(w_left.setdata, w_other.setdata)) + return space.wrap(w_left.equals(w_other)) eq__Set_Frozenset = eq__Set_Set eq__Frozenset_Frozenset = eq__Set_Set eq__Frozenset_Set = eq__Set_Set def eq__Set_settypedef(space, w_left, w_other): - rd = make_setdata_from_w_iterable(space, w_other) - return space.wrap(_is_eq(w_left.setdata, rd)) + # tested in test_buildinshortcut.py + #XXX do not make new setobject here + w_other_as_set = w_left._newobj(space, w_other) + return space.wrap(w_left.equals(w_other_as_set)) eq__Set_frozensettypedef = eq__Set_settypedef eq__Frozenset_settypedef = eq__Set_settypedef @@ -308,15 +1057,16 @@ eq__Frozenset_ANY = eq__Set_ANY def ne__Set_Set(space, w_left, w_other): - return space.wrap(not _is_eq(w_left.setdata, w_other.setdata)) + return space.wrap(not w_left.equals(w_other)) ne__Set_Frozenset = ne__Set_Set ne__Frozenset_Frozenset = ne__Set_Set ne__Frozenset_Set = ne__Set_Set def ne__Set_settypedef(space, w_left, w_other): - rd = make_setdata_from_w_iterable(space, w_other) - return space.wrap(not _is_eq(w_left.setdata, rd)) + #XXX this is not tested + w_other_as_set = w_left._newobj(space, w_other) + return space.wrap(not w_left.equals(w_other_as_set)) ne__Set_frozensettypedef = ne__Set_settypedef ne__Frozenset_settypedef = ne__Set_settypedef @@ -331,12 +1081,12 @@ def contains__Set_ANY(space, w_left, w_other): try: - return space.newbool(w_other in w_left.setdata) + return space.newbool(w_left.has_key(w_other)) except OperationError, e: if e.match(space, space.w_TypeError): w_f = _convert_set_to_frozenset(space, w_other) if w_f is not None: - 
return space.newbool(w_f in w_left.setdata) + return space.newbool(w_left.has_key(w_f)) raise contains__Frozenset_ANY = contains__Set_ANY @@ -345,19 +1095,23 @@ # optimization only (the general case works too) if space.is_w(w_left, w_other): return space.w_True - ld, rd = w_left.setdata, w_other.setdata - return space.wrap(_issubset_dict(ld, rd)) + if w_left.length() > w_other.length(): + return space.w_False + return space.wrap(w_left.issubset(w_other)) set_issubset__Set_Frozenset = set_issubset__Set_Set frozenset_issubset__Frozenset_Set = set_issubset__Set_Set frozenset_issubset__Frozenset_Frozenset = set_issubset__Set_Set def set_issubset__Set_ANY(space, w_left, w_other): - if space.is_w(w_left, w_other): - return space.w_True + # not checking whether w_left is w_other here, because if that were the + # case the more precise multimethod would have applied. - ld, rd = w_left.setdata, make_setdata_from_w_iterable(space, w_other) - return space.wrap(_issubset_dict(ld, rd)) + w_other_as_set = w_left._newobj(space, w_other) + + if w_left.length() > w_other_as_set.length(): + return space.w_False + return space.wrap(w_left.issubset(w_other_as_set)) frozenset_issubset__Frozenset_ANY = set_issubset__Set_ANY @@ -370,9 +1124,9 @@ # optimization only (the general case works too) if space.is_w(w_left, w_other): return space.w_True - - ld, rd = w_left.setdata, w_other.setdata - return space.wrap(_issubset_dict(rd, ld)) + if w_left.length() < w_other.length(): + return space.w_False + return space.wrap(w_other.issubset(w_left)) set_issuperset__Set_Frozenset = set_issuperset__Set_Set set_issuperset__Frozenset_Set = set_issuperset__Set_Set @@ -382,8 +1136,11 @@ if space.is_w(w_left, w_other): return space.w_True - ld, rd = w_left.setdata, make_setdata_from_w_iterable(space, w_other) - return space.wrap(_issubset_dict(rd, ld)) + w_other_as_set = w_left._newobj(space, w_other) + + if w_left.length() < w_other_as_set.length(): + return space.w_False + return 
space.wrap(w_other_as_set.issubset(w_left)) frozenset_issuperset__Frozenset_ANY = set_issuperset__Set_ANY @@ -395,7 +1152,7 @@ # automatic registration of "lt(x, y)" as "not ge(y, x)" would not give the # correct answer here! def lt__Set_Set(space, w_left, w_other): - if len(w_left.setdata) >= len(w_other.setdata): + if w_left.length() >= w_other.length(): return space.w_False else: return le__Set_Set(space, w_left, w_other) @@ -405,7 +1162,7 @@ lt__Frozenset_Frozenset = lt__Set_Set def gt__Set_Set(space, w_left, w_other): - if len(w_left.setdata) <= len(w_other.setdata): + if w_left.length() <= w_other.length(): return space.w_False else: return ge__Set_Set(space, w_left, w_other) @@ -421,26 +1178,19 @@ Returns True if successfully removed. """ try: - del w_left.setdata[w_item] - return True - except KeyError: - return False + deleted = w_left.remove(w_item) except OperationError, e: if not e.match(space, space.w_TypeError): raise - w_f = _convert_set_to_frozenset(space, w_item) - if w_f is None: - raise + else: + w_f = _convert_set_to_frozenset(space, w_item) + if w_f is None: + raise + deleted = w_left.remove(w_f) - try: - del w_left.setdata[w_f] - return True - except KeyError: - return False - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - return False + if w_left.length() == 0: + w_left.switch_to_empty_strategy() + return deleted def set_discard__Set_ANY(space, w_left, w_item): _discard_from_set(space, w_left, w_item) @@ -454,8 +1204,12 @@ if w_set.hash != 0: return space.wrap(w_set.hash) hash = r_uint(1927868237) - hash *= r_uint(len(w_set.setdata) + 1) - for w_item in w_set.setdata: + hash *= r_uint(w_set.length() + 1) + w_iterator = w_set.iter() + while True: + w_item = w_iterator.next_entry() + if w_item is None: + break h = space.hash_w(w_item) value = (r_uint(h ^ (h << 16) ^ 89869747) * multi) hash = hash ^ value @@ -468,71 +1222,75 @@ return space.wrap(hash) def set_pop__Set(space, w_left): - try: - w_key, _ = 
w_left.setdata.popitem() - except KeyError: - raise OperationError(space.w_KeyError, - space.wrap('pop from an empty set')) - return w_key + return w_left.popitem() def and__Set_Set(space, w_left, w_other): - ld, rd = w_left.setdata, w_other.setdata - new_ld = _intersection_dict(space, ld, rd) - return w_left._newobj(space, new_ld) + new_set = w_left.intersect(w_other) + return new_set and__Set_Frozenset = and__Set_Set and__Frozenset_Set = and__Set_Set and__Frozenset_Frozenset = and__Set_Set -def _intersection_multiple(space, w_left, others_w): - result = w_left.setdata - for w_other in others_w: +def set_intersection__Set(space, w_left, others_w): + #XXX find smarter implementations + others_w = [w_left] + others_w + + # find smallest set in others_w to reduce comparisons + startindex, startlength = 0, -1 + for i in range(len(others_w)): + w_other = others_w[i] + try: + length = space.int_w(space.len(w_other)) + except OperationError, e: + if (e.match(space, space.w_TypeError) or + e.match(space, space.w_AttributeError)): + continue + raise + + if startlength == -1 or length < startlength: + startindex = i + startlength = length + + others_w[startindex], others_w[0] = others_w[0], others_w[startindex] + + result = w_left._newobj(space, others_w[0]) + for i in range(1,len(others_w)): + w_other = others_w[i] if isinstance(w_other, W_BaseSetObject): # optimization only - result = _intersection_dict(space, result, w_other.setdata) + result.intersect_update(w_other) else: - result2 = newset(space) - for w_key in space.listview(w_other): - if w_key in result: - result2[w_key] = None - result = result2 + w_other_as_set = w_left._newobj(space, w_other) + result.intersect_update(w_other_as_set) return result -def set_intersection__Set(space, w_left, others_w): - if len(others_w) == 0: - result = w_left.setdata.copy() - else: - result = _intersection_multiple(space, w_left, others_w) - return w_left._newobj(space, result) - frozenset_intersection__Frozenset = 
set_intersection__Set def set_intersection_update__Set(space, w_left, others_w): - result = _intersection_multiple(space, w_left, others_w) - w_left.setdata = result + result = set_intersection__Set(space, w_left, others_w) + w_left.strategy = result.strategy + w_left.sstorage = result.sstorage + return def inplace_and__Set_Set(space, w_left, w_other): - ld, rd = w_left.setdata, w_other.setdata - new_ld = _intersection_dict(space, ld, rd) - w_left.setdata = new_ld + w_left.intersect_update(w_other) return w_left inplace_and__Set_Frozenset = inplace_and__Set_Set def set_isdisjoint__Set_Set(space, w_left, w_other): # optimization only (the general case works too) - ld, rd = w_left.setdata, w_other.setdata - disjoint = _isdisjoint_dict(ld, rd) - return space.newbool(disjoint) + return space.newbool(w_left.isdisjoint(w_other)) set_isdisjoint__Set_Frozenset = set_isdisjoint__Set_Set set_isdisjoint__Frozenset_Frozenset = set_isdisjoint__Set_Set set_isdisjoint__Frozenset_Set = set_isdisjoint__Set_Set def set_isdisjoint__Set_ANY(space, w_left, w_other): - ld = w_left.setdata + #XXX may be optimized when other strategies are added for w_key in space.listview(w_other): - if w_key in ld: + if w_left.has_key(w_key): return space.w_False return space.w_True @@ -540,9 +1298,8 @@ def set_symmetric_difference__Set_Set(space, w_left, w_other): # optimization only (the general case works too) - ld, rd = w_left.setdata, w_other.setdata - new_ld = _symmetric_difference_dict(space, ld, rd) - return w_left._newobj(space, new_ld) + w_result = w_left.symmetric_difference(w_other) + return w_result set_symmetric_difference__Set_Frozenset = set_symmetric_difference__Set_Set set_symmetric_difference__Frozenset_Set = set_symmetric_difference__Set_Set @@ -556,26 +1313,23 @@ def set_symmetric_difference__Set_ANY(space, w_left, w_other): - ld, rd = w_left.setdata, make_setdata_from_w_iterable(space, w_other) - new_ld = _symmetric_difference_dict(space, ld, rd) - return w_left._newobj(space, 
new_ld) + w_other_as_set = w_left._newobj(space, w_other) + w_result = w_left.symmetric_difference(w_other_as_set) + return w_result frozenset_symmetric_difference__Frozenset_ANY = \ set_symmetric_difference__Set_ANY def set_symmetric_difference_update__Set_Set(space, w_left, w_other): # optimization only (the general case works too) - ld, rd = w_left.setdata, w_other.setdata - new_ld = _symmetric_difference_dict(space, ld, rd) - w_left.setdata = new_ld + w_left.symmetric_difference_update(w_other) set_symmetric_difference_update__Set_Frozenset = \ set_symmetric_difference_update__Set_Set def set_symmetric_difference_update__Set_ANY(space, w_left, w_other): - ld, rd = w_left.setdata, make_setdata_from_w_iterable(space, w_other) - new_ld = _symmetric_difference_dict(space, ld, rd) - w_left.setdata = new_ld + w_other_as_set = w_left._newobj(space, w_other) + w_left.symmetric_difference_update(w_other_as_set) def inplace_xor__Set_Set(space, w_left, w_other): set_symmetric_difference_update__Set_Set(space, w_left, w_other) @@ -584,34 +1338,33 @@ inplace_xor__Set_Frozenset = inplace_xor__Set_Set def or__Set_Set(space, w_left, w_other): - ld, rd = w_left.setdata, w_other.setdata - result = ld.copy() - result.update(rd) - return w_left._newobj(space, result) + w_copy = w_left.copy_real() + w_copy.update(w_other) + return w_copy or__Set_Frozenset = or__Set_Set or__Frozenset_Set = or__Set_Set or__Frozenset_Frozenset = or__Set_Set def set_union__Set(space, w_left, others_w): - result = w_left.setdata.copy() + result = w_left.copy_real() for w_other in others_w: if isinstance(w_other, W_BaseSetObject): - result.update(w_other.setdata) # optimization only + result.update(w_other) # optimization only else: for w_key in space.listview(w_other): - result[w_key] = None - return w_left._newobj(space, result) + result.add(w_key) + return result frozenset_union__Frozenset = set_union__Set def len__Set(space, w_left): - return space.newint(len(w_left.setdata)) + return 
space.newint(w_left.length()) len__Frozenset = len__Set def iter__Set(space, w_left): - return W_SetIterObject(w_left.setdata) + return W_SetIterObject(space, w_left.iter()) iter__Frozenset = iter__Set diff --git a/pypy/objspace/std/settype.py b/pypy/objspace/std/settype.py --- a/pypy/objspace/std/settype.py +++ b/pypy/objspace/std/settype.py @@ -68,7 +68,7 @@ def descr__new__(space, w_settype, __args__): from pypy.objspace.std.setobject import W_SetObject, newset w_obj = space.allocate_instance(W_SetObject, w_settype) - W_SetObject.__init__(w_obj, space, newset(space)) + W_SetObject.__init__(w_obj, space) return w_obj set_typedef = StdTypeDef("set", diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -69,6 +69,14 @@ def str_w(w_self, space): return w_self._value + def listview_str(w_self): + return _create_list_from_string(w_self._value) + +def _create_list_from_string(value): + # need this helper function to allow the jit to look inside and inline + # listview_str + return [s for s in value] + registerimplementation(W_StringObject) W_StringObject.EMPTY = W_StringObject('') diff --git a/pypy/objspace/std/test/test_builtinshortcut.py b/pypy/objspace/std/test/test_builtinshortcut.py --- a/pypy/objspace/std/test/test_builtinshortcut.py +++ b/pypy/objspace/std/test/test_builtinshortcut.py @@ -85,6 +85,20 @@ def setup_class(cls): from pypy import conftest cls.space = conftest.gettestobjspace(**WITH_BUILTINSHORTCUT) + w_fakeint = cls.space.appexec([], """(): + class FakeInt(object): + def __init__(self, value): + self.value = value + def __hash__(self): + return hash(self.value) + + def __eq__(self, other): + if other == self.value: + return True + return False + return FakeInt + """) + cls.w_FakeInt = w_fakeint class AppTestString(test_stringobject.AppTestStringObject): def setup_class(cls): diff --git a/pypy/objspace/std/test/test_dictmultiobject.py 
b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -131,6 +131,45 @@ assert self.space.eq_w(space.call_function(get, w("33")), w(None)) assert self.space.eq_w(space.call_function(get, w("33"), w(44)), w(44)) + def test_fromkeys_fastpath(self): + space = self.space + w = space.wrap + + w_l = self.space.newlist([w("a"),w("b")]) + w_l.getitems = None + w_d = space.call_method(space.w_dict, "fromkeys", w_l) + + assert space.eq_w(w_d.getitem_str("a"), space.w_None) + assert space.eq_w(w_d.getitem_str("b"), space.w_None) + + def test_listview_str_dict(self): + w = self.space.wrap + + w_d = self.space.newdict() + w_d.initialize_content([(w("a"), w(1)), (w("b"), w(2))]) + + assert self.space.listview_str(w_d) == ["a", "b"] + + def test_listview_int_dict(self): + w = self.space.wrap + w_d = self.space.newdict() + w_d.initialize_content([(w(1), w("a")), (w(2), w("b"))]) + + assert self.space.listview_int(w_d) == [1, 2] + + def test_keys_on_string_int_dict(self): + w = self.space.wrap + w_d = self.space.newdict() + w_d.initialize_content([(w(1), w("a")), (w(2), w("b"))]) + + w_l = self.space.call_method(w_d, "keys") + assert sorted(self.space.listview_int(w_l)) == [1,2] + + w_d = self.space.newdict() + w_d.initialize_content([(w("a"), w(1)), (w("b"), w(6))]) + + w_l = self.space.call_method(w_d, "keys") + assert sorted(self.space.listview_str(w_l)) == ["a", "b"] class AppTest_DictObject: def setup_class(cls): @@ -793,7 +832,9 @@ return x == y eq_w = eq def newlist(self, l): - return [] + return l + def newlist_str(self, l): + return l DictObjectCls = W_DictMultiObject def type(self, w_obj): if isinstance(w_obj, FakeString): @@ -933,7 +974,7 @@ def test_keys(self): self.fill_impl() - keys = self.impl.keys() + keys = self.impl.w_keys() # wrapped lists = lists in the fake space keys.sort() assert keys == [self.string, self.string2] self.check_not_devolved() @@ -1011,8 
+1052,8 @@ d.setitem("s", 12) d.delitem(F()) - assert "s" not in d.keys() - assert F() not in d.keys() + assert "s" not in d.w_keys() + assert F() not in d.w_keys() class TestStrDictImplementation(BaseTestRDictImplementation): StrategyClass = StringDictStrategy diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -486,6 +486,14 @@ list.__init__(l, ['a', 'b', 'c']) assert l is l0 assert l == ['a', 'b', 'c'] + list.__init__(l) + assert l == [] + + def test_explicit_new_init_more_cases(self): + for assignment in [[], (), [3], ["foo"]]: + l = [1, 2] + l.__init__(assignment) + assert l == list(assignment) def test_extend_list(self): l = l0 = [1] @@ -1173,6 +1181,20 @@ assert l == [] assert list(g) == [] + def test_uses_custom_iterator(self): + # obscure corner case: space.listview*() must not shortcut subclasses + # of dicts, because the OrderedDict in the stdlib relies on this. + # we extend the use case to lists and sets, i.e. all types that have + # strategies, to avoid surprizes depending on the strategy. 
+ for base, arg in [(list, []), (list, [5]), (list, ['x']), + (set, []), (set, [5]), (set, ['x']), + (dict, []), (dict, [(5,6)]), (dict, [('x',7)])]: + print base, arg + class SubClass(base): + def __iter__(self): + return iter("foobar") + assert list(SubClass(arg)) == ['f', 'o', 'o', 'b', 'a', 'r'] + class AppTestForRangeLists(AppTestW_ListObject): def setup_class(cls): diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -420,7 +420,7 @@ def test_listview_str(self): space = self.space - assert space.listview_str(space.wrap("a")) is None + assert space.listview_str(space.wrap(1)) == None w_l = self.space.newlist([self.space.wrap('a'), self.space.wrap('b')]) assert space.listview_str(w_l) == ["a", "b"] @@ -463,6 +463,44 @@ w_res = listobject.list_pop__List_ANY(space, w_l, space.w_None) # does not crash assert space.unwrap(w_res) == 3 + def test_create_list_from_set(self): + from pypy.objspace.std.setobject import W_SetObject + from pypy.objspace.std.setobject import _initialize_set + + space = self.space + w = space.wrap + + w_l = W_ListObject(space, [space.wrap(1), space.wrap(2), space.wrap(3)]) + + w_set = W_SetObject(self.space) + _initialize_set(self.space, w_set, w_l) + w_set.iter = None # make sure fast path is used + + w_l2 = W_ListObject(space, []) + space.call_method(w_l2, "__init__", w_set) + + w_l2.sort(False) + assert space.eq_w(w_l, w_l2) + + w_l = W_ListObject(space, [space.wrap("a"), space.wrap("b"), space.wrap("c")]) + _initialize_set(self.space, w_set, w_l) + + space.call_method(w_l2, "__init__", w_set) + + w_l2.sort(False) + assert space.eq_w(w_l, w_l2) + + + def test_listview_str_list(self): + space = self.space + w_l = W_ListObject(space, [space.wrap("a"), space.wrap("b")]) + assert self.space.listview_str(w_l) == ["a", "b"] + + def test_listview_int_list(self): + space = self.space + 
w_l = W_ListObject(space, [space.wrap(1), space.wrap(2), space.wrap(3)]) + assert self.space.listview_int(w_l) == [1, 2, 3] + class TestW_ListStrategiesDisabled: def setup_class(cls): diff --git a/pypy/objspace/std/test/test_setobject.py b/pypy/objspace/std/test/test_setobject.py --- a/pypy/objspace/std/test/test_setobject.py +++ b/pypy/objspace/std/test/test_setobject.py @@ -8,12 +8,14 @@ is not too wrong. """ import py.test -from pypy.objspace.std.setobject import W_SetObject, W_FrozensetObject +from pypy.objspace.std.setobject import W_SetObject, W_FrozensetObject, IntegerSetStrategy from pypy.objspace.std.setobject import _initialize_set -from pypy.objspace.std.setobject import newset, make_setdata_from_w_iterable +from pypy.objspace.std.setobject import newset from pypy.objspace.std.setobject import and__Set_Set from pypy.objspace.std.setobject import set_intersection__Set from pypy.objspace.std.setobject import eq__Set_Set +from pypy.conftest import gettestobjspace +from pypy.objspace.std.listobject import W_ListObject letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' @@ -29,12 +31,11 @@ self.false = self.space.w_False def test_and(self): - s = W_SetObject(self.space, newset(self.space)) + s = W_SetObject(self.space) _initialize_set(self.space, s, self.word) - t0 = W_SetObject(self.space, newset(self.space)) + t0 = W_SetObject(self.space) _initialize_set(self.space, t0, self.otherword) - t1 = W_FrozensetObject(self.space, - make_setdata_from_w_iterable(self.space, self.otherword)) + t1 = W_FrozensetObject(self.space, self.otherword) r0 = and__Set_Set(self.space, s, t0) r1 = and__Set_Set(self.space, s, t1) assert eq__Set_Set(self.space, r0, r1) == self.true @@ -42,9 +43,9 @@ assert eq__Set_Set(self.space, r0, sr) == self.true def test_compare(self): - s = W_SetObject(self.space, newset(self.space)) + s = W_SetObject(self.space) _initialize_set(self.space, s, self.word) - t = W_SetObject(self.space, newset(self.space)) + t = 
W_SetObject(self.space) _initialize_set(self.space, t, self.word) assert self.space.eq_w(s,t) u = self.space.wrap(set('simsalabim')) @@ -54,7 +55,247 @@ s = self.space.newset() assert self.space.str_w(self.space.repr(s)) == 'set([])' + def test_intersection_order(self): + # theses tests make sure that intersection is done in the correct order + # (smallest first) + space = self.space + a = W_SetObject(self.space) + _initialize_set(self.space, a, self.space.wrap("abcdefg")) + a.intersect = None + + b = W_SetObject(self.space) + _initialize_set(self.space, b, self.space.wrap("abc")) + + result = set_intersection__Set(space, a, [b]) + assert space.is_true(self.space.eq(result, W_SetObject(space, self.space.wrap("abc")))) + + c = W_SetObject(self.space) + _initialize_set(self.space, c, self.space.wrap("e")) + + d = W_SetObject(self.space) + _initialize_set(self.space, d, self.space.wrap("ab")) + + # if ordering works correct we should start with set e + a.get_storage_copy = None + b.get_storage_copy = None + d.get_storage_copy = None + + result = set_intersection__Set(space, a, [d,c,b]) + assert space.is_true(self.space.eq(result, W_SetObject(space, self.space.wrap("")))) + + def test_create_set_from_list(self): + from pypy.objspace.std.setobject import ObjectSetStrategy, StringSetStrategy + from pypy.objspace.std.floatobject import W_FloatObject + from pypy.objspace.std.model import W_Object + + w = self.space.wrap + intstr = self.space.fromcache(IntegerSetStrategy) + tmp_func = intstr.get_storage_from_list + # test if get_storage_from_list is no longer used + intstr.get_storage_from_list = None + + w_list = W_ListObject(self.space, [w(1), w(2), w(3)]) + w_set = W_SetObject(self.space) + _initialize_set(self.space, w_set, w_list) + assert w_set.strategy is intstr + assert intstr.unerase(w_set.sstorage) == {1:None, 2:None, 3:None} + + w_list = W_ListObject(self.space, [w("1"), w("2"), w("3")]) + w_set = W_SetObject(self.space) + _initialize_set(self.space, w_set, 
w_list) + assert w_set.strategy is self.space.fromcache(StringSetStrategy) + assert w_set.strategy.unerase(w_set.sstorage) == {"1":None, "2":None, "3":None} + + w_list = W_ListObject(self.space, [w("1"), w(2), w("3")]) + w_set = W_SetObject(self.space) + _initialize_set(self.space, w_set, w_list) + assert w_set.strategy is self.space.fromcache(ObjectSetStrategy) + for item in w_set.strategy.unerase(w_set.sstorage): + assert isinstance(item, W_Object) + + w_list = W_ListObject(self.space, [w(1.0), w(2.0), w(3.0)]) + w_set = W_SetObject(self.space) + _initialize_set(self.space, w_set, w_list) + assert w_set.strategy is self.space.fromcache(ObjectSetStrategy) + for item in w_set.strategy.unerase(w_set.sstorage): + assert isinstance(item, W_FloatObject) + + # changed cached object, need to change it back for other tests to pass + intstr.get_storage_from_list = tmp_func + + def test_listview_str_int_on_set(self): + w = self.space.wrap + + w_a = W_SetObject(self.space) + _initialize_set(self.space, w_a, w("abcdefg")) + assert sorted(self.space.listview_str(w_a)) == list("abcdefg") + assert self.space.listview_int(w_a) is None + + w_b = W_SetObject(self.space) + _initialize_set(self.space, w_b, self.space.newlist([w(1),w(2),w(3),w(4),w(5)])) + assert sorted(self.space.listview_int(w_b)) == [1,2,3,4,5] + assert self.space.listview_str(w_b) is None + class AppTestAppSetTest: + + def setup_class(self): + self.space = gettestobjspace() + w_fakeint = self.space.appexec([], """(): + class FakeInt(object): + def __init__(self, value): + self.value = value + def __hash__(self): + return hash(self.value) + + def __eq__(self, other): + if other == self.value: + return True + return False + return FakeInt + """) + self.w_FakeInt = w_fakeint + + def test_fakeint(self): + f1 = self.FakeInt(4) + assert f1 == 4 + assert hash(f1) == hash(4) + + def test_simple(self): + a = set([1,2,3]) + b = set() + b.add(4) + c = a.union(b) + assert c == set([1,2,3,4]) + + def test_generator(self): + 
def foo(): + for i in [1,2,3,4,5]: + yield i + b = set(foo()) + assert b == set([1,2,3,4,5]) + + a = set(x for x in [1,2,3]) + assert a == set([1,2,3]) + + def test_generator2(self): + def foo(): + for i in [1,2,3]: + yield i + class A(set): + pass + a = A([1,2,3,4,5]) + b = a.difference(foo()) + assert b == set([4,5]) + + def test_or(self): + a = set([0,1,2]) + b = a | set([1,2,3]) + assert b == set([0,1,2,3]) + + # test inplace or + a |= set([1,2,3]) + assert a == b + + def test_clear(self): + a = set([1,2,3]) + a.clear() + assert a == set() + + def test_sub(self): + a = set([1,2,3,4,5]) + b = set([2,3,4]) + a - b == [1,5] + a.__sub__(b) == [1,5] + + #inplace sub + a = set([1,2,3,4]) + b = set([1,4]) + a -= b + assert a == set([2,3]) + + def test_issubset(self): + a = set([1,2,3,4]) + b = set([2,3]) + assert b.issubset(a) + c = [1,2,3,4] + assert b.issubset(c) + + a = set([1,2,3,4]) + b = set(['1','2']) + assert not b.issubset(a) + + def test_issuperset(self): + a = set([1,2,3,4]) + b = set([2,3]) + assert a.issuperset(b) + c = [2,3] + assert a.issuperset(c) + + c = [1,1,1,1,1] + assert a.issuperset(c) + assert set([1,1,1,1,1]).issubset(a) + + a = set([1,2,3]) + assert a.issuperset(a) + assert not a.issuperset(set([1,2,3,4,5])) + + def test_inplace_and(test): + a = set([1,2,3,4]) + b = set([0,2,3,5,6]) + a &= b + assert a == set([2,3]) + + def test_discard_remove(self): + a = set([1,2,3,4,5]) + a.remove(1) + assert a == set([2,3,4,5]) + a.discard(2) + assert a == set([3,4,5]) + + raises(KeyError, "a.remove(6)") + + def test_pop(self): + b = set() + raises(KeyError, "b.pop()") + + a = set([1,2,3,4,5]) + for i in xrange(5): + a.pop() + assert a == set() + raises(KeyError, "a.pop()") + + def test_symmetric_difference(self): + a = set([1,2,3]) + b = set([3,4,5]) + c = a.symmetric_difference(b) + assert c == set([1,2,4,5]) + + a = set([1,2,3]) + b = [3,4,5] + c = a.symmetric_difference(b) + assert c == set([1,2,4,5]) + + a = set([1,2,3]) + b = set('abc') + c = 
a.symmetric_difference(b) + assert c == set([1,2,3,'a','b','c']) + + def test_symmetric_difference_update(self): + a = set([1,2,3]) + b = set([3,4,5]) + a.symmetric_difference_update(b) + assert a == set([1,2,4,5]) + + a = set([1,2,3]) + b = [3,4,5] + a.symmetric_difference_update(b) + assert a == set([1,2,4,5]) + + a = set([1,2,3]) + b = set([3,4,5]) + a ^= b + assert a == set([1,2,4,5]) + def test_subtype(self): class subset(set):pass a = subset() @@ -131,6 +372,8 @@ assert (set('abc') != set('abcd')) assert (frozenset('abc') != frozenset('abcd')) assert (frozenset('abc') != set('abcd')) + assert set() != set('abc') + assert set('abc') != set('abd') def test_libpython_equality(self): for thetype in [frozenset, set]: @@ -178,6 +421,9 @@ s1 = set('abc') s1.update('d', 'ef', frozenset('g')) assert s1 == set('abcdefg') + s1 = set() + s1.update(set('abcd')) + assert s1 == set('abcd') def test_recursive_repr(self): class A(object): @@ -330,6 +576,7 @@ assert not set([1,2,5]).isdisjoint(frozenset([4,5,6])) assert not set([1,2,5]).isdisjoint([4,5,6]) assert not set([1,2,5]).isdisjoint((4,5,6)) + assert set([1,2,3]).isdisjoint(set([3.5,4.0])) def test_intersection(self): assert set([1,2,3]).intersection(set([2,3,4])) == set([2,3]) @@ -347,6 +594,35 @@ assert s.intersection() == s assert s.intersection() is not s + def test_intersection_swap(self): + s1 = s3 = set([1,2,3,4,5]) + s2 = set([2,3,6,7]) + s1 &= s2 + assert s1 == set([2,3]) + assert s3 == set([2,3]) + + def test_intersection_generator(self): + def foo(): + for i in range(5): + yield i + + s1 = s2 = set([1,2,3,4,5,6]) + assert s1.intersection(foo()) == set([1,2,3,4]) + s1.intersection_update(foo()) + assert s1 == set([1,2,3,4]) + assert s2 == set([1,2,3,4]) + + def test_intersection_string(self): + s = set([1,2,3]) + o = 'abc' + assert s.intersection(o) == set() + + def test_intersection_float(self): + a = set([1,2,3]) + b = set([3.0,4.0,5.0]) + c = a.intersection(b) + assert c == set([3.0]) + def 
test_difference(self): assert set([1,2,3]).difference(set([2,3,4])) == set([1]) assert set([1,2,3]).difference(frozenset([2,3,4])) == set([1]) @@ -361,6 +637,9 @@ s = set([1,2,3]) assert s.difference() == s assert s.difference() is not s + assert set([1,2,3]).difference(set([2,3,4,'5'])) == set([1]) + assert set([1,2,3,'5']).difference(set([2,3,4])) == set([1,'5']) + assert set().difference(set([1,2,3])) == set() def test_intersection_update(self): s = set([1,2,3,4,7]) @@ -381,3 +660,250 @@ assert s == set([2,3]) s.difference_update(s) assert s == set([]) + + def test_empty_empty(self): + assert set() == set([]) + + def test_empty_difference(self): + e = set() + x = set([1,2,3]) + assert e.difference(x) == set() + assert x.difference(e) == x + + e.difference_update(x) + assert e == set() + x.difference_update(e) + assert x == set([1,2,3]) + + assert e.symmetric_difference(x) == x + assert x.symmetric_difference(e) == x + + e.symmetric_difference_update(e) + assert e == e + e.symmetric_difference_update(x) + assert e == x + + x.symmetric_difference_update(set()) + assert x == set([1,2,3]) + + def test_fastpath_with_strategies(self): + a = set([1,2,3]) + b = set(["a","b","c"]) + assert a.difference(b) == a + assert b.difference(a) == b + + a = set([1,2,3]) + b = set(["a","b","c"]) + assert a.intersection(b) == set() + assert b.intersection(a) == set() + + a = set([1,2,3]) + b = set(["a","b","c"]) + assert not a.issubset(b) + assert not b.issubset(a) + + a = set([1,2,3]) + b = set(["a","b","c"]) + assert a.isdisjoint(b) + assert b.isdisjoint(a) + + def test_empty_intersect(self): + e = set() + x = set([1,2,3]) + assert e.intersection(x) == e + assert x.intersection(e) == e + assert e & x == e + assert x & e == e + + e.intersection_update(x) + assert e == set() + e &= x + assert e == set() + x.intersection_update(e) + assert x == set() + + def test_empty_issuper(self): + e = set() + x = set([1,2,3]) + assert e.issuperset(e) == True + assert e.issuperset(x) == False + 
assert x.issuperset(e) == True + + assert e.issuperset(set()) + assert e.issuperset([]) + + def test_empty_issubset(self): + e = set() + x = set([1,2,3]) + assert e.issubset(e) == True + assert e.issubset(x) == True + assert x.issubset(e) == False + assert e.issubset([]) + + def test_empty_isdisjoint(self): + e = set() + x = set([1,2,3]) + assert e.isdisjoint(e) == True + assert e.isdisjoint(x) == True + assert x.isdisjoint(e) == True + + def test_empty_unhashable(self): + s = set() + raises(TypeError, s.difference, [[]]) + raises(TypeError, s.difference_update, [[]]) + raises(TypeError, s.intersection, [[]]) + raises(TypeError, s.intersection_update, [[]]) + raises(TypeError, s.symmetric_difference, [[]]) + raises(TypeError, s.symmetric_difference_update, [[]]) + raises(TypeError, s.update, [[]]) + + def test_super_with_generator(self): + def foo(): + for i in [1,2,3]: + yield i + set([1,2,3,4,5]).issuperset(foo()) + + def test_isdisjoint_with_generator(self): + def foo(): + for i in [1,2,3]: + yield i + set([1,2,3,4,5]).isdisjoint(foo()) + + def test_fakeint_and_equals(self): + s1 = set([1,2,3,4]) + s2 = set([1,2,self.FakeInt(3), 4]) + assert s1 == s2 + + def test_fakeint_and_discard(self): + # test with object strategy + s = set([1, 2, 'three', 'four']) + s.discard(self.FakeInt(2)) + assert s == set([1, 'three', 'four']) + + s.remove(self.FakeInt(1)) + assert s == set(['three', 'four']) + raises(KeyError, s.remove, self.FakeInt(16)) + + # test with int strategy + s = set([1,2,3,4]) + s.discard(self.FakeInt(4)) + assert s == set([1,2,3]) + s.remove(self.FakeInt(3)) + assert s == set([1,2]) + raises(KeyError, s.remove, self.FakeInt(16)) + + def test_fakeobject_and_has_key(self): + s = set([1,2,3,4,5]) + assert 5 in s + assert self.FakeInt(5) in s + + def test_fakeobject_and_pop(self): + s = set([1,2,3,self.FakeInt(4),5]) + assert s.pop() + assert s.pop() + assert s.pop() + assert s.pop() + assert s.pop() + assert s == set([]) + + def 
test_fakeobject_and_difference(self): + s = set([1,2,'3',4]) + s.difference_update([self.FakeInt(1), self.FakeInt(2)]) + assert s == set(['3',4]) + + s = set([1,2,3,4]) + s.difference_update([self.FakeInt(1), self.FakeInt(2)]) + assert s == set([3,4]) + + def test_frozenset_behavior(self): + s = set([1,2,3,frozenset([4])]) + raises(TypeError, s.difference_update, [1,2,3,set([4])]) + + s = set([1,2,3,frozenset([4])]) + s.discard(set([4])) + assert s == set([1,2,3]) + + def test_discard_unhashable(self): + s = set([1,2,3,4]) + raises(TypeError, s.discard, [1]) + + def test_discard_evil_compare(self): + class Evil(object): + def __init__(self, value): + self.value = value + def __hash__(self): + return hash(self.value) + def __eq__(self, other): + if isinstance(other, frozenset): + raise TypeError + if other == self.value: + return True + return False + s = set([1,2, Evil(frozenset([1]))]) + raises(TypeError, s.discard, set([1])) + + def test_create_set_from_set(self): + # no sharing + x = set([1,2,3]) + y = set(x) + a = x.pop() + assert y == set([1,2,3]) + assert len(x) == 2 + assert x.union(set([a])) == y + + def test_never_change_frozenset(self): + a = frozenset([1,2]) + b = a.copy() + assert a is b + + a = frozenset([1,2]) + b = a.union(set([3,4])) + assert b == set([1,2,3,4]) + assert a == set([1,2]) + + a = frozenset() + b = a.union(set([3,4])) + assert b == set([3,4]) + assert a == set() + + a = frozenset([1,2])#multiple + b = a.union(set([3,4]),[5,6]) + assert b == set([1,2,3,4,5,6]) + assert a == set([1,2]) + + a = frozenset([1,2,3]) + b = a.difference(set([3,4,5])) + assert b == set([1,2]) + assert a == set([1,2,3]) + + a = frozenset([1,2,3])#multiple + b = a.difference(set([3]), [2]) + assert b == set([1]) + assert a == set([1,2,3]) + + a = frozenset([1,2,3]) + b = a.symmetric_difference(set([3,4,5])) + assert b == set([1,2,4,5]) + assert a == set([1,2,3]) + + a = frozenset([1,2,3]) + b = a.intersection(set([3,4,5])) + assert b == set([3]) + assert a == 
set([1,2,3]) + + a = frozenset([1,2,3])#multiple + b = a.intersection(set([2,3,4]), [2]) + assert b == set([2]) + assert a == set([1,2,3]) + + raises(AttributeError, "frozenset().update()") + raises(AttributeError, "frozenset().difference_update()") + raises(AttributeError, "frozenset().symmetric_difference_update()") + raises(AttributeError, "frozenset().intersection_update()") + + def test_intersection_obj(self): + class Obj: + def __getitem__(self, i): + return [5, 3, 4][i] + s = set([10,3,2]).intersection(Obj()) + assert list(s) == [3] diff --git a/pypy/objspace/std/test/test_setstrategies.py b/pypy/objspace/std/test/test_setstrategies.py new file mode 100644 --- /dev/null +++ b/pypy/objspace/std/test/test_setstrategies.py @@ -0,0 +1,107 @@ +from pypy.objspace.std.setobject import W_SetObject +from pypy.objspace.std.setobject import IntegerSetStrategy, ObjectSetStrategy, EmptySetStrategy +from pypy.objspace.std.listobject import W_ListObject + +class TestW_SetStrategies: + + def wrapped(self, l): + return W_ListObject(self.space, [self.space.wrap(x) for x in l]) + + def test_from_list(self): + s = W_SetObject(self.space, self.wrapped([1,2,3,4,5])) + assert s.strategy is self.space.fromcache(IntegerSetStrategy) + + s = W_SetObject(self.space, self.wrapped([1,"two",3,"four",5])) + assert s.strategy is self.space.fromcache(ObjectSetStrategy) + + s = W_SetObject(self.space) + assert s.strategy is self.space.fromcache(EmptySetStrategy) + + s = W_SetObject(self.space, self.wrapped([])) + assert s.strategy is self.space.fromcache(EmptySetStrategy) + + def test_switch_to_object(self): + s = W_SetObject(self.space, self.wrapped([1,2,3,4,5])) + s.add(self.space.wrap("six")) + assert s.strategy is self.space.fromcache(ObjectSetStrategy) + + s1 = W_SetObject(self.space, self.wrapped([1,2,3,4,5])) + s2 = W_SetObject(self.space, self.wrapped(["six", "seven"])) + s1.update(s2) + assert s1.strategy is self.space.fromcache(ObjectSetStrategy) + + def 
test_symmetric_difference(self): + s1 = W_SetObject(self.space, self.wrapped([1,2,3,4,5])) + s2 = W_SetObject(self.space, self.wrapped(["six", "seven"])) + s1.symmetric_difference_update(s2) + assert s1.strategy is self.space.fromcache(ObjectSetStrategy) + + def test_intersection(self): + s1 = W_SetObject(self.space, self.wrapped([1,2,3,4,5])) + s2 = W_SetObject(self.space, self.wrapped([4,5, "six", "seven"])) + s3 = s1.intersect(s2) + skip("for now intersection with ObjectStrategy always results in another ObjectStrategy") + assert s3.strategy is self.space.fromcache(IntegerSetStrategy) + + def test_clear(self): + s1 = W_SetObject(self.space, self.wrapped([1,2,3,4,5])) + s1.clear() + assert s1.strategy is self.space.fromcache(EmptySetStrategy) + + def test_remove(self): + from pypy.objspace.std.setobject import set_remove__Set_ANY + s1 = W_SetObject(self.space, self.wrapped([1])) + set_remove__Set_ANY(self.space, s1, self.space.wrap(1)) + assert s1.strategy is self.space.fromcache(EmptySetStrategy) + + def test_union(self): + from pypy.objspace.std.setobject import set_union__Set + s1 = W_SetObject(self.space, self.wrapped([1,2,3,4,5])) + s2 = W_SetObject(self.space, self.wrapped([4,5,6,7])) + s3 = W_SetObject(self.space, self.wrapped([4,'5','6',7])) + s4 = set_union__Set(self.space, s1, [s2]) + s5 = set_union__Set(self.space, s1, [s3]) + assert s4.strategy is self.space.fromcache(IntegerSetStrategy) + assert s5.strategy is self.space.fromcache(ObjectSetStrategy) + + def test_discard(self): + class FakeInt(object): + def __init__(self, value): + self.value = value + def __hash__(self): + return hash(self.value) + def __eq__(self, other): + if other == self.value: + return True + return False + + from pypy.objspace.std.setobject import set_discard__Set_ANY + + s1 = W_SetObject(self.space, self.wrapped([1,2,3,4,5])) + set_discard__Set_ANY(self.space, s1, self.space.wrap("five")) + skip("currently not supported") + assert s1.strategy is 
self.space.fromcache(IntegerSetStrategy) + + set_discard__Set_ANY(self.space, s1, self.space.wrap(FakeInt(5))) + assert s1.strategy is self.space.fromcache(ObjectSetStrategy) + + def test_has_key(self): + class FakeInt(object): + def __init__(self, value): + self.value = value + def __hash__(self): + return hash(self.value) + def __eq__(self, other): + if other == self.value: + return True + return False + + from pypy.objspace.std.setobject import set_discard__Set_ANY + + s1 = W_SetObject(self.space, self.wrapped([1,2,3,4,5])) + assert not s1.has_key(self.space.wrap("five")) + skip("currently not supported") + assert s1.strategy is self.space.fromcache(IntegerSetStrategy) + + assert s1.has_key(self.space.wrap(FakeInt(2))) + assert s1.strategy is self.space.fromcache(ObjectSetStrategy) diff --git a/pypy/objspace/std/test/test_stringobject.py b/pypy/objspace/std/test/test_stringobject.py --- a/pypy/objspace/std/test/test_stringobject.py +++ b/pypy/objspace/std/test/test_stringobject.py @@ -85,6 +85,10 @@ w_slice = space.newslice(w(1), w_None, w(2)) assert self.space.eq_w(space.getitem(w_str, w_slice), w('el')) + def test_listview_str(self): + w_str = self.space.wrap('abcd') + assert self.space.listview_str(w_str) == list("abcd") + class AppTestStringObject: def test_format_wrongchar(self): From noreply at buildbot.pypy.org Mon Mar 26 22:47:25 2012 From: noreply at buildbot.pypy.org (ctismer) Date: Mon, 26 Mar 2012 22:47:25 +0200 (CEST) Subject: [pypy-commit] pypy default: all asmgcroot tests disabled for MSVC Message-ID: <20120326204725.776A3820D9@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: Changeset: r54010:4dcfa3206067 Date: 2012-03-26 22:46 +0200 http://bitbucket.org/pypy/pypy/changeset/4dcfa3206067/ Log: all asmgcroot tests disabled for MSVC diff --git a/pypy/translator/c/gcc/test/test_asmgcroot.py b/pypy/translator/c/gcc/test/test_asmgcroot.py --- a/pypy/translator/c/gcc/test/test_asmgcroot.py +++ b/pypy/translator/c/gcc/test/test_asmgcroot.py 
@@ -6,6 +6,7 @@ from pypy.annotation.listdef import s_list_of_strings from pypy import conftest from pypy.translator.tool.cbuild import ExternalCompilationInfo +from pypy.translator.platform import platform as compiler from pypy.rpython.lltypesystem import lltype, rffi from pypy.rlib.entrypoint import entrypoint, secondary_entrypoints from pypy.rpython.lltypesystem.lloperation import llop @@ -17,6 +18,8 @@ @classmethod def make_config(cls): + if compiler.name == "msvc": + py.test.skip("all asmgcroot tests disabled for MSVC") from pypy.config.pypyoption import get_pypy_config config = get_pypy_config(translating=True) config.translation.gc = cls.gcpolicy From noreply at buildbot.pypy.org Tue Mar 27 00:34:42 2012 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Mar 2012 00:34:42 +0200 (CEST) Subject: [pypy-commit] pypy default: move a loop outside Message-ID: <20120326223442.8075C820D9@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r54011:efe9b0fe618a Date: 2012-03-27 00:34 +0200 http://bitbucket.org/pypy/pypy/changeset/efe9b0fe618a/ Log: move a loop outside diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -114,9 +114,12 @@ if step == 1 and 0 <= start <= stop: newdata = data[start:stop] else: - newdata = [data[start + i*step] for i in range(slicelength)] + newdata = _getitem_slice_multistep(data, start, step, slicelength) return W_BytearrayObject(newdata) +def _getitem_slice_multistep(data, start, step, slicelength): + return [data[start + i*step] for i in range(slicelength)] + def contains__Bytearray_Int(space, w_bytearray, w_char): char = space.int_w(w_char) if not 0 <= char < 256: From noreply at buildbot.pypy.org Tue Mar 27 00:37:16 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 27 Mar 2012 00:37:16 +0200 (CEST) Subject: [pypy-commit] pypy float-bytes-2: Added the operation for 
RPython. Message-ID: <20120326223716.CFD4A820D9@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: float-bytes-2 Changeset: r54012:9878f8e8d93f Date: 2012-03-26 17:57 -0400 http://bitbucket.org/pypy/pypy/changeset/9878f8e8d93f/ Log: Added the operation for RPython. diff --git a/pypy/rlib/longlong2float.py b/pypy/rlib/longlong2float.py --- a/pypy/rlib/longlong2float.py +++ b/pypy/rlib/longlong2float.py @@ -21,7 +21,7 @@ FLOAT_ARRAY_PTR = lltype.Ptr(lltype.Array(rffi.FLOAT)) # these definitions are used only in tests, when not translated -def longlong2float_emulator(llval): +def longlong2float(llval): with lltype.scoped_alloc(DOUBLE_ARRAY_PTR.TO, 1) as d_array: ll_array = rffi.cast(LONGLONG_ARRAY_PTR, d_array) ll_array[0] = llval @@ -51,12 +51,6 @@ eci = ExternalCompilationInfo(includes=['string.h', 'assert.h'], post_include_bits=[""" -static double pypy__longlong2float(long long x) { - double dd; - assert(sizeof(double) == 8 && sizeof(long long) == 8); - memcpy(&dd, &x, 8); - return dd; -} static float pypy__uint2singlefloat(unsigned int x) { float ff; assert(sizeof(float) == 4 && sizeof(unsigned int) == 4); @@ -71,12 +65,6 @@ } """]) -longlong2float = rffi.llexternal( - "pypy__longlong2float", [rffi.LONGLONG], rffi.DOUBLE, - _callable=longlong2float_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True, sandboxsafe=True, - oo_primitive="pypy__longlong2float") - uint2singlefloat = rffi.llexternal( "pypy__uint2singlefloat", [rffi.UINT], rffi.FLOAT, _callable=uint2singlefloat_emulator, compilation_info=eci, @@ -99,4 +87,15 @@ def specialize_call(self, hop): [v_float] = hop.inputargs(lltype.Float) - return hop.genop("convert_float_bytes_to_longlong", [v_float], resulttype=hop.r_result) + return hop.genop("convert_float_bytes_to_longlong", [v_float], resulttype=lltype.SignedLongLong) + +class LongLong2FloatEntry(ExtRegistryEntry): + _about_ = longlong2float + + def compute_result_annotation(self, s_longlong): + assert 
annmodel.SomeInteger(knowntype=r_int64).contains(s_longlong) + return annmodel.SomeFloat() + + def specialize_call(self, hop): + [v_longlong] = hop.inputargs(lltype.SignedLongLong) + return hop.genop("convert_longlong_bytes_to_float", [v_longlong], resulttype=lltype.Float) diff --git a/pypy/rlib/test/test_longlong2float.py b/pypy/rlib/test/test_longlong2float.py --- a/pypy/rlib/test/test_longlong2float.py +++ b/pypy/rlib/test/test_longlong2float.py @@ -2,6 +2,7 @@ from pypy.rlib.longlong2float import longlong2float, float2longlong from pypy.rlib.longlong2float import uint2singlefloat, singlefloat2uint from pypy.rlib.rarithmetic import r_singlefloat +from pypy.rpython.test.test_llinterp import interpret def fn(f1): @@ -31,6 +32,11 @@ res = fn2(x) assert repr(res) == repr(x) +def test_interpreted(): + for x in enum_floats(): + res = interpret(fn, [x]) + assert repr(res) == repr(x) + # ____________________________________________________________ def fnsingle(f1): diff --git a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py --- a/pypy/rpython/lltypesystem/lloperation.py +++ b/pypy/rpython/lltypesystem/lloperation.py @@ -350,6 +350,7 @@ 'truncate_longlong_to_int':LLOp(canfold=True), 'force_cast': LLOp(sideeffects=False), # only for rffi.cast() 'convert_float_bytes_to_longlong': LLOp(canfold=True), + 'convert_longlong_bytes_to_float': LLOp(canfold=True), # __________ pointer operations __________ diff --git a/pypy/rpython/lltypesystem/opimpl.py b/pypy/rpython/lltypesystem/opimpl.py --- a/pypy/rpython/lltypesystem/opimpl.py +++ b/pypy/rpython/lltypesystem/opimpl.py @@ -431,6 +431,10 @@ from pypy.rlib.longlong2float import float2longlong return float2longlong(a) +def op_convert_longlong_bytes_to_float(a): + from pypy.rlib.longlong2float import longlong2float + return longlong2float(a) + def op_unichar_eq(x, y): assert isinstance(x, unicode) and len(x) == 1 diff --git a/pypy/translator/c/src/float.h b/pypy/translator/c/src/float.h --- 
a/pypy/translator/c/src/float.h +++ b/pypy/translator/c/src/float.h @@ -43,5 +43,6 @@ #define OP_CAST_FLOAT_TO_LONGLONG(x,r) r = (long long)(x) #define OP_CAST_FLOAT_TO_ULONGLONG(x,r) r = (unsigned long long)(x) #define OP_CONVERT_FLOAT_BYTES_TO_LONGLONG(x,r) memcpy(&r, &x, sizeof(double)) +#define OP_CONVERT_LONGLONG_BYTES_TO_FLOAT(x,r) memcpy(&r, &x, sizeof(long long)) #endif From noreply at buildbot.pypy.org Tue Mar 27 00:37:18 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 27 Mar 2012 00:37:18 +0200 (CEST) Subject: [pypy-commit] pypy float-bytes-2: JIT support, x86-64 now, next 32-bit. Message-ID: <20120326223718.355B3820D9@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: float-bytes-2 Changeset: r54013:ed47a0d6283d Date: 2012-03-26 18:35 -0400 http://bitbucket.org/pypy/pypy/changeset/ed47a0d6283d/ Log: JIT support, x86-64 now, next 32-bit. diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py --- a/pypy/jit/backend/test/test_random.py +++ b/pypy/jit/backend/test/test_random.py @@ -450,6 +450,7 @@ OPERATIONS.append(CastFloatToIntOperation(rop.CAST_FLOAT_TO_INT)) OPERATIONS.append(CastIntToFloatOperation(rop.CAST_INT_TO_FLOAT)) OPERATIONS.append(CastFloatToIntOperation(rop.CONVERT_FLOAT_BYTES_TO_LONGLONG)) +OPERATIONS.append(CastIntToFloatOperation(rop.CONVERT_LONGLONG_BYTES_TO_FLOAT)) OperationBuilder.OPERATIONS = OPERATIONS diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -1251,6 +1251,15 @@ else: self.mov(loc0, resloc) + def genop_convert_longlong_bytes_to_float(self, op, arglocs, resloc): + loc0, = arglocs + if longlong.is_64_bit: + assert isinstance(resloc, RegLoc) + assert isinstance(loc0, RegLoc) + self.mc.MOVD(resloc, loc0) + else: + raise + def genop_guard_int_is_true(self, op, guard_op, guard_token, arglocs, resloc): guard_opnum = guard_op.getopnum() self.mc.CMP(arglocs[0], 
imm0) diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -778,6 +778,15 @@ self.Perform(op, [loc0], loc1) self.xrm.possibly_free_var(op.getarg(0)) + def consider_convert_longlong_bytes_to_float(self, op): + if longlong.is_64_bit: + loc0 = self.rm.make_sure_var_in_reg(op.getarg(0)) + loc1 = self.xrm.force_allocate_reg(op.result) + self.Perform(op, [loc0], loc1) + self.rm.possibly_free_var(op.getarg(0)) + else: + raise + def _consider_llong_binop_xx(self, op): # must force both arguments into xmm registers, because we don't # know if they will be suitably aligned. Exception: if the second diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -295,6 +295,7 @@ return op rewrite_op_convert_float_bytes_to_longlong = _noop_rewrite + rewrite_op_convert_longlong_bytes_to_float = _noop_rewrite # ---------- # Various kinds of calls diff --git a/pypy/jit/codewriter/test/test_flatten.py b/pypy/jit/codewriter/test/test_flatten.py --- a/pypy/jit/codewriter/test/test_flatten.py +++ b/pypy/jit/codewriter/test/test_flatten.py @@ -968,20 +968,22 @@ int_return %i2 """, transform=True) - def test_convert_float_bytes_to_int(self): - from pypy.rlib.longlong2float import float2longlong + def test_convert_float_bytes(self): + from pypy.rlib.longlong2float import float2longlong, longlong2float def f(x): - return float2longlong(x) + ll = float2longlong(x) + return longlong2float(ll) if longlong.is_64_bit: - result_var = "%i0" - return_op = "int_return" + tmp_var = "%i0" + result_var = "%f1" else: - result_var = "%f1" - return_op = "float_return" + tmp_var = "%f1" + result_var = "%f2" self.encoding_test(f, [25.0], """ - convert_float_bytes_to_longlong %%f0 -> %(result_var)s - %(return_op)s %(result_var)s - """ % {"result_var": result_var, "return_op": return_op}) + 
convert_float_bytes_to_longlong %%f0 -> %(tmp_var)s + convert_longlong_bytes_to_float %(tmp_var)s -> %(result_var)s + float_return %(result_var)s + """ % {"result_var": result_var, "tmp_var": tmp_var}, transform=True) def check_force_cast(FROM, TO, operations, value): diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -672,6 +672,10 @@ a = longlong.getrealfloat(a) return longlong2float.float2longlong(a) + @arguments(LONGLONG_TYPECODE, returns="f") + def bhimpl_convert_longlong_bytes_to_float(a): + return longlong2float.longlong2float(a) + # ---------- # control flow operations diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -224,6 +224,7 @@ 'float_neg', 'float_abs', 'cast_ptr_to_int', 'cast_int_to_ptr', 'convert_float_bytes_to_longlong', + 'convert_longlong_bytes_to_float', ]: exec py.code.Source(''' @arguments("box") diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -420,6 +420,7 @@ 'CAST_FLOAT_TO_SINGLEFLOAT/1', 'CAST_SINGLEFLOAT_TO_FLOAT/1', 'CONVERT_FLOAT_BYTES_TO_LONGLONG/1', + 'CONVERT_LONGLONG_BYTES_TO_FLOAT/1', # 'INT_LT/2b', 'INT_LE/2b', diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -1,3 +1,4 @@ +import math import sys import py @@ -15,7 +16,7 @@ loop_invariant, elidable, promote, jit_debug, assert_green, AssertGreenFailed, unroll_safe, current_trace_length, look_inside_iff, isconstant, isvirtual, promote_string, set_param, record_known_class) -from pypy.rlib.longlong2float import float2longlong +from pypy.rlib.longlong2float import float2longlong, longlong2float from 
pypy.rlib.rarithmetic import ovfcheck, is_valid_int from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rpython.ootypesystem import ootype @@ -3805,6 +3806,14 @@ res = self.interp_operations(f, [x]) assert longlong.getfloatstorage(res) == expected + def test_longlong2float(self): + def f(n): + return longlong2float(n) + + for x in [2.5, float("nan"), -2.5, float("inf")]: + longval = float2longlong(x) + res = self.interp_operations(f, [longval]) + assert res == x or math.isnan(x) and math.isnan(res) class TestLLtype(BaseLLtypeTests, LLJitMixin): def test_tagged(self): From noreply at buildbot.pypy.org Tue Mar 27 00:51:52 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 27 Mar 2012 00:51:52 +0200 (CEST) Subject: [pypy-commit] pypy float-bytes-2: Now works on 32-bit. test_zll_stress doesn't work though. Message-ID: <20120326225152.CCE67820D9@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: float-bytes-2 Changeset: r54014:cb37f9de4640 Date: 2012-03-26 22:50 +0000 http://bitbucket.org/pypy/pypy/changeset/cb37f9de4640/ Log: Now works on 32-bit. test_zll_stress doesn't work though. 
diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -1258,7 +1258,7 @@ assert isinstance(loc0, RegLoc) self.mc.MOVD(resloc, loc0) else: - raise + self.mov(loc0, resloc) def genop_guard_int_is_true(self, op, guard_op, guard_token, arglocs, resloc): guard_opnum = guard_op.getopnum() diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -785,7 +785,10 @@ self.Perform(op, [loc0], loc1) self.rm.possibly_free_var(op.getarg(0)) else: - raise + loc0 = self.xrm.make_sure_var_in_reg(op.getarg(0)) + loc1 = self.xrm.force_allocate_reg(op.result) + self.Perform(op, [loc0], loc1) + self.xrm.possibly_free_var(op.getarg(0)) def _consider_llong_binop_xx(self, op): # must force both arguments into xmm registers, because we don't diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -674,7 +674,8 @@ @arguments(LONGLONG_TYPECODE, returns="f") def bhimpl_convert_longlong_bytes_to_float(a): - return longlong2float.longlong2float(a) + a = longlong2float.longlong2float(a) + return longlong.getfloatstorage(a) # ---------- # control flow operations diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -3796,25 +3796,17 @@ res = self.interp_operations(g, [1]) assert res == 3 - def test_float2longlong(self): + def test_float_bytes(self): def f(n): - return float2longlong(n) + ll = float2longlong(n) + return longlong2float(ll) for x in [2.5, float("nan"), -2.5, float("inf")]: # There are tests elsewhere to verify the correctness of this. 
- expected = float2longlong(x) res = self.interp_operations(f, [x]) - assert longlong.getfloatstorage(res) == expected - - def test_longlong2float(self): - def f(n): - return longlong2float(n) - - for x in [2.5, float("nan"), -2.5, float("inf")]: - longval = float2longlong(x) - res = self.interp_operations(f, [longval]) assert res == x or math.isnan(x) and math.isnan(res) + class TestLLtype(BaseLLtypeTests, LLJitMixin): def test_tagged(self): from pypy.rlib.objectmodel import UnboxedValue From noreply at buildbot.pypy.org Tue Mar 27 01:21:55 2012 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Mar 2012 01:21:55 +0200 (CEST) Subject: [pypy-commit] pypy float-bytes: fix Message-ID: <20120326232155.1960D820D9@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: float-bytes Changeset: r54015:46e174a843e3 Date: 2012-03-27 01:21 +0200 http://bitbucket.org/pypy/pypy/changeset/46e174a843e3/ Log: fix diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -773,8 +773,9 @@ self.Perform(op, [loc0], loc1) self.xrm.possibly_free_var(op.getarg(0)) else: - loc0 = self.xrm.loc(op.getarg(0)) - loc1 = self.xrm.force_allocate_reg(op.result) + arg0 = op.getarg(0) + loc0 = self.xrm.loc(arg0) + loc1 = self.xrm.force_allocate_reg(op.result, forbidden_vars=[arg0]) self.Perform(op, [loc0], loc1) self.xrm.possibly_free_var(op.getarg(0)) From noreply at buildbot.pypy.org Tue Mar 27 04:20:54 2012 From: noreply at buildbot.pypy.org (wlav) Date: Tue, 27 Mar 2012 04:20:54 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: o) handle constructors separately, as needed for the CINT backend Message-ID: <20120327022054.076C0820D9@wyvern.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r54016:e21a5f2afe86 Date: 2012-03-26 16:51 -0700 http://bitbucket.org/pypy/pypy/changeset/e21a5f2afe86/ Log: o) handle constructors separately, as needed for 
the CINT backend o) serialize calls through CINT (which uses global variables when calling stubs) diff --git a/pypy/module/cppyy/capi/__init__.py b/pypy/module/cppyy/capi/__init__.py --- a/pypy/module/cppyy/capi/__init__.py +++ b/pypy/module/cppyy/capi/__init__.py @@ -109,6 +109,11 @@ [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.CCHARP, compilation_info=backend.eci) +c_constructor = rffi.llexternal( + "cppyy_constructor", + [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], lltype.Void, + compilation_info=backend.eci) + c_call_o = rffi.llexternal( "cppyy_call_o", [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP, C_TYPE], rffi.LONG, diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py --- a/pypy/module/cppyy/capi/cint_capi.py +++ b/pypy/module/cppyy/capi/cint_capi.py @@ -41,6 +41,7 @@ _c_load_dictionary = rffi.llexternal( "cppyy_load_dictionary", [rffi.CCHARP], rdynload.DLLHANDLE, + threadsafe=False, compilation_info=eci) def c_load_dictionary(name): diff --git a/pypy/module/cppyy/executor.py b/pypy/module/cppyy/executor.py --- a/pypy/module/cppyy/executor.py +++ b/pypy/module/cppyy/executor.py @@ -236,6 +236,14 @@ typecode = 'd' +class ConstructorExecutor(VoidExecutor): + _immutable_ = True + + def execute(self, space, cppmethod, cppthis, num_args, args): + capi.c_constructor(cppmethod, cppthis, num_args, args) + return space.w_None + + class InstancePtrExecutor(FunctionExecutor): _immutable_ = True libffitype = libffi.types.pointer @@ -387,6 +395,8 @@ _executors["double"] = DoubleExecutor _executors["double*"] = DoublePtrExecutor +_executors["constructor"] = ConstructorExecutor + # special cases (note: CINT backend requires the simple name 'string') _executors["std::basic_string"] = StdStringExecutor _executors["string"] = _executors["std::basic_string"] diff --git a/pypy/module/cppyy/include/capi.h b/pypy/module/cppyy/include/capi.h --- a/pypy/module/cppyy/include/capi.h +++ b/pypy/module/cppyy/include/capi.h @@ -37,6 +37,7 @@ void* 
cppyy_call_r(cppyy_method_t method, cppyy_object_t self, int nargs, void* args); char* cppyy_call_s(cppyy_method_t method, cppyy_object_t self, int nargs, void* args); + void cppyy_constructor(cppyy_method_t method, cppyy_object_t self, int nargs, void* args); cppyy_object_t cppyy_call_o(cppyy_method_t method, cppyy_object_t self, int nargs, void* args, cppyy_type_t result_type); cppyy_methptrgetter_t cppyy_get_methptr_getter(cppyy_scope_t scope, int method_index); diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -490,7 +490,7 @@ arg_dflt = capi.c_method_arg_default(self.handle, method_index, i) arg_defs.append((arg_type, arg_dflt)) if capi.c_is_constructor(self.handle, method_index): - result_type = "void" # b/c otherwise CINT v.s. Reflex difference + result_type = "constructor" cls = CPPConstructor elif capi.c_is_staticmethod(self.handle, method_index): cls = CPPFunction diff --git a/pypy/module/cppyy/src/cintcwrapper.cxx b/pypy/module/cppyy/src/cintcwrapper.cxx --- a/pypy/module/cppyy/src/cintcwrapper.cxx +++ b/pypy/module/cppyy/src/cintcwrapper.cxx @@ -30,9 +30,11 @@ #include -/* CINT internals (won't work on Windwos) ------------------------------- */ +/* CINT internals (some won't work on Windows) -------------------------- */ extern long G__store_struct_offset; extern "C" void* G__SetShlHandle(char*); +extern "C" void G__LockCriticalSection(); +extern "C" void G__UnlockCriticalSection(); /* data for life time management ------------------------------------------ */ @@ -247,20 +249,26 @@ assert(libp->paran == nargs); fixup_args(libp); + G__value result; + G__setnull(&result); + + G__LockCriticalSection(); // is recursive lock + // TODO: access to store_struct_offset won't work on Windows long store_struct_offset = G__store_struct_offset; - if (self) { - G__setgvp((long)self); + if (self) G__store_struct_offset = (long)self; - } - G__value 
result; - G__setnull(&result); meth(&result, 0, libp, 0); if (self) G__store_struct_offset = store_struct_offset; + if (G__get_return(0) > G__RETURN_NORMAL) + G__security_recover(0); // 0 ensures silence + + G__UnlockCriticalSection(); + return result; } @@ -319,6 +327,12 @@ return cppstring_to_cstring(""); } +void cppyy_constructor(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + G__setgvp((long)self); + cppyy_call_T(method, self, nargs, args); + G__setgvp((long)G__PVOID); +} + cppyy_object_t cppyy_call_o(cppyy_type_t method, cppyy_object_t self, int nargs, void* args, cppyy_type_t /*result_type*/ ) { G__value result = cppyy_call_T(method, self, nargs, args); diff --git a/pypy/module/cppyy/src/reflexcwrapper.cxx b/pypy/module/cppyy/src/reflexcwrapper.cxx --- a/pypy/module/cppyy/src/reflexcwrapper.cxx +++ b/pypy/module/cppyy/src/reflexcwrapper.cxx @@ -155,6 +155,10 @@ return cppstring_to_cstring(result); } +void cppyy_constructor(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + cppyy_call_v(method, self, nargs, args); +} + cppyy_object_t cppyy_call_o(cppyy_method_t method, cppyy_object_t self, int nargs, void* args, cppyy_type_t result_type) { void* result = (void*)cppyy_allocate(result_type); From noreply at buildbot.pypy.org Tue Mar 27 04:20:58 2012 From: noreply at buildbot.pypy.org (wlav) Date: Tue, 27 Mar 2012 04:20:58 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: merge default into branch Message-ID: <20120327022058.5DEF5820D9@wyvern.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r54017:29c38205cc91 Date: 2012-03-26 18:08 -0700 http://bitbucket.org/pypy/pypy/changeset/29c38205cc91/ Log: merge default into branch diff too long, truncating to 10000 out of 14668 lines diff --git a/lib-python/modified-2.7/site.py b/lib-python/modified-2.7/site.py --- a/lib-python/modified-2.7/site.py +++ b/lib-python/modified-2.7/site.py @@ -550,9 +550,18 @@ "'import usercustomize' failed; use 
-v for traceback" +def import_builtin_stuff(): + """PyPy specific: pre-import a few built-in modules, because + some programs actually rely on them to be in sys.modules :-(""" + import exceptions + if 'zipimport' in sys.builtin_module_names: + import zipimport + + def main(): global ENABLE_USER_SITE + import_builtin_stuff() abs__file__() known_paths = removeduppaths() if (os.name == "posix" and sys.path and diff --git a/lib-python/modified-2.7/test/test_set.py b/lib-python/modified-2.7/test/test_set.py --- a/lib-python/modified-2.7/test/test_set.py +++ b/lib-python/modified-2.7/test/test_set.py @@ -1568,7 +1568,7 @@ for meth in (s.union, s.intersection, s.difference, s.symmetric_difference, s.isdisjoint): for g in (G, I, Ig, L, R): expected = meth(data) - actual = meth(G(data)) + actual = meth(g(data)) if isinstance(expected, bool): self.assertEqual(actual, expected) else: diff --git a/lib_pypy/_locale.py b/lib_pypy/_locale.py deleted file mode 100644 --- a/lib_pypy/_locale.py +++ /dev/null @@ -1,337 +0,0 @@ -# ctypes implementation of _locale module by Victor Stinner, 2008-03-27 - -# ------------------------------------------------------------ -# Note that we also have our own interp-level implementation -# ------------------------------------------------------------ - -""" -Support for POSIX locales. -""" - -from ctypes import (Structure, POINTER, create_string_buffer, - c_ubyte, c_int, c_char_p, c_wchar_p, c_size_t) -from ctypes_support import standard_c_lib as libc -from ctypes_support import get_errno - -# load the platform-specific cache made by running locale.ctc.py -from ctypes_config_cache._locale_cache import * - -try: from __pypy__ import builtinify -except ImportError: builtinify = lambda f: f - - -# Ubuntu Gusty i386 structure -class lconv(Structure): - _fields_ = ( - # Numeric (non-monetary) information. - ("decimal_point", c_char_p), # Decimal point character. - ("thousands_sep", c_char_p), # Thousands separator. 
- - # Each element is the number of digits in each group; - # elements with higher indices are farther left. - # An element with value CHAR_MAX means that no further grouping is done. - # An element with value 0 means that the previous element is used - # for all groups farther left. */ - ("grouping", c_char_p), - - # Monetary information. - - # First three chars are a currency symbol from ISO 4217. - # Fourth char is the separator. Fifth char is '\0'. - ("int_curr_symbol", c_char_p), - ("currency_symbol", c_char_p), # Local currency symbol. - ("mon_decimal_point", c_char_p), # Decimal point character. - ("mon_thousands_sep", c_char_p), # Thousands separator. - ("mon_grouping", c_char_p), # Like `grouping' element (above). - ("positive_sign", c_char_p), # Sign for positive values. - ("negative_sign", c_char_p), # Sign for negative values. - ("int_frac_digits", c_ubyte), # Int'l fractional digits. - ("frac_digits", c_ubyte), # Local fractional digits. - # 1 if currency_symbol precedes a positive value, 0 if succeeds. - ("p_cs_precedes", c_ubyte), - # 1 iff a space separates currency_symbol from a positive value. - ("p_sep_by_space", c_ubyte), - # 1 if currency_symbol precedes a negative value, 0 if succeeds. - ("n_cs_precedes", c_ubyte), - # 1 iff a space separates currency_symbol from a negative value. - ("n_sep_by_space", c_ubyte), - - # Positive and negative sign positions: - # 0 Parentheses surround the quantity and currency_symbol. - # 1 The sign string precedes the quantity and currency_symbol. - # 2 The sign string follows the quantity and currency_symbol. - # 3 The sign string immediately precedes the currency_symbol. - # 4 The sign string immediately follows the currency_symbol. - ("p_sign_posn", c_ubyte), - ("n_sign_posn", c_ubyte), - # 1 if int_curr_symbol precedes a positive value, 0 if succeeds. - ("int_p_cs_precedes", c_ubyte), - # 1 iff a space separates int_curr_symbol from a positive value. 
- ("int_p_sep_by_space", c_ubyte), - # 1 if int_curr_symbol precedes a negative value, 0 if succeeds. - ("int_n_cs_precedes", c_ubyte), - # 1 iff a space separates int_curr_symbol from a negative value. - ("int_n_sep_by_space", c_ubyte), - # Positive and negative sign positions: - # 0 Parentheses surround the quantity and int_curr_symbol. - # 1 The sign string precedes the quantity and int_curr_symbol. - # 2 The sign string follows the quantity and int_curr_symbol. - # 3 The sign string immediately precedes the int_curr_symbol. - # 4 The sign string immediately follows the int_curr_symbol. - ("int_p_sign_posn", c_ubyte), - ("int_n_sign_posn", c_ubyte), - ) - -_setlocale = libc.setlocale -_setlocale.argtypes = (c_int, c_char_p) -_setlocale.restype = c_char_p - -_localeconv = libc.localeconv -_localeconv.argtypes = None -_localeconv.restype = POINTER(lconv) - -_strcoll = libc.strcoll -_strcoll.argtypes = (c_char_p, c_char_p) -_strcoll.restype = c_int - -_wcscoll = libc.wcscoll -_wcscoll.argtypes = (c_wchar_p, c_wchar_p) -_wcscoll.restype = c_int - -_strxfrm = libc.strxfrm -_strxfrm.argtypes = (c_char_p, c_char_p, c_size_t) -_strxfrm.restype = c_size_t - -HAS_LIBINTL = hasattr(libc, 'gettext') -if HAS_LIBINTL: - _gettext = libc.gettext - _gettext.argtypes = (c_char_p,) - _gettext.restype = c_char_p - - _dgettext = libc.dgettext - _dgettext.argtypes = (c_char_p, c_char_p) - _dgettext.restype = c_char_p - - _dcgettext = libc.dcgettext - _dcgettext.argtypes = (c_char_p, c_char_p, c_int) - _dcgettext.restype = c_char_p - - _textdomain = libc.textdomain - _textdomain.argtypes = (c_char_p,) - _textdomain.restype = c_char_p - - _bindtextdomain = libc.bindtextdomain - _bindtextdomain.argtypes = (c_char_p, c_char_p) - _bindtextdomain.restype = c_char_p - - HAS_BIND_TEXTDOMAIN_CODESET = hasattr(libc, 'bindtextdomain_codeset') - if HAS_BIND_TEXTDOMAIN_CODESET: - _bind_textdomain_codeset = libc.bindtextdomain_codeset - _bind_textdomain_codeset.argtypes = (c_char_p, c_char_p) - 
_bind_textdomain_codeset.restype = c_char_p - -class Error(Exception): - pass - -def fixup_ulcase(): - import string - #import strop - - # create uppercase map string - ul = [] - for c in xrange(256): - c = chr(c) - if c.isupper(): - ul.append(c) - ul = ''.join(ul) - string.uppercase = ul - #strop.uppercase = ul - - # create lowercase string - ul = [] - for c in xrange(256): - c = chr(c) - if c.islower(): - ul.append(c) - ul = ''.join(ul) - string.lowercase = ul - #strop.lowercase = ul - - # create letters string - ul = [] - for c in xrange(256): - c = chr(c) - if c.isalpha(): - ul.append(c) - ul = ''.join(ul) - string.letters = ul - - at builtinify -def setlocale(category, locale=None): - "(integer,string=None) -> string. Activates/queries locale processing." - if locale: - # set locale - result = _setlocale(category, locale) - if not result: - raise Error("unsupported locale setting") - - # record changes to LC_CTYPE - if category in (LC_CTYPE, LC_ALL): - fixup_ulcase() - else: - # get locale - result = _setlocale(category, None) - if not result: - raise Error("locale query failed") - return result - -def _copy_grouping(text): - groups = [ ord(group) for group in text ] - if groups: - groups.append(0) - return groups - - at builtinify -def localeconv(): - "() -> dict. Returns numeric and monetary locale-specific parameters." 
- - # if LC_NUMERIC is different in the C library, use saved value - lp = _localeconv() - l = lp.contents - - # hopefully, the localeconv result survives the C library calls - # involved herein - - # Numeric information - result = { - "decimal_point": l.decimal_point, - "thousands_sep": l.thousands_sep, - "grouping": _copy_grouping(l.grouping), - "int_curr_symbol": l.int_curr_symbol, - "currency_symbol": l.currency_symbol, - "mon_decimal_point": l.mon_decimal_point, - "mon_thousands_sep": l.mon_thousands_sep, - "mon_grouping": _copy_grouping(l.mon_grouping), - "positive_sign": l.positive_sign, - "negative_sign": l.negative_sign, - "int_frac_digits": l.int_frac_digits, - "frac_digits": l.frac_digits, - "p_cs_precedes": l.p_cs_precedes, - "p_sep_by_space": l.p_sep_by_space, - "n_cs_precedes": l.n_cs_precedes, - "n_sep_by_space": l.n_sep_by_space, - "p_sign_posn": l.p_sign_posn, - "n_sign_posn": l.n_sign_posn, - } - return result - - at builtinify -def strcoll(s1, s2): - "string,string -> int. Compares two strings according to the locale." - - # If both arguments are byte strings, use strcoll. - if isinstance(s1, str) and isinstance(s2, str): - return _strcoll(s1, s2) - - # If neither argument is unicode, it's an error. - if not isinstance(s1, unicode) and not isinstance(s2, unicode): - raise ValueError("strcoll arguments must be strings") - - # Convert the non-unicode argument to unicode. - s1 = unicode(s1) - s2 = unicode(s2) - - # Collate the strings. - return _wcscoll(s1, s2) - - at builtinify -def strxfrm(s): - "string -> string. Returns a string that behaves for cmp locale-aware." 
- - # assume no change in size, first - n1 = len(s) + 1 - buf = create_string_buffer(n1) - n2 = _strxfrm(buf, s, n1) + 1 - if n2 > n1: - # more space needed - buf = create_string_buffer(n2) - _strxfrm(buf, s, n2) - return buf.value - - at builtinify -def getdefaultlocale(): - # TODO: Port code from CPython for Windows and Mac OS - raise NotImplementedError() - -if HAS_LANGINFO: - _nl_langinfo = libc.nl_langinfo - _nl_langinfo.argtypes = (nl_item,) - _nl_langinfo.restype = c_char_p - - def nl_langinfo(key): - """nl_langinfo(key) -> string - Return the value for the locale information associated with key.""" - # Check whether this is a supported constant. GNU libc sometimes - # returns numeric values in the char* return value, which would - # crash PyString_FromString. - result = _nl_langinfo(key) - if result is not None: - return result - raise ValueError("unsupported langinfo constant") - -if HAS_LIBINTL: - @builtinify - def gettext(msg): - """gettext(msg) -> string - Return translation of msg.""" - return _gettext(msg) - - @builtinify - def dgettext(domain, msg): - """dgettext(domain, msg) -> string - Return translation of msg in domain.""" - return _dgettext(domain, msg) - - @builtinify - def dcgettext(domain, msg, category): - """dcgettext(domain, msg, category) -> string - Return translation of msg in domain and category.""" - return _dcgettext(domain, msg, category) - - @builtinify - def textdomain(domain): - """textdomain(domain) -> string - Set the C library's textdomain to domain, returning the new domain.""" - return _textdomain(domain) - - @builtinify - def bindtextdomain(domain, dir): - """bindtextdomain(domain, dir) -> string - Bind the C library's domain to dir.""" - dirname = _bindtextdomain(domain, dir) - if not dirname: - errno = get_errno() - raise OSError(errno) - return dirname - - if HAS_BIND_TEXTDOMAIN_CODESET: - @builtinify - def bind_textdomain_codeset(domain, codeset): - """bind_textdomain_codeset(domain, codeset) -> string - Bind the C 
library's domain to codeset.""" - codeset = _bind_textdomain_codeset(domain, codeset) - if codeset: - return codeset - return None - -__all__ = ( - 'Error', - 'setlocale', 'localeconv', 'strxfrm', 'strcoll', -) + ALL_CONSTANTS -if HAS_LIBINTL: - __all__ += ('gettext', 'dgettext', 'dcgettext', 'textdomain', - 'bindtextdomain') - if HAS_BIND_TEXTDOMAIN_CODESET: - __all__ += ('bind_textdomain_codeset',) -if HAS_LANGINFO: - __all__ += ('nl_langinfo',) diff --git a/lib_pypy/array.py b/lib_pypy/array.py deleted file mode 100644 --- a/lib_pypy/array.py +++ /dev/null @@ -1,531 +0,0 @@ -"""This module defines an object type which can efficiently represent -an array of basic values: characters, integers, floating point -numbers. Arrays are sequence types and behave very much like lists, -except that the type of objects stored in them is constrained. The -type is specified at object creation time by using a type code, which -is a single character. The following type codes are defined: - - Type code C Type Minimum size in bytes - 'c' character 1 - 'b' signed integer 1 - 'B' unsigned integer 1 - 'u' Unicode character 2 - 'h' signed integer 2 - 'H' unsigned integer 2 - 'i' signed integer 2 - 'I' unsigned integer 2 - 'l' signed integer 4 - 'L' unsigned integer 4 - 'f' floating point 4 - 'd' floating point 8 - -The constructor is: - -array(typecode [, initializer]) -- create a new array -""" - -from struct import calcsize, pack, pack_into, unpack_from -import operator - -# the buffer-like object to use internally: trying from -# various places in order... 
-try: - import _rawffi # a reasonable implementation based - _RAWARRAY = _rawffi.Array('c') # on raw_malloc, and providing a - def bytebuffer(size): # real address - return _RAWARRAY(size, autofree=True) - def getbufaddress(buf): - return buf.buffer -except ImportError: - try: - from __pypy__ import bytebuffer # a reasonable implementation - def getbufaddress(buf): # compatible with oo backends, - return 0 # but no address - except ImportError: - # not running on PyPy. Fall back to ctypes... - import ctypes - bytebuffer = ctypes.create_string_buffer - def getbufaddress(buf): - voidp = ctypes.cast(ctypes.pointer(buf), ctypes.c_void_p) - return voidp.value - -# ____________________________________________________________ - -TYPECODES = "cbBuhHiIlLfd" - -class array(object): - """array(typecode [, initializer]) -> array - - Return a new array whose items are restricted by typecode, and - initialized from the optional initializer value, which must be a list, - string. or iterable over elements of the appropriate type. - - Arrays represent basic values and behave very much like lists, except - the type of objects stored in them is constrained. 
- - Methods: - - append() -- append a new item to the end of the array - buffer_info() -- return information giving the current memory info - byteswap() -- byteswap all the items of the array - count() -- return number of occurences of an object - extend() -- extend array by appending multiple elements from an iterable - fromfile() -- read items from a file object - fromlist() -- append items from the list - fromstring() -- append items from the string - index() -- return index of first occurence of an object - insert() -- insert a new item into the array at a provided position - pop() -- remove and return item (default last) - read() -- DEPRECATED, use fromfile() - remove() -- remove first occurence of an object - reverse() -- reverse the order of the items in the array - tofile() -- write all items to a file object - tolist() -- return the array converted to an ordinary list - tostring() -- return the array converted to a string - write() -- DEPRECATED, use tofile() - - Attributes: - - typecode -- the typecode character used to create the array - itemsize -- the length in bytes of one array item - """ - __slots__ = ["typecode", "itemsize", "_data", "_descriptor", "__weakref__"] - - def __new__(cls, typecode, initializer=[], **extrakwds): - self = object.__new__(cls) - if cls is array and extrakwds: - raise TypeError("array() does not take keyword arguments") - if not isinstance(typecode, str) or len(typecode) != 1: - raise TypeError( - "array() argument 1 must be char, not %s" % type(typecode)) - if typecode not in TYPECODES: - raise ValueError( - "bad typecode (must be one of %s)" % ', '.join(TYPECODES)) - self._data = bytebuffer(0) - self.typecode = typecode - self.itemsize = calcsize(typecode) - if isinstance(initializer, list): - self.fromlist(initializer) - elif isinstance(initializer, str): - self.fromstring(initializer) - elif isinstance(initializer, unicode) and self.typecode == "u": - self.fromunicode(initializer) - else: - self.extend(initializer) - 
return self - - def _clear(self): - self._data = bytebuffer(0) - - ##### array-specific operations - - def fromfile(self, f, n): - """Read n objects from the file object f and append them to the end of - the array. Also called as read.""" - if not isinstance(f, file): - raise TypeError("arg1 must be open file") - size = self.itemsize * n - item = f.read(size) - if len(item) < size: - raise EOFError("not enough items in file") - self.fromstring(item) - - def fromlist(self, l): - """Append items to array from list.""" - if not isinstance(l, list): - raise TypeError("arg must be list") - self._fromiterable(l) - - def fromstring(self, s): - """Appends items from the string, interpreting it as an array of machine - values, as if it had been read from a file using the fromfile() - method.""" - if isinstance(s, unicode): - s = str(s) - self._frombuffer(s) - - def _frombuffer(self, s): - length = len(s) - if length % self.itemsize != 0: - raise ValueError("string length not a multiple of item size") - boundary = len(self._data) - newdata = bytebuffer(boundary + length) - newdata[:boundary] = self._data - newdata[boundary:] = s - self._data = newdata - - def fromunicode(self, ustr): - """Extends this array with data from the unicode string ustr. The array - must be a type 'u' array; otherwise a ValueError is raised. Use - array.fromstring(ustr.encode(...)) to append Unicode data to an array of - some other type.""" - if not self.typecode == "u": - raise ValueError( - "fromunicode() may only be called on type 'u' arrays") - # XXX the following probable bug is not emulated: - # CPython accepts a non-unicode string or a buffer, and then - # behaves just like fromstring(), except that it strangely truncates - # string arguments at multiples of the unicode byte size. - # Let's only accept unicode arguments for now. 
- if not isinstance(ustr, unicode): - raise TypeError("fromunicode() argument should probably be " - "a unicode string") - # _frombuffer() does the currect thing using - # the buffer behavior of unicode objects - self._frombuffer(buffer(ustr)) - - def tofile(self, f): - """Write all items (as machine values) to the file object f. Also - called as write.""" - if not isinstance(f, file): - raise TypeError("arg must be open file") - f.write(self.tostring()) - - def tolist(self): - """Convert array to an ordinary list with the same items.""" - count = len(self._data) // self.itemsize - return list(unpack_from('%d%s' % (count, self.typecode), self._data)) - - def tostring(self): - return self._data[:] - - def __buffer__(self): - return buffer(self._data) - - def tounicode(self): - """Convert the array to a unicode string. The array must be a type 'u' - array; otherwise a ValueError is raised. Use array.tostring().decode() - to obtain a unicode string from an array of some other type.""" - if self.typecode != "u": - raise ValueError("tounicode() may only be called on type 'u' arrays") - # XXX performance is not too good - return u"".join(self.tolist()) - - def byteswap(self): - """Byteswap all items of the array. If the items in the array are not - 1, 2, 4, or 8 bytes in size, RuntimeError is raised.""" - if self.itemsize not in [1, 2, 4, 8]: - raise RuntimeError("byteswap not supported for this array") - # XXX slowish - itemsize = self.itemsize - bytes = self._data - for start in range(0, len(bytes), itemsize): - stop = start + itemsize - bytes[start:stop] = bytes[start:stop][::-1] - - def buffer_info(self): - """Return a tuple (address, length) giving the current memory address - and the length in items of the buffer used to hold array's contents. The - length should be multiplied by the itemsize attribute to calculate the - buffer length in bytes. 
On PyPy the address might be meaningless - (returned as 0), depending on the available modules.""" - return (getbufaddress(self._data), len(self)) - - read = fromfile - - write = tofile - - ##### general object protocol - - def __repr__(self): - if len(self._data) == 0: - return "array('%s')" % self.typecode - elif self.typecode == "c": - return "array('%s', %s)" % (self.typecode, repr(self.tostring())) - elif self.typecode == "u": - return "array('%s', %s)" % (self.typecode, repr(self.tounicode())) - else: - return "array('%s', %s)" % (self.typecode, repr(self.tolist())) - - def __copy__(self): - a = array(self.typecode) - a._data = bytebuffer(len(self._data)) - a._data[:] = self._data - return a - - def __eq__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) == buffer(other._data) - else: - return self.tolist() == other.tolist() - - def __ne__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) != buffer(other._data) - else: - return self.tolist() != other.tolist() - - def __lt__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) < buffer(other._data) - else: - return self.tolist() < other.tolist() - - def __gt__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) > buffer(other._data) - else: - return self.tolist() > other.tolist() - - def __le__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) <= buffer(other._data) - else: - return self.tolist() <= other.tolist() - - def __ge__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) >= buffer(other._data) - else: - return self.tolist() >= other.tolist() - - 
def __reduce__(self): - dict = getattr(self, '__dict__', None) - data = self.tostring() - if data: - initargs = (self.typecode, data) - else: - initargs = (self.typecode,) - return (type(self), initargs, dict) - - ##### list methods - - def append(self, x): - """Append new value x to the end of the array.""" - self._frombuffer(pack(self.typecode, x)) - - def count(self, x): - """Return number of occurences of x in the array.""" - return operator.countOf(self, x) - - def extend(self, iterable): - """Append items to the end of the array.""" - if isinstance(iterable, array) \ - and not self.typecode == iterable.typecode: - raise TypeError("can only extend with array of same kind") - self._fromiterable(iterable) - - def index(self, x): - """Return index of first occurence of x in the array.""" - return operator.indexOf(self, x) - - def insert(self, i, x): - """Insert a new item x into the array before position i.""" - seqlength = len(self) - if i < 0: - i += seqlength - if i < 0: - i = 0 - elif i > seqlength: - i = seqlength - boundary = i * self.itemsize - data = pack(self.typecode, x) - newdata = bytebuffer(len(self._data) + len(data)) - newdata[:boundary] = self._data[:boundary] - newdata[boundary:boundary+self.itemsize] = data - newdata[boundary+self.itemsize:] = self._data[boundary:] - self._data = newdata - - def pop(self, i=-1): - """Return the i-th element and delete it from the array. 
i defaults to - -1.""" - seqlength = len(self) - if i < 0: - i += seqlength - if not (0 <= i < seqlength): - raise IndexError(i) - boundary = i * self.itemsize - result = unpack_from(self.typecode, self._data, boundary)[0] - newdata = bytebuffer(len(self._data) - self.itemsize) - newdata[:boundary] = self._data[:boundary] - newdata[boundary:] = self._data[boundary+self.itemsize:] - self._data = newdata - return result - - def remove(self, x): - """Remove the first occurence of x in the array.""" - self.pop(self.index(x)) - - def reverse(self): - """Reverse the order of the items in the array.""" - lst = self.tolist() - lst.reverse() - self._clear() - self.fromlist(lst) - - ##### list protocol - - def __len__(self): - return len(self._data) // self.itemsize - - def __add__(self, other): - if not isinstance(other, array): - raise TypeError("can only append array to array") - if self.typecode != other.typecode: - raise TypeError("bad argument type for built-in operation") - return array(self.typecode, buffer(self._data) + buffer(other._data)) - - def __mul__(self, repeat): - return array(self.typecode, buffer(self._data) * repeat) - - __rmul__ = __mul__ - - def __getitem__(self, i): - seqlength = len(self) - if isinstance(i, slice): - start, stop, step = i.indices(seqlength) - if step != 1: - sublist = self.tolist()[i] # fall-back - return array(self.typecode, sublist) - if start < 0: - start = 0 - if stop < start: - stop = start - assert stop <= seqlength - return array(self.typecode, self._data[start * self.itemsize : - stop * self.itemsize]) - else: - if i < 0: - i += seqlength - if self.typecode == 'c': # speed trick - return self._data[i] - if not (0 <= i < seqlength): - raise IndexError(i) - boundary = i * self.itemsize - return unpack_from(self.typecode, self._data, boundary)[0] - - def __getslice__(self, i, j): - return self.__getitem__(slice(i, j)) - - def __setitem__(self, i, x): - if isinstance(i, slice): - if (not isinstance(x, array) - or self.typecode != 
x.typecode): - raise TypeError("can only assign array of same kind" - " to array slice") - seqlength = len(self) - start, stop, step = i.indices(seqlength) - if step != 1: - sublist = self.tolist() # fall-back - sublist[i] = x.tolist() - self._clear() - self.fromlist(sublist) - return - if start < 0: - start = 0 - if stop < start: - stop = start - assert stop <= seqlength - boundary1 = start * self.itemsize - boundary2 = stop * self.itemsize - boundary2new = boundary1 + len(x._data) - if boundary2 == boundary2new: - self._data[boundary1:boundary2] = x._data - else: - newdata = bytebuffer(len(self._data) + boundary2new-boundary2) - newdata[:boundary1] = self._data[:boundary1] - newdata[boundary1:boundary2new] = x._data - newdata[boundary2new:] = self._data[boundary2:] - self._data = newdata - else: - seqlength = len(self) - if i < 0: - i += seqlength - if self.typecode == 'c': # speed trick - self._data[i] = x - return - if not (0 <= i < seqlength): - raise IndexError(i) - boundary = i * self.itemsize - pack_into(self.typecode, self._data, boundary, x) - - def __setslice__(self, i, j, x): - self.__setitem__(slice(i, j), x) - - def __delitem__(self, i): - if isinstance(i, slice): - seqlength = len(self) - start, stop, step = i.indices(seqlength) - if start < 0: - start = 0 - if stop < start: - stop = start - assert stop <= seqlength - if step != 1: - sublist = self.tolist() # fall-back - del sublist[i] - self._clear() - self.fromlist(sublist) - return - dellength = stop - start - boundary1 = start * self.itemsize - boundary2 = stop * self.itemsize - newdata = bytebuffer(len(self._data) - (boundary2-boundary1)) - newdata[:boundary1] = self._data[:boundary1] - newdata[boundary1:] = self._data[boundary2:] - self._data = newdata - else: - seqlength = len(self) - if i < 0: - i += seqlength - if not (0 <= i < seqlength): - raise IndexError(i) - boundary = i * self.itemsize - newdata = bytebuffer(len(self._data) - self.itemsize) - newdata[:boundary] = self._data[:boundary] 
- newdata[boundary:] = self._data[boundary+self.itemsize:] - self._data = newdata - - def __delslice__(self, i, j): - self.__delitem__(slice(i, j)) - - def __contains__(self, item): - for x in self: - if x == item: - return True - return False - - def __iadd__(self, other): - if not isinstance(other, array): - raise TypeError("can only extend array with array") - self.extend(other) - return self - - def __imul__(self, repeat): - newdata = buffer(self._data) * repeat - self._data = bytebuffer(len(newdata)) - self._data[:] = newdata - return self - - def __iter__(self): - p = 0 - typecode = self.typecode - itemsize = self.itemsize - while p < len(self._data): - yield unpack_from(typecode, self._data, p)[0] - p += itemsize - - ##### internal methods - - def _fromiterable(self, iterable): - iterable = tuple(iterable) - n = len(iterable) - boundary = len(self._data) - newdata = bytebuffer(boundary + n * self.itemsize) - newdata[:boundary] = self._data - pack_into('%d%s' % (n, self.typecode), newdata, boundary, *iterable) - self._data = newdata - -ArrayType = array diff --git a/lib_pypy/binascii.py b/lib_pypy/binascii.py deleted file mode 100644 --- a/lib_pypy/binascii.py +++ /dev/null @@ -1,720 +0,0 @@ -"""A pure Python implementation of binascii. - -Rather slow and buggy in corner cases. -PyPy provides an RPython version too. 
-""" - -class Error(Exception): - pass - -class Done(Exception): - pass - -class Incomplete(Exception): - pass - -def a2b_uu(s): - if not s: - return '' - - length = (ord(s[0]) - 0x20) % 64 - - def quadruplets_gen(s): - while s: - try: - yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3]) - except IndexError: - s += ' ' - yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3]) - return - s = s[4:] - - try: - result = [''.join( - [chr((A - 0x20) << 2 | (((B - 0x20) >> 4) & 0x3)), - chr(((B - 0x20) & 0xf) << 4 | (((C - 0x20) >> 2) & 0xf)), - chr(((C - 0x20) & 0x3) << 6 | ((D - 0x20) & 0x3f)) - ]) for A, B, C, D in quadruplets_gen(s[1:].rstrip())] - except ValueError: - raise Error('Illegal char') - result = ''.join(result) - trailingdata = result[length:] - if trailingdata.strip('\x00'): - raise Error('Trailing garbage') - result = result[:length] - if len(result) < length: - result += ((length - len(result)) * '\x00') - return result - - -def b2a_uu(s): - length = len(s) - if length > 45: - raise Error('At most 45 bytes at once') - - def triples_gen(s): - while s: - try: - yield ord(s[0]), ord(s[1]), ord(s[2]) - except IndexError: - s += '\0\0' - yield ord(s[0]), ord(s[1]), ord(s[2]) - return - s = s[3:] - - result = [''.join( - [chr(0x20 + (( A >> 2 ) & 0x3F)), - chr(0x20 + (((A << 4) | ((B >> 4) & 0xF)) & 0x3F)), - chr(0x20 + (((B << 2) | ((C >> 6) & 0x3)) & 0x3F)), - chr(0x20 + (( C ) & 0x3F))]) - for A, B, C in triples_gen(s)] - return chr(ord(' ') + (length & 077)) + ''.join(result) + '\n' - - -table_a2b_base64 = { - 'A': 0, - 'B': 1, - 'C': 2, - 'D': 3, - 'E': 4, - 'F': 5, - 'G': 6, - 'H': 7, - 'I': 8, - 'J': 9, - 'K': 10, - 'L': 11, - 'M': 12, - 'N': 13, - 'O': 14, - 'P': 15, - 'Q': 16, - 'R': 17, - 'S': 18, - 'T': 19, - 'U': 20, - 'V': 21, - 'W': 22, - 'X': 23, - 'Y': 24, - 'Z': 25, - 'a': 26, - 'b': 27, - 'c': 28, - 'd': 29, - 'e': 30, - 'f': 31, - 'g': 32, - 'h': 33, - 'i': 34, - 'j': 35, - 'k': 36, - 'l': 37, - 'm': 38, - 'n': 39, - 'o': 40, - 'p': 41, - 'q': 42, - 
'r': 43, - 's': 44, - 't': 45, - 'u': 46, - 'v': 47, - 'w': 48, - 'x': 49, - 'y': 50, - 'z': 51, - '0': 52, - '1': 53, - '2': 54, - '3': 55, - '4': 56, - '5': 57, - '6': 58, - '7': 59, - '8': 60, - '9': 61, - '+': 62, - '/': 63, - '=': 0, -} - - -def a2b_base64(s): - if not isinstance(s, (str, unicode)): - raise TypeError("expected string or unicode, got %r" % (s,)) - s = s.rstrip() - # clean out all invalid characters, this also strips the final '=' padding - # check for correct padding - - def next_valid_char(s, pos): - for i in range(pos + 1, len(s)): - c = s[i] - if c < '\x7f': - try: - table_a2b_base64[c] - return c - except KeyError: - pass - return None - - quad_pos = 0 - leftbits = 0 - leftchar = 0 - res = [] - for i, c in enumerate(s): - if c > '\x7f' or c == '\n' or c == '\r' or c == ' ': - continue - if c == '=': - if quad_pos < 2 or (quad_pos == 2 and next_valid_char(s, i) != '='): - continue - else: - leftbits = 0 - break - try: - next_c = table_a2b_base64[c] - except KeyError: - continue - quad_pos = (quad_pos + 1) & 0x03 - leftchar = (leftchar << 6) | next_c - leftbits += 6 - if leftbits >= 8: - leftbits -= 8 - res.append((leftchar >> leftbits & 0xff)) - leftchar &= ((1 << leftbits) - 1) - if leftbits != 0: - raise Error('Incorrect padding') - - return ''.join([chr(i) for i in res]) - -table_b2a_base64 = \ -"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" - -def b2a_base64(s): - length = len(s) - final_length = length % 3 - - def triples_gen(s): - while s: - try: - yield ord(s[0]), ord(s[1]), ord(s[2]) - except IndexError: - s += '\0\0' - yield ord(s[0]), ord(s[1]), ord(s[2]) - return - s = s[3:] - - - a = triples_gen(s[ :length - final_length]) - - result = [''.join( - [table_b2a_base64[( A >> 2 ) & 0x3F], - table_b2a_base64[((A << 4) | ((B >> 4) & 0xF)) & 0x3F], - table_b2a_base64[((B << 2) | ((C >> 6) & 0x3)) & 0x3F], - table_b2a_base64[( C ) & 0x3F]]) - for A, B, C in a] - - final = s[length - final_length:] - if final_length == 
0: - snippet = '' - elif final_length == 1: - a = ord(final[0]) - snippet = table_b2a_base64[(a >> 2 ) & 0x3F] + \ - table_b2a_base64[(a << 4 ) & 0x3F] + '==' - else: - a = ord(final[0]) - b = ord(final[1]) - snippet = table_b2a_base64[(a >> 2) & 0x3F] + \ - table_b2a_base64[((a << 4) | (b >> 4) & 0xF) & 0x3F] + \ - table_b2a_base64[(b << 2) & 0x3F] + '=' - return ''.join(result) + snippet + '\n' - -def a2b_qp(s, header=False): - inp = 0 - odata = [] - while inp < len(s): - if s[inp] == '=': - inp += 1 - if inp >= len(s): - break - # Soft line breaks - if (s[inp] == '\n') or (s[inp] == '\r'): - if s[inp] != '\n': - while inp < len(s) and s[inp] != '\n': - inp += 1 - if inp < len(s): - inp += 1 - elif s[inp] == '=': - # broken case from broken python qp - odata.append('=') - inp += 1 - elif s[inp] in hex_numbers and s[inp + 1] in hex_numbers: - ch = chr(int(s[inp:inp+2], 16)) - inp += 2 - odata.append(ch) - else: - odata.append('=') - elif header and s[inp] == '_': - odata.append(' ') - inp += 1 - else: - odata.append(s[inp]) - inp += 1 - return ''.join(odata) - -def b2a_qp(data, quotetabs=False, istext=True, header=False): - """quotetabs=True means that tab and space characters are always - quoted. - istext=False means that \r and \n are treated as regular characters - header=True encodes space characters with '_' and requires - real '_' characters to be quoted. - """ - MAXLINESIZE = 76 - - # See if this string is using CRLF line ends - lf = data.find('\n') - crlf = lf > 0 and data[lf-1] == '\r' - - inp = 0 - linelen = 0 - odata = [] - while inp < len(data): - c = data[inp] - if (c > '~' or - c == '=' or - (header and c == '_') or - (c == '.' 
and linelen == 0 and (inp+1 == len(data) or - data[inp+1] == '\n' or - data[inp+1] == '\r')) or - (not istext and (c == '\r' or c == '\n')) or - ((c == '\t' or c == ' ') and (inp + 1 == len(data))) or - (c <= ' ' and c != '\r' and c != '\n' and - (quotetabs or (not quotetabs and (c != '\t' and c != ' '))))): - linelen += 3 - if linelen >= MAXLINESIZE: - odata.append('=') - if crlf: odata.append('\r') - odata.append('\n') - linelen = 3 - odata.append('=' + two_hex_digits(ord(c))) - inp += 1 - else: - if (istext and - (c == '\n' or (inp+1 < len(data) and c == '\r' and - data[inp+1] == '\n'))): - linelen = 0 - # Protect against whitespace on end of line - if (len(odata) > 0 and - (odata[-1] == ' ' or odata[-1] == '\t')): - ch = ord(odata[-1]) - odata[-1] = '=' - odata.append(two_hex_digits(ch)) - - if crlf: odata.append('\r') - odata.append('\n') - if c == '\r': - inp += 2 - else: - inp += 1 - else: - if (inp + 1 < len(data) and - data[inp+1] != '\n' and - (linelen + 1) >= MAXLINESIZE): - odata.append('=') - if crlf: odata.append('\r') - odata.append('\n') - linelen = 0 - - linelen += 1 - if header and c == ' ': - c = '_' - odata.append(c) - inp += 1 - return ''.join(odata) - -hex_numbers = '0123456789ABCDEF' -def hex(n): - if n == 0: - return '0' - - if n < 0: - n = -n - sign = '-' - else: - sign = '' - arr = [] - - def hex_gen(n): - """ Yield a nibble at a time. 
""" - while n: - yield n % 0x10 - n = n / 0x10 - - for nibble in hex_gen(n): - arr = [hex_numbers[nibble]] + arr - return sign + ''.join(arr) - -def two_hex_digits(n): - return hex_numbers[n / 0x10] + hex_numbers[n % 0x10] - - -def strhex_to_int(s): - i = 0 - for c in s: - i = i * 0x10 + hex_numbers.index(c) - return i - -hqx_encoding = '!"#$%&\'()*+,-012345689 at ABCDEFGHIJKLMNPQRSTUVXYZ[`abcdefhijklmpqr' - -DONE = 0x7f -SKIP = 0x7e -FAIL = 0x7d - -table_a2b_hqx = [ - #^@ ^A ^B ^C ^D ^E ^F ^G - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - #\b \t \n ^K ^L \r ^N ^O - FAIL, FAIL, SKIP, FAIL, FAIL, SKIP, FAIL, FAIL, - #^P ^Q ^R ^S ^T ^U ^V ^W - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - #^X ^Y ^Z ^[ ^\ ^] ^^ ^_ - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - # ! " # $ % & ' - FAIL, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, - #( ) * + , - . / - 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, FAIL, FAIL, - #0 1 2 3 4 5 6 7 - 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, FAIL, - #8 9 : ; < = > ? - 0x14, 0x15, DONE, FAIL, FAIL, FAIL, FAIL, FAIL, - #@ A B C D E F G - 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, - #H I J K L M N O - 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, FAIL, - #P Q R S T U V W - 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, FAIL, - #X Y Z [ \ ] ^ _ - 0x2C, 0x2D, 0x2E, 0x2F, FAIL, FAIL, FAIL, FAIL, - #` a b c d e f g - 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, FAIL, - #h i j k l m n o - 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, FAIL, FAIL, - #p q r s t u v w - 0x3D, 0x3E, 0x3F, FAIL, FAIL, FAIL, FAIL, FAIL, - #x y z { | } ~ ^? 
- FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, -] - -def a2b_hqx(s): - result = [] - - def quadruples_gen(s): - t = [] - for c in s: - res = table_a2b_hqx[ord(c)] - if res == SKIP: - continue - elif res == FAIL: - raise Error('Illegal character') - elif res == DONE: - yield t - raise Done - else: - t.append(res) - if len(t) == 4: - yield t - t = [] - yield t - - done = 0 - try: - for snippet in quadruples_gen(s): - length = len(snippet) - if length == 4: - result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4))) - result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2))) - result.append(chr(((snippet[2] & 0x03) << 6) | (snippet[3]))) - elif length == 3: - result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4))) - result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2))) - elif length == 2: - result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4))) - except Done: - done = 1 - except Error: - raise - return (''.join(result), done) - -def b2a_hqx(s): - result =[] - - def triples_gen(s): - while s: - try: - yield ord(s[0]), ord(s[1]), ord(s[2]) - except IndexError: - yield tuple([ord(c) for c in s]) - s = s[3:] - - for snippet in 
triples_gen(s): - length = len(snippet) - if length == 3: - result.append( - hqx_encoding[(snippet[0] & 0xfc) >> 2]) - result.append(hqx_encoding[ - ((snippet[0] & 0x03) << 4) | ((snippet[1] & 0xf0) >> 4)]) - result.append(hqx_encoding[ - (snippet[1] & 0x0f) << 2 | ((snippet[2] & 0xc0) >> 6)]) - result.append(hqx_encoding[snippet[2] & 0x3f]) - elif length == 2: - result.append( - hqx_encoding[(snippet[0] & 0xfc) >> 2]) - result.append(hqx_encoding[ - ((snippet[0] & 0x03) << 4) | ((snippet[1] & 0xf0) >> 4)]) - result.append(hqx_encoding[ - (snippet[1] & 0x0f) << 2]) - elif length == 1: - result.append( - hqx_encoding[(snippet[0] & 0xfc) >> 2]) - result.append(hqx_encoding[ - ((snippet[0] & 0x03) << 4)]) - return ''.join(result) - -crctab_hqx = [ - 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, - 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, - 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, - 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, - 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, - 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, - 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, - 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, - 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, - 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, - 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12, - 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, - 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, - 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, - 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, - 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, - 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, - 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, - 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 
0xe37f, 0xf35e, - 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, - 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, - 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, - 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, - 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, - 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, - 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, - 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, - 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, - 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, - 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, - 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, - 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0, -] - -def crc_hqx(s, crc): - for c in s: - crc = ((crc << 8) & 0xff00) ^ crctab_hqx[((crc >> 8) & 0xff) ^ ord(c)] - - return crc - -def rlecode_hqx(s): - """ - Run length encoding for binhex4. - The CPython implementation does not do run length encoding - of \x90 characters. This implementation does. - """ - if not s: - return '' - result = [] - prev = s[0] - count = 1 - # Add a dummy character to get the loop to go one extra round. - # The dummy must be different from the last character of s. - # In the same step we remove the first character, which has - # already been stored in prev. - if s[-1] == '!': - s = s[1:] + '?' - else: - s = s[1:] + '!' 
- - for c in s: - if c == prev and count < 255: - count += 1 - else: - if count == 1: - if prev != '\x90': - result.append(prev) - else: - result.extend(['\x90', '\x00']) - elif count < 4: - if prev != '\x90': - result.extend([prev] * count) - else: - result.extend(['\x90', '\x00'] * count) - else: - if prev != '\x90': - result.extend([prev, '\x90', chr(count)]) - else: - result.extend(['\x90', '\x00', '\x90', chr(count)]) - count = 1 - prev = c - - return ''.join(result) - -def rledecode_hqx(s): - s = s.split('\x90') - result = [s[0]] - prev = s[0] - for snippet in s[1:]: - count = ord(snippet[0]) - if count > 0: - result.append(prev[-1] * (count-1)) - prev = snippet - else: - result. append('\x90') - prev = '\x90' - result.append(snippet[1:]) - - return ''.join(result) - -crc_32_tab = [ - 0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L, - 0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L, - 0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L, - 0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL, - 0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L, - 0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L, - 0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L, - 0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL, - 0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L, - 0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL, - 0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L, - 0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L, - 0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L, - 0x01db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x06b6b51fL, - 0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0x0f00f934L, 0x9609a88eL, - 0xe10e9818L, 0x7f6a0dbbL, 0x086d3d2dL, 0x91646c97L, 0xe6635c01L, - 0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL, - 0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 
0x12b7e950L, - 0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L, - 0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L, - 0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL, - 0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L, - 0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L, - 0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL, - 0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L, - 0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L, - 0x03b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x04db2615L, - 0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0x0d6d6a3eL, 0x7a6a5aa8L, - 0xe40ecf0bL, 0x9309ff9dL, 0x0a00ae27L, 0x7d079eb1L, 0xf00f9344L, - 0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL, - 0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL, - 0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L, - 0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L, - 0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL, - 0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL, - 0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L, - 0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL, - 0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L, - 0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL, - 0x026d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x05005713L, - 0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0x0cb61b38L, 0x92d28e9bL, - 0xe5d5be0dL, 0x7cdcefb7L, 0x0bdbdf21L, 0x86d3d2d4L, 0xf1d4e242L, - 0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L, - 0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL, - 0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L, - 0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L, - 0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L, - 0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 
0x47b2cf7fL, 0x30b5ffe9L, - 0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L, - 0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L, - 0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL, - 0x2d02ef8dL -] - -def crc32(s, crc=0): - result = 0 - crc = ~long(crc) & 0xffffffffL - for c in s: - crc = crc_32_tab[(crc ^ long(ord(c))) & 0xffL] ^ (crc >> 8) - #/* Note: (crc >> 8) MUST zero fill on left - - result = crc ^ 0xffffffffL - - if result > 2**31: - result = ((result + 2**31) % 2**32) - 2**31 - - return result - -def b2a_hex(s): - result = [] - for char in s: - c = (ord(char) >> 4) & 0xf - if c > 9: - c = c + ord('a') - 10 - else: - c = c + ord('0') - result.append(chr(c)) - c = ord(char) & 0xf - if c > 9: - c = c + ord('a') - 10 - else: - c = c + ord('0') - result.append(chr(c)) - return ''.join(result) - -hexlify = b2a_hex - -table_hex = [ - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,-1,-1, -1,-1,-1,-1, - -1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1 -] - - -def a2b_hex(t): - result = [] - - def pairs_gen(s): - while s: - try: - yield table_hex[ord(s[0])], table_hex[ord(s[1])] - except IndexError: - if len(s): - raise TypeError('Odd-length string') - return - s = s[2:] - - for a, b in pairs_gen(t): - if a < 0 or b < 0: - raise TypeError('Non-hexadecimal digit found') - result.append(chr((a << 4) + b)) - return ''.join(result) - - -unhexlify = a2b_hex diff --git a/lib_pypy/numpypy/core/numeric.py b/lib_pypy/numpypy/core/numeric.py --- a/lib_pypy/numpypy/core/numeric.py +++ b/lib_pypy/numpypy/core/numeric.py @@ -6,7 +6,7 @@ import _numpypy as multiarray # ARGH from numpypy.core.arrayprint import array2string - +newaxis 
= None def asanyarray(a, dtype=None, order=None, maskna=None, ownmaskna=False): """ @@ -306,6 +306,125 @@ else: return multiarray.set_string_function(f, repr) +def array_equal(a1, a2): + """ + True if two arrays have the same shape and elements, False otherwise. + + Parameters + ---------- + a1, a2 : array_like + Input arrays. + + Returns + ------- + b : bool + Returns True if the arrays are equal. + + See Also + -------- + allclose: Returns True if two arrays are element-wise equal within a + tolerance. + array_equiv: Returns True if input arrays are shape consistent and all + elements equal. + + Examples + -------- + >>> np.array_equal([1, 2], [1, 2]) + True + >>> np.array_equal(np.array([1, 2]), np.array([1, 2])) + True + >>> np.array_equal([1, 2], [1, 2, 3]) + False + >>> np.array_equal([1, 2], [1, 4]) + False + + """ + try: + a1, a2 = asarray(a1), asarray(a2) + except: + return False + if a1.shape != a2.shape: + return False + return bool((a1 == a2).all()) + +def asarray(a, dtype=None, order=None, maskna=None, ownmaskna=False): + """ + Convert the input to an array. + + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to an array. This + includes lists, lists of tuples, tuples, tuples of tuples, tuples + of lists and ndarrays. + dtype : data-type, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F'}, optional + Whether to use row-major ('C') or column-major ('F' for FORTRAN) + memory representation. Defaults to 'C'. + maskna : bool or None, optional + If this is set to True, it forces the array to have an NA mask. + If this is set to False, it forces the array to not have an NA + mask. + ownmaskna : bool, optional + If this is set to True, forces the array to have a mask which + it owns. + + Returns + ------- + out : ndarray + Array interpretation of `a`. No copy is performed if the input + is already an ndarray. If `a` is a subclass of ndarray, a base + class ndarray is returned. 
+ + See Also + -------- + asanyarray : Similar function which passes through subclasses. + ascontiguousarray : Convert input to a contiguous array. + asfarray : Convert input to a floating point ndarray. + asfortranarray : Convert input to an ndarray with column-major + memory order. + asarray_chkfinite : Similar function which checks input for NaNs and Infs. + fromiter : Create an array from an iterator. + fromfunction : Construct an array by executing a function on grid + positions. + + Examples + -------- + Convert a list into an array: + + >>> a = [1, 2] + >>> np.asarray(a) + array([1, 2]) + + Existing arrays are not copied: + + >>> a = np.array([1, 2]) + >>> np.asarray(a) is a + True + + If `dtype` is set, array is copied only if dtype does not match: + + >>> a = np.array([1, 2], dtype=np.float32) + >>> np.asarray(a, dtype=np.float32) is a + True + >>> np.asarray(a, dtype=np.float64) is a + False + + Contrary to `asanyarray`, ndarray subclasses are not passed through: + + >>> issubclass(np.matrix, np.ndarray) + True + >>> a = np.matrix([[1, 2]]) + >>> np.asarray(a) is a + False + >>> np.asanyarray(a) is a + True + + """ + return array(a, dtype, copy=False, order=order, + maskna=maskna, ownmaskna=ownmaskna) + set_string_function(array_str, 0) set_string_function(array_repr, 1) @@ -319,4 +438,4 @@ False_ = bool_(False) True_ = bool_(True) e = math.e -pi = math.pi \ No newline at end of file +pi = math.pi diff --git a/lib_pypy/pypy_test/test_binascii.py b/lib_pypy/pypy_test/test_binascii.py deleted file mode 100644 --- a/lib_pypy/pypy_test/test_binascii.py +++ /dev/null @@ -1,168 +0,0 @@ -from __future__ import absolute_import -import py -from lib_pypy import binascii - -# Create binary test data -data = "The quick brown fox jumps over the lazy dog.\r\n" -# Be slow so we don't depend on other modules -data += "".join(map(chr, xrange(256))) -data += "\r\nHello world.\n" - -def test_exceptions(): - # Check module exceptions - assert issubclass(binascii.Error, 
Exception) - assert issubclass(binascii.Incomplete, Exception) - -def test_functions(): - # Check presence of all functions - funcs = [] - for suffix in "base64", "hqx", "uu", "hex": - prefixes = ["a2b_", "b2a_"] - if suffix == "hqx": - prefixes.extend(["crc_", "rlecode_", "rledecode_"]) - for prefix in prefixes: - name = prefix + suffix - assert callable(getattr(binascii, name)) - py.test.raises(TypeError, getattr(binascii, name)) - for name in ("hexlify", "unhexlify"): - assert callable(getattr(binascii, name)) - py.test.raises(TypeError, getattr(binascii, name)) - -def test_base64valid(): - # Test base64 with valid data - MAX_BASE64 = 57 - lines = [] - for i in range(0, len(data), MAX_BASE64): - b = data[i:i+MAX_BASE64] - a = binascii.b2a_base64(b) - lines.append(a) - res = "" - for line in lines: - b = binascii.a2b_base64(line) - res = res + b - assert res == data - -def test_base64invalid(): - # Test base64 with random invalid characters sprinkled throughout - # (This requires a new version of binascii.) - MAX_BASE64 = 57 - lines = [] - for i in range(0, len(data), MAX_BASE64): - b = data[i:i+MAX_BASE64] - a = binascii.b2a_base64(b) - lines.append(a) - - fillers = "" - valid = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+/" - for i in xrange(256): - c = chr(i) - if c not in valid: - fillers += c - def addnoise(line): - noise = fillers - ratio = len(line) // len(noise) - res = "" - while line and noise: - if len(line) // len(noise) > ratio: - c, line = line[0], line[1:] - else: - c, noise = noise[0], noise[1:] - res += c - return res + noise + line - res = "" - for line in map(addnoise, lines): - b = binascii.a2b_base64(line) - res += b - assert res == data - - # Test base64 with just invalid characters, which should return - # empty strings. TBD: shouldn't it raise an exception instead ? 
- assert binascii.a2b_base64(fillers) == '' - -def test_uu(): - MAX_UU = 45 - lines = [] - for i in range(0, len(data), MAX_UU): - b = data[i:i+MAX_UU] - a = binascii.b2a_uu(b) - lines.append(a) - res = "" - for line in lines: - b = binascii.a2b_uu(line) - res += b - assert res == data - - assert binascii.a2b_uu("\x7f") == "\x00"*31 - assert binascii.a2b_uu("\x80") == "\x00"*32 - assert binascii.a2b_uu("\xff") == "\x00"*31 - py.test.raises(binascii.Error, binascii.a2b_uu, "\xff\x00") - py.test.raises(binascii.Error, binascii.a2b_uu, "!!!!") - - py.test.raises(binascii.Error, binascii.b2a_uu, 46*"!") - -def test_crc32(): - crc = binascii.crc32("Test the CRC-32 of") - crc = binascii.crc32(" this string.", crc) - assert crc == 1571220330 - - crc = binascii.crc32('frotz\n', 0) - assert crc == -372923920 - - py.test.raises(TypeError, binascii.crc32) - -def test_hex(): - # test hexlification - s = '{s\005\000\000\000worldi\002\000\000\000s\005\000\000\000helloi\001\000\000\0000' - t = binascii.b2a_hex(s) - u = binascii.a2b_hex(t) - assert s == u - py.test.raises(TypeError, binascii.a2b_hex, t[:-1]) - py.test.raises(TypeError, binascii.a2b_hex, t[:-1] + 'q') - - # Verify the treatment of Unicode strings - assert binascii.hexlify(unicode('a', 'ascii')) == '61' - -def test_qp(): - # A test for SF bug 534347 (segfaults without the proper fix) - try: - binascii.a2b_qp("", **{1:1}) - except TypeError: - pass - else: - fail("binascii.a2b_qp(**{1:1}) didn't raise TypeError") - assert binascii.a2b_qp("= ") == "= " - assert binascii.a2b_qp("==") == "=" - assert binascii.a2b_qp("=AX") == "=AX" - py.test.raises(TypeError, binascii.b2a_qp, foo="bar") - assert binascii.a2b_qp("=00\r\n=00") == "\x00\r\n\x00" - assert binascii.b2a_qp("\xff\r\n\xff\n\xff") == "=FF\r\n=FF\r\n=FF" - target = "0"*75+"=\r\n=FF\r\n=FF\r\n=FF" - assert binascii.b2a_qp("0"*75+"\xff\r\n\xff\r\n\xff") == target - -def test_empty_string(): - # A test for SF bug #1022953. Make sure SystemError is not raised. 
- for n in ['b2a_qp', 'a2b_hex', 'b2a_base64', 'a2b_uu', 'a2b_qp', - 'b2a_hex', 'unhexlify', 'hexlify', 'crc32', 'b2a_hqx', - 'a2b_hqx', 'a2b_base64', 'rlecode_hqx', 'b2a_uu', - 'rledecode_hqx']: - f = getattr(binascii, n) - f('') - binascii.crc_hqx('', 0) - -def test_qp_bug_case(): - assert binascii.b2a_qp('y'*77, False, False) == 'y'*75 + '=\nyy' - assert binascii.b2a_qp(' '*77, False, False) == ' '*75 + '=\n =20' - assert binascii.b2a_qp('y'*76, False, False) == 'y'*76 - assert binascii.b2a_qp(' '*76, False, False) == ' '*75 + '=\n=20' - -def test_wrong_padding(): - s = 'CSixpLDtKSC/7Liuvsax4iC6uLmwMcijIKHaILzSwd/H0SC8+LCjwLsgv7W/+Mj3IQ' - py.test.raises(binascii.Error, binascii.a2b_base64, s) - -def test_crap_after_padding(): - s = 'xxx=axxxx' - assert binascii.a2b_base64(s) == '\xc7\x1c' - -def test_wrong_args(): - # this should grow as a way longer list - py.test.raises(TypeError, binascii.a2b_base64, 42) diff --git a/lib_pypy/pypy_test/test_locale.py b/lib_pypy/pypy_test/test_locale.py deleted file mode 100644 --- a/lib_pypy/pypy_test/test_locale.py +++ /dev/null @@ -1,79 +0,0 @@ -from __future__ import absolute_import -import py -import sys - -from lib_pypy.ctypes_config_cache import rebuild -rebuild.rebuild_one('locale.ctc.py') - -from lib_pypy import _locale - - -def setup_module(mod): - if sys.platform == 'darwin': - py.test.skip("Locale support on MacOSX is minimal and cannot be tested") - -class TestLocale: - def setup_class(cls): - cls.oldlocale = _locale.setlocale(_locale.LC_NUMERIC) - if sys.platform.startswith("win"): - cls.tloc = "en" - elif sys.platform.startswith("freebsd"): - cls.tloc = "en_US.US-ASCII" - else: - cls.tloc = "en_US.UTF8" - try: - _locale.setlocale(_locale.LC_NUMERIC, cls.tloc) - except _locale.Error: - py.test.skip("test locale %s not supported" % cls.tloc) - - def teardown_class(cls): - _locale.setlocale(_locale.LC_NUMERIC, cls.oldlocale) - - def test_format(self): - py.test.skip("XXX fix or kill me") - - def 
testformat(formatstr, value, grouping = 0, output=None): - if output: - print "%s %% %s =? %s ..." %\ - (repr(formatstr), repr(value), repr(output)), - else: - print "%s %% %s works? ..." % (repr(formatstr), repr(value)), - result = locale.format(formatstr, value, grouping = grouping) - assert result == output - - testformat("%f", 1024, grouping=1, output='1,024.000000') - testformat("%f", 102, grouping=1, output='102.000000') - testformat("%f", -42, grouping=1, output='-42.000000') - testformat("%+f", -42, grouping=1, output='-42.000000') - testformat("%20.f", -42, grouping=1, output=' -42') - testformat("%+10.f", -4200, grouping=1, output=' -4,200') - testformat("%-10.f", 4200, grouping=1, output='4,200 ') - - def test_getpreferredencoding(self): - py.test.skip("XXX fix or kill me") - # Invoke getpreferredencoding to make sure it does not cause exceptions - _locale.getpreferredencoding() - - # Test BSD Rune locale's bug for isctype functions. - def test_bsd_bug(self): - def teststrop(s, method, output): - print "%s.%s() =? %s ..." 
% (repr(s), method, repr(output)), - result = getattr(s, method)() - assert result == output - - oldlocale = _locale.setlocale(_locale.LC_CTYPE) - _locale.setlocale(_locale.LC_CTYPE, self.tloc) - try: - teststrop('\x20', 'isspace', True) - teststrop('\xa0', 'isspace', False) - teststrop('\xa1', 'isspace', False) - teststrop('\xc0', 'isalpha', False) - teststrop('\xc0', 'isalnum', False) - teststrop('\xc0', 'isupper', False) - teststrop('\xc0', 'islower', False) - teststrop('\xec\xa0\xbc', 'split', ['\xec\xa0\xbc']) - teststrop('\xed\x95\xa0', 'strip', '\xed\x95\xa0') - teststrop('\xcc\x85', 'lower', '\xcc\x85') - teststrop('\xed\x95\xa0', 'upper', '\xed\x95\xa0') - finally: - _locale.setlocale(_locale.LC_CTYPE, oldlocale) diff --git a/lib_pypy/pypy_test/test_site_extra.py b/lib_pypy/pypy_test/test_site_extra.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pypy_test/test_site_extra.py @@ -0,0 +1,13 @@ +import sys, os + + +def test_preimported_modules(): + lst = ['__builtin__', '_codecs', '_warnings', 'codecs', 'encodings', + 'exceptions', 'signal', 'sys', 'zipimport'] + g = os.popen("'%s' -c 'import sys; print sorted(sys.modules)'" % + (sys.executable,)) + real_data = g.read() + g.close() + for name in lst: + quoted_name = repr(name) + assert quoted_name in real_data diff --git a/lib_pypy/pypy_test/test_struct_extra.py b/lib_pypy/pypy_test/test_struct_extra.py deleted file mode 100644 --- a/lib_pypy/pypy_test/test_struct_extra.py +++ /dev/null @@ -1,25 +0,0 @@ -from __future__ import absolute_import -from lib_pypy import struct - -def test_simple(): - morezeros = '\x00' * (struct.calcsize('l')-4) - assert struct.pack(': big-endian, std. 
size & alignment - !: same as > - -The remaining chars indicate types of args and must match exactly; -these can be preceded by a decimal repeat count: - x: pad byte (no data); - c:char; - b:signed byte; - B:unsigned byte; - h:short; - H:unsigned short; - i:int; - I:unsigned int; - l:long; - L:unsigned long; - f:float; - d:double. -Special cases (preceding decimal count indicates length): - s:string (array of char); p: pascal string (with count byte). -Special case (only available in native format): - P:an integer type that is wide enough to hold a pointer. -Special case (not in native mode unless 'long long' in platform C): - q:long long; - Q:unsigned long long -Whitespace between formats is ignored. - -The variable struct.error is an exception raised on errors.""" - -import math, sys - -# TODO: XXX Find a way to get information on native sizes and alignments -class StructError(Exception): - pass -error = StructError -def unpack_int(data,index,size,le): - bytes = [ord(b) for b in data[index:index+size]] - if le == 'little': - bytes.reverse() - number = 0L - for b in bytes: - number = number << 8 | b - return int(number) - -def unpack_signed_int(data,index,size,le): - number = unpack_int(data,index,size,le) - max = 2**(size*8) - if number > 2**(size*8 - 1) - 1: - number = int(-1*(max - number)) - return number - -INFINITY = 1e200 * 1e200 -NAN = INFINITY / INFINITY - -def unpack_char(data,index,size,le): - return data[index:index+size] - -def pack_int(number,size,le): - x=number - res=[] - for i in range(size): - res.append(chr(x&0xff)) - x >>= 8 - if le == 'big': - res.reverse() - return ''.join(res) - -def pack_signed_int(number,size,le): - if not isinstance(number, (int,long)): - raise StructError,"argument for i,I,l,L,q,Q,h,H must be integer" - if number > 2**(8*size-1)-1 or number < -1*2**(8*size-1): - raise OverflowError,"Number:%i too large to convert" % number - return pack_int(number,size,le) - -def pack_unsigned_int(number,size,le): - if not 
isinstance(number, (int,long)): - raise StructError,"argument for i,I,l,L,q,Q,h,H must be integer" - if number < 0: - raise TypeError,"can't convert negative long to unsigned" - if number > 2**(8*size)-1: - raise OverflowError,"Number:%i too large to convert" % number - return pack_int(number,size,le) - -def pack_char(char,size,le): - return str(char) - -def isinf(x): - return x != 0.0 and x / 2 == x -def isnan(v): - return v != v*1.0 or (v == 1.0 and v == 2.0) - -def pack_float(x, size, le): - unsigned = float_pack(x, size) - result = [] - for i in range(8): - result.append(chr((unsigned >> (i * 8)) & 0xFF)) - if le == "big": - result.reverse() - return ''.join(result) - -def unpack_float(data, index, size, le): - binary = [data[i] for i in range(index, index + 8)] - if le == "big": - binary.reverse() - unsigned = 0 - for i in range(8): - unsigned |= ord(binary[i]) << (i * 8) - return float_unpack(unsigned, size, le) - -def round_to_nearest(x): - """Python 3 style round: round a float x to the nearest int, but - unlike the builtin Python 2.x round function: - - - return an int, not a float - - do round-half-to-even, not round-half-away-from-zero. - - We assume that x is finite and nonnegative; except wrong results - if you use this for negative x. 
- - """ - int_part = int(x) - frac_part = x - int_part - if frac_part > 0.5 or frac_part == 0.5 and int_part & 1 == 1: - int_part += 1 - return int_part - -def float_unpack(Q, size, le): - """Convert a 32-bit or 64-bit integer created - by float_pack into a Python float.""" - - if size == 8: - MIN_EXP = -1021 # = sys.float_info.min_exp - MAX_EXP = 1024 # = sys.float_info.max_exp - MANT_DIG = 53 # = sys.float_info.mant_dig - BITS = 64 - elif size == 4: - MIN_EXP = -125 # C's FLT_MIN_EXP - MAX_EXP = 128 # FLT_MAX_EXP - MANT_DIG = 24 # FLT_MANT_DIG - BITS = 32 - else: - raise ValueError("invalid size value") - - if Q >> BITS: - raise ValueError("input out of range") - - # extract pieces - sign = Q >> BITS - 1 - exp = (Q & ((1 << BITS - 1) - (1 << MANT_DIG - 1))) >> MANT_DIG - 1 - mant = Q & ((1 << MANT_DIG - 1) - 1) - - if exp == MAX_EXP - MIN_EXP + 2: - # nan or infinity - result = float('nan') if mant else float('inf') - elif exp == 0: - # subnormal or zero - result = math.ldexp(float(mant), MIN_EXP - MANT_DIG) - else: - # normal - mant += 1 << MANT_DIG - 1 - result = math.ldexp(float(mant), exp + MIN_EXP - MANT_DIG - 1) - return -result if sign else result - - -def float_pack(x, size): - """Convert a Python float x into a 64-bit unsigned integer - with the same byte representation.""" - - if size == 8: - MIN_EXP = -1021 # = sys.float_info.min_exp - MAX_EXP = 1024 # = sys.float_info.max_exp - MANT_DIG = 53 # = sys.float_info.mant_dig - BITS = 64 - elif size == 4: - MIN_EXP = -125 # C's FLT_MIN_EXP - MAX_EXP = 128 # FLT_MAX_EXP - MANT_DIG = 24 # FLT_MANT_DIG - BITS = 32 - else: - raise ValueError("invalid size value") - - sign = math.copysign(1.0, x) < 0.0 - if math.isinf(x): - mant = 0 - exp = MAX_EXP - MIN_EXP + 2 - elif math.isnan(x): - mant = 1 << (MANT_DIG-2) # other values possible - exp = MAX_EXP - MIN_EXP + 2 - elif x == 0.0: - mant = 0 - exp = 0 - else: - m, e = math.frexp(abs(x)) # abs(x) == m * 2**e - exp = e - (MIN_EXP - 1) - if exp > 0: - # Normal case. 
- mant = round_to_nearest(m * (1 << MANT_DIG)) - mant -= 1 << MANT_DIG - 1 - else: - # Subnormal case. - if exp + MANT_DIG - 1 >= 0: - mant = round_to_nearest(m * (1 << exp + MANT_DIG - 1)) - else: - mant = 0 - exp = 0 - - # Special case: rounding produced a MANT_DIG-bit mantissa. - assert 0 <= mant <= 1 << MANT_DIG - 1 - if mant == 1 << MANT_DIG - 1: - mant = 0 - exp += 1 - - # Raise on overflow (in some circumstances, may want to return - # infinity instead). - if exp >= MAX_EXP - MIN_EXP + 2: - raise OverflowError("float too large to pack in this format") - - # check constraints - assert 0 <= mant < 1 << MANT_DIG - 1 - assert 0 <= exp <= MAX_EXP - MIN_EXP + 2 - assert 0 <= sign <= 1 - return ((sign << BITS - 1) | (exp << MANT_DIG - 1)) | mant - - -big_endian_format = { - 'x':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None}, - 'b':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'B':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'c':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_char, 'unpack' : unpack_char}, - 's':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None}, - 'p':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None}, - 'h':{ 'size' : 2, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'H':{ 'size' : 2, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'i':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'I':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'l':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'L':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'q':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'Q':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'f':{ 
'size' : 4, 'alignment' : 0, 'pack' : pack_float, 'unpack' : unpack_float}, - 'd':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_float, 'unpack' : unpack_float}, - } -default = big_endian_format -formatmode={ '<' : (default, 'little'), - '>' : (default, 'big'), - '!' : (default, 'big'), - '=' : (default, sys.byteorder), - '@' : (default, sys.byteorder) - } - -def getmode(fmt): - try: - formatdef,endianness = formatmode[fmt[0]] - index = 1 - except KeyError: - formatdef,endianness = formatmode['@'] - index = 0 - return formatdef,endianness,index -def getNum(fmt,i): - num=None - cur = fmt[i] - while ('0'<= cur ) and ( cur <= '9'): - if num == None: - num = int(cur) - else: - num = 10*num + int(cur) - i += 1 - cur = fmt[i] - return num,i - -def calcsize(fmt): - """calcsize(fmt) -> int - Return size of C struct described by format string fmt. - See struct.__doc__ for more on format strings.""" - - formatdef,endianness,i = getmode(fmt) - num = 0 - result = 0 - while i string - Return string containing values v1, v2, ... packed according to fmt. - See struct.__doc__ for more on format strings.""" - formatdef,endianness,i = getmode(fmt) - args = list(args) - n_args = len(args) - result = [] - while i 0: - result += [chr(len(args[0])) + args[0][:num-1] + '\0'*padding] - else: - if num<255: - result += [chr(num-1) + args[0][:num-1]] - else: - result += [chr(255) + args[0][:num-1]] - args.pop(0) - else: - raise StructError,"arg for string format not a string" - - else: - if len(args) < num: - raise StructError,"insufficient arguments to pack" - for var in args[:num]: - result += [format['pack'](var,format['size'],endianness)] - args=args[num:] - num = None - i += 1 - if len(args) != 0: - raise StructError,"too many arguments for pack format" - return ''.join(result) - -def unpack(fmt,data): - """unpack(fmt, string) -> (v1, v2, ...) - Unpack the string, containing packed C structure data, according - to fmt. Requires len(string)==calcsize(fmt). 
- See struct.__doc__ for more on format strings.""" - formatdef,endianness,i = getmode(fmt) - j = 0 - num = 0 - result = [] - length= calcsize(fmt) - if length != len (data): - raise StructError,"unpack str size does not match format" - while i= num: - n = num-1 - result.append(data[j+1:j+n+1]) - j += num - else: - for n in range(num): - result += [format['unpack'](data,j,format['size'],endianness)] - j += format['size'] - - return tuple(result) - -def pack_into(fmt, buf, offset, *args): - data = pack(fmt, *args) - buffer(buf)[offset:offset+len(data)] = data - -def unpack_from(fmt, buf, offset=0): - size = calcsize(fmt) - data = buffer(buf)[offset:offset+size] - if len(data) != size: - raise error("unpack_from requires a buffer of at least %d bytes" - % (size,)) - return unpack(fmt, data) diff --git a/pypy/__init__.py b/pypy/__init__.py --- a/pypy/__init__.py +++ b/pypy/__init__.py @@ -1,1 +1,16 @@ # Empty + +# XXX Should be empty again, soon. +# XXX hack for win64: +# This patch must stay here until the END OF STAGE 1 +# When all tests work, this branch will be merged +# and the branch stage 2 is started, where we remove this patch. +import sys +if hasattr(sys, "maxsize"): + if sys.maxint != sys.maxsize: + sys.maxint = sys.maxsize + import warnings + warnings.warn("""\n +---> This win64 port is now in stage 1: sys.maxint was modified. +---> When pypy/__init__.py becomes empty again, we have reached stage 2. +""") diff --git a/pypy/annotation/classdef.py b/pypy/annotation/classdef.py --- a/pypy/annotation/classdef.py +++ b/pypy/annotation/classdef.py @@ -148,7 +148,6 @@ "the attribute here; the list of read locations is:\n" + '\n'.join([str(loc[0]) for loc in self.read_locations])) - class ClassDef(object): "Wraps a user class." diff --git a/pypy/doc/discussion/win64_todo.txt b/pypy/doc/discussion/win64_todo.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/discussion/win64_todo.txt @@ -0,0 +1,9 @@ +2011-11-04 +ll_os.py has a problem with the file rwin32.py. 
+Temporarily disabled for the win64_gborg branch. This needs to be +investigated and re-enabled. +Resolved, enabled. + +2011-11-05 +test_typed.py needs explicit tests to ensure that we +handle word sizes right. \ No newline at end of file diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -103,21 +103,13 @@ * A concurrent garbage collector (a lot of work) -Remove the GIL --------------- +STM, a.k.a. "remove the GIL" +---------------------------- -This is a major task that requires lots of thinking. However, few subprojects -can be potentially specified, unless a better plan can be thought out: +Removing the GIL --- or more precisely, a GIL-less thread-less solution --- +is `now work in progress.`__ Contributions welcome. -* A thread-aware garbage collector - -* Better RPython primitives for dealing with concurrency - -* JIT passes to remove locks on objects - -* (maybe) implement locking in Python interpreter - -* alternatively, look at Software Transactional Memory +.. __: http://pypy.org/tmdonate.html Introduce new benchmarks ------------------------ diff --git a/pypy/doc/sandbox.rst b/pypy/doc/sandbox.rst --- a/pypy/doc/sandbox.rst +++ b/pypy/doc/sandbox.rst @@ -82,7 +82,10 @@ In pypy/translator/goal:: - ./translate.py --sandbox targetpypystandalone.py + ./translate.py -O2 --sandbox targetpypystandalone.py + +If you don't have a regular PyPy installed, you should, because it's +faster to translate, but you can also run ``python translate.py`` instead. To run it, use the tools in the pypy/translator/sandbox directory:: diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -18,7 +18,8 @@ Edition. Other configurations may work as well. The translation scripts will set up the appropriate environment variables -for the compiler. 
They will attempt to locate the same compiler version that +for the compiler, so you do not need to run vcvars before translation. +They will attempt to locate the same compiler version that was used to build the Python interpreter doing the translation. Failing that, they will pick the most recent Visual Studio compiler they can find. In addition, the target architecture @@ -26,7 +27,7 @@ using a 32 bit Python and vice versa. **Note:** PyPy is currently not supported for 64 bit Windows, and translation -will be aborted in this case. +will fail in this case. The compiler is all you need to build pypy-c, but it will miss some modules that relies on third-party libraries. See below how to get @@ -57,7 +58,8 @@ install third-party libraries. We chose to install them in the parent directory of the pypy checkout. For example, if you installed pypy in ``d:\pypy\trunk\`` (This directory contains a README file), the base -directory is ``d:\pypy``. +directory is ``d:\pypy``. You may choose different values by setting the +INCLUDE, LIB and PATH (for DLLs) The Boehm garbage collector ~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -126,18 +128,54 @@ ------------------------ You can compile pypy with the mingw compiler, using the --cc=mingw32 option; -mingw.exe must be on the PATH. +gcc.exe must be on the PATH. If the -cc flag does not begin with "ming", it should be +the name of a valid gcc-derivative compiler, i.e. x86_64-w64-mingw32-gcc for the 64 bit +compiler creating a 64 bit target. -libffi for the mingw32 compiler +libffi for the mingw compiler ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -To enable the _rawffi (and ctypes) module, you need to compile a mingw32 -version of libffi. I downloaded the `libffi source files`_, and extracted -them in the base directory. Then run:: +To enable the _rawffi (and ctypes) module, you need to compile a mingw +version of libffi. Here is one way to do this, wich should allow you to try +to build for win64 or win32: + +#. 
Download and unzip a `mingw32 build`_ or `mingw64 build`_, say into c:\mingw +#. If you do not use cygwin, you will need msys to provide make, + autoconf tools and other goodies. + + #. Download and unzip a `msys for mingw`_, say into c:\msys + #. Edit the c:\msys\etc\fstab file to mount c:\mingw + +#. Download and unzip the `libffi source files`_, and extract + them in the base directory. +#. Run c:\msys\msys.bat or a cygwin shell which should make you + feel better since it is a shell prompt with shell tools. +#. From inside the shell, cd to the libffi directory and do:: sh ./configure make cp .libs/libffi-5.dll +If you can't find the dll, and the libtool issued a warning about +"undefined symbols not allowed", you will need to edit the libffi +Makefile in the toplevel directory. Add the flag -no-undefined to +the definition of libffi_la_LDFLAGS + +If you wish to experiment with win64, you must run configure with flags:: + + sh ./configure --build=x86_64-w64-mingw32 --host=x86_64-w64-mingw32 + +or such, depending on your mingw64 download. + +hacking on Pypy with the mingw compiler +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Since hacking on Pypy means running tests, you will need a way to specify +the mingw compiler when hacking (as opposed to translating). As of +March 2012, --cc is not a valid option for pytest.py. However if you set an +environment variable CC it will allow you to choose a compiler. + +.. _'mingw32 build': http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win32/Automated%20Builds +.. _`mingw64 build`: http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win64/Automated%20Builds +.. _`msys for mingw`: http://sourceforge.net/projects/mingw-w64/files/External%20binary%20packages%20%28Win64%20hosted%29/MSYS%20%2832-bit%29 .. _`libffi source files`: http://sourceware.org/libffi/ .. 
_`RPython translation toolchain`: translation.html diff --git a/pypy/doc/you-want-to-help.rst b/pypy/doc/you-want-to-help.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/you-want-to-help.rst @@ -0,0 +1,86 @@ + +You want to help with PyPy, now what? +===================================== + +PyPy is a very large project that has a reputation of being hard to dive into. +Some of this fame is warranted, some of it is purely accidental. There are three +important lessons that everyone willing to contribute should learn: + +* PyPy has layers. There are many pieces of architecture that are very well + separated from each other. More about this below, but often the manifestation + of this is that things are at a different layer than you would expect them + to be. For example if you are looking for the JIT implementation, you will + not find it in the implementation of the Python programming language. + +* Because of the above, we are very serious about Test Driven Development. + It's not only what we believe in, but also that PyPy's architecture is + working very well with TDD in mind and not so well without it. Often + the development means progressing in an unrelated corner, one unittest + at a time; and then flipping a giant switch, bringing it all together. + (It generally works out of the box. If it doesn't, then we didn't + write enough unit tests.) It's worth repeating - PyPy + approach is great if you do TDD, not so great otherwise. + +* PyPy uses an entirely different set of tools - most of them included + in the PyPy repository. There is no Makefile, nor autoconf. More below + +Architecture +============ + +PyPy has layers. The 100 miles view: + +* `RPython`_ is the language in which we write interpreters. Not the entire + PyPy project is written in RPython, only the parts that are compiled in + the translation process. 
The interesting point is that RPython has no parser, + it's compiled from the live python objects, which make it possible to do + all kinds of metaprogramming during import time. In short, Python is a meta + programming language for RPython. + + The RPython standard library is to be found in the ``rlib`` subdirectory. + +.. _`RPython`: coding-guide.html#RPython + +* The translation toolchain - this is the part that takes care about translating + RPython to flow graphs and then to C. There is more in the `architecture`_ + document written about it. + + It mostly lives in ``rpython``, ``annotator`` and ``objspace/flow``. + +.. _`architecture`: architecture.html + +* Python Interpreter + + xxx + +* Python modules + + xxx + +* Just-in-Time Compiler (JIT): `we have a tracing JIT`_ that traces the + interpreter written in RPython, rather than the user program that it + interprets. As a result it applies to any interpreter, i.e. any + language. But getting it to work correctly is not trivial: it + requires a small number of precise "hints" and possibly some small + refactorings of the interpreter. The JIT itself also has several + almost-independent parts: the tracer itself in ``jit/metainterp``, the + optimizer in ``jit/metainterp/optimizer`` that optimizes a list of + residual operations, and the backend in ``jit/backend/`` + that turns it into machine code. Writing a new backend is a + traditional way to get into the project. + +.. _`we have a tracing JIT`: jit/index.html + +* Garbage Collectors (GC): as you can notice if you are used to CPython's + C code, there are no ``Py_INCREF/Py_DECREF`` equivalents in RPython code. + `Garbage collection in PyPy`_ is inserted + during translation. Moreover, this is not reference counting; it is a real + GC written as more RPython code. The best one we have so far is in + ``rpython/memory/gc/minimark.py``. + +.. 
_`Garbage collection in PyPy`: garbage_collection.html + + +Toolset +======= + +xxx diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py --- a/pypy/interpreter/astcompiler/test/test_astbuilder.py +++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py @@ -10,16 +10,6 @@ from pypy.interpreter.astcompiler import ast, consts -try: - all -except NameError: - def all(iterable): - for x in iterable: - if not x: - return False - return True - - class TestAstBuilder: def setup_class(cls): diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1336,7 +1336,7 @@ if not self.is_true(self.isinstance(w_obj, self.w_str)): raise OperationError(self.w_TypeError, self.wrap('argument must be a string')) - return self.str_w(w_obj) + return self.str_w(w_obj) def unicode_w(self, w_obj): return w_obj.unicode_w(self) diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -47,6 +47,11 @@ def async(self, space): "Check if this is an exception that should better not be caught." 
+ if not space.full_exceptions: + # flow objspace does not support such exceptions and more + # importantly, raises KeyboardInterrupt if you try to access + # space.w_KeyboardInterrupt + return False return (self.match(space, space.w_SystemExit) or self.match(space, space.w_KeyboardInterrupt)) diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -901,15 +901,17 @@ def __init__(self, source, filename=None, modname='__builtin__'): # HAAACK (but a good one) + self.filename = filename + self.source = str(py.code.Source(source).deindent()) + self.modname = modname if filename is None: f = sys._getframe(1) filename = '<%s:%d>' % (f.f_code.co_filename, f.f_lineno) + if not os.path.exists(filename): + # make source code available for tracebacks + lines = [x + "\n" for x in source.split("\n")] + py.std.linecache.cache[filename] = (1, None, lines, filename) self.filename = filename - self.source = str(py.code.Source(source).deindent()) - self.modname = modname - # make source code available for tracebacks - lines = [x + "\n" for x in source.split("\n")] - py.std.linecache.cache[filename] = (1, None, lines, filename) def __repr__(self): return "" % (self.filename,) diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py --- a/pypy/interpreter/test/test_objspace.py +++ b/pypy/interpreter/test/test_objspace.py @@ -312,8 +312,8 @@ mods = space.get_builtinmodule_to_install() assert '__pypy__' in mods # real builtin - assert 'array' not in mods # in lib_pypy - assert 'faked+array' not in mods # in lib_pypy + assert '_functools' not in mods # in lib_pypy + assert 'faked+_functools' not in mods # in lib_pypy assert 'this_doesnt_exist' not in mods # not in lib_pypy assert 'faked+this_doesnt_exist' in mods # not in lib_pypy, but in # ALL_BUILTIN_MODULES diff --git a/pypy/interpreter/test/test_zzpickle_and_slow.py 
b/pypy/interpreter/test/test_zzpickle_and_slow.py --- a/pypy/interpreter/test/test_zzpickle_and_slow.py +++ b/pypy/interpreter/test/test_zzpickle_and_slow.py @@ -75,6 +75,7 @@ class AppTestInterpObjectPickling: pytestmark = py.test.mark.skipif("config.option.runappdirect") def setup_class(cls): + cls.space = gettestobjspace(usemodules=['struct']) _attach_helpers(cls.space) def teardown_class(cls): diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -321,7 +321,7 @@ except KeyError: pass # 'var' is already not in a register - def loc(self, box): + def loc(self, box, must_exist=False): """ Return the location of 'box'. """ self._check_type(box) @@ -332,6 +332,8 @@ except KeyError: if box in self.bindings_to_frame_reg: return self.frame_reg + if must_exist: + return self.frame_manager.bindings[box] return self.frame_manager.loc(box) def return_constant(self, v, forbidden_vars=[], selected_reg=None): @@ -360,7 +362,7 @@ self._check_type(v) if isinstance(v, Const): return self.return_constant(v, forbidden_vars, selected_reg) - prev_loc = self.loc(v) + prev_loc = self.loc(v, must_exist=True) if prev_loc is self.frame_reg and selected_reg is None: return prev_loc loc = self.force_allocate_reg(v, forbidden_vars, selected_reg, diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -11,6 +11,7 @@ from pypy.jit.tool.oparser import parse from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE from pypy.jit.metainterp.optimizeopt.util import equaloplists +from pypy.rlib.rarithmetic import is_valid_int def test_boehm(): gc_ll_descr = GcLLDescr_boehm(None, None, None) @@ -103,7 +104,7 @@ gcrootmap.put(retaddr, shapeaddr) assert gcrootmap._gcmap[0] == retaddr assert gcrootmap._gcmap[1] == 
shapeaddr - p = rffi.cast(rffi.LONGP, gcrootmap.gcmapstart()) + p = rffi.cast(rffi.SIGNEDP, gcrootmap.gcmapstart()) assert p[0] == retaddr assert (gcrootmap.gcmapend() == gcrootmap.gcmapstart() + rffi.sizeof(lltype.Signed) * 2) @@ -419,9 +420,9 @@ assert newops[0].getarg(1) == v_value assert newops[0].result is None wbdescr = newops[0].getdescr() - assert isinstance(wbdescr.jit_wb_if_flag, int) - assert isinstance(wbdescr.jit_wb_if_flag_byteofs, int) - assert isinstance(wbdescr.jit_wb_if_flag_singlebyte, int) + assert is_valid_int(wbdescr.jit_wb_if_flag) + assert is_valid_int(wbdescr.jit_wb_if_flag_byteofs) + assert is_valid_int(wbdescr.jit_wb_if_flag_singlebyte) def test_get_rid_of_debug_merge_point(self): operations = [ diff --git a/pypy/jit/backend/llsupport/test/test_regalloc.py b/pypy/jit/backend/llsupport/test/test_regalloc.py --- a/pypy/jit/backend/llsupport/test/test_regalloc.py +++ b/pypy/jit/backend/llsupport/test/test_regalloc.py @@ -1,4 +1,4 @@ - +import py from pypy.jit.metainterp.history import BoxInt, ConstInt, BoxFloat, INT, FLOAT from pypy.jit.backend.llsupport.regalloc import FrameManager from pypy.jit.backend.llsupport.regalloc import RegisterManager as BaseRegMan @@ -236,6 +236,16 @@ assert isinstance(loc, FakeFramePos) assert len(asm.moves) == 1 + def test_bogus_make_sure_var_in_reg(self): + b0, = newboxes(0) + longevity = {b0: (0, 1)} + fm = TFrameManager() + asm = MockAsm() + rm = RegisterManager(longevity, frame_manager=fm, assembler=asm) + rm.next_instruction() + # invalid call to make_sure_var_in_reg(): box unknown so far + py.test.raises(KeyError, rm.make_sure_var_in_reg, b0) + def test_return_constant(self): asm = MockAsm() boxes, longevity = boxes_and_longevity(5) diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -16,9 +16,11 @@ from pypy.rpython.annlowlevel import llhelper from pypy.rpython.llinterp import 
LLException from pypy.jit.codewriter import heaptracker, longlong +from pypy.rlib import longlong2float from pypy.rlib.rarithmetic import intmask, is_valid_int from pypy.jit.backend.detect_cpu import autodetect_main_model_and_size + def boxfloat(x): return BoxFloat(longlong.getfloatstorage(x)) @@ -1496,13 +1498,30 @@ c_nest, c_nest], 'void') def test_read_timestamp(self): + if sys.platform == 'win32': + # windows quite often is very inexact (like the old Intel 8259 PIC), + # so we stretch the time a little bit. + # On my virtual Parallels machine in a 2GHz Core i7 Mac Mini, + # the test starts working at delay == 21670 and stops at 20600000. + # We take the geometric mean value. + from math import log, exp + delay_min = 21670 + delay_max = 20600000 + delay = int(exp((log(delay_min)+log(delay_max))/2)) + def wait_a_bit(): + for i in xrange(delay): pass + else: + def wait_a_bit(): + pass if longlong.is_64_bit: got1 = self.execute_operation(rop.READ_TIMESTAMP, [], 'int') + wait_a_bit() got2 = self.execute_operation(rop.READ_TIMESTAMP, [], 'int') res1 = got1.getint() res2 = got2.getint() else: got1 = self.execute_operation(rop.READ_TIMESTAMP, [], 'float') + wait_a_bit() got2 = self.execute_operation(rop.READ_TIMESTAMP, [], 'float') res1 = got1.getlonglong() res2 = got2.getlonglong() @@ -1598,6 +1617,12 @@ [BoxPtr(x)], 'int').value assert res == -19 + def test_convert_float_bytes(self): + t = 'int' if longlong.is_64_bit else 'float' + res = self.execute_operation(rop.CONVERT_FLOAT_BYTES_TO_LONGLONG, + [boxfloat(2.5)], t).value + assert res == longlong2float.float2longlong(2.5) + def test_ooops_non_gc(self): x = lltype.malloc(lltype.Struct('x'), flavor='raw') v = heaptracker.adr2int(llmemory.cast_ptr_to_adr(x)) diff --git a/pypy/jit/backend/test/support.py b/pypy/jit/backend/test/support.py --- a/pypy/jit/backend/test/support.py +++ b/pypy/jit/backend/test/support.py @@ -3,6 +3,7 @@ from pypy.rlib.debug import debug_print from pypy.translator.translator import 
TranslationContext, graphof from pypy.jit.metainterp.optimizeopt import ALL_OPTS_NAMES +from pypy.rlib.rarithmetic import is_valid_int class BaseCompiledMixin(object): @@ -24,7 +25,7 @@ from pypy.annotation import model as annmodel for arg in args: - assert isinstance(arg, int) + assert is_valid_int(arg) self.pre_translation_hook() t = self._get_TranslationContext() diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py --- a/pypy/jit/backend/test/test_random.py +++ b/pypy/jit/backend/test/test_random.py @@ -449,6 +449,7 @@ OPERATIONS.append(CastFloatToIntOperation(rop.CAST_FLOAT_TO_INT)) OPERATIONS.append(CastIntToFloatOperation(rop.CAST_INT_TO_FLOAT)) +OPERATIONS.append(CastFloatToIntOperation(rop.CONVERT_FLOAT_BYTES_TO_LONGLONG)) OperationBuilder.OPERATIONS = OPERATIONS @@ -502,11 +503,11 @@ else: assert 0, "unknown backend %r" % pytest.config.option.backend -# ____________________________________________________________ +# ____________________________________________________________ class RandomLoop(object): dont_generate_more = False - + def __init__(self, cpu, builder_factory, r, startvars=None): self.cpu = cpu if startvars is None: diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -606,7 +606,7 @@ else: assert token struct.number = compute_unique_id(token) - self.loop_run_counters.append(struct) + self.loop_run_counters.append(struct) return struct def _find_failure_recovery_bytecode(self, faildescr): @@ -665,7 +665,7 @@ ResOperation(rop.SETFIELD_RAW, [c_adr, box2], None, descr=self.debug_counter_descr)] operations.extend(ops) - + @specialize.argtype(1) def _inject_debugging_code(self, looptoken, operations, tp, number): if self._debug: @@ -836,8 +836,8 @@ self.mc.MOVSD_sx(0, loc.value) elif WORD == 4 and isinstance(loc, StackLoc) and loc.get_width() == 8: # XXX evil trick - 
self.mc.PUSH_b(get_ebp_ofs(loc.position)) - self.mc.PUSH_b(get_ebp_ofs(loc.position + 1)) + self.mc.PUSH_b(loc.value + 4) + self.mc.PUSH_b(loc.value) else: self.mc.PUSH(loc) @@ -847,8 +847,8 @@ self.mc.ADD_ri(esp.value, 8) # = size of doubles elif WORD == 4 and isinstance(loc, StackLoc) and loc.get_width() == 8: # XXX evil trick - self.mc.POP_b(get_ebp_ofs(loc.position + 1)) - self.mc.POP_b(get_ebp_ofs(loc.position)) + self.mc.POP_b(loc.value) + self.mc.POP_b(loc.value + 4) else: self.mc.POP(loc) @@ -1242,6 +1242,15 @@ self.mc.MOVD_xr(resloc.value, loc0.value) self.mc.CVTSS2SD_xx(resloc.value, resloc.value) + def genop_convert_float_bytes_to_longlong(self, op, arglocs, resloc): + loc0, = arglocs + if longlong.is_64_bit: + assert isinstance(resloc, RegLoc) + assert isinstance(loc0, RegLoc) + self.mc.MOVD(resloc, loc0) + else: + self.mov(loc0, resloc) + def genop_guard_int_is_true(self, op, guard_op, guard_token, arglocs, resloc): guard_opnum = guard_op.getopnum() self.mc.CMP(arglocs[0], imm0) @@ -1954,8 +1963,6 @@ mc.PUSH_r(ebx.value) elif IS_X86_64: mc.MOV_rr(edi.value, ebx.value) - # XXX: Correct to only align the stack on 64-bit? - mc.AND_ri(esp.value, -16) else: raise AssertionError("Shouldn't happen") @@ -2117,9 +2124,12 @@ # First, we need to save away the registers listed in # 'save_registers' that are not callee-save. XXX We assume that # the XMM registers won't be modified. We store them in - # [ESP+4], [ESP+8], etc., leaving enough room in [ESP] for the - # single argument to closestack_addr below. - p = WORD + # [ESP+4], [ESP+8], etc.; on x86-32 we leave enough room in [ESP] + # for the single argument to closestack_addr below. + if IS_X86_32: + p = WORD + elif IS_X86_64: + p = 0 for reg in self._regalloc.rm.save_around_call_regs: if reg in save_registers: self.mc.MOV_sr(p, reg.value) @@ -2174,7 +2184,10 @@ # self._emit_call(-1, imm(self.releasegil_addr), args) # Finally, restore the registers saved above. 
- p = WORD + if IS_X86_32: + p = WORD + elif IS_X86_64: + p = 0 for reg in self._regalloc.rm.save_around_call_regs: if reg in save_registers: self.mc.MOV_rs(reg.value, p) diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -766,6 +766,18 @@ consider_cast_singlefloat_to_float = consider_cast_int_to_float + def consider_convert_float_bytes_to_longlong(self, op): + if longlong.is_64_bit: + loc0 = self.xrm.make_sure_var_in_reg(op.getarg(0)) + loc1 = self.rm.force_allocate_reg(op.result) + self.Perform(op, [loc0], loc1) + self.xrm.possibly_free_var(op.getarg(0)) + else: + loc0 = self.xrm.loc(op.getarg(0)) + loc1 = self.xrm.force_allocate_reg(op.result) + self.Perform(op, [loc0], loc1) + self.xrm.possibly_free_var(op.getarg(0)) + def _consider_llong_binop_xx(self, op): # must force both arguments into xmm registers, because we don't # know if they will be suitably aligned. Exception: if the second diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py --- a/pypy/jit/backend/x86/rx86.py +++ b/pypy/jit/backend/x86/rx86.py @@ -601,9 +601,12 @@ CVTSS2SD_xb = xmminsn('\xF3', rex_nw, '\x0F\x5A', register(1, 8), stack_bp(2)) - MOVD_rx = xmminsn('\x66', rex_nw, '\x0F\x7E', register(2, 8), register(1), '\xC0') - MOVD_xr = xmminsn('\x66', rex_nw, '\x0F\x6E', register(1, 8), register(2), '\xC0') - MOVD_xb = xmminsn('\x66', rex_nw, '\x0F\x6E', register(1, 8), stack_bp(2)) + # These work on machine sized registers, so MOVD is actually MOVQ + # when running on 64 bits. 
Note a bug in the Intel documentation: + # http://lists.gnu.org/archive/html/bug-binutils/2007-07/msg00095.html + MOVD_rx = xmminsn('\x66', rex_w, '\x0F\x7E', register(2, 8), register(1), '\xC0') + MOVD_xr = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), register(2), '\xC0') + MOVD_xb = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), stack_bp(2)) PSRAD_xi = xmminsn('\x66', rex_nw, '\x0F\x72', register(1), '\xE0', immediate(2, 'b')) diff --git a/pypy/jit/backend/x86/support.py b/pypy/jit/backend/x86/support.py --- a/pypy/jit/backend/x86/support.py +++ b/pypy/jit/backend/x86/support.py @@ -36,15 +36,15 @@ # ____________________________________________________________ -if sys.platform == 'win32': - ensure_sse2_floats = lambda : None - # XXX check for SSE2 on win32 too +if WORD == 4: + extra = ['-DPYPY_X86_CHECK_SSE2'] else: - if WORD == 4: - extra = ['-DPYPY_X86_CHECK_SSE2'] - else: - extra = [] - ensure_sse2_floats = rffi.llexternal_use_eci(ExternalCompilationInfo( - compile_extra = ['-msse2', '-mfpmath=sse', - '-DPYPY_CPU_HAS_STANDARD_PRECISION'] + extra, - )) + extra = [] + +if sys.platform != 'win32': + extra = ['-msse2', '-mfpmath=sse', + '-DPYPY_CPU_HAS_STANDARD_PRECISION'] + extra + +ensure_sse2_floats = rffi.llexternal_use_eci(ExternalCompilationInfo( + compile_extra = extra, +)) diff --git a/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py b/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py --- a/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py +++ b/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py @@ -182,6 +182,12 @@ filename = str(testdir.join(FILENAME % methname)) g = open(inputname, 'w') g.write('\x09.string "%s"\n' % BEGIN_TAG) + # + if instrname == 'MOVD' and self.WORD == 8: + instrname = 'MOVQ' + if argmodes == 'xb': + py.test.skip('"as" uses an undocumented alternate encoding??') + # for args in args_lists: suffix = "" ## all = instr.as_all_suffixes @@ -229,9 +235,6 @@ # movq $xxx, %rax => movl $xxx, %eax suffix = 
'l' ops[1] = reduce_to_32bit(ops[1]) - if instrname.lower() == 'movd': - ops[0] = reduce_to_32bit(ops[0]) - ops[1] = reduce_to_32bit(ops[1]) # op = '\t%s%s %s%s' % (instrname.lower(), suffix, ', '.join(ops), following) diff --git a/pypy/jit/backend/x86/test/test_zmath.py b/pypy/jit/backend/x86/test/test_zmath.py --- a/pypy/jit/backend/x86/test/test_zmath.py +++ b/pypy/jit/backend/x86/test/test_zmath.py @@ -6,6 +6,8 @@ from pypy.translator.c.test.test_genc import compile from pypy.jit.backend.x86.support import ensure_sse2_floats from pypy.rlib import rfloat +from pypy.rlib.unroll import unrolling_iterable +from pypy.rlib.debug import debug_print def get_test_case((fnname, args, expected)): @@ -16,16 +18,32 @@ expect_valueerror = (expected == ValueError) expect_overflowerror = (expected == OverflowError) check = test_direct.get_tester(expected) + unroll_args = unrolling_iterable(args) # def testfn(): + debug_print('calling', fnname, 'with arguments:') + for arg in unroll_args: + debug_print('\t', arg) try: got = fn(*args) except ValueError: - return expect_valueerror + if expect_valueerror: + return True + else: + debug_print('unexpected ValueError!') + return False except OverflowError: - return expect_overflowerror + if expect_overflowerror: + return True + else: + debug_print('unexpected OverflowError!') + return False else: - return check(got) + if check(got): + return True + else: + debug_print('unexpected result:', got) + return False # testfn.func_name = 'test_' + fnname return testfn diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -291,6 +291,11 @@ op1 = SpaceOperation('-live-', [], None) return [op, op1] + def _noop_rewrite(self, op): + return op + + rewrite_op_convert_float_bytes_to_longlong = _noop_rewrite + # ---------- # Various kinds of calls diff --git a/pypy/jit/codewriter/test/test_flatten.py 
b/pypy/jit/codewriter/test/test_flatten.py --- a/pypy/jit/codewriter/test/test_flatten.py +++ b/pypy/jit/codewriter/test/test_flatten.py @@ -968,14 +968,20 @@ int_return %i2 """, transform=True) - def test_direct_ptradd(self): - from pypy.rpython.lltypesystem import rffi - def f(p, n): - return lltype.direct_ptradd(p, n) - self.encoding_test(f, [lltype.nullptr(rffi.CCHARP.TO), 123], """ - int_add %i0, %i1 -> %i2 - int_return %i2 - """, transform=True) + def test_convert_float_bytes_to_int(self): + from pypy.rlib.longlong2float import float2longlong + def f(x): + return float2longlong(x) + if longlong.is_64_bit: + result_var = "%i0" + return_op = "int_return" + else: + result_var = "%f1" + return_op = "float_return" + self.encoding_test(f, [25.0], """ + convert_float_bytes_to_longlong %%f0 -> %(result_var)s + %(return_op)s %(result_var)s + """ % {"result_var": result_var, "return_op": return_op}) def check_force_cast(FROM, TO, operations, value): diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -1,15 +1,16 @@ +from pypy.jit.codewriter import heaptracker, longlong +from pypy.jit.codewriter.jitcode import JitCode, SwitchDictDescr +from pypy.jit.metainterp.compile import ResumeAtPositionDescr +from pypy.jit.metainterp.jitexc import JitException, get_llexception, reraise +from pypy.rlib import longlong2float +from pypy.rlib.debug import debug_start, debug_stop, ll_assert, make_sure_not_resized +from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.rarithmetic import intmask, LONG_BIT, r_uint, ovfcheck +from pypy.rlib.rtimer import read_timestamp from pypy.rlib.unroll import unrolling_iterable -from pypy.rlib.rtimer import read_timestamp -from pypy.rlib.rarithmetic import intmask, LONG_BIT, r_uint, ovfcheck -from pypy.rlib.objectmodel import we_are_translated -from pypy.rlib.debug import debug_start, debug_stop, ll_assert -from pypy.rlib.debug 
import make_sure_not_resized from pypy.rpython.lltypesystem import lltype, llmemory, rclass from pypy.rpython.lltypesystem.lloperation import llop -from pypy.jit.codewriter.jitcode import JitCode, SwitchDictDescr -from pypy.jit.codewriter import heaptracker, longlong -from pypy.jit.metainterp.jitexc import JitException, get_llexception, reraise -from pypy.jit.metainterp.compile import ResumeAtPositionDescr + def arguments(*argtypes, **kwds): resulttype = kwds.pop('returns', None) @@ -20,6 +21,9 @@ return function return decorate +LONGLONG_TYPECODE = 'i' if longlong.is_64_bit else 'f' + + class LeaveFrame(JitException): pass @@ -663,6 +667,11 @@ a = float(a) return longlong.getfloatstorage(a) + @arguments("f", returns=LONGLONG_TYPECODE) + def bhimpl_convert_float_bytes_to_longlong(a): + a = longlong.getrealfloat(a) + return longlong2float.float2longlong(a) + # ---------- # control flow operations @@ -1309,7 +1318,7 @@ def bhimpl_copyunicodecontent(cpu, src, dst, srcstart, dststart, length): cpu.bh_copyunicodecontent(src, dst, srcstart, dststart, length) - @arguments(returns=(longlong.is_64_bit and "i" or "f")) + @arguments(returns=LONGLONG_TYPECODE) def bhimpl_ll_read_timestamp(): return read_timestamp() diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -223,6 +223,7 @@ 'cast_float_to_singlefloat', 'cast_singlefloat_to_float', 'float_neg', 'float_abs', 'cast_ptr_to_int', 'cast_int_to_ptr', + 'convert_float_bytes_to_longlong', ]: exec py.code.Source(''' @arguments("box") diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -419,6 +419,7 @@ 'CAST_INT_TO_FLOAT/1', # need some messy code in the backend 'CAST_FLOAT_TO_SINGLEFLOAT/1', 'CAST_SINGLEFLOAT_TO_FLOAT/1', + 'CONVERT_FLOAT_BYTES_TO_LONGLONG/1', # 'INT_LT/2b', 'INT_LE/2b', diff --git 
a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -3,6 +3,7 @@ import py from pypy import conftest +from pypy.jit.codewriter import longlong from pypy.jit.codewriter.policy import JitPolicy, StopAtXPolicy from pypy.jit.metainterp import pyjitpl, history from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT @@ -14,6 +15,7 @@ loop_invariant, elidable, promote, jit_debug, assert_green, AssertGreenFailed, unroll_safe, current_trace_length, look_inside_iff, isconstant, isvirtual, promote_string, set_param, record_known_class) +from pypy.rlib.longlong2float import float2longlong from pypy.rlib.rarithmetic import ovfcheck, is_valid_int from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rpython.ootypesystem import ootype @@ -292,7 +294,7 @@ assert res == f(6, sys.maxint, 32, 48) res = self.meta_interp(f, [sys.maxint, 6, 32, 48]) assert res == f(sys.maxint, 6, 32, 48) - + def test_loop_invariant_intbox(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) @@ -953,7 +955,7 @@ self.meta_interp(f, [20], repeat=7) # the loop and the entry path as a single trace self.check_jitcell_token_count(1) - + # we get: # ENTER - compile the new loop and the entry bridge # ENTER - compile the leaving path @@ -1470,7 +1472,7 @@ assert res == f(299) self.check_resops(guard_class=0, guard_nonnull=4, guard_nonnull_class=4, guard_isnull=2) - + def test_merge_guardnonnull_guardvalue(self): from pypy.rlib.objectmodel import instantiate @@ -1499,7 +1501,7 @@ assert res == f(299) self.check_resops(guard_value=4, guard_class=0, guard_nonnull=4, guard_nonnull_class=0, guard_isnull=2) - + def test_merge_guardnonnull_guardvalue_2(self): from pypy.rlib.objectmodel import instantiate @@ -1528,7 +1530,7 @@ assert res == f(299) self.check_resops(guard_value=4, guard_class=0, guard_nonnull=4, guard_nonnull_class=0, guard_isnull=2) - + def 
test_merge_guardnonnull_guardclass_guardvalue(self): from pypy.rlib.objectmodel import instantiate @@ -2636,7 +2638,7 @@ return sa assert self.meta_interp(f, [20]) == f(20) self.check_resops(int_lt=6, int_le=2, int_ge=4, int_gt=3) - + def test_intbounds_not_generalized2(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'node']) @@ -2677,7 +2679,7 @@ assert self.meta_interp(f, [20, 3]) == f(20, 3) self.check_jitcell_token_count(1) self.check_target_token_count(5) - + def test_max_retrace_guards(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'a']) @@ -2815,7 +2817,7 @@ for cell in get_stats().get_all_jitcell_tokens(): # Initialal trace with two labels and 5 retraces assert len(cell.target_tokens) <= 7 - + def test_nested_retrace(self): myjitdriver = JitDriver(greens = ['pc'], reds = ['n', 'a', 'i', 'j', 'sa']) @@ -3793,6 +3795,16 @@ res = self.interp_operations(g, [1]) assert res == 3 + def test_float2longlong(self): + def f(n): + return float2longlong(n) + + for x in [2.5, float("nan"), -2.5, float("inf")]: + # There are tests elsewhere to verify the correctness of this. 
+ expected = float2longlong(x) + res = self.interp_operations(f, [x]) + assert longlong.getfloatstorage(res) == expected + class TestLLtype(BaseLLtypeTests, LLJitMixin): def test_tagged(self): diff --git a/pypy/jit/tl/tlc.py b/pypy/jit/tl/tlc.py --- a/pypy/jit/tl/tlc.py +++ b/pypy/jit/tl/tlc.py @@ -6,6 +6,8 @@ from pypy.jit.tl.tlopcode import * from pypy.jit.tl import tlopcode from pypy.rlib.jit import JitDriver, elidable +from pypy.rlib.rarithmetic import is_valid_int + class Obj(object): @@ -219,7 +221,7 @@ class Frame(object): def __init__(self, args, pc): - assert isinstance(pc, int) + assert is_valid_int(pc) self.args = args self.pc = pc self.stack = [] @@ -239,7 +241,7 @@ return interp_eval(code, pc, args, pool).int_o() def interp_eval(code, pc, args, pool): - assert isinstance(pc, int) + assert is_valid_int(pc) frame = Frame(args, pc) pc = frame.pc diff --git a/pypy/module/__builtin__/interp_memoryview.py b/pypy/module/__builtin__/interp_memoryview.py --- a/pypy/module/__builtin__/interp_memoryview.py +++ b/pypy/module/__builtin__/interp_memoryview.py @@ -69,6 +69,10 @@ return W_MemoryView(buf) def descr_buffer(self, space): + """Note that memoryview() objects in PyPy support buffer(), whereas + not in CPython; but CPython supports passing memoryview() to most + built-in functions that accept buffers, with the notable exception + of the buffer() built-in.""" return space.wrap(self.buf) def descr_tobytes(self, space): diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -16,13 +16,15 @@ appleveldefs = {} interpleveldefs = {} if sys.platform.startswith("linux"): + from pypy.module.__pypy__ import interp_time interpleveldefs["clock_gettime"] = "interp_time.clock_gettime" interpleveldefs["clock_getres"] = "interp_time.clock_getres" for name in [ "CLOCK_REALTIME", "CLOCK_MONOTONIC", "CLOCK_MONOTONIC_RAW", "CLOCK_PROCESS_CPUTIME_ID", 
"CLOCK_THREAD_CPUTIME_ID" ]: - interpleveldefs[name] = "space.wrap(interp_time.%s)" % name + if getattr(interp_time, name) is not None: + interpleveldefs[name] = "space.wrap(interp_time.%s)" % name class Module(MixedModule): diff --git a/pypy/module/__pypy__/interp_time.py b/pypy/module/__pypy__/interp_time.py --- a/pypy/module/__pypy__/interp_time.py +++ b/pypy/module/__pypy__/interp_time.py @@ -1,3 +1,4 @@ +from __future__ import with_statement import sys from pypy.interpreter.error import exception_from_errno diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -1,9 +1,10 @@ import py - +from pypy.conftest import gettestobjspace class AppTestAST: def setup_class(cls): + cls.space = gettestobjspace(usemodules=['struct']) cls.w_ast = cls.space.appexec([], """(): import _ast return _ast""") diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -4,7 +4,7 @@ class AppTestCodecs: def setup_class(cls): - space = gettestobjspace(usemodules=('unicodedata',)) + space = gettestobjspace(usemodules=('unicodedata', 'struct')) cls.space = space def test_register_noncallable(self): diff --git a/pypy/module/_continuation/test/test_zpickle.py b/pypy/module/_continuation/test/test_zpickle.py --- a/pypy/module/_continuation/test/test_zpickle.py +++ b/pypy/module/_continuation/test/test_zpickle.py @@ -106,8 +106,9 @@ version = 0 def setup_class(cls): - cls.space = gettestobjspace(usemodules=('_continuation',), + cls.space = gettestobjspace(usemodules=('_continuation', 'struct'), CALL_METHOD=True) + cls.space.config.translation.continuation = True cls.space.appexec([], """(): global continulet, A, __name__ diff --git a/pypy/module/_hashlib/test/test_hashlib.py b/pypy/module/_hashlib/test/test_hashlib.py --- 
a/pypy/module/_hashlib/test/test_hashlib.py +++ b/pypy/module/_hashlib/test/test_hashlib.py @@ -3,7 +3,7 @@ class AppTestHashlib: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['_hashlib']) + cls.space = gettestobjspace(usemodules=['_hashlib', 'array', 'struct']) def test_simple(self): import _hashlib diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py --- a/pypy/module/_io/test/test_io.py +++ b/pypy/module/_io/test/test_io.py @@ -158,7 +158,7 @@ class AppTestOpen: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['_io', '_locale']) + cls.space = gettestobjspace(usemodules=['_io', '_locale', 'array', 'struct']) tmpfile = udir.join('tmpfile').ensure() cls.w_tmpfile = cls.space.wrap(str(tmpfile)) diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -92,7 +92,8 @@ class AppTestSocketConnection(BaseConnectionTest): def setup_class(cls): - space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal')) + space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal', + 'struct', 'array')) cls.space = space cls.w_connections = space.newlist([]) diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -6,7 +6,7 @@ from pypy.rpython.lltypesystem import lltype, rffi def setup_module(mod): - mod.space = gettestobjspace(usemodules=['_socket', 'array']) + mod.space = gettestobjspace(usemodules=['_socket', 'array', 'struct']) global socket import socket mod.w_socket = space.appexec([], "(): import _socket as m; return m") @@ -372,10 +372,9 @@ def test_socket_connect(self): import _socket, os s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM, 0) - # XXX temporarily we 
use python.org to test, will have more robust tests - # in the absence of a network connection later when more parts of the - # socket API are implemented. Currently skip the test if there is no - # connection. + # it would be nice to have a test which works even if there is no + # network connection. However, this one is "good enough" for now. Skip + # it if there is no connection. try: s.connect(("www.python.org", 80)) except _socket.gaierror, ex: diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -1,7 +1,6 @@ from pypy.conftest import gettestobjspace import os import py -from pypy.rlib.rarithmetic import is_valid_int class AppTestSSL: @@ -31,7 +30,6 @@ assert isinstance(_ssl.SSL_ERROR_EOF, int) assert isinstance(_ssl.SSL_ERROR_INVALID_ERROR_CODE, int) - assert is_valid_int(_ssl.OPENSSL_VERSION_NUMBER) assert isinstance(_ssl.OPENSSL_VERSION_INFO, tuple) assert len(_ssl.OPENSSL_VERSION_INFO) == 5 assert isinstance(_ssl.OPENSSL_VERSION, str) @@ -92,7 +90,7 @@ class AppTestConnectedSSL: def setup_class(cls): - space = gettestobjspace(usemodules=('_ssl', '_socket')) + space = gettestobjspace(usemodules=('_ssl', '_socket', 'struct')) cls.space = space def setup_method(self, method): @@ -181,7 +179,7 @@ # to exercise the poll() calls def setup_class(cls): - space = gettestobjspace(usemodules=('_ssl', '_socket')) + space = gettestobjspace(usemodules=('_ssl', '_socket', 'struct')) cls.space = space cls.space.appexec([], """(): import socket; socket.setdefaulttimeout(1) diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -10,7 +10,7 @@ [('next', PyInterpreterState)], PyInterpreterStateStruct) PyThreadState = lltype.Ptr(cpython_struct( - "PyThreadState", + "PyThreadState", [('interp', PyInterpreterState), ('dict', PyObject), ])) @@ -19,12 +19,15 @@ def 
PyEval_SaveThread(space): """Release the global interpreter lock (if it has been created and thread support is enabled) and reset the thread state to NULL, returning the - previous thread state (which is not NULL except in PyPy). If the lock has been created, + previous thread state. If the lock has been created, the current thread must have acquired it. (This function is available even when thread support is disabled at compile time.)""" + state = space.fromcache(InterpreterState) if rffi.aroundstate.before: rffi.aroundstate.before() - return lltype.nullptr(PyThreadState.TO) + tstate = state.swap_thread_state( + space, lltype.nullptr(PyThreadState.TO)) + return tstate @cpython_api([PyThreadState], lltype.Void) def PyEval_RestoreThread(space, tstate): @@ -35,6 +38,8 @@ when thread support is disabled at compile time.)""" if rffi.aroundstate.after: rffi.aroundstate.after() + state = space.fromcache(InterpreterState) + state.swap_thread_state(space, tstate) @cpython_api([], lltype.Void) def PyEval_InitThreads(space): @@ -67,28 +72,91 @@ dealloc=ThreadState_dealloc) from pypy.interpreter.executioncontext import ExecutionContext + +# Keep track of the ThreadStateCapsule for a particular execution context. The +# default is for new execution contexts not to have one; it is allocated on the +# first cpyext-based request for it. ExecutionContext.cpyext_threadstate = ThreadStateCapsule(None) +# Also keep track of whether it has been initialized yet or not (None is a valid +# PyThreadState for an execution context to have, when the GIL has been +# released, so a check against that can't be used to determine the need for +# initialization). 
+ExecutionContext.cpyext_initialized_threadstate = False + +def cleanup_cpyext_state(self): + try: + del self.cpyext_threadstate + except AttributeError: + pass + self.cpyext_initialized_threadstate = False +ExecutionContext.cleanup_cpyext_state = cleanup_cpyext_state + class InterpreterState(object): def __init__(self, space): self.interpreter_state = lltype.malloc( PyInterpreterState.TO, flavor='raw', zero=True, immortal=True) def new_thread_state(self, space): + """ + Create a new ThreadStateCapsule to hold the PyThreadState for a + particular execution context. + + :param space: A space. + + :returns: A new ThreadStateCapsule holding a newly allocated + PyThreadState and referring to this interpreter state. + """ capsule = ThreadStateCapsule(space) ts = capsule.memory ts.c_interp = self.interpreter_state ts.c_dict = make_ref(space, space.newdict()) return capsule + def get_thread_state(self, space): + """ + Get the current PyThreadState for the current execution context. + + :param space: A space. + + :returns: The current PyThreadState for the current execution context, + or None if it does not have one. + """ ec = space.getexecutioncontext() return self._get_thread_state(space, ec).memory + + def swap_thread_state(self, space, tstate): + """ + Replace the current thread state of the current execution context with a + new thread state. + + :param space: The space. + + :param tstate: The new PyThreadState for the current execution context. + + :returns: The old thread state for the current execution context, either + None or a PyThreadState. + """ + ec = space.getexecutioncontext() + capsule = self._get_thread_state(space, ec) + old_tstate = capsule.memory + capsule.memory = tstate + return old_tstate + def _get_thread_state(self, space, ec): - if ec.cpyext_threadstate.memory == lltype.nullptr(PyThreadState.TO): + """ + Get the ThreadStateCapsule for the given execution context, possibly + creating a new one if it does not already have one. 
+ + :param space: The space. + :param ec: The ExecutionContext of which to get the thread state. + :returns: The ThreadStateCapsule for the given execution context. + """ + if not ec.cpyext_initialized_threadstate: ec.cpyext_threadstate = self.new_thread_state(space) - + ec.cpyext_initialized_threadstate = True return ec.cpyext_threadstate @cpython_api([], PyThreadState, error=CANNOT_FAIL) @@ -105,13 +173,8 @@ def PyThreadState_Swap(space, tstate): """Swap the current thread state with the thread state given by the argument tstate, which may be NULL. The global interpreter lock must be held.""" - # All cpyext calls release and acquire the GIL, so this function has no - # side-effects - if tstate: - return lltype.nullptr(PyThreadState.TO) - else: - state = space.fromcache(InterpreterState) - return state.get_thread_state(space) + state = space.fromcache(InterpreterState) + return state.swap_thread_state(space, tstate) @cpython_api([PyThreadState], lltype.Void) def PyEval_AcquireThread(space, tstate): diff --git a/pypy/module/cpyext/src/getargs.c b/pypy/module/cpyext/src/getargs.c --- a/pypy/module/cpyext/src/getargs.c +++ b/pypy/module/cpyext/src/getargs.c @@ -23,16 +23,33 @@ #define FLAG_COMPAT 1 #define FLAG_SIZE_T 2 +typedef int (*destr_t)(PyObject *, void *); + + +/* Keep track of "objects" that have been allocated or initialized and + which will need to be deallocated or cleaned up somehow if overall + parsing fails. 
+*/ +typedef struct { + void *item; + destr_t destructor; +} freelistentry_t; + +typedef struct { + int first_available; + freelistentry_t *entries; +} freelist_t; + /* Forward */ static int vgetargs1(PyObject *, const char *, va_list *, int); static void seterror(int, const char *, int *, const char *, const char *); static char *convertitem(PyObject *, const char **, va_list *, int, int *, - char *, size_t, PyObject **); + char *, size_t, freelist_t *); static char *converttuple(PyObject *, const char **, va_list *, int, - int *, char *, size_t, int, PyObject **); + int *, char *, size_t, int, freelist_t *); static char *convertsimple(PyObject *, const char **, va_list *, int, char *, - size_t, PyObject **); + size_t, freelist_t *); static Py_ssize_t convertbuffer(PyObject *, void **p, char **); static int getbuffer(PyObject *, Py_buffer *, char**); @@ -129,57 +146,56 @@ /* Handle cleanup of allocated memory in case of exception */ -static void -cleanup_ptr(void *ptr) +static int +cleanup_ptr(PyObject *self, void *ptr) { - PyMem_FREE(ptr); -} - -static void -cleanup_buffer(void *ptr) -{ - PyBuffer_Release((Py_buffer *) ptr); + if (ptr) { + PyMem_FREE(ptr); + } + return 0; } static int -addcleanup(void *ptr, PyObject **freelist, void (*destr)(void *)) +cleanup_buffer(PyObject *self, void *ptr) { - PyObject *cobj; - if (!*freelist) { - *freelist = PyList_New(0); - if (!*freelist) { - destr(ptr); - return -1; - } - } - cobj = PyCObject_FromVoidPtr(ptr, destr); - if (!cobj) { - destr(ptr); - return -1; - } - if (PyList_Append(*freelist, cobj)) { - Py_DECREF(cobj); - return -1; - } - Py_DECREF(cobj); - return 0; + Py_buffer *buf = (Py_buffer *)ptr; + if (buf) { + PyBuffer_Release(buf); + } + return 0; } static int -cleanreturn(int retval, PyObject *freelist) +addcleanup(void *ptr, freelist_t *freelist, destr_t destructor) { - if (freelist && retval != 0) { - /* We were successful, reset the destructors so that they - don't get called. 
*/ - Py_ssize_t len = PyList_GET_SIZE(freelist), i; - for (i = 0; i < len; i++) - ((PyCObject *) PyList_GET_ITEM(freelist, i)) - ->destructor = NULL; - } - Py_XDECREF(freelist); - return retval; + int index; + + index = freelist->first_available; + freelist->first_available += 1; + + freelist->entries[index].item = ptr; + freelist->entries[index].destructor = destructor; + + return 0; } +static int +cleanreturn(int retval, freelist_t *freelist) +{ + int index; + + if (retval == 0) { + /* A failure occurred, therefore execute all of the cleanup + functions. + */ + for (index = 0; index < freelist->first_available; ++index) { + freelist->entries[index].destructor(NULL, + freelist->entries[index].item); + } + } + PyMem_Free(freelist->entries); + return retval; +} static int vgetargs1(PyObject *args, const char *format, va_list *p_va, int flags) @@ -195,7 +211,7 @@ const char *formatsave = format; Py_ssize_t i, len; char *msg; - PyObject *freelist = NULL; + freelist_t freelist = {0, NULL}; int compat = flags & FLAG_COMPAT; assert(compat || (args != (PyObject*)NULL)); @@ -251,16 +267,18 @@ format = formatsave; + freelist.entries = PyMem_New(freelistentry_t, max); + if (compat) { if (max == 0) { if (args == NULL) - return 1; + return cleanreturn(1, &freelist); PyOS_snprintf(msgbuf, sizeof(msgbuf), "%.200s%s takes no arguments", fname==NULL ? "function" : fname, fname==NULL ? "" : "()"); PyErr_SetString(PyExc_TypeError, msgbuf); - return 0; + return cleanreturn(0, &freelist); } else if (min == 1 && max == 1) { if (args == NULL) { @@ -269,26 +287,26 @@ fname==NULL ? "function" : fname, fname==NULL ? 
"" : "()"); PyErr_SetString(PyExc_TypeError, msgbuf); - return 0; + return cleanreturn(0, &freelist); } msg = convertitem(args, &format, p_va, flags, levels, msgbuf, sizeof(msgbuf), &freelist); if (msg == NULL) - return cleanreturn(1, freelist); + return cleanreturn(1, &freelist); seterror(levels[0], msg, levels+1, fname, message); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } else { PyErr_SetString(PyExc_SystemError, "old style getargs format uses new features"); - return 0; + return cleanreturn(0, &freelist); } } if (!PyTuple_Check(args)) { PyErr_SetString(PyExc_SystemError, "new style getargs format but argument is not a tuple"); - return 0; + return cleanreturn(0, &freelist); } len = PyTuple_GET_SIZE(args); @@ -308,7 +326,7 @@ message = msgbuf; } PyErr_SetString(PyExc_TypeError, message); - return 0; + return cleanreturn(0, &freelist); } for (i = 0; i < len; i++) { @@ -319,7 +337,7 @@ sizeof(msgbuf), &freelist); if (msg) { seterror(i+1, msg, levels, fname, message); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } } @@ -328,10 +346,10 @@ *format != '|' && *format != ':' && *format != ';') { PyErr_Format(PyExc_SystemError, "bad format string: %.200s", formatsave); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } - return cleanreturn(1, freelist); + return cleanreturn(1, &freelist); } @@ -395,7 +413,7 @@ static char * converttuple(PyObject *arg, const char **p_format, va_list *p_va, int flags, int *levels, char *msgbuf, size_t bufsize, int toplevel, - PyObject **freelist) + freelist_t *freelist) { int level = 0; int n = 0; @@ -472,7 +490,7 @@ static char * convertitem(PyObject *arg, const char **p_format, va_list *p_va, int flags, - int *levels, char *msgbuf, size_t bufsize, PyObject **freelist) + int *levels, char *msgbuf, size_t bufsize, freelist_t *freelist) { char *msg; const char *format = *p_format; @@ -539,7 +557,7 @@ static char * convertsimple(PyObject *arg, const char **p_format, 
va_list *p_va, int flags, - char *msgbuf, size_t bufsize, PyObject **freelist) + char *msgbuf, size_t bufsize, freelist_t *freelist) { /* For # codes */ #define FETCH_SIZE int *q=NULL;Py_ssize_t *q2=NULL;\ @@ -1501,7 +1519,9 @@ const char *fname, *msg, *custom_msg, *keyword; int min = INT_MAX; int i, len, nargs, nkeywords; - PyObject *freelist = NULL, *current_arg; + PyObject *current_arg; + freelist_t freelist = {0, NULL}; + assert(args != NULL && PyTuple_Check(args)); assert(keywords == NULL || PyDict_Check(keywords)); @@ -1525,6 +1545,8 @@ for (len=0; kwlist[len]; len++) continue; + freelist.entries = PyMem_New(freelistentry_t, len); + nargs = PyTuple_GET_SIZE(args); nkeywords = (keywords == NULL) ? 0 : PyDict_Size(keywords); if (nargs + nkeywords > len) { @@ -1535,7 +1557,7 @@ len, (len == 1) ? "" : "s", nargs + nkeywords); - return 0; + return cleanreturn(0, &freelist); } /* convert tuple args and keyword args in same loop, using kwlist to drive process */ @@ -1549,7 +1571,7 @@ PyErr_Format(PyExc_RuntimeError, "More keyword list entries (%d) than " "format specifiers (%d)", len, i); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } current_arg = NULL; if (nkeywords) { @@ -1563,11 +1585,11 @@ "Argument given by name ('%s') " "and position (%d)", keyword, i+1); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } } else if (nkeywords && PyErr_Occurred()) - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); else if (i < nargs) current_arg = PyTuple_GET_ITEM(args, i); @@ -1576,7 +1598,7 @@ levels, msgbuf, sizeof(msgbuf), &freelist); if (msg) { seterror(i+1, msg, levels, fname, custom_msg); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } continue; } @@ -1585,14 +1607,14 @@ PyErr_Format(PyExc_TypeError, "Required argument " "'%s' (pos %d) not found", keyword, i+1); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } /* current code reports success when all 
required args * fulfilled and no keyword args left, with no further * validation. XXX Maybe skip this in debug build ? */ if (!nkeywords) - return cleanreturn(1, freelist); + return cleanreturn(1, &freelist); /* We are into optional args, skip thru to any remaining * keyword args */ @@ -1600,7 +1622,7 @@ if (msg) { PyErr_Format(PyExc_RuntimeError, "%s: '%s'", msg, format); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } } @@ -1608,7 +1630,7 @@ PyErr_Format(PyExc_RuntimeError, "more argument specifiers than keyword list entries " "(remaining format:'%s')", format); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } /* make sure there are no extraneous keyword arguments */ @@ -1621,7 +1643,7 @@ if (!PyString_Check(key)) { PyErr_SetString(PyExc_TypeError, "keywords must be strings"); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } ks = PyString_AsString(key); for (i = 0; i < len; i++) { @@ -1635,12 +1657,12 @@ "'%s' is an invalid keyword " "argument for this function", ks); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } } } - return cleanreturn(1, freelist); + return cleanreturn(1, &freelist); } diff --git a/pypy/module/cpyext/test/conftest.py b/pypy/module/cpyext/test/conftest.py --- a/pypy/module/cpyext/test/conftest.py +++ b/pypy/module/cpyext/test/conftest.py @@ -10,7 +10,7 @@ return False def pytest_funcarg__space(request): - return gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi']) + return gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array']) def pytest_funcarg__api(request): return request.cls.api diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -19,7 +19,8 @@ class BaseApiTest(LeakCheckingTest): def setup_class(cls): - cls.space = space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi']) + cls.space = space 
= gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', + 'array']) # warm up reference counts: # - the posix module allocates a HCRYPTPROV on Windows diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -1,3 +1,4 @@ +from pypy.conftest import gettestobjspace from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase import py diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -35,7 +35,7 @@ class AppTestApi: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi']) + cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array']) from pypy.rlib.libffi import get_libc_name cls.w_libc = cls.space.wrap(get_libc_name()) @@ -106,10 +106,7 @@ del obj import gc; gc.collect() - try: - del space.getexecutioncontext().cpyext_threadstate - except AttributeError: - pass + space.getexecutioncontext().cleanup_cpyext_state() for w_obj in state.non_heaptypes_w: Py_DecRef(space, w_obj) @@ -168,8 +165,9 @@ return leaking class AppTestCpythonExtensionBase(LeakCheckingTest): + def setup_class(cls): - cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi']) + cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array']) cls.space.getbuiltinmodule("cpyext") from pypy.module.imp.importing import importhook importhook(cls.space, "os") # warm up reference counts diff --git a/pypy/module/cpyext/test/test_import.py b/pypy/module/cpyext/test/test_import.py --- a/pypy/module/cpyext/test/test_import.py +++ b/pypy/module/cpyext/test/test_import.py @@ -19,7 +19,7 @@ space.wrap('__name__'))) == 'foobar' def test_getmoduledict(self, space, api): - testmod = "binascii" + testmod = "_functools" w_pre_dict = 
api.PyImport_GetModuleDict() assert not space.is_true(space.contains(w_pre_dict, space.wrap(testmod))) diff --git a/pypy/module/cpyext/test/test_pystate.py b/pypy/module/cpyext/test/test_pystate.py --- a/pypy/module/cpyext/test/test_pystate.py +++ b/pypy/module/cpyext/test/test_pystate.py @@ -3,6 +3,10 @@ from pypy.rpython.lltypesystem.lltype import nullptr from pypy.module.cpyext.pystate import PyInterpreterState, PyThreadState from pypy.module.cpyext.pyobject import from_ref +from pypy.rpython.lltypesystem import lltype +from pypy.module.cpyext.test.test_cpyext import LeakCheckingTest, freeze_refcnts +from pypy.module.cpyext.pystate import PyThreadState_Get, PyInterpreterState_Head +from pypy.tool import leakfinder class AppTestThreads(AppTestCpythonExtensionBase): def test_allow_threads(self): @@ -21,6 +25,93 @@ # Should compile at least module.test() + + def test_thread_state_get(self): + module = self.import_extension('foo', [ + ("get", "METH_NOARGS", + """ + PyThreadState *tstate = PyThreadState_Get(); + if (tstate == NULL) { + return PyLong_FromLong(0); + } + if (tstate->interp != PyInterpreterState_Head()) { + return PyLong_FromLong(1); + } + if (tstate->interp->next != NULL) { + return PyLong_FromLong(2); + } + return PyLong_FromLong(3); + """), + ]) + assert module.get() == 3 + + def test_basic_threadstate_dance(self): + module = self.import_extension('foo', [ + ("dance", "METH_NOARGS", + """ + PyThreadState *old_tstate, *new_tstate; + + old_tstate = PyThreadState_Swap(NULL); + if (old_tstate == NULL) { + return PyLong_FromLong(0); + } + + new_tstate = PyThreadState_Get(); + if (new_tstate != NULL) { + return PyLong_FromLong(1); + } + + new_tstate = PyThreadState_Swap(old_tstate); + if (new_tstate != NULL) { + return PyLong_FromLong(2); + } + + new_tstate = PyThreadState_Get(); + if (new_tstate != old_tstate) { + return PyLong_FromLong(3); + } + + return PyLong_FromLong(4); + """), + ]) + assert module.dance() == 4 + + def test_threadstate_dict(self): + 
module = self.import_extension('foo', [ + ("getdict", "METH_NOARGS", + """ + PyObject *dict = PyThreadState_GetDict(); + Py_INCREF(dict); + return dict; + """), + ]) + assert isinstance(module.getdict(), dict) + + def test_savethread(self): + module = self.import_extension('foo', [ + ("bounce", "METH_NOARGS", + """ + PyThreadState *tstate = PyEval_SaveThread(); + if (tstate == NULL) { + return PyLong_FromLong(0); + } + + if (PyThreadState_Get() != NULL) { + return PyLong_FromLong(1); + } + + PyEval_RestoreThread(tstate); + + if (PyThreadState_Get() != tstate) { + return PyLong_FromLong(2); + } + + return PyLong_FromLong(3); + """), + ]) + + + class TestInterpreterState(BaseApiTest): def test_interpreter_head(self, space, api): state = api.PyInterpreterState_Head() @@ -29,31 +120,3 @@ def test_interpreter_next(self, space, api): state = api.PyInterpreterState_Head() assert nullptr(PyInterpreterState.TO) == api.PyInterpreterState_Next(state) - -class TestThreadState(BaseApiTest): - def test_thread_state_get(self, space, api): - ts = api.PyThreadState_Get() - assert ts != nullptr(PyThreadState.TO) - - def test_thread_state_interp(self, space, api): - ts = api.PyThreadState_Get() - assert ts.c_interp == api.PyInterpreterState_Head() - assert ts.c_interp.c_next == nullptr(PyInterpreterState.TO) - - def test_basic_threadstate_dance(self, space, api): - # Let extension modules call these functions, - # Not sure of the semantics in pypy though. 
- # (cpyext always acquires and releases the GIL around calls) - tstate = api.PyThreadState_Swap(None) - assert tstate is not None - assert not api.PyThreadState_Swap(tstate) - - api.PyEval_AcquireThread(tstate) - api.PyEval_ReleaseThread(tstate) - - def test_threadstate_dict(self, space, api): - ts = api.PyThreadState_Get() - ref = ts.c_dict - assert ref == api.PyThreadState_GetDict() - w_obj = from_ref(space, ref) - assert space.isinstance_w(w_obj, space.w_dict) diff --git a/pypy/module/fcntl/test/test_fcntl.py b/pypy/module/fcntl/test/test_fcntl.py --- a/pypy/module/fcntl/test/test_fcntl.py +++ b/pypy/module/fcntl/test/test_fcntl.py @@ -13,7 +13,7 @@ class AppTestFcntl: def setup_class(cls): - space = gettestobjspace(usemodules=('fcntl', 'array')) + space = gettestobjspace(usemodules=('fcntl', 'array', 'struct')) cls.space = space tmpprefix = str(udir.ensure('test_fcntl', dir=1).join('tmp_')) cls.w_tmp = space.wrap(tmpprefix) diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -987,6 +987,10 @@ os.environ['LANG'] = oldlang class AppTestImportHooks(object): + + def setup_class(cls): + cls.space = gettestobjspace(usemodules=('struct',)) + def test_meta_path(self): tried_imports = [] class Importer(object): diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py --- a/pypy/module/itertools/test/test_itertools.py +++ b/pypy/module/itertools/test/test_itertools.py @@ -891,7 +891,7 @@ class AppTestItertools27: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['itertools']) + cls.space = gettestobjspace(usemodules=['itertools', 'struct']) if cls.space.is_true(cls.space.appexec([], """(): import sys; return sys.version_info < (2, 7) """)): diff --git a/pypy/module/marshal/test/make_test_marshal.py b/pypy/module/marshal/test/make_test_marshal.py deleted file mode 100644 --- 
a/pypy/module/marshal/test/make_test_marshal.py +++ /dev/null @@ -1,78 +0,0 @@ - -TESTCASES = """\ - None - False - True - StopIteration - Ellipsis - 42 - -17 - sys.maxint - -1.25 - -1.25 #2 - 2+5j - 2+5j #2 - 42L - -1234567890123456789012345678901234567890L - hello # not interned - "hello" - () - (1, 2) - [] - [3, 4] - {} - {5: 6, 7: 8} - func.func_code - scopefunc.func_code - u'hello' - set() - set([1, 2]) - frozenset() - frozenset([3, 4]) -""".strip().split('\n') - -def readable(s): - for c, repl in ( - ("'", '_quote_'), ('"', '_Quote_'), (':', '_colon_'), ('.', '_dot_'), - ('[', '_list_'), (']', '_tsil_'), ('{', '_dict_'), ('}', '_tcid_'), - ('-', '_minus_'), ('+', '_plus_'), - (',', '_comma_'), ('(', '_brace_'), (')', '_ecarb_') ): - s = s.replace(c, repl) - lis = list(s) - for i, c in enumerate(lis): - if c.isalnum() or c == '_': - continue - lis[i] = '_' - return ''.join(lis) - -print """class AppTestMarshal: -""" -for line in TESTCASES: - line = line.strip() - name = readable(line) - version = '' - extra = '' - if line.endswith('#2'): - version = ', 2' - extra = '; assert len(s) in (9, 17)' - src = '''\ - def test_%(name)s(self): - import sys - hello = "he" - hello += "llo" - def func(x): - return lambda y: x+y - scopefunc = func(42) - import marshal, StringIO - case = %(line)s - print "case: %%-30s func=%(name)s" %% (case, ) - s = marshal.dumps(case%(version)s)%(extra)s - x = marshal.loads(s) - assert x == case - f = StringIO.StringIO() - marshal.dump(case, f) - f.seek(0) - x = marshal.load(f) - assert x == case -''' % {'name': name, 'line': line, 'version' : version, 'extra': extra} - print src diff --git a/pypy/module/math/test/test_direct.py b/pypy/module/math/test/test_direct.py --- a/pypy/module/math/test/test_direct.py +++ b/pypy/module/math/test/test_direct.py @@ -59,6 +59,9 @@ ('copysign', (1.5, -0.0), -1.5), ('copysign', (1.5, INFINITY), 1.5), ('copysign', (1.5, -INFINITY), -1.5), + ] + if sys.platform != 'win32': # all NaNs seem to be negative 
there...? + IRREGCASES += [ ('copysign', (1.5, NAN), 1.5), ('copysign', (1.75, -NAN), -1.75), # special case for -NAN here ] diff --git a/pypy/module/math/test/test_math.py b/pypy/module/math/test/test_math.py --- a/pypy/module/math/test/test_math.py +++ b/pypy/module/math/test/test_math.py @@ -6,7 +6,7 @@ class AppTestMath: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['math']) + cls.space = gettestobjspace(usemodules=['math', 'struct']) cls.w_cases = cls.space.wrap(test_direct.MathTests.TESTCASES) cls.w_consistent_host = cls.space.wrap(test_direct.consistent_host) diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -37,26 +37,44 @@ 'True_': 'types.Bool.True', 'False_': 'types.Bool.False', + 'typeinfo': 'interp_dtype.get_dtype_cache(space).w_typeinfo', + 'generic': 'interp_boxes.W_GenericBox', 'number': 'interp_boxes.W_NumberBox', 'integer': 'interp_boxes.W_IntegerBox', 'signedinteger': 'interp_boxes.W_SignedIntegerBox', 'unsignedinteger': 'interp_boxes.W_UnsignedIntegerBox', 'bool_': 'interp_boxes.W_BoolBox', + 'bool8': 'interp_boxes.W_BoolBox', 'int8': 'interp_boxes.W_Int8Box', + 'byte': 'interp_boxes.W_Int8Box', 'uint8': 'interp_boxes.W_UInt8Box', + 'ubyte': 'interp_boxes.W_UInt8Box', 'int16': 'interp_boxes.W_Int16Box', + 'short': 'interp_boxes.W_Int16Box', 'uint16': 'interp_boxes.W_UInt16Box', + 'ushort': 'interp_boxes.W_UInt16Box', 'int32': 'interp_boxes.W_Int32Box', + 'intc': 'interp_boxes.W_Int32Box', 'uint32': 'interp_boxes.W_UInt32Box', + 'uintc': 'interp_boxes.W_UInt32Box', 'int64': 'interp_boxes.W_Int64Box', 'uint64': 'interp_boxes.W_UInt64Box', + 'longlong': 'interp_boxes.W_LongLongBox', + 'ulonglong': 'interp_boxes.W_ULongLongBox', 'int_': 'interp_boxes.W_LongBox', 'inexact': 'interp_boxes.W_InexactBox', 'floating': 'interp_boxes.W_FloatingBox', 'float_': 'interp_boxes.W_Float64Box', 'float32': 
'interp_boxes.W_Float32Box', 'float64': 'interp_boxes.W_Float64Box', + 'intp': 'types.IntP.BoxType', + 'uintp': 'types.UIntP.BoxType', + 'flexible': 'interp_boxes.W_FlexibleBox', + 'character': 'interp_boxes.W_CharacterBox', + 'str_': 'interp_boxes.W_StringBox', + 'unicode_': 'interp_boxes.W_UnicodeBox', + 'void': 'interp_boxes.W_VoidBox', } # ufuncs @@ -67,6 +85,7 @@ ("arccos", "arccos"), ("arcsin", "arcsin"), ("arctan", "arctan"), + ("arctan2", "arctan2"), ("arccosh", "arccosh"), ("arcsinh", "arcsinh"), ("arctanh", "arctanh"), @@ -77,7 +96,10 @@ ("true_divide", "true_divide"), ("equal", "equal"), ("exp", "exp"), + ("exp2", "exp2"), + ("expm1", "expm1"), ("fabs", "fabs"), + ("fmod", "fmod"), ("floor", "floor"), ("ceil", "ceil"), ("greater", "greater"), @@ -92,8 +114,10 @@ ("radians", "radians"), ("degrees", "degrees"), ("deg2rad", "radians"), + ("rad2deg", "degrees"), ("reciprocal", "reciprocal"), ("sign", "sign"), + ("signbit", "signbit"), ("sin", "sin"), ("sinh", "sinh"), ("subtract", "subtract"), @@ -106,6 +130,9 @@ ('bitwise_not', 'invert'), ('isnan', 'isnan'), ('isinf', 'isinf'), + ('isneginf', 'isneginf'), + ('isposinf', 'isposinf'), + ('isfinite', 'isfinite'), ('logical_and', 'logical_and'), ('logical_xor', 'logical_xor'), ('logical_not', 'logical_not'), @@ -116,6 +143,8 @@ ('log1p', 'log1p'), ('power', 'power'), ('floor_divide', 'floor_divide'), + ('logaddexp', 'logaddexp'), + ('logaddexp2', 'logaddexp2'), ]: interpleveldefs[exposed] = "interp_ufuncs.get(space).%s" % impl diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -16,7 +16,7 @@ a[i][i] = 1 return a -def sum(a,axis=None): +def sum(a,axis=None, out=None): '''sum(a, axis=None) Sum of array elements over a given axis. @@ -43,17 +43,17 @@ # TODO: add to doc (once it's implemented): cumsum : Cumulative sum of array elements. 
if not hasattr(a, "sum"): a = _numpypy.array(a) - return a.sum(axis) + return a.sum(axis=axis, out=out) -def min(a, axis=None): +def min(a, axis=None, out=None): if not hasattr(a, "min"): a = _numpypy.array(a) - return a.min(axis) + return a.min(axis=axis, out=out) -def max(a, axis=None): +def max(a, axis=None, out=None): if not hasattr(a, "max"): a = _numpypy.array(a) - return a.max(axis) + return a.max(axis=axis, out=out) def arange(start, stop=None, step=1, dtype=None): '''arange([start], stop[, step], dtype=None) diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -33,7 +33,7 @@ pass SINGLE_ARG_FUNCTIONS = ["sum", "prod", "max", "min", "all", "any", - "unegative", "flat"] + "unegative", "flat", "tostring"] TWO_ARG_FUNCTIONS = ["dot", 'take'] class FakeSpace(object): @@ -51,6 +51,8 @@ w_long = "long" w_tuple = 'tuple' w_slice = "slice" + w_str = "str" + w_unicode = "unicode" def __init__(self): """NOT_RPYTHON""" @@ -91,8 +93,12 @@ return BoolObject(obj) elif isinstance(obj, int): return IntObject(obj) + elif isinstance(obj, long): + return LongObject(obj) elif isinstance(obj, W_Root): return obj + elif isinstance(obj, str): + return StringObject(obj) raise NotImplementedError def newlist(self, items): @@ -120,6 +126,11 @@ return int(w_obj.floatval) raise NotImplementedError + def str_w(self, w_obj): + if isinstance(w_obj, StringObject): + return w_obj.v + raise NotImplementedError + def int(self, w_obj): if isinstance(w_obj, IntObject): return w_obj @@ -151,7 +162,13 @@ return instantiate(klass) def newtuple(self, list_w): - raise ValueError + return ListObject(list_w) + + def newdict(self): + return {} + + def setitem(self, dict, item, value): + dict[item] = value def len_w(self, w_obj): if isinstance(w_obj, ListObject): @@ -178,6 +195,11 @@ def __init__(self, intval): self.intval = intval +class LongObject(W_Root): + tp = FakeSpace.w_long + def 
__init__(self, intval): + self.intval = intval + class ListObject(W_Root): tp = FakeSpace.w_list def __init__(self, items): @@ -190,6 +212,11 @@ self.stop = stop self.step = step +class StringObject(W_Root): + tp = FakeSpace.w_str + def __init__(self, v): + self.v = v + class InterpreterState(object): def __init__(self, code): self.code = code @@ -407,6 +434,9 @@ w_res = neg.call(interp.space, [arr]) elif self.name == "flat": w_res = arr.descr_get_flatiter(interp.space) + elif self.name == "tostring": + arr.descr_tostring(interp.space) + w_res = None else: assert False # unreachable code elif self.name in TWO_ARG_FUNCTIONS: diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -1,24 +1,25 @@ from pypy.interpreter.baseobjspace import Wrappable -from pypy.interpreter.error import operationerrfmt +from pypy.interpreter.error import operationerrfmt, OperationError from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef from pypy.objspace.std.floattype import float_typedef +from pypy.objspace.std.stringtype import str_typedef +from pypy.objspace.std.unicodetype import unicode_typedef, unicode_from_object from pypy.objspace.std.inttype import int_typedef from pypy.rlib.rarithmetic import LONG_BIT from pypy.tool.sourcetools import func_with_new_name - MIXIN_32 = (int_typedef,) if LONG_BIT == 32 else () MIXIN_64 = (int_typedef,) if LONG_BIT == 64 else () def new_dtype_getter(name): - def get_dtype(space): + def _get_dtype(space): from pypy.module.micronumpy.interp_dtype import get_dtype_cache return getattr(get_dtype_cache(space), "w_%sdtype" % name) def new(space, w_subtype, w_value): - dtype = get_dtype(space) + dtype = _get_dtype(space) return dtype.itemtype.coerce_subtype(space, w_subtype, w_value) - return func_with_new_name(new, name + "_box_new"), staticmethod(get_dtype) + return 
func_with_new_name(new, name + "_box_new"), staticmethod(_get_dtype) class PrimitiveBox(object): _mixin_ = True @@ -37,6 +38,9 @@ w_subtype.getname(space, '?') ) + def get_dtype(self, space): + return self._get_dtype(space) + def descr_str(self, space): return space.wrap(self.get_dtype(space).itemtype.str_format(self)) @@ -44,12 +48,12 @@ return space.format(self.item(space), w_spec) def descr_int(self, space): - box = self.convert_to(W_LongBox.get_dtype(space)) + box = self.convert_to(W_LongBox._get_dtype(space)) assert isinstance(box, W_LongBox) return space.wrap(box.value) def descr_float(self, space): - box = self.convert_to(W_Float64Box.get_dtype(space)) + box = self.convert_to(W_Float64Box._get_dtype(space)) assert isinstance(box, W_Float64Box) return space.wrap(box.value) @@ -58,21 +62,24 @@ return space.wrap(dtype.itemtype.bool(self)) def _binop_impl(ufunc_name): - def impl(self, space, w_other): + def impl(self, space, w_other, w_out=None): from pypy.module.micronumpy import interp_ufuncs - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self, w_other]) + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [self, w_other, w_out]) return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) def _binop_right_impl(ufunc_name): - def impl(self, space, w_other): + def impl(self, space, w_other, w_out=None): from pypy.module.micronumpy import interp_ufuncs - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self]) + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [w_other, self, w_out]) return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name) def _unaryop_impl(ufunc_name): - def impl(self, space): + def impl(self, space, w_out=None): from pypy.module.micronumpy import interp_ufuncs - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self]) + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [self, w_out]) return func_with_new_name(impl, 
"unaryop_%s_impl" % ufunc_name) descr_add = _binop_impl("add") @@ -130,7 +137,7 @@ class W_BoolBox(W_GenericBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("bool") + descr__new__, _get_dtype = new_dtype_getter("bool") class W_NumberBox(W_GenericBox): _attrs_ = () @@ -146,34 +153,40 @@ pass class W_Int8Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("int8") + descr__new__, _get_dtype = new_dtype_getter("int8") class W_UInt8Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("uint8") + descr__new__, _get_dtype = new_dtype_getter("uint8") class W_Int16Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("int16") + descr__new__, _get_dtype = new_dtype_getter("int16") class W_UInt16Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("uint16") + descr__new__, _get_dtype = new_dtype_getter("uint16") class W_Int32Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("int32") + descr__new__, _get_dtype = new_dtype_getter("int32") class W_UInt32Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("uint32") + descr__new__, _get_dtype = new_dtype_getter("uint32") class W_LongBox(W_SignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("long") + descr__new__, _get_dtype = new_dtype_getter("long") class W_ULongBox(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("ulong") + descr__new__, _get_dtype = new_dtype_getter("ulong") class W_Int64Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("int64") + descr__new__, _get_dtype = new_dtype_getter("int64") + +class W_LongLongBox(W_SignedIntegerBox, PrimitiveBox): + descr__new__, _get_dtype = new_dtype_getter('longlong') class W_UInt64Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("uint64") + 
descr__new__, _get_dtype = new_dtype_getter("uint64") + +class W_ULongLongBox(W_SignedIntegerBox, PrimitiveBox): + descr__new__, _get_dtype = new_dtype_getter('ulonglong') class W_InexactBox(W_NumberBox): _attrs_ = () @@ -182,16 +195,71 @@ _attrs_ = () class W_Float32Box(W_FloatingBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("float32") + descr__new__, _get_dtype = new_dtype_getter("float32") class W_Float64Box(W_FloatingBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("float64") + descr__new__, _get_dtype = new_dtype_getter("float64") +class W_FlexibleBox(W_GenericBox): + def __init__(self, arr, ofs, dtype): + self.arr = arr # we have to keep array alive + self.ofs = ofs + self.dtype = dtype + + def get_dtype(self, space): + return self.arr.dtype + @unwrap_spec(self=W_GenericBox) def descr_index(space, self): return space.index(self.item(space)) +class W_VoidBox(W_FlexibleBox): + @unwrap_spec(item=str) + def descr_getitem(self, space, item): + try: + ofs, dtype = self.dtype.fields[item] + except KeyError: + raise OperationError(space.w_IndexError, + space.wrap("Field %s does not exist" % item)) + return dtype.itemtype.read(self.arr, 1, self.ofs, ofs, dtype) + + @unwrap_spec(item=str) + def descr_setitem(self, space, item, w_value): + try: + ofs, dtype = self.dtype.fields[item] + except KeyError: + raise OperationError(space.w_IndexError, + space.wrap("Field %s does not exist" % item)) + dtype.itemtype.store(self.arr, 1, self.ofs, ofs, + dtype.coerce(space, w_value)) + +class W_CharacterBox(W_FlexibleBox): + pass + +class W_StringBox(W_CharacterBox): + def descr__new__string_box(space, w_subtype, w_arg): + from pypy.module.micronumpy.interp_numarray import W_NDimArray + from pypy.module.micronumpy.interp_dtype import new_string_dtype + + arg = space.str_w(space.str(w_arg)) + arr = W_NDimArray([1], new_string_dtype(space, len(arg))) + for i in range(len(arg)): + arr.storage[i] = arg[i] + return W_StringBox(arr, 0, arr.dtype) + + 
+class W_UnicodeBox(W_CharacterBox): + def descr__new__unicode_box(space, w_subtype, w_arg): + from pypy.module.micronumpy.interp_numarray import W_NDimArray + from pypy.module.micronumpy.interp_dtype import new_unicode_dtype + + arg = space.unicode_w(unicode_from_object(space, w_arg)) + arr = W_NDimArray([1], new_unicode_dtype(space, len(arg))) + # XXX not this way, we need store + #for i in range(len(arg)): + # arr.storage[i] = arg[i] + return W_UnicodeBox(arr, 0, arr.dtype) W_GenericBox.typedef = TypeDef("generic", __module__ = "numpypy", @@ -348,3 +416,28 @@ __new__ = interp2app(W_Float64Box.descr__new__.im_func), ) + +W_FlexibleBox.typedef = TypeDef("flexible", W_GenericBox.typedef, + __module__ = "numpypy", +) + +W_VoidBox.typedef = TypeDef("void", W_FlexibleBox.typedef, + __module__ = "numpypy", + __getitem__ = interp2app(W_VoidBox.descr_getitem), + __setitem__ = interp2app(W_VoidBox.descr_setitem), +) + +W_CharacterBox.typedef = TypeDef("character", W_FlexibleBox.typedef, + __module__ = "numpypy", +) + +W_StringBox.typedef = TypeDef("string_", (str_typedef, W_CharacterBox.typedef), + __module__ = "numpypy", + __new__ = interp2app(W_StringBox.descr__new__string_box.im_func), +) + +W_UnicodeBox.typedef = TypeDef("unicode_", (unicode_typedef, W_CharacterBox.typedef), + __module__ = "numpypy", + __new__ = interp2app(W_UnicodeBox.descr__new__unicode_box.im_func), +) + diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -1,26 +1,29 @@ + +import sys from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.error import OperationError -from pypy.interpreter.gateway import interp2app +from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import (TypeDef, GetSetProperty, interp_attrproperty, interp_attrproperty_w) -from pypy.module.micronumpy import types, signature, interp_boxes 
+from pypy.module.micronumpy import types, interp_boxes from pypy.rlib.objectmodel import specialize -from pypy.rlib.rarithmetic import LONG_BIT -from pypy.rpython.lltypesystem import lltype, rffi +from pypy.rlib.rarithmetic import LONG_BIT, r_longlong, r_ulonglong UNSIGNEDLTR = "u" SIGNEDLTR = "i" BOOLLTR = "b" FLOATINGLTR = "f" - - -VOID_STORAGE = lltype.Array(lltype.Char, hints={'nolength': True, 'render_as_void': True}) +VOIDLTR = 'V' +STRINGLTR = 'S' +UNICODELTR = 'U' class W_Dtype(Wrappable): _immutable_fields_ = ["itemtype", "num", "kind"] - def __init__(self, itemtype, num, kind, name, char, w_box_type, alternate_constructors=[], aliases=[]): + def __init__(self, itemtype, num, kind, name, char, w_box_type, + alternate_constructors=[], aliases=[], + fields=None, fieldnames=None, native=True): self.itemtype = itemtype self.num = num self.kind = kind @@ -29,53 +32,28 @@ self.w_box_type = w_box_type self.alternate_constructors = alternate_constructors self.aliases = aliases - - def malloc(self, length): - # XXX find out why test_zjit explodes with tracking of allocations - return lltype.malloc(VOID_STORAGE, self.itemtype.get_element_size() * length, - zero=True, flavor="raw", - track_allocation=False, add_memory_pressure=True - ) + self.fields = fields + self.fieldnames = fieldnames + self.native = native @specialize.argtype(1) def box(self, value): return self.itemtype.box(value) def coerce(self, space, w_item): - return self.itemtype.coerce(space, w_item) + return self.itemtype.coerce(space, self, w_item) - def getitem(self, storage, i): - return self.itemtype.read(storage, self.itemtype.get_element_size(), i, 0) + def getitem(self, arr, i): + return self.itemtype.read(arr, 1, i, 0) - def getitem_bool(self, storage, i): - isize = self.itemtype.get_element_size() - return self.itemtype.read_bool(storage, isize, i, 0) + def getitem_bool(self, arr, i): + return self.itemtype.read_bool(arr, 1, i, 0) - def setitem(self, storage, i, box): - 
self.itemtype.store(storage, self.itemtype.get_element_size(), i, 0, box) + def setitem(self, arr, i, box): + self.itemtype.store(arr, 1, i, 0, box) def fill(self, storage, box, start, stop): - self.itemtype.fill(storage, self.itemtype.get_element_size(), box, start, stop, 0) - - def descr__new__(space, w_subtype, w_dtype): - cache = get_dtype_cache(space) - - if space.is_w(w_dtype, space.w_None): - return cache.w_float64dtype - elif space.isinstance_w(w_dtype, w_subtype): - return w_dtype - elif space.isinstance_w(w_dtype, space.w_str): - name = space.str_w(w_dtype) - for dtype in cache.builtin_dtypes: - if dtype.name == name or dtype.char == name or name in dtype.aliases: - return dtype - else: - for dtype in cache.builtin_dtypes: - if w_dtype in dtype.alternate_constructors: - return dtype - if w_dtype is dtype.w_box_type: - return dtype - raise OperationError(space.w_TypeError, space.wrap("data type not understood")) + self.itemtype.fill(storage, self.get_size(), box, start, stop, 0) def descr_str(self, space): return space.wrap(self.name) @@ -86,6 +64,14 @@ def descr_get_itemsize(self, space): return space.wrap(self.itemtype.get_element_size()) + def descr_get_byteorder(self, space): + if self.native: + return space.wrap('=') + return space.wrap(nonnative_byteorder_prefix) + + def descr_get_alignment(self, space): + return space.wrap(self.itemtype.alignment) + def descr_get_shape(self, space): return space.newtuple([]) @@ -99,31 +85,193 @@ def descr_ne(self, space, w_other): return space.wrap(not self.eq(space, w_other)) + def descr_get_fields(self, space): + if self.fields is None: + return space.w_None + w_d = space.newdict() + for name, (offset, subdtype) in self.fields.iteritems(): + space.setitem(w_d, space.wrap(name), space.newtuple([subdtype, + space.wrap(offset)])) + return w_d + + def descr_get_names(self, space): + if self.fieldnames is None: + return space.w_None + return space.newtuple([space.wrap(name) for name in self.fieldnames]) + + 
@unwrap_spec(item=str) + def descr_getitem(self, space, item): + if self.fields is None: + raise OperationError(space.w_KeyError, space.wrap("There are no keys in dtypes %s" % self.name)) + try: + return self.fields[item][1] + except KeyError: + raise OperationError(space.w_KeyError, space.wrap("Field named %s not found" % item)) + def is_int_type(self): return (self.kind == SIGNEDLTR or self.kind == UNSIGNEDLTR or self.kind == BOOLLTR) + def is_signed(self): + return self.kind == SIGNEDLTR + def is_bool_type(self): return self.kind == BOOLLTR + def is_record_type(self): + return self.fields is not None + + def __repr__(self): + if self.fields is not None: + return '' % self.fields + return '' % self.itemtype + + def get_size(self): + return self.itemtype.get_element_size() + +def dtype_from_list(space, w_lst): + lst_w = space.listview(w_lst) + fields = {} + offset = 0 + ofs_and_items = [] + fieldnames = [] + for w_elem in lst_w: + w_fldname, w_flddesc = space.fixedview(w_elem, 2) + subdtype = descr__new__(space, space.gettypefor(W_Dtype), w_flddesc) + fldname = space.str_w(w_fldname) + if fldname in fields: + raise OperationError(space.w_ValueError, space.wrap("two fields with the same name")) + assert isinstance(subdtype, W_Dtype) + fields[fldname] = (offset, subdtype) + ofs_and_items.append((offset, subdtype.itemtype)) + offset += subdtype.itemtype.get_element_size() + fieldnames.append(fldname) + itemtype = types.RecordType(ofs_and_items, offset) + return W_Dtype(itemtype, 20, VOIDLTR, "void" + str(8 * itemtype.get_element_size()), + "V", space.gettypefor(interp_boxes.W_VoidBox), fields=fields, + fieldnames=fieldnames) + +def dtype_from_dict(space, w_dict): + raise OperationError(space.w_NotImplementedError, space.wrap( + "dtype from dict")) + +def variable_dtype(space, name): + if name[0] in '<>=': + name = name[1:] + char = name[0] + if len(name) == 1: + size = 0 + else: + try: + size = int(name[1:]) + except ValueError: + raise 
OperationError(space.w_TypeError, space.wrap("data type not understood")) + if char == 'S': + itemtype = types.StringType(size) + basename = 'string' + num = 18 + w_box_type = space.gettypefor(interp_boxes.W_StringBox) + elif char == 'V': + num = 20 + basename = 'void' + w_box_type = space.gettypefor(interp_boxes.W_VoidBox) + raise OperationError(space.w_NotImplementedError, space.wrap( + "pure void dtype")) + else: + assert char == 'U' + basename = 'unicode' + itemtype = types.UnicodeType(size) + num = 19 + w_box_type = space.gettypefor(interp_boxes.W_UnicodeBox) + return W_Dtype(itemtype, num, char, + basename + str(8 * itemtype.get_element_size()), + char, w_box_type) + +def dtype_from_spec(space, name): + raise OperationError(space.w_NotImplementedError, space.wrap( + "dtype from spec")) + +def descr__new__(space, w_subtype, w_dtype): + cache = get_dtype_cache(space) + + if space.is_w(w_dtype, space.w_None): + return cache.w_float64dtype + elif space.isinstance_w(w_dtype, w_subtype): + return w_dtype + elif space.isinstance_w(w_dtype, space.w_str): + name = space.str_w(w_dtype) + if ',' in name: + return dtype_from_spec(space, name) + try: + return cache.dtypes_by_name[name] + except KeyError: + pass + if name[0] in 'VSU' or name[0] in '<>=' and name[1] in 'VSU': + return variable_dtype(space, name) + elif space.isinstance_w(w_dtype, space.w_list): + return dtype_from_list(space, w_dtype) + elif space.isinstance_w(w_dtype, space.w_dict): + return dtype_from_dict(space, w_dtype) + else: + for dtype in cache.builtin_dtypes: + if w_dtype in dtype.alternate_constructors: + return dtype + if w_dtype is dtype.w_box_type: + return dtype + raise OperationError(space.w_TypeError, space.wrap("data type not understood")) + W_Dtype.typedef = TypeDef("dtype", __module__ = "numpypy", - __new__ = interp2app(W_Dtype.descr__new__.im_func), + __new__ = interp2app(descr__new__), __str__= interp2app(W_Dtype.descr_str), __repr__ = interp2app(W_Dtype.descr_repr), __eq__ = 
interp2app(W_Dtype.descr_eq), __ne__ = interp2app(W_Dtype.descr_ne), + __getitem__ = interp2app(W_Dtype.descr_getitem), num = interp_attrproperty("num", cls=W_Dtype), kind = interp_attrproperty("kind", cls=W_Dtype), + char = interp_attrproperty("char", cls=W_Dtype), type = interp_attrproperty_w("w_box_type", cls=W_Dtype), + byteorder = GetSetProperty(W_Dtype.descr_get_byteorder), itemsize = GetSetProperty(W_Dtype.descr_get_itemsize), + alignment = GetSetProperty(W_Dtype.descr_get_alignment), shape = GetSetProperty(W_Dtype.descr_get_shape), name = interp_attrproperty('name', cls=W_Dtype), + fields = GetSetProperty(W_Dtype.descr_get_fields), + names = GetSetProperty(W_Dtype.descr_get_names), ) W_Dtype.typedef.acceptable_as_base_class = False +if sys.byteorder == 'little': + byteorder_prefix = '<' + nonnative_byteorder_prefix = '>' +else: + byteorder_prefix = '>' + nonnative_byteorder_prefix = '<' + +def new_string_dtype(space, size): + return W_Dtype( + types.StringType(size), + num=18, + kind=STRINGLTR, + name='string', + char='S' + str(size), + w_box_type = space.gettypefor(interp_boxes.W_StringBox), + ) + +def new_unicode_dtype(space, size): + return W_Dtype( + types.UnicodeType(size), + num=19, + kind=UNICODELTR, + name='unicode', + char='U' + str(size), + w_box_type = space.gettypefor(interp_boxes.W_UnicodeBox), + ) + + class DtypeCache(object): def __init__(self, space): self.w_booldtype = W_Dtype( @@ -239,18 +387,134 @@ alternate_constructors=[space.w_float], aliases=["float"], ) - + self.w_stringdtype = W_Dtype( + types.StringType(1), + num=18, + kind=STRINGLTR, + name='string', + char='S', + w_box_type = space.gettypefor(interp_boxes.W_StringBox), + alternate_constructors=[space.w_str], + ) + self.w_unicodedtype = W_Dtype( + types.UnicodeType(1), + num=19, + kind=UNICODELTR, + name='unicode', + char='U', + w_box_type = space.gettypefor(interp_boxes.W_UnicodeBox), + alternate_constructors=[space.w_unicode], + ) + self.w_voiddtype = W_Dtype( + 
types.VoidType(0), + num=20, + kind=VOIDLTR, + name='void', + char='V', + w_box_type = space.gettypefor(interp_boxes.W_VoidBox), + #alternate_constructors=[space.w_buffer], + # XXX no buffer in space + ) self.builtin_dtypes = [ self.w_booldtype, self.w_int8dtype, self.w_uint8dtype, self.w_int16dtype, self.w_uint16dtype, self.w_int32dtype, self.w_uint32dtype, self.w_longdtype, self.w_ulongdtype, - self.w_int64dtype, self.w_uint64dtype, self.w_float32dtype, - self.w_float64dtype + self.w_int64dtype, self.w_uint64dtype, + self.w_float32dtype, + self.w_float64dtype, self.w_stringdtype, self.w_unicodedtype, + self.w_voiddtype, ] - self.dtypes_by_num_bytes = sorted( + self.float_dtypes_by_num_bytes = sorted( (dtype.itemtype.get_element_size(), dtype) - for dtype in self.builtin_dtypes + for dtype in [self.w_float32dtype, self.w_float64dtype] ) + self.dtypes_by_name = {} + # we reverse, so the stuff with lower numbers override stuff with + # higher numbers + for dtype in reversed(self.builtin_dtypes): + self.dtypes_by_name[dtype.name] = dtype + can_name = dtype.kind + str(dtype.itemtype.get_element_size()) + self.dtypes_by_name[can_name] = dtype + self.dtypes_by_name[byteorder_prefix + can_name] = dtype + self.dtypes_by_name['=' + can_name] = dtype + new_name = nonnative_byteorder_prefix + can_name + itemtypename = dtype.itemtype.__class__.__name__ + itemtype = getattr(types, 'NonNative' + itemtypename)() + self.dtypes_by_name[new_name] = W_Dtype( + itemtype, + dtype.num, dtype.kind, new_name, dtype.char, dtype.w_box_type, + native=False) + for alias in dtype.aliases: + self.dtypes_by_name[alias] = dtype + self.dtypes_by_name[dtype.char] = dtype + + typeinfo_full = { + 'LONGLONG': self.w_int64dtype, + 'SHORT': self.w_int16dtype, + 'VOID': self.w_voiddtype, + #'LONGDOUBLE':, + 'UBYTE': self.w_uint8dtype, + 'UINTP': self.w_ulongdtype, + 'ULONG': self.w_ulongdtype, + 'LONG': self.w_longdtype, + 'UNICODE': self.w_unicodedtype, + #'OBJECT', + 'ULONGLONG': self.w_uint64dtype, + 
'STRING': self.w_stringdtype, + #'CDOUBLE', + #'DATETIME', + 'UINT': self.w_uint32dtype, + 'INTP': self.w_longdtype, + #'HALF', + 'BYTE': self.w_int8dtype, + #'CFLOAT': , + #'TIMEDELTA', + 'INT': self.w_int32dtype, + 'DOUBLE': self.w_float64dtype, + 'USHORT': self.w_uint16dtype, + 'FLOAT': self.w_float32dtype, + 'BOOL': self.w_booldtype, + #, 'CLONGDOUBLE'] + } + typeinfo_partial = { + 'Generic': interp_boxes.W_GenericBox, + 'Character': interp_boxes.W_CharacterBox, + 'Flexible': interp_boxes.W_FlexibleBox, + 'Inexact': interp_boxes.W_InexactBox, + 'Integer': interp_boxes.W_IntegerBox, + 'SignedInteger': interp_boxes.W_SignedIntegerBox, + 'UnsignedInteger': interp_boxes.W_UnsignedIntegerBox, + #'ComplexFloating', + 'Number': interp_boxes.W_NumberBox, + 'Floating': interp_boxes.W_FloatingBox + } + w_typeinfo = space.newdict() + for k, v in typeinfo_partial.iteritems(): + space.setitem(w_typeinfo, space.wrap(k), space.gettypefor(v)) + for k, dtype in typeinfo_full.iteritems(): + itemsize = dtype.itemtype.get_element_size() + items_w = [space.wrap(dtype.char), + space.wrap(dtype.num), + space.wrap(itemsize * 8), # in case of changing + # number of bits per byte in the future + space.wrap(itemsize or 1)] + if dtype.is_int_type(): + if dtype.kind == BOOLLTR: + w_maxobj = space.wrap(1) + w_minobj = space.wrap(0) + elif dtype.is_signed(): + w_maxobj = space.wrap(r_longlong((1 << (itemsize*8 - 1)) + - 1)) + w_minobj = space.wrap(r_longlong(-1) << (itemsize*8 - 1)) + else: + w_maxobj = space.wrap(r_ulonglong(1 << (itemsize*8)) - 1) + w_minobj = space.wrap(0) + items_w = items_w + [w_maxobj, w_minobj] + items_w = items_w + [dtype.w_box_type] + + w_tuple = space.newtuple(items_w) + space.setitem(w_typeinfo, space.wrap(k), w_tuple) + self.w_typeinfo = w_typeinfo def get_dtype_cache(space): return space.fromcache(DtypeCache) diff --git a/pypy/module/micronumpy/interp_iter.py b/pypy/module/micronumpy/interp_iter.py --- a/pypy/module/micronumpy/interp_iter.py +++ 
b/pypy/module/micronumpy/interp_iter.py @@ -2,7 +2,7 @@ from pypy.rlib import jit from pypy.rlib.objectmodel import instantiate from pypy.module.micronumpy.strides import calculate_broadcast_strides,\ - calculate_slice_strides, calculate_dot_strides + calculate_slice_strides, calculate_dot_strides, enumerate_chunks """ This is a mini-tutorial on iterators, strides, and memory layout. It assumes you are familiar with the terms, see @@ -42,28 +42,81 @@ we can go faster. All the calculations happen in next() -next_step_x() tries to do the iteration for a number of steps at once, +next_skip_x() tries to do the iteration for a number of steps at once, but then we cannot gaurentee that we only overflow one single shape dimension, perhaps we could overflow times in one big step. """ # structures to describe slicing -class Chunk(object): +class BaseChunk(object): + pass + +class RecordChunk(BaseChunk): + def __init__(self, name): + self.name = name + + def apply(self, arr): + from pypy.module.micronumpy.interp_numarray import W_NDimSlice + + arr = arr.get_concrete() + ofs, subdtype = arr.dtype.fields[self.name] + # strides backstrides are identical, ofs only changes start + return W_NDimSlice(arr.start + ofs, arr.strides[:], arr.backstrides[:], + arr.shape[:], arr, subdtype) + +class Chunks(BaseChunk): + def __init__(self, l): + self.l = l + + @jit.unroll_safe + def extend_shape(self, old_shape): + shape = [] + i = -1 + for i, c in enumerate_chunks(self.l): + if c.step != 0: + shape.append(c.lgt) + s = i + 1 + assert s >= 0 + return shape[:] + old_shape[s:] + + def apply(self, arr): + from pypy.module.micronumpy.interp_numarray import W_NDimSlice,\ + VirtualSlice, ConcreteArray + + shape = self.extend_shape(arr.shape) + if not isinstance(arr, ConcreteArray): + return VirtualSlice(arr, self, shape) + r = calculate_slice_strides(arr.shape, arr.start, arr.strides, + arr.backstrides, self.l) + _, start, strides, backstrides = r + return W_NDimSlice(start, strides[:], 
backstrides[:], + shape[:], arr) + + +class Chunk(BaseChunk): + axis_step = 1 + def __init__(self, start, stop, step, lgt): self.start = start self.stop = stop self.step = step self.lgt = lgt - def extend_shape(self, shape): - if self.step != 0: - shape.append(self.lgt) - def __repr__(self): return 'Chunk(%d, %d, %d, %d)' % (self.start, self.stop, self.step, self.lgt) +class NewAxisChunk(Chunk): + start = 0 + stop = 1 + step = 1 + lgt = 1 + axis_step = 0 + + def __init__(self): + pass + class BaseTransform(object): pass @@ -95,17 +148,19 @@ raise NotImplementedError class ArrayIterator(BaseIterator): - def __init__(self, size): + def __init__(self, size, element_size): self.offset = 0 self.size = size + self.element_size = element_size def next(self, shapelen): return self.next_skip_x(1) - def next_skip_x(self, ofs): + def next_skip_x(self, x): arr = instantiate(ArrayIterator) arr.size = self.size - arr.offset = self.offset + ofs + arr.offset = self.offset + x * self.element_size + arr.element_size = self.element_size return arr def next_no_increase(self, shapelen): @@ -152,7 +207,7 @@ elif isinstance(t, ViewTransform): r = calculate_slice_strides(self.res_shape, self.offset, self.strides, - self.backstrides, t.chunks) + self.backstrides, t.chunks.l) return ViewIterator(r[1], r[2], r[3], r[0]) @jit.unroll_safe @@ -214,7 +269,7 @@ def apply_transformations(self, arr, transformations): v = BaseIterator.apply_transformations(self, arr, transformations) - if len(arr.shape) == 1: + if len(arr.shape) == 1 and len(v.res_shape) == 1: return OneDimIterator(self.offset, self.strides[0], self.res_shape[0]) return v diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -7,10 +7,10 @@ from pypy.module.micronumpy.appbridge import get_appbridge_cache from pypy.module.micronumpy.dot import multidim_dot, match_dot_shapes from 
pypy.module.micronumpy.interp_iter import (ArrayIterator, - SkipLastAxisIterator, Chunk, ViewIterator) -from pypy.module.micronumpy.strides import (calculate_slice_strides, - shape_agreement, find_shape_and_elems, get_shape_from_iterable, - calc_new_strides, to_coords) + SkipLastAxisIterator, Chunk, ViewIterator, Chunks, RecordChunk, + NewAxisChunk) +from pypy.module.micronumpy.strides import (shape_agreement, + find_shape_and_elems, get_shape_from_iterable, calc_new_strides, to_coords) from pypy.rlib import jit from pypy.rlib.rstring import StringBuilder from pypy.rpython.lltypesystem import lltype, rffi @@ -47,7 +47,7 @@ ) flat_set_driver = jit.JitDriver( greens=['shapelen', 'base'], - reds=['step', 'ai', 'lngth', 'arr', 'basei'], + reds=['step', 'lngth', 'ri', 'arr', 'basei'], name='numpy_flatset', ) @@ -79,12 +79,13 @@ dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) ) - size, shape = _find_size_and_shape(space, w_size) - return space.wrap(W_NDimArray(size, shape[:], dtype=dtype)) + shape = _find_shape(space, w_size) + return space.wrap(W_NDimArray(shape[:], dtype=dtype)) def _unaryop_impl(ufunc_name): - def impl(self, space): - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self]) + def impl(self, space, w_out=None): + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [self, w_out]) return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) descr_pos = _unaryop_impl("positive") @@ -93,8 +94,9 @@ descr_invert = _unaryop_impl("invert") def _binop_impl(ufunc_name): - def impl(self, space, w_other): - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self, w_other]) + def impl(self, space, w_other, w_out=None): + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [self, w_other, w_out]) return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) descr_add = _binop_impl("add") @@ -124,12 +126,12 @@ return 
space.newtuple([w_quotient, w_remainder]) def _binop_right_impl(ufunc_name): - def impl(self, space, w_other): + def impl(self, space, w_other, w_out=None): w_other = scalar_w(space, interp_ufuncs.find_dtype_for_scalar(space, w_other, self.find_dtype()), w_other ) - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self]) + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self, w_out]) return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name) descr_radd = _binop_right_impl("add") @@ -152,13 +154,21 @@ return space.newtuple([w_quotient, w_remainder]) def _reduce_ufunc_impl(ufunc_name, promote_to_largest=False): - def impl(self, space, w_axis=None): + def impl(self, space, w_axis=None, w_out=None): if space.is_w(w_axis, space.w_None): axis = -1 else: axis = space.int_w(w_axis) + if space.is_w(w_out, space.w_None) or not w_out: + out = None + elif not isinstance(w_out, BaseArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) + else: + out = w_out return getattr(interp_ufuncs.get(space), ufunc_name).reduce(space, - self, True, promote_to_largest, axis) + self, True, promote_to_largest, axis, + False, out) return func_with_new_name(impl, "reduce_%s_impl" % ufunc_name) descr_sum = _reduce_ufunc_impl("add") @@ -213,6 +223,7 @@ def descr_dot(self, space, w_other): other = convert_to_array(space, w_other) if isinstance(other, Scalar): + #Note: w_out is not modified, this is numpy compliant. return self.descr_mul(space, other) elif len(self.shape) < 2 and len(other.shape) < 2: w_res = self.descr_mul(space, other) @@ -225,8 +236,7 @@ return scalar_w(space, dtype, space.wrap(0)) # Do the dims match? 
out_shape, other_critical_dim = match_dot_shapes(space, self, other) - out_size = support.product(out_shape) - result = W_NDimArray(out_size, out_shape, dtype) + result = W_NDimArray(out_shape, dtype) # This is the place to add fpypy and blas return multidim_dot(space, self.get_concrete(), other.get_concrete(), result, dtype, @@ -245,7 +255,7 @@ return space.wrap(self.find_dtype().itemtype.get_element_size()) def descr_get_nbytes(self, space): - return space.wrap(self.size * self.find_dtype().itemtype.get_element_size()) + return space.wrap(self.size) @jit.unroll_safe def descr_get_shape(self, space): @@ -253,13 +263,16 @@ def descr_set_shape(self, space, w_iterable): new_shape = get_shape_from_iterable(space, - self.size, w_iterable) + support.product(self.shape), w_iterable) if isinstance(self, Scalar): return self.get_concrete().setshape(space, new_shape) def descr_get_size(self, space): - return space.wrap(self.size) + return space.wrap(self.get_size()) + + def get_size(self): + return self.size // self.find_dtype().get_size() def descr_copy(self, space): return self.copy(space) @@ -279,7 +292,7 @@ def empty_copy(self, space, dtype): shape = self.shape - return W_NDimArray(support.product(shape), shape[:], dtype, 'C') + return W_NDimArray(shape[:], dtype, 'C') def descr_len(self, space): if len(self.shape): @@ -320,7 +333,16 @@ """ The result of getitem/setitem is a single item if w_idx is a list of scalars that match the size of shape """ + if space.isinstance_w(w_idx, space.w_str): + return False shape_len = len(self.shape) + if space.isinstance_w(w_idx, space.w_tuple): + for w_item in space.fixedview(w_idx): + if (space.isinstance_w(w_item, space.w_slice) or + space.is_w(w_item, space.w_None)): + return False + elif space.is_w(w_idx, space.w_None): + return False if shape_len == 0: raise OperationError(space.w_IndexError, space.wrap( "0-d arrays can't be indexed")) @@ -336,43 +358,55 @@ if lgt > shape_len: raise OperationError(space.w_IndexError, 
space.wrap("invalid index")) - if lgt < shape_len: - return False - for w_item in space.fixedview(w_idx): - if space.isinstance_w(w_item, space.w_slice): - return False - return True + return lgt == shape_len @jit.unroll_safe def _prepare_slice_args(self, space, w_idx): + if space.isinstance_w(w_idx, space.w_str): + idx = space.str_w(w_idx) + dtype = self.find_dtype() + if not dtype.is_record_type() or idx not in dtype.fields: + raise OperationError(space.w_ValueError, space.wrap( + "field named %s not defined" % idx)) + return RecordChunk(idx) if (space.isinstance_w(w_idx, space.w_int) or space.isinstance_w(w_idx, space.w_slice)): - return [Chunk(*space.decode_index4(w_idx, self.shape[0]))] - return [Chunk(*space.decode_index4(w_item, self.shape[i])) for i, w_item in - enumerate(space.fixedview(w_idx))] + return Chunks([Chunk(*space.decode_index4(w_idx, self.shape[0]))]) + elif space.is_w(w_idx, space.w_None): + return Chunks([NewAxisChunk()]) + result = [] + i = 0 + for w_item in space.fixedview(w_idx): + if space.is_w(w_item, space.w_None): + result.append(NewAxisChunk()) + else: + result.append(Chunk(*space.decode_index4(w_item, + self.shape[i]))) + i += 1 + return Chunks(result) - def count_all_true(self, arr): - sig = arr.find_sig() - frame = sig.create_frame(arr) - shapelen = len(arr.shape) + def count_all_true(self): + sig = self.find_sig() + frame = sig.create_frame(self) + shapelen = len(self.shape) s = 0 iter = None while not frame.done(): - count_driver.jit_merge_point(arr=arr, frame=frame, iter=iter, s=s, + count_driver.jit_merge_point(arr=self, frame=frame, iter=iter, s=s, shapelen=shapelen) iter = frame.get_final_iter() - s += arr.dtype.getitem_bool(arr.storage, iter.offset) + s += self.dtype.getitem_bool(self, iter.offset) frame.next(shapelen) return s def getitem_filter(self, space, arr): concr = arr.get_concrete() - if concr.size > self.size: + if concr.get_size() > self.get_size(): raise OperationError(space.w_IndexError, space.wrap("index out of 
range for array")) - size = self.count_all_true(concr) - res = W_NDimArray(size, [size], self.find_dtype()) - ri = ArrayIterator(size) + size = concr.count_all_true() + res = W_NDimArray([size], self.find_dtype()) + ri = res.create_iter() shapelen = len(self.shape) argi = concr.create_iter() sig = self.find_sig() @@ -382,7 +416,7 @@ filter_driver.jit_merge_point(concr=concr, argi=argi, ri=ri, frame=frame, v=v, res=res, sig=sig, shapelen=shapelen, self=self) - if concr.dtype.getitem_bool(concr.storage, argi.offset): + if concr.dtype.getitem_bool(concr, argi.offset): v = sig.eval(frame, self) res.setitem(ri.offset, v) ri = ri.next(1) @@ -392,23 +426,6 @@ frame.next(shapelen) return res - def setitem_filter(self, space, idx, val): - size = self.count_all_true(idx) - arr = SliceArray([size], self.dtype, self, val) - sig = arr.find_sig() - shapelen = len(self.shape) - frame = sig.create_frame(arr) - idxi = idx.create_iter() - while not frame.done(): - filter_set_driver.jit_merge_point(idx=idx, idxi=idxi, sig=sig, - frame=frame, arr=arr, - shapelen=shapelen) - if idx.dtype.getitem_bool(idx.storage, idxi.offset): - sig.eval(frame, arr) - frame.next_from_second(1) - frame.next_first(shapelen) - idxi = idxi.next(shapelen) - def descr_getitem(self, space, w_idx): if (isinstance(w_idx, BaseArray) and w_idx.shape == self.shape and w_idx.find_dtype().is_bool_type()): @@ -418,7 +435,24 @@ item = concrete._index_of_single_item(space, w_idx) return concrete.getitem(item) chunks = self._prepare_slice_args(space, w_idx) - return self.create_slice(chunks) + return chunks.apply(self) + + def setitem_filter(self, space, idx, val): + size = idx.count_all_true() + arr = SliceArray([size], self.dtype, self, val) + sig = arr.find_sig() + shapelen = len(self.shape) + frame = sig.create_frame(arr) + idxi = idx.create_iter() + while not frame.done(): + filter_set_driver.jit_merge_point(idx=idx, idxi=idxi, sig=sig, + frame=frame, arr=arr, + shapelen=shapelen) + if idx.dtype.getitem_bool(idx, 
idxi.offset): + sig.eval(frame, arr) + frame.next_from_second(1) + frame.next_first(shapelen) + idxi = idxi.next(shapelen) def descr_setitem(self, space, w_idx, w_value): self.invalidated() @@ -436,26 +470,9 @@ if not isinstance(w_value, BaseArray): w_value = convert_to_array(space, w_value) chunks = self._prepare_slice_args(space, w_idx) - view = self.create_slice(chunks).get_concrete() + view = chunks.apply(self).get_concrete() view.setslice(space, w_value) - @jit.unroll_safe - def create_slice(self, chunks): - shape = [] - i = -1 - for i, chunk in enumerate(chunks): - chunk.extend_shape(shape) - s = i + 1 - assert s >= 0 - shape += self.shape[s:] - if not isinstance(self, ConcreteArray): - return VirtualSlice(self, chunks, shape) - r = calculate_slice_strides(self.shape, self.start, self.strides, - self.backstrides, chunks) - _, start, strides, backstrides = r - return W_NDimSlice(start, strides[:], backstrides[:], - shape[:], self) - def descr_reshape(self, space, args_w): """reshape(...) a.reshape(shape) @@ -472,13 +489,16 @@ w_shape = args_w[0] else: w_shape = space.newtuple(args_w) - new_shape = get_shape_from_iterable(space, self.size, w_shape) + new_shape = get_shape_from_iterable(space, support.product(self.shape), + w_shape) return self.reshape(space, new_shape) def reshape(self, space, new_shape): concrete = self.get_concrete() # Since we got to here, prod(new_shape) == self.size - new_strides = calc_new_strides(new_shape, concrete.shape, + new_strides = None + if self.size > 0: + new_strides = calc_new_strides(new_shape, concrete.shape, concrete.strides, concrete.order) if new_strides: # We can create a view, strides somehow match up. 
@@ -505,14 +525,14 @@ ) return w_result - def descr_mean(self, space, w_axis=None): + def descr_mean(self, space, w_axis=None, w_out=None): if space.is_w(w_axis, space.w_None): w_axis = space.wrap(-1) - w_denom = space.wrap(self.size) + w_denom = space.wrap(support.product(self.shape)) else: dim = space.int_w(w_axis) w_denom = space.wrap(self.shape[dim]) - return space.div(self.descr_sum_promote(space, w_axis), w_denom) + return space.div(self.descr_sum_promote(space, w_axis, w_out), w_denom) def descr_var(self, space, w_axis=None): return get_appbridge_cache(space).call_method(space, '_var', self, @@ -527,7 +547,7 @@ concr.fill(space, w_value) def descr_nonzero(self, space): - if self.size > 1: + if self.get_size() > 1: raise OperationError(space.w_ValueError, space.wrap( "The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()")) concr = self.get_concrete_or_scalar() @@ -606,8 +626,7 @@ space.wrap("axis unsupported for take")) index_i = index.create_iter() res_shape = index.shape - size = support.product(res_shape) - res = W_NDimArray(size, res_shape[:], concr.dtype, concr.order) + res = W_NDimArray(res_shape[:], concr.dtype, concr.order) res_i = res.create_iter() shapelen = len(index.shape) sig = concr.find_sig() @@ -646,6 +665,11 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "non-int arg not supported")) + def descr_tostring(self, space): + ra = ToStringArray(self) + loop.compute(ra) + return space.wrap(ra.s.build()) + def compute_first_step(self, sig, frame): pass @@ -667,8 +691,7 @@ """ Intermediate class representing a literal. 
""" - size = 1 - _attrs_ = ["dtype", "value", "shape"] + _attrs_ = ["dtype", "value", "shape", "size"] def __init__(self, dtype, value): self.shape = [] @@ -676,6 +699,7 @@ self.dtype = dtype assert isinstance(value, interp_boxes.W_GenericBox) self.value = value + self.size = dtype.get_size() def find_dtype(self): return self.dtype @@ -693,8 +717,7 @@ return self def reshape(self, space, new_shape): - size = support.product(new_shape) - res = W_NDimArray(size, new_shape, self.dtype, 'C') + res = W_NDimArray(new_shape, self.dtype, 'C') res.setitem(0, self.value) return res @@ -702,11 +725,13 @@ """ Class for representing virtual arrays, such as binary ops or ufuncs """ - def __init__(self, name, shape, res_dtype): + def __init__(self, name, shape, res_dtype, out_arg=None): BaseArray.__init__(self, shape) self.forced_result = None self.res_dtype = res_dtype self.name = name + self.res = out_arg + self.size = support.product(self.shape) * res_dtype.get_size() def _del_sources(self): # Function for deleting references to source arrays, @@ -714,13 +739,18 @@ raise NotImplementedError def compute(self): - ra = ResultArray(self, self.size, self.shape, self.res_dtype) + ra = ResultArray(self, self.shape, self.res_dtype, self.res) loop.compute(ra) + if self.res: + broadcast_dims = len(self.res.shape) - len(self.shape) + chunks = [Chunk(0,0,0,0)] * broadcast_dims + \ + [Chunk(0, i, 1, i) for i in self.shape] + return Chunks(chunks).apply(self.res) return ra.left def force_if_needed(self): if self.forced_result is None: - self.forced_result = self.compute() + self.forced_result = self.compute().get_concrete() self._del_sources() def get_concrete(self): @@ -742,7 +772,6 @@ def __init__(self, child, chunks, shape): self.child = child self.chunks = chunks - self.size = support.product(shape) VirtualArray.__init__(self, 'slice', shape, child.find_dtype()) def create_sig(self): @@ -754,15 +783,16 @@ def force_if_needed(self): if self.forced_result is None: concr = 
self.child.get_concrete() - self.forced_result = concr.create_slice(self.chunks) + self.forced_result = self.chunks.apply(concr) def _del_sources(self): self.child = None class Call1(VirtualArray): - def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, values): - VirtualArray.__init__(self, name, shape, res_dtype) + def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, values, + out_arg=None): + VirtualArray.__init__(self, name, shape, res_dtype, out_arg) self.values = values self.size = values.size self.ufunc = ufunc @@ -774,6 +804,12 @@ def create_sig(self): if self.forced_result is not None: return self.forced_result.create_sig() + if self.shape != self.values.shape: + #This happens if out arg is used + return signature.BroadcastUfunc(self.ufunc, self.name, + self.calc_dtype, + self.values.create_sig(), + self.res.create_sig()) return signature.Call1(self.ufunc, self.name, self.calc_dtype, self.values.create_sig()) @@ -781,13 +817,13 @@ """ Intermediate class for performing binary operations. 
""" - def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, left, right): - VirtualArray.__init__(self, name, shape, res_dtype) + def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, left, right, + out_arg=None): + VirtualArray.__init__(self, name, shape, res_dtype, out_arg) self.ufunc = ufunc self.left = left self.right = right self.calc_dtype = calc_dtype - self.size = support.product(self.shape) def _del_sources(self): self.left = None @@ -815,14 +851,34 @@ self.left.create_sig(), self.right.create_sig()) class ResultArray(Call2): - def __init__(self, child, size, shape, dtype, res=None, order='C'): + def __init__(self, child, shape, dtype, res=None, order='C'): if res is None: - res = W_NDimArray(size, shape, dtype, order) + res = W_NDimArray(shape, dtype, order) Call2.__init__(self, None, 'assign', shape, dtype, dtype, res, child) def create_sig(self): - return signature.ResultSignature(self.res_dtype, self.left.create_sig(), - self.right.create_sig()) + if self.left.shape != self.right.shape: + sig = signature.BroadcastResultSignature(self.res_dtype, + self.left.create_sig(), self.right.create_sig()) + else: + sig = signature.ResultSignature(self.res_dtype, + self.left.create_sig(), self.right.create_sig()) + return sig + +class ToStringArray(Call1): + def __init__(self, child): + dtype = child.find_dtype() + self.item_size = dtype.itemtype.get_element_size() + self.s = StringBuilder(child.size * self.item_size) + Call1.__init__(self, None, 'tostring', child.shape, dtype, dtype, + child) + self.res_str = W_NDimArray([1], dtype, order='C') + self.res_str_casted = rffi.cast(rffi.CArrayPtr(lltype.Char), + self.res_str.storage) + + def create_sig(self): + return signature.ToStringSignature(self.calc_dtype, + self.values.create_sig()) def done_if_true(dtype, val): return dtype.itemtype.bool(val) @@ -895,13 +951,13 @@ """ _immutable_fields_ = ['storage'] - def __init__(self, size, shape, dtype, order='C', parent=None): - self.size = size + def 
__init__(self, shape, dtype, order='C', parent=None): self.parent = parent + self.size = support.product(shape) * dtype.get_size() if parent is not None: self.storage = parent.storage else: - self.storage = dtype.malloc(size) + self.storage = dtype.itemtype.malloc(self.size) self.order = order self.dtype = dtype if self.strides is None: @@ -920,13 +976,14 @@ return self.dtype def getitem(self, item): - return self.dtype.getitem(self.storage, item) + return self.dtype.getitem(self, item) def setitem(self, item, value): self.invalidated() - self.dtype.setitem(self.storage, item, value) + self.dtype.setitem(self, item, value.convert_to(self.dtype)) def calc_strides(self, shape): + dtype = self.find_dtype() strides = [] backstrides = [] s = 1 @@ -934,8 +991,8 @@ if self.order == 'C': shape_rev.reverse() for sh in shape_rev: - strides.append(s) - backstrides.append(s * (sh - 1)) + strides.append(s * dtype.get_size()) + backstrides.append(s * (sh - 1) * dtype.get_size()) s *= sh if self.order == 'C': strides.reverse() @@ -983,9 +1040,9 @@ shapelen = len(self.shape) if shapelen == 1: rffi.c_memcpy( - rffi.ptradd(self.storage, self.start * itemsize), - rffi.ptradd(w_value.storage, w_value.start * itemsize), - self.size * itemsize + rffi.ptradd(self.storage, self.start), + rffi.ptradd(w_value.storage, w_value.start), + self.size ) else: dest = SkipLastAxisIterator(self) @@ -1000,7 +1057,7 @@ dest.next() def copy(self, space): - array = W_NDimArray(self.size, self.shape[:], self.dtype, self.order) + array = W_NDimArray(self.shape[:], self.dtype, self.order) array.setslice(space, self) return array @@ -1014,14 +1071,15 @@ class W_NDimSlice(ViewArray): - def __init__(self, start, strides, backstrides, shape, parent): + def __init__(self, start, strides, backstrides, shape, parent, dtype=None): assert isinstance(parent, ConcreteArray) if isinstance(parent, W_NDimSlice): parent = parent.parent self.strides = strides self.backstrides = backstrides - ViewArray.__init__(self, 
support.product(shape), shape, parent.dtype, - parent.order, parent) + if dtype is None: + dtype = parent.dtype + ViewArray.__init__(self, shape, dtype, parent.order, parent) self.start = start def create_iter(self, transforms=None): @@ -1031,18 +1089,19 @@ def setshape(self, space, new_shape): if len(self.shape) < 1: return - elif len(self.shape) < 2: + elif len(self.shape) < 2 or self.size < 1: # TODO: this code could be refactored into calc_strides # but then calc_strides would have to accept a stepping factor strides = [] backstrides = [] - s = self.strides[0] + dtype = self.find_dtype() + s = self.strides[0] // dtype.get_size() if self.order == 'C': new_shape.reverse() for sh in new_shape: - strides.append(s) - backstrides.append(s * (sh - 1)) - s *= sh + strides.append(s * dtype.get_size()) + backstrides.append(s * (sh - 1) * dtype.get_size()) + s *= max(1, sh) if self.order == 'C': strides.reverse() backstrides.reverse() @@ -1069,14 +1128,16 @@ """ def setitem(self, item, value): self.invalidated() - self.dtype.setitem(self.storage, item, value) + self.dtype.setitem(self, item, value) def setshape(self, space, new_shape): self.shape = new_shape self.calc_strides(new_shape) def create_iter(self, transforms=None): - return ArrayIterator(self.size).apply_transformations(self, transforms) + esize = self.find_dtype().get_size() + return ArrayIterator(self.size, esize).apply_transformations(self, + transforms) def create_sig(self): return signature.ArraySignature(self.dtype) @@ -1084,22 +1145,18 @@ def __del__(self): lltype.free(self.storage, flavor='raw', track_allocation=False) -def _find_size_and_shape(space, w_size): +def _find_shape(space, w_size): if space.isinstance_w(w_size, space.w_int): - size = space.int_w(w_size) - shape = [size] - else: - size = 1 - shape = [] - for w_item in space.fixedview(w_size): - item = space.int_w(w_item) - size *= item - shape.append(item) - return size, shape + return [space.int_w(w_size)] + shape = [] + for w_item in 
space.fixedview(w_size): + shape.append(space.int_w(w_item)) + return shape @unwrap_spec(subok=bool, copy=bool, ownmaskna=bool) def array(space, w_item_or_iterable, w_dtype=None, w_order=None, - subok=True, copy=True, w_maskna=None, ownmaskna=False): + subok=True, copy=True, w_maskna=None, ownmaskna=False, + w_ndmin=None): # find scalar if w_maskna is None: w_maskna = space.w_None @@ -1129,28 +1186,33 @@ if copy: return w_item_or_iterable.copy(space) return w_item_or_iterable - shape, elems_w = find_shape_and_elems(space, w_item_or_iterable) + if w_dtype is None or space.is_w(w_dtype, space.w_None): + dtype = None + else: + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) + shape, elems_w = find_shape_and_elems(space, w_item_or_iterable, dtype) # they come back in C order - size = len(elems_w) - if w_dtype is None or space.is_w(w_dtype, space.w_None): - w_dtype = None + if dtype is None: for w_elem in elems_w: - w_dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, - w_dtype) - if w_dtype is interp_dtype.get_dtype_cache(space).w_float64dtype: + dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, + dtype) + if dtype is interp_dtype.get_dtype_cache(space).w_float64dtype: break - if w_dtype is None: - w_dtype = space.w_None - dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) - ) - arr = W_NDimArray(size, shape[:], dtype=dtype, order=order) + if dtype is None: + dtype = interp_dtype.get_dtype_cache(space).w_float64dtype shapelen = len(shape) - arr_iter = ArrayIterator(arr.size) + if w_ndmin is not None and not space.is_w(w_ndmin, space.w_None): + ndmin = space.int_w(w_ndmin) + if ndmin > shapelen: + shape = [1] * (ndmin - shapelen) + shape + shapelen = ndmin + arr = W_NDimArray(shape[:], dtype=dtype, order=order) + arr_iter = arr.create_iter() # XXX we might want to have a jitdriver here for i in range(len(elems_w)): w_elem 
= elems_w[i] - dtype.setitem(arr.storage, arr_iter.offset, + dtype.setitem(arr, arr_iter.offset, dtype.coerce(space, w_elem)) arr_iter = arr_iter.next(shapelen) return arr @@ -1159,22 +1221,22 @@ dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) ) - size, shape = _find_size_and_shape(space, w_size) + shape = _find_shape(space, w_size) if not shape: return scalar_w(space, dtype, space.wrap(0)) - return space.wrap(W_NDimArray(size, shape[:], dtype=dtype)) + return space.wrap(W_NDimArray(shape[:], dtype=dtype)) def ones(space, w_size, w_dtype=None): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) ) - size, shape = _find_size_and_shape(space, w_size) + shape = _find_shape(space, w_size) if not shape: return scalar_w(space, dtype, space.wrap(1)) - arr = W_NDimArray(size, shape[:], dtype=dtype) + arr = W_NDimArray(shape[:], dtype=dtype) one = dtype.box(1) - arr.dtype.fill(arr.storage, one, 0, size) + arr.dtype.fill(arr.storage, one, 0, arr.size) return space.wrap(arr) @unwrap_spec(arr=BaseArray, skipna=bool, keepdims=bool) @@ -1222,13 +1284,13 @@ "array dimensions must agree except for axis being concatenated")) elif i == axis: shape[i] += axis_size - res = W_NDimArray(support.product(shape), shape, dtype, 'C') + res = W_NDimArray(shape, dtype, 'C') chunks = [Chunk(0, i, 1, i) for i in shape] axis_start = 0 for arr in args_w: chunks[axis] = Chunk(axis_start, axis_start + arr.shape[axis], 1, arr.shape[axis]) - res.create_slice(chunks).setslice(space, arr) + Chunks(chunks).apply(res).setslice(space, arr) axis_start += arr.shape[axis] return res @@ -1316,6 +1378,7 @@ std = interp2app(BaseArray.descr_std), fill = interp2app(BaseArray.descr_fill), + tostring = interp2app(BaseArray.descr_tostring), copy = interp2app(BaseArray.descr_copy), flatten = interp2app(BaseArray.descr_flatten), @@ -1338,7 +1401,7 @@ self.iter = 
sig.create_frame(arr).get_final_iter() self.base = arr self.index = 0 - ViewArray.__init__(self, arr.size, [arr.size], arr.dtype, arr.order, + ViewArray.__init__(self, [arr.get_size()], arr.dtype, arr.order, arr) def descr_next(self, space): @@ -1353,7 +1416,7 @@ return self def descr_len(self, space): - return space.wrap(self.size) + return space.wrap(self.get_size()) def descr_index(self, space): return space.wrap(self.index) @@ -1371,28 +1434,26 @@ raise OperationError(space.w_IndexError, space.wrap('unsupported iterator index')) base = self.base - start, stop, step, lngth = space.decode_index4(w_idx, base.size) + start, stop, step, lngth = space.decode_index4(w_idx, base.get_size()) # setslice would have been better, but flat[u:v] for arbitrary # shapes of array a cannot be represented as a[x1:x2, y1:y2] basei = ViewIterator(base.start, base.strides, - base.backstrides,base.shape) + base.backstrides, base.shape) shapelen = len(base.shape) basei = basei.next_skip_x(shapelen, start) if lngth <2: return base.getitem(basei.offset) - ri = ArrayIterator(lngth) - res = W_NDimArray(lngth, [lngth], base.dtype, - base.order) + res = W_NDimArray([lngth], base.dtype, base.order) + ri = res.create_iter() while not ri.done(): flat_get_driver.jit_merge_point(shapelen=shapelen, base=base, basei=basei, step=step, res=res, - ri=ri, - ) + ri=ri) w_val = base.getitem(basei.offset) - res.setitem(ri.offset,w_val) + res.setitem(ri.offset, w_val) basei = basei.next_skip_x(shapelen, step) ri = ri.next(shapelen) return res @@ -1403,27 +1464,28 @@ raise OperationError(space.w_IndexError, space.wrap('unsupported iterator index')) base = self.base - start, stop, step, lngth = space.decode_index4(w_idx, base.size) + start, stop, step, lngth = space.decode_index4(w_idx, base.get_size()) arr = convert_to_array(space, w_value) - ai = 0 + ri = arr.create_iter() basei = ViewIterator(base.start, base.strides, - base.backstrides,base.shape) + base.backstrides, base.shape) shapelen = 
len(base.shape) basei = basei.next_skip_x(shapelen, start) while lngth > 0: flat_set_driver.jit_merge_point(shapelen=shapelen, - basei=basei, - base=base, - step=step, - arr=arr, - ai=ai, - lngth=lngth, - ) - v = arr.getitem(ai).convert_to(base.dtype) + basei=basei, + base=base, + step=step, + arr=arr, + lngth=lngth, + ri=ri) + v = arr.getitem(ri.offset).convert_to(base.dtype) base.setitem(basei.offset, v) # need to repeat input values until all assignments are done - ai = (ai + 1) % arr.size basei = basei.next_skip_x(shapelen, step) + ri = ri.next(shapelen) + # WTF is numpy thinking? + ri.offset %= arr.size lngth -= 1 def create_sig(self): @@ -1431,9 +1493,9 @@ def create_iter(self, transforms=None): return ViewIterator(self.base.start, self.base.strides, - self.base.backstrides, - self.base.shape).apply_transformations(self.base, - transforms) + self.base.backstrides, + self.base.shape).apply_transformations(self.base, + transforms) def descr_base(self, space): return space.wrap(self.base) diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -51,9 +51,11 @@ raise OperationError(space.w_ValueError, space.wrap( "string is smaller than requested size")) - a = W_NDimArray(num_items, [num_items], dtype=dtype) - for i, val in enumerate(items): - a.dtype.setitem(a.storage, i, val) + a = W_NDimArray([num_items], dtype=dtype) + ai = a.create_iter() + for val in items: + a.dtype.setitem(a, ai.offset, val) + ai = ai.next(1) return space.wrap(a) @@ -61,6 +63,7 @@ from pypy.module.micronumpy.interp_numarray import W_NDimArray itemsize = dtype.itemtype.get_element_size() + assert itemsize >= 0 if count == -1: count = length / itemsize if length % itemsize != 0: @@ -71,20 +74,23 @@ raise OperationError(space.w_ValueError, space.wrap( "string is smaller than requested size")) - a = W_NDimArray(count, [count], dtype=dtype) - 
fromstring_loop(a, count, dtype, itemsize, s) + a = W_NDimArray([count], dtype=dtype) + fromstring_loop(a, dtype, itemsize, s) return space.wrap(a) -fromstring_driver = jit.JitDriver(greens=[], reds=['count', 'i', 'itemsize', - 'dtype', 's', 'a']) +fromstring_driver = jit.JitDriver(greens=[], reds=['i', 'itemsize', + 'dtype', 'ai', 's', 'a']) -def fromstring_loop(a, count, dtype, itemsize, s): +def fromstring_loop(a, dtype, itemsize, s): i = 0 - while i < count: - fromstring_driver.jit_merge_point(a=a, count=count, dtype=dtype, - itemsize=itemsize, s=s, i=i) + ai = a.create_iter() + while not ai.done(): + fromstring_driver.jit_merge_point(a=a, dtype=dtype, + itemsize=itemsize, s=s, i=i, + ai=ai) val = dtype.itemtype.runpack_str(s[i*itemsize:i*itemsize + itemsize]) - a.dtype.setitem(a.storage, i, val) + a.dtype.setitem(a, ai.offset, val) + ai = ai.next(1) i += 1 @unwrap_spec(s=str, count=int, sep=str) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -28,26 +28,38 @@ return self.identity def descr_call(self, space, __args__): + from interp_numarray import BaseArray args_w, kwds_w = __args__.unpack() # it occurs to me that we don't support any datatypes that # require casting, change it later when we do kwds_w.pop('casting', None) w_subok = kwds_w.pop('subok', None) w_out = kwds_w.pop('out', space.w_None) - if ((w_subok is not None and space.is_true(w_subok)) or - not space.is_w(w_out, space.w_None)): + # Setup a default value for out + if space.is_w(w_out, space.w_None): + out = None + else: + out = w_out + if (w_subok is not None and space.is_true(w_subok)): raise OperationError(space.w_NotImplementedError, space.wrap("parameters unsupported")) if kwds_w or len(args_w) < self.argcount: raise OperationError(space.w_ValueError, space.wrap("invalid number of arguments") ) - elif len(args_w) > self.argcount: - # The extra arguments 
should actually be the output array, but we - # don't support that yet. + elif (len(args_w) > self.argcount and out is not None) or \ + (len(args_w) > self.argcount + 1): raise OperationError(space.w_TypeError, space.wrap("invalid number of arguments") ) + # Override the default out value, if it has been provided in w_wargs + if len(args_w) > self.argcount: + out = args_w[-1] + else: + args_w = args_w[:] + [out] + if out is not None and not isinstance(out, BaseArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) return self.call(space, args_w) @unwrap_spec(skipna=bool, keepdims=bool) @@ -105,28 +117,33 @@ array([[ 1, 5], [ 9, 13]]) """ - if not space.is_w(w_out, space.w_None): - raise OperationError(space.w_NotImplementedError, space.wrap( - "out not supported")) + from pypy.module.micronumpy.interp_numarray import BaseArray if w_axis is None: axis = 0 elif space.is_w(w_axis, space.w_None): axis = -1 else: axis = space.int_w(w_axis) - return self.reduce(space, w_obj, False, False, axis, keepdims) + if space.is_w(w_out, space.w_None): + out = None + elif not isinstance(w_out, BaseArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) + else: + out = w_out + return self.reduce(space, w_obj, False, False, axis, keepdims, out) - def reduce(self, space, w_obj, multidim, promote_to_largest, dim, - keepdims=False): + def reduce(self, space, w_obj, multidim, promote_to_largest, axis, + keepdims=False, out=None): from pypy.module.micronumpy.interp_numarray import convert_to_array, \ - Scalar, ReduceArray + Scalar, ReduceArray, W_NDimArray if self.argcount != 2: raise OperationError(space.w_ValueError, space.wrap("reduce only " "supported for binary functions")) assert isinstance(self, W_Ufunc2) obj = convert_to_array(space, w_obj) - if dim >= len(obj.shape): - raise OperationError(space.w_ValueError, space.wrap("axis(=%d) out of bounds" % dim)) + if axis >= len(obj.shape): + raise 
OperationError(space.w_ValueError, space.wrap("axis(=%d) out of bounds" % axis)) if isinstance(obj, Scalar): raise OperationError(space.w_TypeError, space.wrap("cannot reduce " "on a scalar")) @@ -144,21 +161,55 @@ if self.identity is None and size == 0: raise operationerrfmt(space.w_ValueError, "zero-size array to " "%s.reduce without identity", self.name) - if shapelen > 1 and dim >= 0: - return self.do_axis_reduce(obj, dtype, dim, keepdims) - arr = ReduceArray(self.func, self.name, self.identity, obj, dtype) - return loop.compute(arr) + if shapelen > 1 and axis >= 0: + if keepdims: + shape = obj.shape[:axis] + [1] + obj.shape[axis + 1:] + else: + shape = obj.shape[:axis] + obj.shape[axis + 1:] + if out: + #Test for shape agreement + if len(out.shape) > len(shape): + raise operationerrfmt(space.w_ValueError, + 'output parameter for reduction operation %s' + + ' has too many dimensions', self.name) + elif len(out.shape) < len(shape): + raise operationerrfmt(space.w_ValueError, + 'output parameter for reduction operation %s' + + ' does not have enough dimensions', self.name) + elif out.shape != shape: + raise operationerrfmt(space.w_ValueError, + 'output parameter shape mismatch, expecting [%s]' + + ' , got [%s]', + ",".join([str(x) for x in shape]), + ",".join([str(x) for x in out.shape]), + ) + #Test for dtype agreement, perhaps create an itermediate + #if out.dtype != dtype: + # raise OperationError(space.w_TypeError, space.wrap( + # "mismatched dtypes")) + return self.do_axis_reduce(obj, out.find_dtype(), axis, out) + else: + result = W_NDimArray(shape, dtype) + return self.do_axis_reduce(obj, dtype, axis, result) + if out: + if len(out.shape)>0: + raise operationerrfmt(space.w_ValueError, "output parameter " + "for reduction operation %s has too many" + " dimensions",self.name) + arr = ReduceArray(self.func, self.name, self.identity, obj, + out.find_dtype()) + val = loop.compute(arr) + assert isinstance(out, Scalar) + out.value = val + else: + arr = 
ReduceArray(self.func, self.name, self.identity, obj, dtype) + val = loop.compute(arr) + return val - def do_axis_reduce(self, obj, dtype, dim, keepdims): - from pypy.module.micronumpy.interp_numarray import AxisReduce,\ - W_NDimArray - if keepdims: - shape = obj.shape[:dim] + [1] + obj.shape[dim + 1:] - else: - shape = obj.shape[:dim] + obj.shape[dim + 1:] - result = W_NDimArray(support.product(shape), shape, dtype) + def do_axis_reduce(self, obj, dtype, axis, result): + from pypy.module.micronumpy.interp_numarray import AxisReduce arr = AxisReduce(self.func, self.name, self.identity, obj.shape, dtype, - result, obj, dim) + result, obj, axis) loop.compute(arr) return arr.left @@ -176,24 +227,55 @@ self.bool_result = bool_result def call(self, space, args_w): - from pypy.module.micronumpy.interp_numarray import (Call1, - convert_to_array, Scalar) - - [w_obj] = args_w + from pypy.module.micronumpy.interp_numarray import (Call1, BaseArray, + convert_to_array, Scalar, shape_agreement) + if len(args_w)<2: + [w_obj] = args_w + out = None + else: + [w_obj, out] = args_w + if space.is_w(out, space.w_None): + out = None w_obj = convert_to_array(space, w_obj) calc_dtype = find_unaryop_result_dtype(space, w_obj.find_dtype(), promote_to_float=self.promote_to_float, promote_bools=self.promote_bools) - if self.bool_result: + if out: + if not isinstance(out, BaseArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) + res_dtype = out.find_dtype() + elif self.bool_result: res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype else: res_dtype = calc_dtype if isinstance(w_obj, Scalar): - return space.wrap(self.func(calc_dtype, w_obj.value.convert_to(calc_dtype))) - - w_res = Call1(self.func, self.name, w_obj.shape, calc_dtype, res_dtype, - w_obj) + arr = self.func(calc_dtype, w_obj.value.convert_to(calc_dtype)) + if isinstance(out,Scalar): + out.value=arr + elif isinstance(out, BaseArray): + out.fill(space, arr) + else: + out = arr + return 
space.wrap(out) + if out: + assert isinstance(out, BaseArray) # For translation + broadcast_shape = shape_agreement(space, w_obj.shape, out.shape) + if not broadcast_shape or broadcast_shape != out.shape: + raise operationerrfmt(space.w_ValueError, + 'output parameter shape mismatch, could not broadcast [%s]' + + ' to [%s]', + ",".join([str(x) for x in w_obj.shape]), + ",".join([str(x) for x in out.shape]), + ) + w_res = Call1(self.func, self.name, out.shape, calc_dtype, + res_dtype, w_obj, out) + #Force it immediately + w_res.get_concrete() + else: + w_res = Call1(self.func, self.name, w_obj.shape, calc_dtype, + res_dtype, w_obj) w_obj.add_invalidates(w_res) return w_res @@ -212,32 +294,61 @@ def call(self, space, args_w): from pypy.module.micronumpy.interp_numarray import (Call2, - convert_to_array, Scalar, shape_agreement) - - [w_lhs, w_rhs] = args_w + convert_to_array, Scalar, shape_agreement, BaseArray) + if len(args_w)>2: + [w_lhs, w_rhs, w_out] = args_w + else: + [w_lhs, w_rhs] = args_w + w_out = None w_lhs = convert_to_array(space, w_lhs) w_rhs = convert_to_array(space, w_rhs) - calc_dtype = find_binop_result_dtype(space, - w_lhs.find_dtype(), w_rhs.find_dtype(), - int_only=self.int_only, - promote_to_float=self.promote_to_float, - promote_bools=self.promote_bools, - ) + if space.is_w(w_out, space.w_None) or w_out is None: + out = None + calc_dtype = find_binop_result_dtype(space, + w_lhs.find_dtype(), w_rhs.find_dtype(), + int_only=self.int_only, + promote_to_float=self.promote_to_float, + promote_bools=self.promote_bools, + ) + elif not isinstance(w_out, BaseArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) + else: + out = w_out + calc_dtype = out.find_dtype() if self.comparison_func: res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype else: res_dtype = calc_dtype if isinstance(w_lhs, Scalar) and isinstance(w_rhs, Scalar): - return space.wrap(self.func(calc_dtype, + arr = self.func(calc_dtype, 
w_lhs.value.convert_to(calc_dtype), w_rhs.value.convert_to(calc_dtype) - )) + ) + if isinstance(out,Scalar): + out.value=arr + elif isinstance(out, BaseArray): + out.fill(space, arr) + else: + out = arr + return space.wrap(out) new_shape = shape_agreement(space, w_lhs.shape, w_rhs.shape) + # Test correctness of out.shape + if out and out.shape != shape_agreement(space, new_shape, out.shape): + raise operationerrfmt(space.w_ValueError, + 'output parameter shape mismatch, could not broadcast [%s]' + + ' to [%s]', + ",".join([str(x) for x in new_shape]), + ",".join([str(x) for x in out.shape]), + ) w_res = Call2(self.func, self.name, new_shape, calc_dtype, - res_dtype, w_lhs, w_rhs) + res_dtype, w_lhs, w_rhs, out) w_lhs.add_invalidates(w_res) w_rhs.add_invalidates(w_res) + if out: + #out.add_invalidates(w_res) #causes a recursion loop + w_res.get_concrete() return w_res @@ -314,7 +425,7 @@ return dt if dt.num >= 5: return interp_dtype.get_dtype_cache(space).w_float64dtype - for bytes, dtype in interp_dtype.get_dtype_cache(space).dtypes_by_num_bytes: + for bytes, dtype in interp_dtype.get_dtype_cache(space).float_dtypes_by_num_bytes: if (dtype.kind == interp_dtype.FLOATINGLTR and dtype.itemtype.get_element_size() > dt.itemtype.get_element_size()): return dtype @@ -404,6 +515,9 @@ ("greater_equal", "ge", 2, {"comparison_func": True}), ("isnan", "isnan", 1, {"bool_result": True}), ("isinf", "isinf", 1, {"bool_result": True}), + ("isneginf", "isneginf", 1, {"bool_result": True}), + ("isposinf", "isposinf", 1, {"bool_result": True}), + ("isfinite", "isfinite", 1, {"bool_result": True}), ('logical_and', 'logical_and', 2, {'comparison_func': True, 'identity': 1}), @@ -421,12 +535,16 @@ ("negative", "neg", 1), ("absolute", "abs", 1), ("sign", "sign", 1, {"promote_bools": True}), + ("signbit", "signbit", 1, {"bool_result": True}), ("reciprocal", "reciprocal", 1), ("fabs", "fabs", 1, {"promote_to_float": True}), + ("fmod", "fmod", 2, {"promote_to_float": True}), ("floor", 
"floor", 1, {"promote_to_float": True}), ("ceil", "ceil", 1, {"promote_to_float": True}), ("exp", "exp", 1, {"promote_to_float": True}), + ("exp2", "exp2", 1, {"promote_to_float": True}), + ("expm1", "expm1", 1, {"promote_to_float": True}), ('sqrt', 'sqrt', 1, {'promote_to_float': True}), @@ -436,6 +554,7 @@ ("arcsin", "arcsin", 1, {"promote_to_float": True}), ("arccos", "arccos", 1, {"promote_to_float": True}), ("arctan", "arctan", 1, {"promote_to_float": True}), + ("arctan2", "arctan2", 2, {"promote_to_float": True}), ("sinh", "sinh", 1, {"promote_to_float": True}), ("cosh", "cosh", 1, {"promote_to_float": True}), ("tanh", "tanh", 1, {"promote_to_float": True}), @@ -450,6 +569,8 @@ ("log2", "log2", 1, {"promote_to_float": True}), ("log10", "log10", 1, {"promote_to_float": True}), ("log1p", "log1p", 1, {"promote_to_float": True}), + ("logaddexp", "logaddexp", 2, {"promote_to_float": True}), + ("logaddexp2", "logaddexp2", 2, {"promote_to_float": True}), ]: self.add_ufunc(space, *ufunc_def) diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -4,6 +4,7 @@ ViewTransform, BroadcastTransform from pypy.tool.pairtype import extendabletype from pypy.module.micronumpy.loop import ComputationDone +from pypy.rlib import jit """ Signature specifies both the numpy expression that has been constructed and the assembler to be compiled. 
This is a very important observation - @@ -142,11 +143,10 @@ from pypy.module.micronumpy.interp_numarray import ConcreteArray concr = arr.get_concrete() assert isinstance(concr, ConcreteArray) - storage = concr.storage if self.iter_no >= len(iterlist): iterlist.append(concr.create_iter(transforms)) if self.array_no >= len(arraylist): - arraylist.append(storage) + arraylist.append(concr) def eval(self, frame, arr): iter = frame.iterators[self.iter_no] @@ -216,13 +216,14 @@ return self.child.eval(frame, arr.child) class Call1(Signature): - _immutable_fields_ = ['unfunc', 'name', 'child', 'dtype'] + _immutable_fields_ = ['unfunc', 'name', 'child', 'res', 'dtype'] - def __init__(self, func, name, dtype, child): + def __init__(self, func, name, dtype, child, res=None): self.unfunc = func self.child = child self.name = name self.dtype = dtype + self.res = res def hash(self): return compute_hash(self.name) ^ intmask(self.child.hash() << 1) @@ -256,6 +257,29 @@ v = self.child.eval(frame, arr.values).convert_to(arr.calc_dtype) return self.unfunc(arr.calc_dtype, v) + +class BroadcastUfunc(Call1): + def _invent_numbering(self, cache, allnumbers): + self.res._invent_numbering(cache, allnumbers) + self.child._invent_numbering(new_cache(), allnumbers) + + def debug_repr(self): + return 'BroadcastUfunc(%s, %s)' % (self.name, self.child.debug_repr()) + + def _create_iter(self, iterlist, arraylist, arr, transforms): + from pypy.module.micronumpy.interp_numarray import Call1 + + assert isinstance(arr, Call1) + vtransforms = transforms + [BroadcastTransform(arr.values.shape)] + self.child._create_iter(iterlist, arraylist, arr.values, vtransforms) + self.res._create_iter(iterlist, arraylist, arr.res, transforms) + + def eval(self, frame, arr): + from pypy.module.micronumpy.interp_numarray import Call1 + assert isinstance(arr, Call1) + v = self.child.eval(frame, arr.values).convert_to(arr.calc_dtype) + return self.unfunc(arr.calc_dtype, v) + class Call2(Signature): _immutable_fields_ = 
['binfunc', 'name', 'calc_dtype', 'left', 'right'] @@ -316,7 +340,31 @@ assert isinstance(arr, ResultArray) offset = frame.get_final_iter().offset - arr.left.setitem(offset, self.right.eval(frame, arr.right)) + val = self.right.eval(frame, arr.right) + arr.left.setitem(offset, val) + +class BroadcastResultSignature(ResultSignature): + def _create_iter(self, iterlist, arraylist, arr, transforms): + from pypy.module.micronumpy.interp_numarray import ResultArray + + assert isinstance(arr, ResultArray) + rtransforms = transforms + [BroadcastTransform(arr.left.shape)] + self.left._create_iter(iterlist, arraylist, arr.left, transforms) + self.right._create_iter(iterlist, arraylist, arr.right, rtransforms) + +class ToStringSignature(Call1): + def __init__(self, dtype, child): + Call1.__init__(self, None, 'tostring', dtype, child) + + @jit.unroll_safe + def eval(self, frame, arr): + from pypy.module.micronumpy.interp_numarray import ToStringArray + + assert isinstance(arr, ToStringArray) + arr.res_str.setitem(0, self.child.eval(frame, arr.values).convert_to( + self.dtype)) + for i in range(arr.item_size): + arr.s.append(arr.res_str_casted[i]) class BroadcastLeft(Call2): def _invent_numbering(self, cache, allnumbers): @@ -441,6 +489,5 @@ cur = arr.left.getitem(iterator.offset) value = self.binfunc(self.calc_dtype, cur, v) arr.left.setitem(iterator.offset, value) - def debug_repr(self): return 'AxisReduceSig(%s, %s)' % (self.name, self.right.debug_repr()) diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -1,6 +1,14 @@ from pypy.rlib import jit from pypy.interpreter.error import OperationError +def enumerate_chunks(chunks): + result = [] + i = -1 + for chunk in chunks: + i += chunk.axis_step + result.append((i, chunk)) + return result + @jit.look_inside_iff(lambda shape, start, strides, backstrides, chunks: jit.isconstant(len(chunks)) ) @@ -10,7 +18,7 @@ rstart 
= start rshape = [] i = -1 - for i, chunk in enumerate(chunks): + for i, chunk in enumerate_chunks(chunks): if chunk.step != 0: rstrides.append(strides[i] * chunk.step) rbackstrides.append(strides[i] * (chunk.lgt - 1) * chunk.step) @@ -38,22 +46,31 @@ rbackstrides = [0] * (len(res_shape) - len(orig_shape)) + rbackstrides return rstrides, rbackstrides -def find_shape_and_elems(space, w_iterable): +def is_single_elem(space, w_elem, is_rec_type): + if (is_rec_type and space.isinstance_w(w_elem, space.w_tuple)): + return True + if space.issequence_w(w_elem): + return False + return True + +def find_shape_and_elems(space, w_iterable, dtype): shape = [space.len_w(w_iterable)] batch = space.listview(w_iterable) + is_rec_type = dtype is not None and dtype.is_record_type() while True: new_batch = [] if not batch: return shape, [] - if not space.issequence_w(batch[0]): - for elem in batch: - if space.issequence_w(elem): + if is_single_elem(space, batch[0], is_rec_type): + for w_elem in batch: + if not is_single_elem(space, w_elem, is_rec_type): raise OperationError(space.w_ValueError, space.wrap( "setting an array element with a sequence")) return shape, batch size = space.len_w(batch[0]) for w_elem in batch: - if not space.issequence_w(w_elem) or space.len_w(w_elem) != size: + if (is_single_elem(space, w_elem, is_rec_type) or + space.len_w(w_elem) != size): raise OperationError(space.w_ValueError, space.wrap( "setting an array element with a sequence")) new_batch += space.listview(w_elem) diff --git a/pypy/module/micronumpy/test/test_base.py b/pypy/module/micronumpy/test/test_base.py --- a/pypy/module/micronumpy/test/test_base.py +++ b/pypy/module/micronumpy/test/test_base.py @@ -4,6 +4,8 @@ from pypy.module.micronumpy.interp_ufuncs import (find_binop_result_dtype, find_unaryop_result_dtype) from pypy.module.micronumpy.interp_boxes import W_Float64Box +from pypy.module.micronumpy.interp_dtype import nonnative_byteorder_prefix,\ + byteorder_prefix from pypy.conftest import 
option import sys @@ -15,14 +17,16 @@ sys.modules['numpypy'] = numpy sys.modules['_numpypy'] = numpy cls.space = gettestobjspace(usemodules=['micronumpy']) + cls.w_non_native_prefix = cls.space.wrap(nonnative_byteorder_prefix) + cls.w_native_prefix = cls.space.wrap(byteorder_prefix) class TestSignature(object): def test_binop_signature(self, space): float64_dtype = get_dtype_cache(space).w_float64dtype bool_dtype = get_dtype_cache(space).w_booldtype - ar = W_NDimArray(10, [10], dtype=float64_dtype) - ar2 = W_NDimArray(10, [10], dtype=float64_dtype) + ar = W_NDimArray([10], dtype=float64_dtype) + ar2 = W_NDimArray([10], dtype=float64_dtype) v1 = ar.descr_add(space, ar) v2 = ar.descr_add(space, Scalar(float64_dtype, W_Float64Box(2.0))) sig1 = v1.find_sig() @@ -40,7 +44,7 @@ v4 = ar.descr_add(space, ar) assert v1.find_sig() is v4.find_sig() - bool_ar = W_NDimArray(10, [10], dtype=bool_dtype) + bool_ar = W_NDimArray([10], dtype=bool_dtype) v5 = ar.descr_add(space, bool_ar) assert v5.find_sig() is not v1.find_sig() assert v5.find_sig() is not v2.find_sig() @@ -57,7 +61,7 @@ def test_slice_signature(self, space): float64_dtype = get_dtype_cache(space).w_float64dtype - ar = W_NDimArray(10, [10], dtype=float64_dtype) + ar = W_NDimArray([10], dtype=float64_dtype) v1 = ar.descr_getitem(space, space.wrap(slice(1, 3, 1))) v2 = ar.descr_getitem(space, space.wrap(slice(4, 6, 1))) assert v1.find_sig() is v2.find_sig() diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -1,5 +1,7 @@ +import py +from pypy.conftest import option from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest - +from pypy.interpreter.gateway import interp2app class AppTestDtypes(BaseNumpyAppTest): def test_dtype(self): @@ -12,7 +14,10 @@ assert dtype(d) is d assert dtype(None) is dtype(float) assert dtype('int8').name == 'int8' + assert 
dtype(int).fields is None + assert dtype(int).names is None raises(TypeError, dtype, 1042) + raises(KeyError, 'dtype(int)["asdasd"]') def test_dtype_eq(self): from _numpypy import dtype @@ -53,13 +58,13 @@ assert a[i] is True_ def test_copy_array_with_dtype(self): - from _numpypy import array, False_, True_, int64 + from _numpypy import array, False_, longlong a = array([0, 1, 2, 3], dtype=long) # int on 64-bit, long in 32-bit - assert isinstance(a[0], int64) + assert isinstance(a[0], longlong) b = a.copy() - assert isinstance(b[0], int64) + assert isinstance(b[0], longlong) a = array([0, 1, 2, 3], dtype=bool) assert a[0] is False_ @@ -81,17 +86,17 @@ assert a[i] is True_ def test_zeros_long(self): - from _numpypy import zeros, int64 + from _numpypy import zeros, longlong a = zeros(10, dtype=long) for i in range(10): - assert isinstance(a[i], int64) + assert isinstance(a[i], longlong) assert a[1] == 0 def test_ones_long(self): - from _numpypy import ones, int64 + from _numpypy import ones, longlong a = ones(10, dtype=long) for i in range(10): - assert isinstance(a[i], int64) + assert isinstance(a[i], longlong) assert a[1] == 1 def test_overflow(self): @@ -181,17 +186,18 @@ assert dtype("float") is dtype(float) -class AppTestTypes(BaseNumpyAppTest): +class AppTestTypes(BaseNumpyAppTest): def test_abstract_types(self): import _numpypy as numpy raises(TypeError, numpy.generic, 0) raises(TypeError, numpy.number, 0) raises(TypeError, numpy.integer, 0) exc = raises(TypeError, numpy.signedinteger, 0) - assert str(exc.value) == "cannot create 'signedinteger' instances" + assert 'cannot create' in str(exc.value) + assert 'signedinteger' in str(exc.value) exc = raises(TypeError, numpy.unsignedinteger, 0) - assert str(exc.value) == "cannot create 'unsignedinteger' instances" - + assert 'cannot create' in str(exc.value) + assert 'unsignedinteger' in str(exc.value) raises(TypeError, numpy.floating, 0) raises(TypeError, numpy.inexact, 0) @@ -296,6 +302,7 @@ else: 
raises(OverflowError, numpy.int32, 2147483648) raises(OverflowError, numpy.int32, '2147483648') + assert numpy.dtype('int32') is numpy.dtype(numpy.int32) def test_uint32(self): import sys @@ -327,15 +334,11 @@ assert numpy.dtype(numpy.int64).type is numpy.int64 assert numpy.int64(3) == 3 - if sys.maxint >= 2 ** 63 - 1: - assert numpy.int64(9223372036854775807) == 9223372036854775807 - assert numpy.int64('9223372036854775807') == 9223372036854775807 - else: - raises(OverflowError, numpy.int64, 9223372036854775807) - raises(OverflowError, numpy.int64, '9223372036854775807') + assert numpy.int64(9223372036854775807) == 9223372036854775807 + assert numpy.int64(9223372036854775807) == 9223372036854775807 raises(OverflowError, numpy.int64, 9223372036854775808) - raises(OverflowError, numpy.int64, '9223372036854775808') + raises(OverflowError, numpy.int64, 9223372036854775808L) def test_uint64(self): import sys @@ -404,10 +407,29 @@ assert issubclass(int64, int) assert int_ is int64 + def test_various_types(self): + import _numpypy as numpy + import sys + + assert numpy.int16 is numpy.short + assert numpy.int8 is numpy.byte + assert numpy.bool_ is numpy.bool8 + if sys.maxint == (1 << 63) - 1: + assert numpy.intp is numpy.int64 + else: + assert numpy.intp is numpy.int32 + + def test_mro(self): + import _numpypy as numpy + + assert numpy.int16.__mro__ == (numpy.int16, numpy.signedinteger, + numpy.integer, numpy.number, + numpy.generic, object) + assert numpy.bool_.__mro__ == (numpy.bool_, numpy.generic, object) + def test_operators(self): from operator import truediv from _numpypy import float64, int_, True_, False_ - assert 5 / int_(2) == int_(2) assert truediv(int_(3), int_(2)) == float64(1.5) assert truediv(3, int_(2)) == float64(1.5) @@ -427,9 +449,115 @@ assert int_(3) ^ int_(5) == int_(6) assert True_ ^ False_ is True_ assert 5 ^ int_(3) == int_(6) - assert +int_(3) == int_(3) assert ~int_(3) == int_(-4) - raises(TypeError, lambda: float64(3) & 1) + def 
test_alternate_constructs(self): + from _numpypy import dtype + nnp = self.non_native_prefix + byteorder = self.native_prefix + assert dtype('i8') == dtype(byteorder + 'i8') == dtype('=i8') # XXX should be equal == dtype(long) + assert dtype(nnp + 'i8') != dtype('i8') + assert dtype(nnp + 'i8').byteorder == nnp + assert dtype('=i8').byteorder == '=' + assert dtype(byteorder + 'i8').byteorder == '=' + + def test_alignment(self): + from _numpypy import dtype + assert dtype('i4').alignment == 4 + + def test_typeinfo(self): + from _numpypy import typeinfo, void, number, int64, bool_ + assert typeinfo['Number'] == number + assert typeinfo['LONGLONG'] == ('q', 9, 64, 8, 9223372036854775807L, -9223372036854775808L, int64) + assert typeinfo['VOID'] == ('V', 20, 0, 1, void) + assert typeinfo['BOOL'] == ('?', 0, 8, 1, 1, 0, bool_) + +class AppTestStrUnicodeDtypes(BaseNumpyAppTest): + def test_str_unicode(self): + from _numpypy import str_, unicode_, character, flexible, generic + + assert str_.mro() == [str_, str, basestring, character, flexible, generic, object] + assert unicode_.mro() == [unicode_, unicode, basestring, character, flexible, generic, object] + + def test_str_dtype(self): + from _numpypy import dtype, str_ + + raises(TypeError, "dtype('Sx')") + d = dtype('S8') + assert d.itemsize == 8 + assert dtype(str) == dtype('S') + assert d.kind == 'S' + assert d.type is str_ + assert d.name == "string64" + assert d.num == 18 + + def test_unicode_dtype(self): + from _numpypy import dtype, unicode_ + + raises(TypeError, "dtype('Ux')") + d = dtype('U8') + assert d.itemsize == 8 * 4 + assert dtype(unicode) == dtype('U') + assert d.kind == 'U' + assert d.type is unicode_ + assert d.name == "unicode256" + assert d.num == 19 + + def test_string_boxes(self): + from _numpypy import str_ + assert isinstance(str_(3), str_) + + def test_unicode_boxes(self): + from _numpypy import unicode_ + assert isinstance(unicode_(3), unicode) + +class AppTestRecordDtypes(BaseNumpyAppTest): + 
def test_create(self): + from _numpypy import dtype, void + + raises(ValueError, "dtype([('x', int), ('x', float)])") + d = dtype([("x", "int32"), ("y", "int32"), ("z", "int32"), ("value", float)]) + assert d.fields['x'] == (dtype('int32'), 0) + assert d.fields['value'] == (dtype(float), 12) + assert d['x'] == dtype('int32') + assert d.name == "void160" + assert d.num == 20 + assert d.itemsize == 20 + assert d.kind == 'V' + assert d.type is void + assert d.char == 'V' + assert d.names == ("x", "y", "z", "value") + raises(KeyError, 'd["xyz"]') + raises(KeyError, 'd.fields["xyz"]') + + def test_create_from_dict(self): + skip("not yet") + from _numpypy import dtype + d = dtype({'names': ['a', 'b', 'c'], + }) + +class AppTestNotDirect(BaseNumpyAppTest): + def setup_class(cls): + BaseNumpyAppTest.setup_class.im_func(cls) + def check_non_native(w_obj, w_obj2): + assert w_obj.storage[0] == w_obj2.storage[1] + assert w_obj.storage[1] == w_obj2.storage[0] + if w_obj.storage[0] == '\x00': + assert w_obj2.storage[1] == '\x00' + assert w_obj2.storage[0] == '\x01' + else: + assert w_obj2.storage[1] == '\x01' + assert w_obj2.storage[0] == '\x00' + cls.w_check_non_native = cls.space.wrap(interp2app(check_non_native)) + if option.runappdirect: + py.test.skip("not a direct test") + + def test_non_native(self): + from _numpypy import array + a = array([1, 2, 3], dtype=self.non_native_prefix + 'i2') + assert a[0] == 1 + assert (a + a)[1] == 4 + self.check_non_native(a, array([1, 2, 3], 'i2')) + diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -5,15 +5,23 @@ from pypy.interpreter.error import OperationError from pypy.module.micronumpy import signature from pypy.module.micronumpy.appbridge import get_appbridge_cache -from pypy.module.micronumpy.interp_iter import Chunk +from pypy.module.micronumpy.interp_iter import Chunk, 
Chunks from pypy.module.micronumpy.interp_numarray import W_NDimArray, shape_agreement from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest class MockDtype(object): - def malloc(self, size): - return None + class itemtype(object): + @staticmethod + def malloc(size): + return None + def get_size(self): + return 1 + + +def create_slice(a, chunks): + return Chunks(chunks).apply(a) class TestNumArrayDirect(object): def newslice(self, *args): @@ -29,116 +37,116 @@ return self.space.newtuple(args_w) def test_strides_f(self): - a = W_NDimArray(100, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray([10, 5, 3], MockDtype(), 'F') assert a.strides == [1, 10, 50] assert a.backstrides == [9, 40, 100] def test_strides_c(self): - a = W_NDimArray(100, [10, 5, 3], MockDtype(), 'C') + a = W_NDimArray([10, 5, 3], MockDtype(), 'C') assert a.strides == [15, 3, 1] assert a.backstrides == [135, 12, 2] def test_create_slice_f(self): - a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') - s = a.create_slice([Chunk(3, 0, 0, 1)]) + a = W_NDimArray([10, 5, 3], MockDtype(), 'F') + s = create_slice(a, [Chunk(3, 0, 0, 1)]) assert s.start == 3 assert s.strides == [10, 50] assert s.backstrides == [40, 100] - s = a.create_slice([Chunk(1, 9, 2, 4)]) + s = create_slice(a, [Chunk(1, 9, 2, 4)]) assert s.start == 1 assert s.strides == [2, 10, 50] assert s.backstrides == [6, 40, 100] - s = a.create_slice([Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), Chunk(1, 0, 0, 1)]) + s = create_slice(a, [Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), Chunk(1, 0, 0, 1)]) assert s.shape == [2, 1] assert s.strides == [3, 10] assert s.backstrides == [3, 0] - s = a.create_slice([Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) + s = create_slice(a, [Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) assert s.start == 20 assert s.shape == [10, 3] def test_create_slice_c(self): - a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') - s = a.create_slice([Chunk(3, 0, 0, 1)]) + a = W_NDimArray([10, 5, 3], MockDtype(), 'C') + s = 
create_slice(a, [Chunk(3, 0, 0, 1)]) assert s.start == 45 assert s.strides == [3, 1] assert s.backstrides == [12, 2] - s = a.create_slice([Chunk(1, 9, 2, 4)]) + s = create_slice(a, [Chunk(1, 9, 2, 4)]) assert s.start == 15 assert s.strides == [30, 3, 1] assert s.backstrides == [90, 12, 2] - s = a.create_slice([Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), + s = create_slice(a, [Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), Chunk(1, 0, 0, 1)]) assert s.start == 19 assert s.shape == [2, 1] assert s.strides == [45, 3] assert s.backstrides == [45, 0] - s = a.create_slice([Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) + s = create_slice(a, [Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) assert s.start == 6 assert s.shape == [10, 3] def test_slice_of_slice_f(self): - a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') - s = a.create_slice([Chunk(5, 0, 0, 1)]) + a = W_NDimArray([10, 5, 3], MockDtype(), 'F') + s = create_slice(a, [Chunk(5, 0, 0, 1)]) assert s.start == 5 - s2 = s.create_slice([Chunk(3, 0, 0, 1)]) + s2 = create_slice(s, [Chunk(3, 0, 0, 1)]) assert s2.shape == [3] assert s2.strides == [50] assert s2.parent is a assert s2.backstrides == [100] assert s2.start == 35 - s = a.create_slice([Chunk(1, 5, 3, 2)]) - s2 = s.create_slice([Chunk(0, 2, 1, 2), Chunk(2, 0, 0, 1)]) + s = create_slice(a, [Chunk(1, 5, 3, 2)]) + s2 = create_slice(s, [Chunk(0, 2, 1, 2), Chunk(2, 0, 0, 1)]) assert s2.shape == [2, 3] assert s2.strides == [3, 50] assert s2.backstrides == [3, 100] assert s2.start == 1 * 15 + 2 * 3 def test_slice_of_slice_c(self): - a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') - s = a.create_slice([Chunk(5, 0, 0, 1)]) + a = W_NDimArray([10, 5, 3], MockDtype(), order='C') + s = create_slice(a, [Chunk(5, 0, 0, 1)]) assert s.start == 15 * 5 - s2 = s.create_slice([Chunk(3, 0, 0, 1)]) + s2 = create_slice(s, [Chunk(3, 0, 0, 1)]) assert s2.shape == [3] assert s2.strides == [1] assert s2.parent is a assert s2.backstrides == [2] assert s2.start == 5 * 15 + 3 * 3 - s = 
a.create_slice([Chunk(1, 5, 3, 2)]) - s2 = s.create_slice([Chunk(0, 2, 1, 2), Chunk(2, 0, 0, 1)]) + s = create_slice(a, [Chunk(1, 5, 3, 2)]) + s2 = create_slice(s, [Chunk(0, 2, 1, 2), Chunk(2, 0, 0, 1)]) assert s2.shape == [2, 3] assert s2.strides == [45, 1] assert s2.backstrides == [45, 2] assert s2.start == 1 * 15 + 2 * 3 def test_negative_step_f(self): - a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') - s = a.create_slice([Chunk(9, -1, -2, 5)]) + a = W_NDimArray([10, 5, 3], MockDtype(), 'F') + s = create_slice(a, [Chunk(9, -1, -2, 5)]) assert s.start == 9 assert s.strides == [-2, 10, 50] assert s.backstrides == [-8, 40, 100] def test_negative_step_c(self): - a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') - s = a.create_slice([Chunk(9, -1, -2, 5)]) + a = W_NDimArray([10, 5, 3], MockDtype(), order='C') + s = create_slice(a, [Chunk(9, -1, -2, 5)]) assert s.start == 135 assert s.strides == [-30, 3, 1] assert s.backstrides == [-120, 12, 2] def test_index_of_single_item_f(self): - a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray([10, 5, 3], MockDtype(), 'F') r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) assert r == 1 + 2 * 10 + 2 * 50 - s = a.create_slice([Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) + s = create_slice(a, [Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) r = s._index_of_single_item(self.space, self.newtuple(1, 0)) assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 0)) r = s._index_of_single_item(self.space, self.newtuple(1, 1)) assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 1)) def test_index_of_single_item_c(self): - a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') + a = W_NDimArray([10, 5, 3], MockDtype(), 'C') r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) assert r == 1 * 3 * 5 + 2 * 3 + 2 - s = a.create_slice([Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) + s = create_slice(a, [Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) r = 
s._index_of_single_item(self.space, self.newtuple(1, 0)) assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 0)) r = s._index_of_single_item(self.space, self.newtuple(1, 1)) @@ -203,6 +211,18 @@ assert a.shape == (3,) assert a.dtype is dtype(int) + def test_ndmin(self): + from _numpypy import array + + arr = array([[[1]]], ndmin=1) + assert arr.shape == (1, 1, 1) + + def test_noop_ndmin(self): + from _numpypy import array + + arr = array([1], ndmin=3) + assert arr.shape == (1, 1, 1) + def test_type(self): from _numpypy import array ar = array(range(5)) @@ -374,6 +394,58 @@ assert a[1] == 0. assert a[3] == 0. + def test_newaxis(self): + from _numpypy import array + from numpypy.core.numeric import newaxis + a = array(range(5)) + b = array([range(5)]) + assert (a[newaxis] == b).all() + + def test_newaxis_slice(self): + from _numpypy import array + from numpypy.core.numeric import newaxis + + a = array(range(5)) + b = array(range(1,5)) + c = array([range(1,5)]) + d = array([[x] for x in range(1,5)]) + + assert (a[1:] == b).all() + assert (a[1:,newaxis] == d).all() + assert (a[newaxis,1:] == c).all() + + def test_newaxis_assign(self): + from _numpypy import array + from numpypy.core.numeric import newaxis + + a = array(range(5)) + a[newaxis,1] = [2] + assert a[1] == 2 + + def test_newaxis_virtual(self): + from _numpypy import array + from numpypy.core.numeric import newaxis + + a = array(range(5)) + b = (a + a)[newaxis] + c = array([[0, 2, 4, 6, 8]]) + assert (b == c).all() + + def test_newaxis_then_slice(self): + from _numpypy import array + from numpypy.core.numeric import newaxis + a = array(range(5)) + b = a[newaxis] + assert b.shape == (1, 5) + assert (b[0,1:] == a[1:]).all() + + def test_slice_then_newaxis(self): + from _numpypy import array + from numpypy.core.numeric import newaxis + a = array(range(5)) + b = a[2:] + assert (b[newaxis] == [[2, 3, 4]]).all() + def test_scalar(self): from _numpypy import array, dtype a = array(3) @@ -434,6 +506,8 
@@ a = zeros((4, 2, 3)) a.shape = (12, 2) (a + a).reshape(2, 12) # assert did not explode + a = array([[[[]]]]) + assert a.reshape((0,)).shape == (0,) def test_slice_reshape(self): from _numpypy import zeros, arange @@ -921,6 +995,10 @@ assert a.sum() == 5 raises(TypeError, 'a.sum(2, 3)') + d = array(0.) + b = a.sum(out=d) + assert b == d + assert isinstance(b, float) def test_reduce_nd(self): from numpypy import arange, array, multiply @@ -1087,7 +1165,7 @@ assert array([True, False]).dtype is dtype(bool) assert array([True, 1]).dtype is dtype(int) assert array([1, 2, 3]).dtype is dtype(int) - assert array([1L, 2, 3]).dtype is dtype(long) + #assert array([1L, 2, 3]).dtype is dtype(long) assert array([1.2, True]).dtype is dtype(float) assert array([1.2, 5]).dtype is dtype(float) assert array([]).dtype is dtype(float) @@ -1421,8 +1499,6 @@ a = array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14]]) b = a[::2] - print a - print b assert (b == [[1, 2], [5, 6], [9, 10], [13, 14]]).all() c = b + b assert c[1][1] == 12 @@ -1594,6 +1670,7 @@ a = arange(12).reshape(3,4) b = a.T.flat b[6::2] = [-1, -2] + print a == [[0, 1, -1, 3], [4, 5, 6, -1], [8, 9, -2, 11]] assert (a == [[0, 1, -1, 3], [4, 5, 6, -1], [8, 9, -2, 11]]).all() b[0:2] = [[[100]]] assert(a[0,0] == 100) @@ -1868,6 +1945,12 @@ #5 bytes is larger than 3 bytes raises(ValueError, fromstring, "\x01\x02\x03", count=5, dtype=uint8) + def test_tostring(self): + from _numpypy import array + assert array([1, 2, 3], 'i2').tostring() == '\x01\x00\x02\x00\x03\x00' + assert array([1, 2, 3], 'i2')[::2].tostring() == '\x01\x00\x03\x00' + assert array([1, 2, 3], 'i2')[::2].tostring() == '\x00\x01\x00\x03' class AppTestRanges(BaseNumpyAppTest): def test_arange(self): @@ -1913,3 +1996,57 @@ cache = get_appbridge_cache(cls.space) cache.w_array_repr = cls.old_array_repr cache.w_array_str = cls.old_array_str + +class AppTestRecordDtype(BaseNumpyAppTest): + def test_zeros(self): + from _numpypy import zeros + a = 
zeros(2, dtype=[('x', int), ('y', float)]) + raises(IndexError, 'a[0]["xyz"]') + assert a[0]['x'] == 0 + assert a[0]['y'] == 0 + raises(ValueError, "a[0] = (1, 2, 3)") + a[0]['x'] = 13 + assert a[0]['x'] == 13 + a[1] = (1, 2) + assert a[1]['y'] == 2 + b = zeros(2, dtype=[('x', int), ('y', float)]) + b[1] = a[1] + assert a[1]['y'] == 2 + + def test_views(self): + from _numpypy import array + a = array([(1, 2), (3, 4)], dtype=[('x', int), ('y', float)]) + raises(ValueError, 'array([1])["x"]') + raises(ValueError, 'a["z"]') + assert a['x'][1] == 3 + assert a['y'][1] == 4 + a['x'][0] = 15 + assert a['x'][0] == 15 + b = a['x'] + a['y'] + assert (b == [15+2, 3+4]).all() + assert b.dtype == float + + def test_assign_tuple(self): + from _numpypy import zeros + a = zeros((2, 3), dtype=[('x', int), ('y', float)]) + a[1, 2] = (1, 2) + assert a['x'][1, 2] == 1 + assert a['y'][1, 2] == 2 + + def test_creation_and_repr(self): + from _numpypy import array + a = array([(1, 2), (3, 4)], dtype=[('x', int), ('y', float)]) + assert repr(a[0]) == '(1, 2.0)' + + def test_nested_dtype(self): + from _numpypy import zeros + a = [('x', int), ('y', float)] + b = [('x', int), ('y', a)] + arr = zeros(3, dtype=b) + arr[1]['x'] = 15 + assert arr[1]['x'] == 15 + arr[1]['y']['y'] = 3.5 + assert arr[1]['y']['y'] == 3.5 + assert arr[1]['y']['x'] == 0.0 + assert arr[1]['x'] == 15 + diff --git a/pypy/module/micronumpy/test/test_outarg.py b/pypy/module/micronumpy/test/test_outarg.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/test/test_outarg.py @@ -0,0 +1,126 @@ +import py +from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest + +class AppTestOutArg(BaseNumpyAppTest): + def test_reduce_out(self): + from numpypy import arange, zeros, array + a = arange(15).reshape(5, 3) + b = arange(12).reshape(4,3) + c = a.sum(0, out=b[1]) + assert (c == [30, 35, 40]).all() + assert (c == b[1]).all() + raises(ValueError, 'a.prod(0, out=arange(10))') + a=arange(12).reshape(3,2,2) + 
raises(ValueError, 'a.sum(0, out=arange(12).reshape(3,2,2))') + raises(ValueError, 'a.sum(0, out=arange(3))') + c = array([-1, 0, 1]).sum(out=zeros([], dtype=bool)) + #You could argue that this should product False, but + # that would require an itermediate result. Cpython numpy + # gives True. + assert c == True + a = array([[-1, 0, 1], [1, 0, -1]]) + c = a.sum(0, out=zeros((3,), dtype=bool)) + assert (c == [True, False, True]).all() + c = a.sum(1, out=zeros((2,), dtype=bool)) + assert (c == [True, True]).all() + + def test_reduce_intermediary(self): + from numpypy import arange, array + a = arange(15).reshape(5, 3) + b = array(range(3), dtype=bool) + c = a.prod(0, out=b) + assert(b == [False, True, True]).all() + + def test_ufunc_out(self): + from _numpypy import array, negative, zeros, sin + from math import sin as msin + a = array([[1, 2], [3, 4]]) + c = zeros((2,2,2)) + b = negative(a + a, out=c[1]) + #test for view, and also test that forcing out also forces b + assert (c[:, :, 1] == [[0, 0], [-4, -8]]).all() + assert (b == [[-2, -4], [-6, -8]]).all() + #Test broadcast, type promotion + b = negative(3, out=a) + assert (a == -3).all() + c = zeros((2, 2), dtype=float) + b = negative(3, out=c) + assert b.dtype.kind == c.dtype.kind + assert b.shape == c.shape + a = array([1, 2]) + b = sin(a, out=c) + assert(c == [[msin(1), msin(2)]] * 2).all() + b = sin(a, out=c+c) + assert (c == b).all() + + #Test shape agreement + a = zeros((3,4)) + b = zeros((3,5)) + raises(ValueError, 'negative(a, out=b)') + b = zeros((1,4)) + raises(ValueError, 'negative(a, out=b)') + + def test_binfunc_out(self): + from _numpypy import array, add + a = array([[1, 2], [3, 4]]) + out = array([[1, 2], [3, 4]]) + c = add(a, a, out=out) + assert (c == out).all() + assert c.shape == a.shape + assert c.dtype is a.dtype + c[0,0] = 100 + assert out[0, 0] == 100 + out[:] = 100 + raises(ValueError, 'c = add(a, a, out=out[1])') + c = add(a[0], a[1], out=out[1]) + assert (c == out[1]).all() + assert (c 
== [4, 6]).all() + assert (out[0] == 100).all() + c = add(a[0], a[1], out=out) + assert (c == out[1]).all() + assert (c == out[0]).all() + out = array(16, dtype=int) + b = add(10, 10, out=out) + assert b==out + assert b.dtype == out.dtype + + def test_applevel(self): + from _numpypy import array, sum, max, min + a = array([[1, 2], [3, 4]]) + out = array([[0, 0], [0, 0]]) + c = sum(a, axis=0, out=out[0]) + assert (c == [4, 6]).all() + assert (c == out[0]).all() + assert (c != out[1]).all() + c = max(a, axis=1, out=out[0]) + assert (c == [2, 4]).all() + assert (c == out[0]).all() + assert (c != out[1]).all() + + def test_ufunc_cast(self): + from _numpypy import array, negative, add, sum + a = array(16, dtype = int) + c = array(0, dtype = float) + b = negative(a, out=c) + assert b == c + b = add(a, a, out=c) + assert b == c + d = array([16, 16], dtype=int) + b = sum(d, out=c) + assert b == c + try: + from _numpypy import version + v = version.version.split('.') + except: + v = ['1', '6', '0'] # numpypy is api compatable to what version? 
+ if v[0]<'2': + b = negative(c, out=a) + assert b == a + b = add(c, c, out=a) + assert b == a + b = sum(array([16, 16], dtype=float), out=a) + assert b == a + else: + cast_error = raises(TypeError, negative, c, a) + assert str(cast_error.value) == \ + "Cannot cast ufunc negative output from dtype('float64') to dtype('int64') with casting rule 'same_kind'" diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -113,14 +113,37 @@ assert (divide(array([-10]), array([2])) == array([-5])).all() + def test_true_divide(self): + from _numpypy import array, true_divide + + a = array([0, 1, 2, 3, 4, 1, -1]) + b = array([4, 4, 4, 4, 4, 0, 0]) + c = true_divide(a, b) + assert (c == [0.0, 0.25, 0.5, 0.75, 1.0, float('inf'), float('-inf')]).all() + + assert math.isnan(true_divide(0, 0)) + def test_fabs(self): from _numpypy import array, fabs - from math import fabs as math_fabs + from math import fabs as math_fabs, isnan a = array([-5.0, -0.0, 1.0]) b = fabs(a) for i in range(3): assert b[i] == math_fabs(a[i]) + assert fabs(float('inf')) == float('inf') + assert fabs(float('-inf')) == float('inf') + assert isnan(fabs(float('nan'))) + + def test_fmod(self): + from _numpypy import fmod + import math + + assert fmod(-1e-100, 1e100) == -1e-100 + assert fmod(3, float('inf')) == 3 + assert (fmod([-3, -2, -1, 1, 2, 3], 2) == [-1, 0, -1, 1, 0, 1]).all() + for v in [float('inf'), float('-inf'), float('nan'), float('-nan')]: + assert math.isnan(fmod(v, 2)) def test_minimum(self): from _numpypy import array, minimum @@ -172,6 +195,14 @@ assert a[0] == 1 assert a[1] == 0 + def test_signbit(self): + from _numpypy import signbit, copysign + + assert (signbit([0, 0.0, 1, 1.0, float('inf'), float('nan')]) == + [False, False, False, False, False, False]).all() + assert (signbit([-0, -0.0, -1, -1.0, float('-inf'), -float('nan'), float('-nan')]) == + 
[False, True, True, True, True, True, True]).all() + def test_reciporocal(self): from _numpypy import array, reciprocal @@ -231,13 +262,46 @@ a = array([-5.0, -0.0, 0.0, 12345678.0, float("inf"), -float('inf'), -12343424.0]) b = exp(a) - for i in range(4): + for i in range(len(a)): try: res = math.exp(a[i]) except OverflowError: res = float('inf') assert b[i] == res + def test_exp2(self): + import math + from _numpypy import array, exp2 + + a = array([-5.0, -0.0, 0.0, 2, 12345678.0, float("inf"), + -float('inf'), -12343424.0]) + b = exp2(a) + for i in range(len(a)): + try: + res = 2 ** a[i] + except OverflowError: + res = float('inf') + assert b[i] == res + + assert exp2(3) == 8 + assert math.isnan(exp2(float("nan"))) + + def test_expm1(self): + import math + from _numpypy import array, expm1 + + a = array([-5.0, -0.0, 0.0, 12345678.0, float("inf"), + -float('inf'), -12343424.0]) + b = expm1(a) + for i in range(4): + try: + res = math.exp(a[i]) - 1 + except OverflowError: + res = float('inf') + assert b[i] == res + + assert expm1(1e-50) == 1e-50 + def test_sin(self): import math from _numpypy import array, sin @@ -310,6 +374,21 @@ b = arctan(a) assert math.isnan(b[0]) + def test_arctan2(self): + import math + from _numpypy import array, arctan2 + + # From the numpy documentation + assert ( + arctan2( + [0., 0., 1., -1., float('inf'), float('inf')], + [0., -0., float('inf'), float('inf'), float('inf'), float('-inf')]) == + [0., math.pi, 0., -0., math.pi/4, 3*math.pi/4]).all() + + a = array([float('nan')]) + b = arctan2(a, 0) + assert math.isnan(b[0]) + def test_sinh(self): import math from _numpypy import array, sinh @@ -415,6 +494,19 @@ for i in range(len(a)): assert b[i] == math.degrees(a[i]) + def test_rad2deg(self): + import math + from _numpypy import rad2deg, array + a = array([ + -181, -180, -179, + 181, 180, 179, + 359, 360, 361, + 400, -1, 0, 1, + float('inf'), float('-inf')]) + b = rad2deg(a) + for i in range(len(a)): + assert b[i] == math.degrees(a[i]) + 
def test_reduce_errors(self): from _numpypy import sin, add @@ -510,6 +602,26 @@ assert (isinf(array([0.2, float('inf'), float('nan')])) == [False, True, False]).all() assert isinf(array([0.2])).dtype.kind == 'b' + def test_isposinf_isneginf(self): + from _numpypy import isneginf, isposinf + assert isposinf(float('inf')) + assert not isposinf(float('-inf')) + assert not isposinf(float('nan')) + assert not isposinf(0) + assert not isposinf(0.0) + assert isneginf(float('-inf')) + assert not isneginf(float('inf')) + assert not isneginf(float('nan')) + assert not isneginf(0) + assert not isneginf(0.0) + + def test_isfinite(self): + from _numpypy import isfinite + assert (isfinite([0, 0.0, 1e50, -1e-50]) == + [True, True, True, True]).all() + assert (isfinite([float('-inf'), float('inf'), float('-nan'), float('nan')]) == + [False, False, False, False]).all() + def test_logical_ops(self): from _numpypy import logical_and, logical_or, logical_xor, logical_not @@ -544,7 +656,7 @@ assert log1p(float('inf')) == float('inf') assert (log1p([0, 1e-50, math.e - 1]) == [0, 1e-50, 1]).all() - def test_power(self): + def test_power_float(self): import math from _numpypy import power, array a = array([1., 2., 3.]) @@ -558,9 +670,94 @@ for i in range(len(a)): assert c[i] == a[i] ** b[i] + assert power(2, float('inf')) == float('inf') + assert power(float('inf'), float('inf')) == float('inf') + assert power(12345.0, 12345.0) == float('inf') + assert power(-12345.0, 12345.0) == float('-inf') + assert power(-12345.0, 12346.0) == float('inf') + assert math.isnan(power(-1, 1.1)) + assert math.isnan(power(-1, -1.1)) + assert power(-2.0, -1) == -0.5 + assert power(-2.0, -2) == 0.25 + assert power(12345.0, -12345.0) == 0 + assert power(float('-inf'), 2) == float('inf') + assert power(float('-inf'), 2.5) == float('inf') + assert power(float('-inf'), 3) == float('-inf') + + def test_power_int(self): + import math + from _numpypy import power, array + a = array([1, 2, 3]) + b = power(a, 3) + 
for i in range(len(a)): + assert b[i] == a[i] ** 3 + + a = array([1, 2, 3]) + b = array([1, 2, 3]) + c = power(a, b) + for i in range(len(a)): + assert c[i] == a[i] ** b[i] + + # assert power(12345, 12345) == -9223372036854775808 + # assert power(-12345, 12345) == -9223372036854775808 + # assert power(-12345, 12346) == -9223372036854775808 + assert power(2, 0) == 1 + assert power(2, -1) == 0 + assert power(2, -2) == 0 + assert power(-2, -1) == 0 + assert power(-2, -2) == 0 + assert power(12345, -12345) == 0 + def test_floordiv(self): from _numpypy import floor_divide, array a = array([1., 2., 3., 4., 5., 6., 6.01]) b = floor_divide(a, 2.5) for i in range(len(a)): assert b[i] == a[i] // 2.5 + + def test_logaddexp(self): + import math + from _numpypy import logaddexp + + # From the numpy documentation + prob1 = math.log(1e-50) + prob2 = math.log(2.5e-50) + prob12 = logaddexp(prob1, prob2) + assert math.fabs(-113.87649168120691 - prob12) < 0.000000000001 + + assert logaddexp(0, 0) == math.log(2) + assert logaddexp(float('-inf'), 0) == 0 + assert logaddexp(12345678, 12345678) == float('inf') + + assert math.isnan(logaddexp(float('nan'), 1)) + assert math.isnan(logaddexp(1, float('nan'))) + assert math.isnan(logaddexp(float('nan'), float('inf'))) + assert math.isnan(logaddexp(float('inf'), float('nan'))) + assert logaddexp(float('-inf'), float('-inf')) == float('-inf') + assert logaddexp(float('-inf'), float('inf')) == float('inf') + assert logaddexp(float('inf'), float('-inf')) == float('inf') + assert logaddexp(float('inf'), float('inf')) == float('inf') + + def test_logaddexp2(self): + import math + from _numpypy import logaddexp2 + log2 = math.log(2) + + # From the numpy documentation + prob1 = math.log(1e-50) / log2 + prob2 = math.log(2.5e-50) / log2 + prob12 = logaddexp2(prob1, prob2) + assert math.fabs(-164.28904982231052 - prob12) < 0.000000000001 + + assert logaddexp2(0, 0) == 1 + assert logaddexp2(float('-inf'), 0) == 0 + assert logaddexp2(12345678, 12345678) 
== float('inf') + + assert math.isnan(logaddexp2(float('nan'), 1)) + assert math.isnan(logaddexp2(1, float('nan'))) + assert math.isnan(logaddexp2(float('nan'), float('inf'))) + assert math.isnan(logaddexp2(float('inf'), float('nan'))) + assert logaddexp2(float('-inf'), float('-inf')) == float('-inf') + assert logaddexp2(float('-inf'), float('inf')) == float('inf') + assert logaddexp2(float('inf'), float('-inf')) == float('inf') + assert logaddexp2(float('inf'), float('inf')) == float('inf') diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -131,7 +131,7 @@ # bogus. We need to improve the situation somehow. self.check_simple_loop({'getinteriorfield_raw': 2, 'setinteriorfield_raw': 1, - 'arraylen_gc': 1, + 'arraylen_gc': 2, 'guard_true': 1, 'int_lt': 1, 'jump': 1, diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1,15 +1,20 @@ import functools import math +import struct from pypy.interpreter.error import OperationError from pypy.module.micronumpy import interp_boxes from pypy.objspace.std.floatobject import float2string from pypy.rlib import rfloat, libffi, clibffi -from pypy.rlib.objectmodel import specialize -from pypy.rlib.rarithmetic import LONG_BIT, widen +from pypy.rlib.objectmodel import specialize, we_are_translated +from pypy.rlib.rarithmetic import widen, byteswap from pypy.rpython.lltypesystem import lltype, rffi from pypy.rlib.rstruct.runpack import runpack +from pypy.tool.sourcetools import func_with_new_name +from pypy.rlib import jit +VOID_STORAGE = lltype.Array(lltype.Char, hints={'nolength': True, + 'render_as_void': True}) degToRad = math.pi / 180.0 log2 = math.log(2) @@ -59,9 +64,20 @@ return dispatcher class BaseType(object): + _attrs_ = () + def _unimplemented_ufunc(self, *args): raise 
NotImplementedError + def malloc(self, size): + # XXX find out why test_zjit explodes with tracking of allocations + return lltype.malloc(VOID_STORAGE, size, + zero=True, flavor="raw", + track_allocation=False, add_memory_pressure=True) + + def __repr__(self): + return self.__class__.__name__ + class Primitive(object): _mixin_ = True @@ -76,7 +92,7 @@ assert isinstance(box, self.BoxType) return box.value - def coerce(self, space, w_item): + def coerce(self, space, dtype, w_item): if isinstance(w_item, self.BoxType): return w_item return self.coerce_subtype(space, space.gettypefor(self.BoxType), w_item) @@ -97,32 +113,41 @@ def default_fromstring(self, space): raise NotImplementedError - def read(self, storage, width, i, offset): - return self.box(libffi.array_getitem(clibffi.cast_type_to_ffitype(self.T), - width, storage, i, offset - )) + def _read(self, storage, width, i, offset): + if we_are_translated(): + return libffi.array_getitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset) + else: + return libffi.array_getitem_T(self.T, width, storage, i, offset) - def read_bool(self, storage, width, i, offset): - return bool(self.for_computation( - libffi.array_getitem(clibffi.cast_type_to_ffitype(self.T), - width, storage, i, offset))) + def read(self, arr, width, i, offset, dtype=None): + return self.box(self._read(arr.storage, width, i, offset)) - def store(self, storage, width, i, offset, box): - value = self.unbox(box) - libffi.array_setitem(clibffi.cast_type_to_ffitype(self.T), - width, storage, i, offset, value - ) + def read_bool(self, arr, width, i, offset): + return bool(self.for_computation(self._read(arr.storage, width, i, offset))) + + def _write(self, storage, width, i, offset, value): + if we_are_translated(): + libffi.array_setitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset, value) + else: + libffi.array_setitem_T(self.T, width, storage, i, offset, value) + + + def store(self, arr, width, i, offset, box): + 
self._write(arr.storage, width, i, offset, self.unbox(box)) def fill(self, storage, width, box, start, stop, offset): value = self.unbox(box) - for i in xrange(start, stop): - libffi.array_setitem(clibffi.cast_type_to_ffitype(self.T), - width, storage, i, offset, value - ) + for i in xrange(start, stop, width): + self._write(storage, 1, i, offset, value) def runpack_str(self, s): return self.box(runpack(self.format_code, s)) + def pack_str(self, box): + return struct.pack(self.format_code, self.unbox(box)) + @simple_binary_op def add(self, v1, v2): return v1 + v2 @@ -155,6 +180,14 @@ def isinf(self, v): return False + @raw_unary_op + def isneginf(self, v): + return False + + @raw_unary_op + def isposinf(self, v): + return False + @raw_binary_op def eq(self, v1, v2): return v1 == v2 @@ -206,8 +239,31 @@ def min(self, v1, v2): return min(v1, v2) +class NonNativePrimitive(Primitive): + _mixin_ = True + + def _read(self, storage, width, i, offset): + if we_are_translated(): + res = libffi.array_getitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset) + else: + res = libffi.array_getitem_T(self.T, width, storage, i, offset) + return byteswap(res) + + def _write(self, storage, width, i, offset, value): + value = byteswap(value) + if we_are_translated(): + libffi.array_setitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset, value) + else: + libffi.array_setitem_T(self.T, width, storage, i, offset, value) + + def pack_str(self, box): + return struct.pack(self.format_code, byteswap(self.unbox(box))) class Bool(BaseType, Primitive): + _attrs_ = () + T = lltype.Bool BoxType = interp_boxes.W_BoolBox format_code = "?" 
@@ -234,8 +290,7 @@ return space.wrap(self.unbox(w_item)) def str_format(self, box): - value = self.unbox(box) - return "True" if value else "False" + return "True" if self.unbox(box) else "False" def for_computation(self, v): return int(v) @@ -259,15 +314,18 @@ def invert(self, v): return ~v +NonNativeBool = Bool + class Integer(Primitive): _mixin_ = True + def _base_coerce(self, space, w_item): + return self.box(space.int_w(space.call_function(space.w_int, w_item))) def _coerce(self, space, w_item): - return self.box(space.int_w(space.call_function(space.w_int, w_item))) + return self._base_coerce(space, w_item) def str_format(self, box): - value = self.unbox(box) - return str(self.for_computation(value)) + return str(self.for_computation(self.unbox(box))) def for_computation(self, v): return widen(v) @@ -293,6 +351,8 @@ @simple_binary_op def pow(self, v1, v2): + if v2 < 0: + return 0 res = 1 while v2 > 0: if v2 & 1: @@ -337,68 +397,170 @@ def invert(self, v): return ~v +class NonNativeInteger(NonNativePrimitive, Integer): + _mixin_ = True + class Int8(BaseType, Integer): + _attrs_ = () + T = rffi.SIGNEDCHAR BoxType = interp_boxes.W_Int8Box format_code = "b" +NonNativeInt8 = Int8 class UInt8(BaseType, Integer): + _attrs_ = () + T = rffi.UCHAR BoxType = interp_boxes.W_UInt8Box format_code = "B" +NonNativeUInt8 = UInt8 class Int16(BaseType, Integer): + _attrs_ = () + + T = rffi.SHORT + BoxType = interp_boxes.W_Int16Box + format_code = "h" + +class NonNativeInt16(BaseType, NonNativeInteger): + _attrs_ = () + T = rffi.SHORT BoxType = interp_boxes.W_Int16Box format_code = "h" class UInt16(BaseType, Integer): + _attrs_ = () + + T = rffi.USHORT + BoxType = interp_boxes.W_UInt16Box + format_code = "H" + +class NonNativeUInt16(BaseType, NonNativeInteger): + _attrs_ = () + T = rffi.USHORT BoxType = interp_boxes.W_UInt16Box format_code = "H" class Int32(BaseType, Integer): + _attrs_ = () + + T = rffi.INT + BoxType = interp_boxes.W_Int32Box + format_code = "i" + +class 
NonNativeInt32(BaseType, NonNativeInteger): + _attrs_ = () + T = rffi.INT BoxType = interp_boxes.W_Int32Box format_code = "i" class UInt32(BaseType, Integer): + _attrs_ = () + + T = rffi.UINT + BoxType = interp_boxes.W_UInt32Box + format_code = "I" + +class NonNativeUInt32(BaseType, NonNativeInteger): + _attrs_ = () + T = rffi.UINT BoxType = interp_boxes.W_UInt32Box format_code = "I" class Long(BaseType, Integer): + _attrs_ = () + + T = rffi.LONG + BoxType = interp_boxes.W_LongBox + format_code = "l" + +class NonNativeLong(BaseType, NonNativeInteger): + _attrs_ = () + T = rffi.LONG BoxType = interp_boxes.W_LongBox format_code = "l" class ULong(BaseType, Integer): + _attrs_ = () + T = rffi.ULONG BoxType = interp_boxes.W_ULongBox format_code = "L" +class NonNativeULong(BaseType, NonNativeInteger): + _attrs_ = () + + T = rffi.ULONG + BoxType = interp_boxes.W_ULongBox + format_code = "L" + +def _int64_coerce(self, space, w_item): + try: + return self._base_coerce(space, w_item) + except OperationError, e: + if not e.match(space, space.w_OverflowError): + raise + bigint = space.bigint_w(w_item) + try: + value = bigint.tolonglong() + except OverflowError: + raise OperationError(space.w_OverflowError, space.w_None) + return self.box(value) + class Int64(BaseType, Integer): + _attrs_ = () + T = rffi.LONGLONG BoxType = interp_boxes.W_Int64Box format_code = "q" + _coerce = func_with_new_name(_int64_coerce, '_coerce') + +class NonNativeInt64(BaseType, NonNativeInteger): + _attrs_ = () + + T = rffi.LONGLONG + BoxType = interp_boxes.W_Int64Box + format_code = "q" + + _coerce = func_with_new_name(_int64_coerce, '_coerce') + +def _uint64_coerce(self, space, w_item): + try: + return self._base_coerce(space, w_item) + except OperationError, e: + if not e.match(space, space.w_OverflowError): + raise + bigint = space.bigint_w(w_item) + try: + value = bigint.toulonglong() + except OverflowError: + raise OperationError(space.w_OverflowError, space.w_None) + return self.box(value) + 
class UInt64(BaseType, Integer): + _attrs_ = () + T = rffi.ULONGLONG BoxType = interp_boxes.W_UInt64Box format_code = "Q" - def _coerce(self, space, w_item): - try: - return Integer._coerce(self, space, w_item) - except OperationError, e: - if not e.match(space, space.w_OverflowError): - raise - bigint = space.bigint_w(w_item) - try: - value = bigint.toulonglong() - except OverflowError: - raise OperationError(space.w_OverflowError, space.w_None) - return self.box(value) + _coerce = func_with_new_name(_uint64_coerce, '_coerce') + +class NonNativeUInt64(BaseType, NonNativeInteger): + _attrs_ = () + + T = rffi.ULONGLONG + BoxType = interp_boxes.W_UInt64Box + format_code = "Q" + + _coerce = func_with_new_name(_uint64_coerce, '_coerce') class Float(Primitive): _mixin_ = True @@ -407,8 +569,8 @@ return self.box(space.float_w(space.call_function(space.w_float, w_item))) def str_format(self, box): - value = self.unbox(box) - return float2string(self.for_computation(value), "g", rfloat.DTSF_STR_PRECISION) + return float2string(self.for_computation(self.unbox(box)), "g", + rfloat.DTSF_STR_PRECISION) def for_computation(self, v): return float(v) @@ -440,7 +602,15 @@ @simple_binary_op def pow(self, v1, v2): - return math.pow(v1, v2) + try: + return math.pow(v1, v2) + except ValueError: + return rfloat.NAN + except OverflowError: + if math.modf(v2)[0] == 0 and math.modf(v2 / 2)[0] != 0: + # Odd integer powers result in the same sign as the base + return rfloat.copysign(rfloat.INFINITY, v1) + return rfloat.INFINITY @simple_binary_op def copysign(self, v1, v2): @@ -452,10 +622,21 @@ return 0.0 return rfloat.copysign(1.0, v) + @raw_unary_op + def signbit(self, v): + return rfloat.copysign(1.0, v) < 0.0 + @simple_unary_op def fabs(self, v): return math.fabs(v) + @simple_binary_op + def fmod(self, v1, v2): + try: + return math.fmod(v1, v2) + except ValueError: + return rfloat.NAN + @simple_unary_op def reciprocal(self, v): if v == 0.0: @@ -478,6 +659,20 @@ return rfloat.INFINITY 
@simple_unary_op + def exp2(self, v): + try: + return math.pow(2, v) + except OverflowError: + return rfloat.INFINITY + + @simple_unary_op + def expm1(self, v): + try: + return rfloat.expm1(v) + except OverflowError: + return rfloat.INFINITY + + @simple_unary_op def sin(self, v): return math.sin(v) @@ -505,6 +700,10 @@ def arctan(self, v): return math.atan(v) + @simple_binary_op + def arctan2(self, v1, v2): + return math.atan2(v1, v2) + @simple_unary_op def sinh(self, v): return math.sinh(v) @@ -550,6 +749,18 @@ def isinf(self, v): return rfloat.isinf(v) + @raw_unary_op + def isneginf(self, v): + return rfloat.isinf(v) and v < 0 + + @raw_unary_op + def isposinf(self, v): + return rfloat.isinf(v) and v > 0 + + @raw_unary_op + def isfinite(self, v): + return not (rfloat.isinf(v) or rfloat.isnan(v)) + @simple_unary_op def radians(self, v): return v * degToRad @@ -601,13 +812,200 @@ except ValueError: return rfloat.NAN + @simple_binary_op + def logaddexp(self, v1, v2): + try: + v1e = math.exp(v1) + except OverflowError: + v1e = rfloat.INFINITY + try: + v2e = math.exp(v2) + except OverflowError: + v2e = rfloat.INFINITY + + v12e = v1e + v2e + try: + return math.log(v12e) + except ValueError: + if v12e == 0.0: + # CPython raises ValueError here, so we have to check + # the value to find the correct numpy return value + return -rfloat.INFINITY + return rfloat.NAN + + @simple_binary_op + def logaddexp2(self, v1, v2): + try: + v1e = math.pow(2, v1) + except OverflowError: + v1e = rfloat.INFINITY + try: + v2e = math.pow(2, v2) + except OverflowError: + v2e = rfloat.INFINITY + + v12e = v1e + v2e + try: + return math.log(v12e) / log2 + except ValueError: + if v12e == 0.0: + # CPython raises ValueError here, so we have to check + # the value to find the correct numpy return value + return -rfloat.INFINITY + return rfloat.NAN + +class NonNativeFloat(NonNativePrimitive, Float): + _mixin_ = True + + def _read(self, storage, width, i, offset): + if we_are_translated(): + res = 
libffi.array_getitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset) + else: + res = libffi.array_getitem_T(self.T, width, storage, i, offset) + #return byteswap(res) + return res + + def _write(self, storage, width, i, offset, value): + #value = byteswap(value) XXX + if we_are_translated(): + libffi.array_setitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset, value) + else: + libffi.array_setitem_T(self.T, width, storage, i, offset, value) + + def pack_str(self, box): + # XXX byteswap + return struct.pack(self.format_code, self.unbox(box)) + class Float32(BaseType, Float): + _attrs_ = () + T = rffi.FLOAT BoxType = interp_boxes.W_Float32Box format_code = "f" +class NonNativeFloat32(BaseType, NonNativeFloat): + _attrs_ = () + + T = rffi.FLOAT + BoxType = interp_boxes.W_Float32Box + format_code = "f" + class Float64(BaseType, Float): + _attrs_ = () + T = rffi.DOUBLE BoxType = interp_boxes.W_Float64Box format_code = "d" + +class NonNativeFloat64(BaseType, NonNativeFloat): + _attrs_ = () + + T = rffi.DOUBLE + BoxType = interp_boxes.W_Float64Box + format_code = "d" + +class BaseStringType(object): + _mixin_ = True + + def __init__(self, size=0): + self.size = size + + def get_element_size(self): + return self.size * rffi.sizeof(self.T) + +class StringType(BaseType, BaseStringType): + T = lltype.Char + +class VoidType(BaseType, BaseStringType): + T = lltype.Char + +NonNativeVoidType = VoidType +NonNativeStringType = StringType + +class UnicodeType(BaseType, BaseStringType): + T = lltype.UniChar + +NonNativeUnicodeType = UnicodeType + +class RecordType(BaseType): + + T = lltype.Char + + def __init__(self, offsets_and_fields, size): + self.offsets_and_fields = offsets_and_fields + self.size = size + + def get_element_size(self): + return self.size + + def read(self, arr, width, i, offset, dtype=None): + if dtype is None: + dtype = arr.dtype + return interp_boxes.W_VoidBox(arr, i + offset, dtype) + + @jit.unroll_safe + def coerce(self, 
space, dtype, w_item): + from pypy.module.micronumpy.interp_numarray import W_NDimArray + + if isinstance(w_item, interp_boxes.W_VoidBox): + return w_item + # we treat every sequence as sequence, no special support + # for arrays + if not space.issequence_w(w_item): + raise OperationError(space.w_TypeError, space.wrap( + "expected sequence")) + if len(self.offsets_and_fields) != space.int_w(space.len(w_item)): + raise OperationError(space.w_ValueError, space.wrap( + "wrong length")) + items_w = space.fixedview(w_item) + # XXX optimize it out one day, but for now we just allocate an + # array + arr = W_NDimArray([1], dtype) + for i in range(len(items_w)): + subdtype = dtype.fields[dtype.fieldnames[i]][1] + ofs, itemtype = self.offsets_and_fields[i] + w_item = items_w[i] + w_box = itemtype.coerce(space, subdtype, w_item) + itemtype.store(arr, 1, 0, ofs, w_box) + return interp_boxes.W_VoidBox(arr, 0, arr.dtype) + + @jit.unroll_safe + def store(self, arr, _, i, ofs, box): + assert isinstance(box, interp_boxes.W_VoidBox) + for k in range(self.get_element_size()): + arr.storage[k + i] = box.arr.storage[k + box.ofs] + + @jit.unroll_safe + def str_format(self, box): + assert isinstance(box, interp_boxes.W_VoidBox) + pieces = ["("] + first = True + for ofs, tp in self.offsets_and_fields: + if first: + first = False + else: + pieces.append(", ") + pieces.append(tp.str_format(tp.read(box.arr, 1, box.ofs, ofs))) + pieces.append(")") + return "".join(pieces) + +for tp in [Int32, Int64]: + if tp.T == lltype.Signed: + IntP = tp + break +for tp in [UInt32, UInt64]: + if tp.T == lltype.Unsigned: + UIntP = tp + break +del tp + +def _setup(): + # compute alignment + for tp in globals().values(): + if isinstance(tp, type) and hasattr(tp, 'T'): + tp.alignment = clibffi.cast_type_to_ffitype(tp.T).c_alignment +_setup() +del _setup diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ 
b/pypy/module/posix/test/test_posix2.py @@ -14,10 +14,10 @@ def setup_module(mod): if os.name != 'nt': - mod.space = gettestobjspace(usemodules=['posix', 'fcntl']) + mod.space = gettestobjspace(usemodules=['posix', 'fcntl', 'struct']) else: # On windows, os.popen uses the subprocess module - mod.space = gettestobjspace(usemodules=['posix', '_rawffi', 'thread']) + mod.space = gettestobjspace(usemodules=['posix', '_rawffi', 'thread', 'struct']) mod.path = udir.join('posixtestfile.txt') mod.path.write("this is a test") mod.path2 = udir.join('test_posix2-') diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -128,3 +128,82 @@ loop, = log.loops_by_filename(self.filepath) ops = loop.ops_by_id('look') assert 'call' not in log.opnames(ops) + + #XXX the following tests only work with strategies enabled + + def test_should_not_create_intobject_with_sets(self): + def main(n): + i = 0 + s = set() + while i < n: + s.add(i) + i += 1 + log = self.run(main, [1000]) + assert log.result == main(1000) + loop, = log.loops_by_filename(self.filepath) + opnames = log.opnames(loop.allops()) + assert opnames.count('new_with_vtable') == 0 + + def test_should_not_create_stringobject_with_sets(self): + def main(n): + i = 0 + s = set() + while i < n: + s.add(str(i)) + i += 1 + log = self.run(main, [1000]) + assert log.result == main(1000) + loop, = log.loops_by_filename(self.filepath) + opnames = log.opnames(loop.allops()) + assert opnames.count('new_with_vtable') == 0 + + def test_should_not_create_intobject_with_lists(self): + def main(n): + i = 0 + l = [] + while i < n: + l.append(i) + i += 1 + log = self.run(main, [1000]) + assert log.result == main(1000) + loop, = log.loops_by_filename(self.filepath) + opnames = log.opnames(loop.allops()) + assert opnames.count('new_with_vtable') == 0 + + def 
test_should_not_create_stringobject_with_lists(self): + def main(n): + i = 0 + l = [] + while i < n: + l.append(str(i)) + i += 1 + log = self.run(main, [1000]) + assert log.result == main(1000) + loop, = log.loops_by_filename(self.filepath) + opnames = log.opnames(loop.allops()) + assert opnames.count('new_with_vtable') == 0 + + def test_optimized_create_list_from_string(self): + def main(n): + i = 0 + l = [] + while i < n: + l = list("abc" * i) + i += 1 + log = self.run(main, [1000]) + assert log.result == main(1000) + loop, = log.loops_by_filename(self.filepath) + opnames = log.opnames(loop.allops()) + assert opnames.count('new_with_vtable') == 0 + + def test_optimized_create_set_from_list(self): + def main(n): + i = 0 + while i < n: + s = set([1,2,3]) + i += 1 + log = self.run(main, [1000]) + assert log.result == main(1000) + loop, = log.loops_by_filename(self.filepath) + opnames = log.opnames(loop.allops()) + assert opnames.count('new_with_vtable') == 0 diff --git a/pypy/module/rctime/test/test_rctime.py b/pypy/module/rctime/test/test_rctime.py --- a/pypy/module/rctime/test/test_rctime.py +++ b/pypy/module/rctime/test/test_rctime.py @@ -3,7 +3,7 @@ class AppTestRCTime: def setup_class(cls): - space = gettestobjspace(usemodules=('rctime',)) + space = gettestobjspace(usemodules=('rctime', 'struct')) cls.space = space def test_attributes(self): diff --git a/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py b/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py --- a/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py +++ b/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py @@ -34,7 +34,7 @@ assert repr(a) == "array([], dtype=float64)" a = zeros(1001) assert repr(a) == "array([ 0., 0., 0., ..., 0., 0., 0.])" - a = array(range(5), long) + a = array(range(5), int) if a.dtype.itemsize == int_size: assert repr(a) == "array([0, 1, 2, 3, 4])" else: @@ -142,3 +142,39 @@ assert str(b) == "[7 8 9]" b = a[2:1, ] assert str(b) == "[]" + + def test_equal(self): + 
from _numpypy import array + from numpypy import array_equal + + a = [1, 2, 3] + b = [1, 2, 3] + + assert array_equal(a, b) + assert array_equal(a, array(b)) + assert array_equal(array(a), b) + assert array_equal(array(a), array(b)) + + def test_not_equal(self): + from _numpypy import array + from numpypy import array_equal + + a = [1, 2, 3] + b = [1, 2, 4] + + assert not array_equal(a, b) + assert not array_equal(a, array(b)) + assert not array_equal(array(a), b) + assert not array_equal(array(a), array(b)) + + def test_mismatched_shape(self): + from _numpypy import array + from numpypy import array_equal + + a = [1, 2, 3] + b = [[1, 2, 3], [1, 2, 3]] + + assert not array_equal(a, b) + assert not array_equal(a, array(b)) + assert not array_equal(array(a), b) + assert not array_equal(array(a), array(b)) diff --git a/pypy/module/test_lib_pypy/test_binascii.py b/pypy/module/test_lib_pypy/test_binascii.py deleted file mode 100644 --- a/pypy/module/test_lib_pypy/test_binascii.py +++ /dev/null @@ -1,8 +0,0 @@ - -""" Some more binascii.py tests -""" - -class AppTestBinAscii: - def test_incorrect_padding(self): - import binascii - raises(binascii.Error, "'x'.decode('base64')") diff --git a/pypy/module/zipimport/test/test_undocumented.py b/pypy/module/zipimport/test/test_undocumented.py --- a/pypy/module/zipimport/test/test_undocumented.py +++ b/pypy/module/zipimport/test/test_undocumented.py @@ -19,7 +19,7 @@ class AppTestZipImport: def setup_class(cls): - space = gettestobjspace(usemodules=['zipimport', 'rctime']) + space = gettestobjspace(usemodules=['zipimport', 'rctime', 'struct']) cls.space = space cls.w_created_paths = space.wrap(created_paths) diff --git a/pypy/module/zipimport/test/test_zipimport.py b/pypy/module/zipimport/test/test_zipimport.py --- a/pypy/module/zipimport/test/test_zipimport.py +++ b/pypy/module/zipimport/test/test_zipimport.py @@ -47,9 +47,9 @@ """).compile() if cls.compression == ZIP_DEFLATED: - space = gettestobjspace(usemodules=['zipimport', 
'zlib', 'rctime']) + space = gettestobjspace(usemodules=['zipimport', 'zlib', 'rctime', 'struct']) else: - space = gettestobjspace(usemodules=['zipimport', 'rctime']) + space = gettestobjspace(usemodules=['zipimport', 'rctime', 'struct']) cls.space = space tmpdir = udir.ensure('zipimport_%s' % cls.__name__, dir=1) diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -207,6 +207,11 @@ is_arguments(args) return w_some_obj() + def get_and_call_function(space, w_descr, w_obj, *args_w): + args = argument.Arguments(space, list(args_w)) + w_impl = space.get(w_descr, w_obj) + return space.call_args(w_impl, args) + def gettypefor(self, cls): return self.gettypeobject(cls.typedef) diff --git a/pypy/objspace/flow/model.py b/pypy/objspace/flow/model.py --- a/pypy/objspace/flow/model.py +++ b/pypy/objspace/flow/model.py @@ -7,8 +7,7 @@ from pypy.tool.uid import uid, Hashable from pypy.tool.descriptor import roproperty from pypy.tool.sourcetools import PY_IDENTIFIER, nice_repr_for_func -from pypy.tool.identity_dict import identity_dict -from pypy.rlib.rarithmetic import is_valid_int +from pypy.rlib.rarithmetic import is_valid_int, r_longlong, r_ulonglong, r_uint """ @@ -546,6 +545,8 @@ for n in cases[:len(cases)-has_default]: if is_valid_int(n): continue + if isinstance(n, (r_longlong, r_ulonglong, r_uint)): + continue if isinstance(n, (str, unicode)) and len(n) == 1: continue assert n != 'default', ( diff --git a/pypy/objspace/flow/objspace.py b/pypy/objspace/flow/objspace.py --- a/pypy/objspace/flow/objspace.py +++ b/pypy/objspace/flow/objspace.py @@ -117,7 +117,7 @@ else: return Constant(tuple(content)) - def newlist(self, args_w): + def newlist(self, args_w, sizehint=None): if self.concrete_mode: content = [self.unwrap(w_arg) for w_arg in args_w] return Constant(content) diff --git a/pypy/objspace/flow/test/test_objspace.py b/pypy/objspace/flow/test/test_objspace.py --- 
a/pypy/objspace/flow/test/test_objspace.py +++ b/pypy/objspace/flow/test/test_objspace.py @@ -1,6 +1,6 @@ from __future__ import with_statement import new -import py +import py, sys from pypy.objspace.flow.model import Constant, Block, Link, Variable from pypy.objspace.flow.model import mkentrymap, c_last_exception from pypy.interpreter.argument import Arguments @@ -849,16 +849,25 @@ c.co_filename, c.co_name, c.co_firstlineno, c.co_lnotab) + def patch_opcodes(self, *opcodes): + flow_meth_names = flowcontext.FlowSpaceFrame.opcode_method_names + pyframe_meth_names = PyFrame.opcode_method_names + for name in opcodes: + num = bytecode_spec.opmap[name] + setattr(self, 'old_' + name, flow_meth_names[num]) + flow_meth_names[num] = pyframe_meth_names[num] + + def unpatch_opcodes(self, *opcodes): + flow_meth_names = flowcontext.FlowSpaceFrame.opcode_method_names + for name in opcodes: + num = bytecode_spec.opmap[name] + flow_meth_names[num] = getattr(self, 'old_' + name) + def test_callmethod_opcode(self): """ Tests code generated by pypy-c compiled with CALL_METHOD bytecode """ - flow_meth_names = flowcontext.FlowSpaceFrame.opcode_method_names - pyframe_meth_names = PyFrame.opcode_method_names - for name in ['CALL_METHOD', 'LOOKUP_METHOD']: - num = bytecode_spec.opmap[name] - locals()['old_' + name] = flow_meth_names[num] - flow_meth_names[num] = pyframe_meth_names[num] + self.patch_opcodes('CALL_METHOD', 'LOOKUP_METHOD') try: class X: def m(self): @@ -878,9 +887,31 @@ assert all_ops['simple_call'] == 2 assert all_ops['getattr'] == 1 finally: - for name in ['CALL_METHOD', 'LOOKUP_METHOD']: - num = bytecode_spec.opmap[name] - flow_meth_names[num] = locals()['old_' + name] + self.unpatch_opcodes('CALL_METHOD', 'LOOKUP_METHOD') + + def test_build_list_from_arg_opcode(self): + """ Tests code generated by pypy-c compiled with BUILD_LIST_FROM_ARG + bytecode + """ + if sys.version_info < (2, 7): + py.test.skip("2.7 only test") + self.patch_opcodes('BUILD_LIST_FROM_ARG') + try: + 
def f(): + return [i for i in "abc"] + + # this code is generated by pypy-c when compiling above f + pypy_code = 'd\x01\x00\xcb\x00\x00D]\x0c\x00}\x00\x00|\x00\x00^\x02\x00q\x07\x00S' + new_c = self.monkey_patch_code(f.func_code, 3, 67, pypy_code, (), + ('i',)) + f2 = new.function(new_c, locals(), 'f') + + graph = self.codetest(f2) + all_ops = self.all_operations(graph) + assert all_ops == {'newlist': 1, 'getattr': 1, 'simple_call': 1, + 'iter': 1, 'next': 1} + finally: + self.unpatch_opcodes('BUILD_LIST_FROM_ARG') def test_dont_capture_RuntimeError(self): class Foo: diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -111,9 +111,15 @@ length = len(data) start, stop, step, slicelength = w_slice.indices4(space, length) assert slicelength >= 0 - newdata = [data[start + i*step] for i in range(slicelength)] + if step == 1 and 0 <= start <= stop: + newdata = data[start:stop] + else: + newdata = _getitem_slice_multistep(data, start, step, slicelength) return W_BytearrayObject(newdata) +def _getitem_slice_multistep(data, start, step, slicelength): + return [data[start + i*step] for i in range(slicelength)] + def contains__Bytearray_Int(space, w_bytearray, w_char): char = space.int_w(w_char) if not 0 <= char < 256: diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -127,10 +127,10 @@ def iter(self, w_dict): return ModuleDictIteratorImplementation(self.space, self, w_dict) - def keys(self, w_dict): + def w_keys(self, w_dict): space = self.space - iterator = self.unerase(w_dict.dstorage).iteritems - return [space.wrap(key) for key, cell in iterator()] + l = self.unerase(w_dict.dstorage).keys() + return space.newlist_str(l) def values(self, w_dict): iterator = self.unerase(w_dict.dstorage).itervalues diff --git a/pypy/objspace/std/complexobject.py 
b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -9,6 +9,7 @@ from pypy.rlib.rfloat import ( formatd, DTSF_STR_PRECISION, isinf, isnan, copysign) from pypy.rlib import jit +from pypy.rlib.rarithmetic import intmask import math @@ -173,7 +174,7 @@ def hash__Complex(space, w_value): hashreal = _hash_float(space, w_value.realval) hashimg = _hash_float(space, w_value.imagval) - combined = hashreal + 1000003 * hashimg + combined = intmask(hashreal + 1000003 * hashimg) return space.newint(combined) def add__Complex_Complex(space, w_complex1, w_complex2): diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -90,9 +90,9 @@ def _add_indirections(): dict_methods = "setitem setitem_str getitem \ getitem_str delitem length \ - clear keys values \ + clear w_keys values \ items iter setdefault \ - popitem".split() + popitem listview_str listview_int".split() def make_method(method): def f(self, *args): @@ -113,7 +113,7 @@ def get_empty_storage(self): raise NotImplementedError - def keys(self, w_dict): + def w_keys(self, w_dict): iterator = self.iter(w_dict) result = [] while 1: @@ -121,7 +121,7 @@ if w_key is not None: result.append(w_key) else: - return result + return self.space.newlist(result) def values(self, w_dict): iterator = self.iter(w_dict) @@ -160,6 +160,11 @@ w_dict.strategy = strategy w_dict.dstorage = storage + def listview_str(self, w_dict): + return None + + def listview_int(self, w_dict): + return None class EmptyDictStrategy(DictStrategy): @@ -371,8 +376,9 @@ self.switch_to_object_strategy(w_dict) return w_dict.getitem(w_key) - def keys(self, w_dict): - return [self.wrap(key) for key in self.unerase(w_dict.dstorage).iterkeys()] + def w_keys(self, w_dict): + l = [self.wrap(key) for key in self.unerase(w_dict.dstorage).iterkeys()] + return self.space.newlist(l) def 
values(self, w_dict): return self.unerase(w_dict.dstorage).values() @@ -425,8 +431,8 @@ def iter(self, w_dict): return ObjectIteratorImplementation(self.space, self, w_dict) - def keys(self, w_dict): - return self.unerase(w_dict.dstorage).keys() + def w_keys(self, w_dict): + return self.space.newlist(self.unerase(w_dict.dstorage).keys()) class StringDictStrategy(AbstractTypedStrategy, DictStrategy): @@ -469,9 +475,15 @@ assert key is not None return self.unerase(w_dict.dstorage).get(key, None) + def listview_str(self, w_dict): + return self.unerase(w_dict.dstorage).keys() + def iter(self, w_dict): return StrIteratorImplementation(self.space, self, w_dict) + def w_keys(self, w_dict): + return self.space.newlist_str(self.listview_str(w_dict)) + class _WrappedIteratorMixin(object): _mixin_ = True @@ -534,6 +546,14 @@ def iter(self, w_dict): return IntIteratorImplementation(self.space, self, w_dict) + def listview_int(self, w_dict): + return self.unerase(w_dict.dstorage).keys() + + def w_keys(self, w_dict): + # XXX there is no space.newlist_int yet + space = self.space + return space.call_function(space.w_list, w_dict) + class IntIteratorImplementation(_WrappedIteratorMixin, IteratorImplementation): pass @@ -688,7 +708,7 @@ return space.newlist(w_self.items()) def dict_keys__DictMulti(space, w_self): - return space.newlist(w_self.keys()) + return w_self.w_keys() def dict_values__DictMulti(space, w_self): return space.newlist(w_self.values()) diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py --- a/pypy/objspace/std/dictproxyobject.py +++ b/pypy/objspace/std/dictproxyobject.py @@ -76,7 +76,7 @@ def keys(self, w_dict): space = self.space - return [space.wrap(key) for key in self.unerase(w_dict.dstorage).dict_w.iterkeys()] + return space.newlist_str(self.unerase(w_dict.dstorage).dict_w.keys()) def values(self, w_dict): return [unwrap_cell(self.space, w_value) for w_value in self.unerase(w_dict.dstorage).dict_w.itervalues()] diff --git 
a/pypy/objspace/std/dicttype.py b/pypy/objspace/std/dicttype.py --- a/pypy/objspace/std/dicttype.py +++ b/pypy/objspace/std/dicttype.py @@ -62,8 +62,14 @@ w_fill = space.w_None if space.is_w(w_type, space.w_dict): w_dict = W_DictMultiObject.allocate_and_init_instance(space, w_type) - for w_key in space.listview(w_keys): - w_dict.setitem(w_key, w_fill) + + strlist = space.listview_str(w_keys) + if strlist is not None: + for key in strlist: + w_dict.setitem_str(key, w_fill) + else: + for w_key in space.listview(w_keys): + w_dict.setitem(w_key, w_fill) else: w_dict = space.call_function(w_type) for w_key in space.listview(w_keys): diff --git a/pypy/objspace/std/frozensettype.py b/pypy/objspace/std/frozensettype.py --- a/pypy/objspace/std/frozensettype.py +++ b/pypy/objspace/std/frozensettype.py @@ -39,13 +39,11 @@ def descr__frozenset__new__(space, w_frozensettype, w_iterable=gateway.NoneNotWrapped): from pypy.objspace.std.setobject import W_FrozensetObject - from pypy.objspace.std.setobject import make_setdata_from_w_iterable if (space.is_w(w_frozensettype, space.w_frozenset) and w_iterable is not None and type(w_iterable) is W_FrozensetObject): return w_iterable w_obj = space.allocate_instance(W_FrozensetObject, w_frozensettype) - data = make_setdata_from_w_iterable(space, w_iterable) - W_FrozensetObject.__init__(w_obj, space, data) + W_FrozensetObject.__init__(w_obj, space, w_iterable) return w_obj frozenset_typedef = StdTypeDef("frozenset", diff --git a/pypy/objspace/std/iterobject.py b/pypy/objspace/std/iterobject.py --- a/pypy/objspace/std/iterobject.py +++ b/pypy/objspace/std/iterobject.py @@ -22,29 +22,28 @@ index = self.index w_length = space.len(self.w_seq) w_len = space.sub(w_length, space.wrap(index)) - if space.is_true(space.lt(w_len,space.wrap(0))): + if space.is_true(space.lt(w_len, space.wrap(0))): w_len = space.wrap(0) return w_len class W_SeqIterObject(W_AbstractSeqIterObject): """Sequence iterator implementation for general sequences.""" -class 
W_FastListIterObject(W_AbstractSeqIterObject): - """Sequence iterator specialized for lists, accessing - directly their RPython-level list of wrapped objects. +class W_FastListIterObject(W_AbstractSeqIterObject): # XXX still needed + """Sequence iterator specialized for lists. """ class W_FastTupleIterObject(W_AbstractSeqIterObject): - """Sequence iterator specialized for tuples, accessing - directly their RPython-level list of wrapped objects. - """ - def __init__(w_self, w_seq, wrappeditems): + """Sequence iterator specialized for tuples, accessing directly + their RPython-level list of wrapped objects. + """ + def __init__(w_self, w_seq, wrappeditems): W_AbstractSeqIterObject.__init__(w_self, w_seq) w_self.tupleitems = wrappeditems class W_ReverseSeqIterObject(W_Object): from pypy.objspace.std.itertype import reverse_iter_typedef as typedef - + def __init__(w_self, space, w_seq, index=-1): w_self.w_seq = w_seq w_self.w_len = space.len(w_seq) @@ -61,15 +60,15 @@ def next__SeqIter(space, w_seqiter): if w_seqiter.w_seq is None: - raise OperationError(space.w_StopIteration, space.w_None) + raise OperationError(space.w_StopIteration, space.w_None) try: w_item = space.getitem(w_seqiter.w_seq, space.wrap(w_seqiter.index)) except OperationError, e: w_seqiter.w_seq = None if not e.match(space, space.w_IndexError): raise - raise OperationError(space.w_StopIteration, space.w_None) - w_seqiter.index += 1 + raise OperationError(space.w_StopIteration, space.w_None) + w_seqiter.index += 1 return w_item # XXX __length_hint__() @@ -89,7 +88,7 @@ except IndexError: w_seqiter.tupleitems = None w_seqiter.w_seq = None - raise OperationError(space.w_StopIteration, space.w_None) + raise OperationError(space.w_StopIteration, space.w_None) w_seqiter.index = index + 1 return w_item @@ -112,7 +111,7 @@ w_item = w_seq.getitem(index) except IndexError: w_seqiter.w_seq = None - raise OperationError(space.w_StopIteration, space.w_None) + raise OperationError(space.w_StopIteration, 
space.w_None) w_seqiter.index = index + 1 return w_item @@ -126,15 +125,15 @@ def next__ReverseSeqIter(space, w_seqiter): if w_seqiter.w_seq is None or w_seqiter.index < 0: - raise OperationError(space.w_StopIteration, space.w_None) + raise OperationError(space.w_StopIteration, space.w_None) try: w_item = space.getitem(w_seqiter.w_seq, space.wrap(w_seqiter.index)) - w_seqiter.index -= 1 + w_seqiter.index -= 1 except OperationError, e: w_seqiter.w_seq = None if not e.match(space, space.w_IndexError): raise - raise OperationError(space.w_StopIteration, space.w_None) + raise OperationError(space.w_StopIteration, space.w_None) return w_item # XXX __length_hint__() diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -139,6 +139,16 @@ new erased object as storage""" self.strategy.init_from_list_w(self, list_w) + def clear(self, space): + """Initializes (or overrides) the listobject as empty.""" + self.space = space + if space.config.objspace.std.withliststrategies: + strategy = space.fromcache(EmptyListStrategy) + else: + strategy = space.fromcache(ObjectListStrategy) + self.strategy = strategy + strategy.clear(self) + def clone(self): """Returns a clone by creating a new listobject with the same strategy and a copy of the storage""" @@ -200,6 +210,11 @@ """ Return the items in the list as unwrapped strings. If the list does not use the list strategy, return None. """ return self.strategy.getitems_str(self) + + def getitems_int(self): + """ Return the items in the list as unwrapped ints. If the list does + not use the list strategy, return None. 
""" + return self.strategy.getitems_int(self) # ___________________________________________________ @@ -300,6 +315,9 @@ def getitems_str(self, w_list): return None + def getitems_int(self, w_list): + return None + def getstorage_copy(self, w_list): raise NotImplementedError @@ -358,6 +376,9 @@ assert len(list_w) == 0 w_list.lstorage = self.erase(None) + def clear(self, w_list): + w_list.lstorage = self.erase(None) + erase, unerase = rerased.new_erasing_pair("empty") erase = staticmethod(erase) unerase = staticmethod(unerase) @@ -516,6 +537,9 @@ raise IndexError return start + i * step + def getitems_int(self, w_list): + return self._getitems_range(w_list, False) + def getitem(self, w_list, i): return self.wrap(self._getitem_unwrapped(w_list, i)) @@ -696,6 +720,7 @@ for i in l: if i == obj: return True + return False return ListStrategy.contains(self, w_list, w_obj) def length(self, w_list): @@ -937,6 +962,9 @@ def init_from_list_w(self, w_list, list_w): w_list.lstorage = self.erase(list_w) + def clear(self, w_list): + w_list.lstorage = self.erase([]) + def contains(self, w_list, w_obj): return ListStrategy.contains(self, w_list, w_obj) @@ -970,6 +998,9 @@ if reverse: l.reverse() + def getitems_int(self, w_list): + return self.unerase(w_list.lstorage) + class FloatListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = 0.0 _applevel_repr = "float" @@ -1027,37 +1058,49 @@ def getitems_str(self, w_list): return self.unerase(w_list.lstorage) - # _______________________________________________________ init_signature = Signature(['sequence'], None, None) init_defaults = [None] def init__List(space, w_list, __args__): - from pypy.objspace.std.tupleobject import W_TupleObject + from pypy.objspace.std.tupleobject import W_AbstractTupleObject # this is on the silly side w_iterable, = __args__.parse_obj( None, 'list', init_signature, init_defaults) - w_list.__init__(space, []) + w_list.clear(space) if w_iterable is not None: - # unfortunately this is duplicating 
space.unpackiterable to avoid - # assigning a new RPython list to 'wrappeditems', which defeats the - # W_FastListIterObject optimization. - if isinstance(w_iterable, W_ListObject): - w_list.extend(w_iterable) - elif isinstance(w_iterable, W_TupleObject): - w_list.extend(W_ListObject(space, w_iterable.wrappeditems[:])) - else: - _init_from_iterable(space, w_list, w_iterable) + if type(w_iterable) is W_ListObject: + w_iterable.copy_into(w_list) + return + elif isinstance(w_iterable, W_AbstractTupleObject): + w_list.__init__(space, w_iterable.getitems_copy()) + return + + intlist = space.listview_int(w_iterable) + if intlist is not None: + w_list.strategy = strategy = space.fromcache(IntegerListStrategy) + # need to copy because intlist can share with w_iterable + w_list.lstorage = strategy.erase(intlist[:]) + return + + strlist = space.listview_str(w_iterable) + if strlist is not None: + w_list.strategy = strategy = space.fromcache(StringListStrategy) + # need to copy because intlist can share with w_iterable + w_list.lstorage = strategy.erase(strlist[:]) + return + + # xxx special hack for speed + from pypy.interpreter.generator import GeneratorIterator + if isinstance(w_iterable, GeneratorIterator): + w_iterable.unpack_into_w(w_list) + return + # /xxx + _init_from_iterable(space, w_list, w_iterable) def _init_from_iterable(space, w_list, w_iterable): # in its own function to make the JIT look into init__List - # xxx special hack for speed - from pypy.interpreter.generator import GeneratorIterator - if isinstance(w_iterable, GeneratorIterator): - w_iterable.unpack_into_w(w_list) - return - # /xxx w_iterator = space.iter(w_iterable) while True: try: diff --git a/pypy/objspace/std/listtype.py b/pypy/objspace/std/listtype.py --- a/pypy/objspace/std/listtype.py +++ b/pypy/objspace/std/listtype.py @@ -43,7 +43,7 @@ def descr__new__(space, w_listtype, __args__): from pypy.objspace.std.listobject import W_ListObject w_obj = space.allocate_instance(W_ListObject, 
w_listtype) - W_ListObject.__init__(w_obj, space, []) + w_obj.clear(space) return w_obj # ____________________________________________________________ diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -694,6 +694,8 @@ self.delitem(w_dict, w_key) return (w_key, w_value) + # XXX could implement a more efficient w_keys based on space.newlist_str + def materialize_r_dict(space, obj, dict_w): map = obj._get_mapdict_map() new_obj = map.materialize_r_dict(space, obj, dict_w) diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -227,10 +227,7 @@ return W_ComplexObject(x.real, x.imag) if isinstance(x, set): - rdict_w = r_dict(self.eq_w, self.hash_w) - for item in x: - rdict_w[self.wrap(item)] = None - res = W_SetObject(self, rdict_w) + res = W_SetObject(self, self.newlist([self.wrap(item) for item in x])) return res if isinstance(x, frozenset): @@ -325,7 +322,7 @@ def newset(self): from pypy.objspace.std.setobject import newset - return W_SetObject(self, newset(self)) + return W_SetObject(self, None) def newslice(self, w_start, w_end, w_step): return W_SliceObject(w_start, w_end, w_step) @@ -403,7 +400,7 @@ def unpackiterable(self, w_obj, expected_length=-1): if isinstance(w_obj, W_AbstractTupleObject): t = w_obj.getitems_copy() - elif isinstance(w_obj, W_ListObject): + elif type(w_obj) is W_ListObject: t = w_obj.getitems_copy() else: return ObjSpace.unpackiterable(self, w_obj, expected_length) @@ -417,7 +414,7 @@ """ if isinstance(w_obj, W_AbstractTupleObject): t = w_obj.tolist() - elif isinstance(w_obj, W_ListObject): + elif type(w_obj) is W_ListObject: if unroll: t = w_obj.getitems_unroll() else: @@ -438,7 +435,7 @@ return self.fixedview(w_obj, expected_length, unroll=True) def listview(self, w_obj, expected_length=-1): - if isinstance(w_obj, W_ListObject): + if type(w_obj) is 
W_ListObject: t = w_obj.getitems() elif isinstance(w_obj, W_AbstractTupleObject): t = w_obj.getitems_copy() @@ -449,8 +446,25 @@ return t def listview_str(self, w_obj): - if isinstance(w_obj, W_ListObject): + # note: uses exact type checking for objects with strategies, + # and isinstance() for others. See test_listobject.test_uses_custom... + if type(w_obj) is W_ListObject: return w_obj.getitems_str() + if type(w_obj) is W_DictMultiObject: + return w_obj.listview_str() + if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: + return w_obj.listview_str() + if isinstance(w_obj, W_StringObject): + return w_obj.listview_str() + return None + + def listview_int(self, w_obj): + if type(w_obj) is W_ListObject: + return w_obj.getitems_int() + if type(w_obj) is W_DictMultiObject: + return w_obj.listview_int() + if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: + return w_obj.listview_int() return None def sliceindices(self, w_slice, w_length): diff --git a/pypy/objspace/std/ropeobject.py b/pypy/objspace/std/ropeobject.py --- a/pypy/objspace/std/ropeobject.py +++ b/pypy/objspace/std/ropeobject.py @@ -41,11 +41,6 @@ return w_self return W_RopeObject(w_self._node) - def unicode_w(w_self, space): - # XXX should this use the default encoding? 
- from pypy.objspace.std.unicodetype import plain_str2unicode - return plain_str2unicode(space, w_self._node.flatten_string()) - W_RopeObject.EMPTY = W_RopeObject(rope.LiteralStringNode.EMPTY) W_RopeObject.PREBUILT = [W_RopeObject(rope.LiteralStringNode.PREBUILT[i]) for i in range(256)] diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -7,6 +7,12 @@ from pypy.interpreter.argument import Signature from pypy.objspace.std.settype import set_typedef as settypedef from pypy.objspace.std.frozensettype import frozenset_typedef as frozensettypedef +from pypy.rlib import rerased +from pypy.rlib.objectmodel import instantiate +from pypy.interpreter.generator import GeneratorIterator +from pypy.objspace.std.listobject import W_ListObject +from pypy.objspace.std.intobject import W_IntObject +from pypy.objspace.std.stringobject import W_StringObject class W_BaseSetObject(W_Object): typedef = None @@ -20,88 +26,859 @@ return True return False - - def __init__(w_self, space, setdata): + def __init__(w_self, space, w_iterable=None): """Initialize the set by taking ownership of 'setdata'.""" - assert setdata is not None - w_self.setdata = setdata + w_self.space = space + set_strategy_and_setdata(space, w_self, w_iterable) def __repr__(w_self): """representation for debugging purposes""" - reprlist = [repr(w_item) for w_item in w_self.setdata.keys()] + reprlist = [repr(w_item) for w_item in w_self.getkeys()] return "<%s(%s)>" % (w_self.__class__.__name__, ', '.join(reprlist)) + def from_storage_and_strategy(w_self, storage, strategy): + obj = w_self._newobj(w_self.space, None) + assert isinstance(obj, W_BaseSetObject) + obj.strategy = strategy + obj.sstorage = storage + return obj + From noreply at buildbot.pypy.org Tue Mar 27 04:20:59 2012 From: noreply at buildbot.pypy.org (wlav) Date: Tue, 27 Mar 2012 04:20:59 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: do not 
release the GIL when calling into CINT to allow TPython callbacks Message-ID: <20120327022059.B414D820D9@wyvern.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r54018:fd1ecb74a56e Date: 2012-03-26 19:20 -0700 http://bitbucket.org/pypy/pypy/changeset/fd1ecb74a56e/ Log: do not release the GIL when calling into CINT to allow TPython callbacks diff --git a/pypy/module/cppyy/capi/__init__.py b/pypy/module/cppyy/capi/__init__.py --- a/pypy/module/cppyy/capi/__init__.py +++ b/pypy/module/cppyy/capi/__init__.py @@ -5,6 +5,7 @@ #import cint_capi as backend identify = backend.identify +threadsafe = backend.threadsafe _C_OPAQUE_PTR = rffi.LONG _C_OPAQUE_NULL = lltype.nullptr(rffi.LONGP.TO)# ALT: _C_OPAQUE_PTR.TO @@ -36,92 +37,112 @@ _c_resolve_name = rffi.llexternal( "cppyy_resolve_name", [rffi.CCHARP], rffi.CCHARP, + threadsafe=threadsafe, compilation_info=backend.eci) def c_resolve_name(cppitem_name): return charp2str_free(_c_resolve_name(cppitem_name)) c_get_scope = rffi.llexternal( "cppyy_get_scope", [rffi.CCHARP], C_SCOPE, + threadsafe=threadsafe, compilation_info=backend.eci) c_get_template = rffi.llexternal( "cppyy_get_template", [rffi.CCHARP], C_TYPE, + threadsafe=threadsafe, compilation_info=backend.eci) c_get_object_type = rffi.llexternal( "cppyy_get_object_type", [C_TYPE, C_OBJECT], C_TYPE, + threadsafe=threadsafe, compilation_info=backend.eci) # memory management ---------------------------------------------------------- c_allocate = rffi.llexternal( "cppyy_allocate", [C_TYPE], C_OBJECT, + threadsafe=threadsafe, compilation_info=backend.eci) c_deallocate = rffi.llexternal( "cppyy_deallocate", [C_TYPE, C_OBJECT], lltype.Void, + threadsafe=threadsafe, compilation_info=backend.eci) c_destruct = rffi.llexternal( "cppyy_destruct", [C_TYPE, C_OBJECT], lltype.Void, + threadsafe=threadsafe, compilation_info=backend.eci) # method/function dispatching ------------------------------------------------ c_call_v = rffi.llexternal( "cppyy_call_v", 
[C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], lltype.Void, + threadsafe=threadsafe, compilation_info=backend.eci) c_call_b = rffi.llexternal( "cppyy_call_b", [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.INT, + threadsafe=threadsafe, compilation_info=backend.eci) c_call_c = rffi.llexternal( "cppyy_call_c", [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.CHAR, + threadsafe=threadsafe, compilation_info=backend.eci) c_call_h = rffi.llexternal( "cppyy_call_h", [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.SHORT, + threadsafe=threadsafe, compilation_info=backend.eci) c_call_i = rffi.llexternal( "cppyy_call_i", [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.INT, + threadsafe=threadsafe, compilation_info=backend.eci) c_call_l = rffi.llexternal( "cppyy_call_l", [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.LONG, + threadsafe=threadsafe, compilation_info=backend.eci) c_call_f = rffi.llexternal( "cppyy_call_f", [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.DOUBLE, + threadsafe=threadsafe, compilation_info=backend.eci) c_call_d = rffi.llexternal( "cppyy_call_d", [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.DOUBLE, + threadsafe=threadsafe, compilation_info=backend.eci) c_call_r = rffi.llexternal( "cppyy_call_r", [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.VOIDP, + threadsafe=threadsafe, compilation_info=backend.eci) c_call_s = rffi.llexternal( "cppyy_call_s", [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.CCHARP, + threadsafe=threadsafe, compilation_info=backend.eci) c_constructor = rffi.llexternal( "cppyy_constructor", [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], lltype.Void, + threadsafe=threadsafe, compilation_info=backend.eci) c_call_o = rffi.llexternal( "cppyy_call_o", [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP, C_TYPE], rffi.LONG, + threadsafe=threadsafe, compilation_info=backend.eci) c_get_methptr_getter = rffi.llexternal( "cppyy_get_methptr_getter", [C_SCOPE, rffi.INT], C_METHPTRGETTER_PTR, + threadsafe=threadsafe, compilation_info=backend.eci, 
elidable_function=True) @@ -129,19 +150,23 @@ c_allocate_function_args = rffi.llexternal( "cppyy_allocate_function_args", [rffi.SIZE_T], rffi.VOIDP, + threadsafe=threadsafe, compilation_info=backend.eci) c_deallocate_function_args = rffi.llexternal( "cppyy_deallocate_function_args", [rffi.VOIDP], lltype.Void, + threadsafe=threadsafe, compilation_info=backend.eci) c_function_arg_sizeof = rffi.llexternal( "cppyy_function_arg_sizeof", [], rffi.SIZE_T, + threadsafe=threadsafe, compilation_info=backend.eci, elidable_function=True) c_function_arg_typeoffset = rffi.llexternal( "cppyy_function_arg_typeoffset", [], rffi.SIZE_T, + threadsafe=threadsafe, compilation_info=backend.eci, elidable_function=True) @@ -149,36 +174,43 @@ c_is_namespace = rffi.llexternal( "cppyy_is_namespace", [C_SCOPE], rffi.INT, + threadsafe=threadsafe, compilation_info=backend.eci) c_is_enum = rffi.llexternal( "cppyy_is_enum", [rffi.CCHARP], rffi.INT, + threadsafe=threadsafe, compilation_info=backend.eci) # type/class reflection information ------------------------------------------ _c_final_name = rffi.llexternal( "cppyy_final_name", [C_TYPE], rffi.CCHARP, + threadsafe=threadsafe, compilation_info=backend.eci) def c_final_name(cpptype): return charp2str_free(_c_final_name(cpptype)) _c_scoped_final_name = rffi.llexternal( "cppyy_scoped_final_name", [C_TYPE], rffi.CCHARP, + threadsafe=threadsafe, compilation_info=backend.eci) def c_scoped_final_name(cpptype): return charp2str_free(_c_scoped_final_name(cpptype)) c_has_complex_hierarchy = rffi.llexternal( "cppyy_has_complex_hierarchy", [C_TYPE], rffi.INT, + threadsafe=threadsafe, compilation_info=backend.eci) c_num_bases = rffi.llexternal( "cppyy_num_bases", [C_TYPE], rffi.INT, + threadsafe=threadsafe, compilation_info=backend.eci) _c_base_name = rffi.llexternal( "cppyy_base_name", [C_TYPE, rffi.INT], rffi.CCHARP, + threadsafe=threadsafe, compilation_info=backend.eci) def c_base_name(cpptype, base_index): return charp2str_free(_c_base_name(cpptype, 
base_index)) @@ -186,6 +218,7 @@ _c_is_subtype = rffi.llexternal( "cppyy_is_subtype", [C_TYPE, C_TYPE], rffi.INT, + threadsafe=threadsafe, compilation_info=backend.eci, elidable_function=True) @@ -198,6 +231,7 @@ _c_base_offset = rffi.llexternal( "cppyy_base_offset", [C_TYPE, C_TYPE, C_OBJECT], rffi.SIZE_T, + threadsafe=threadsafe, compilation_info=backend.eci, elidable_function=True) @@ -211,36 +245,43 @@ c_num_methods = rffi.llexternal( "cppyy_num_methods", [C_SCOPE], rffi.INT, + threadsafe=threadsafe, compilation_info=backend.eci) _c_method_name = rffi.llexternal( "cppyy_method_name", [C_SCOPE, rffi.INT], rffi.CCHARP, + threadsafe=threadsafe, compilation_info=backend.eci) def c_method_name(cppscope, method_index): return charp2str_free(_c_method_name(cppscope, method_index)) _c_method_result_type = rffi.llexternal( "cppyy_method_result_type", [C_SCOPE, rffi.INT], rffi.CCHARP, + threadsafe=threadsafe, compilation_info=backend.eci) def c_method_result_type(cppscope, method_index): return charp2str_free(_c_method_result_type(cppscope, method_index)) c_method_num_args = rffi.llexternal( "cppyy_method_num_args", [C_SCOPE, rffi.INT], rffi.INT, + threadsafe=threadsafe, compilation_info=backend.eci) c_method_req_args = rffi.llexternal( "cppyy_method_req_args", [C_SCOPE, rffi.INT], rffi.INT, + threadsafe=threadsafe, compilation_info=backend.eci) _c_method_arg_type = rffi.llexternal( "cppyy_method_arg_type", [C_SCOPE, rffi.INT, rffi.INT], rffi.CCHARP, + threadsafe=threadsafe, compilation_info=backend.eci) def c_method_arg_type(cppscope, method_index, arg_index): return charp2str_free(_c_method_arg_type(cppscope, method_index, arg_index)) _c_method_arg_default = rffi.llexternal( "cppyy_method_arg_default", [C_SCOPE, rffi.INT, rffi.INT], rffi.CCHARP, + threadsafe=threadsafe, compilation_info=backend.eci) def c_method_arg_default(cppscope, method_index, arg_index): return charp2str_free(_c_method_arg_default(cppscope, method_index, arg_index)) @@ -248,62 +289,74 @@ 
c_get_method = rffi.llexternal( "cppyy_get_method", [C_SCOPE, rffi.INT], C_METHOD, + threadsafe=threadsafe, compilation_info=backend.eci) # method properties ---------------------------------------------------------- c_is_constructor = rffi.llexternal( "cppyy_is_constructor", [C_TYPE, rffi.INT], rffi.INT, + threadsafe=threadsafe, compilation_info=backend.eci) c_is_staticmethod = rffi.llexternal( "cppyy_is_staticmethod", [C_TYPE, rffi.INT], rffi.INT, + threadsafe=threadsafe, compilation_info=backend.eci) # data member reflection information ----------------------------------------- c_num_data_members = rffi.llexternal( "cppyy_num_data_members", [C_SCOPE], rffi.INT, + threadsafe=threadsafe, compilation_info=backend.eci) _c_data_member_name = rffi.llexternal( "cppyy_data_member_name", [C_SCOPE, rffi.INT], rffi.CCHARP, + threadsafe=threadsafe, compilation_info=backend.eci) def c_data_member_name(cppscope, data_member_index): return charp2str_free(_c_data_member_name(cppscope, data_member_index)) _c_data_member_type = rffi.llexternal( "cppyy_data_member_type", [C_SCOPE, rffi.INT], rffi.CCHARP, + threadsafe=threadsafe, compilation_info=backend.eci) def c_data_member_type(cppscope, data_member_index): return charp2str_free(_c_data_member_type(cppscope, data_member_index)) c_data_member_offset = rffi.llexternal( "cppyy_data_member_offset", [C_SCOPE, rffi.INT], rffi.SIZE_T, + threadsafe=threadsafe, compilation_info=backend.eci) # data member properties ----------------------------------------------------- c_is_publicdata = rffi.llexternal( "cppyy_is_publicdata", [C_SCOPE, rffi.INT], rffi.INT, + threadsafe=threadsafe, compilation_info=backend.eci) c_is_staticdata = rffi.llexternal( "cppyy_is_staticdata", [C_SCOPE, rffi.INT], rffi.INT, + threadsafe=threadsafe, compilation_info=backend.eci) # misc helpers --------------------------------------------------------------- c_strtoll = rffi.llexternal( "cppyy_strtoll", [rffi.CCHARP], rffi.LONGLONG, + threadsafe=threadsafe, 
compilation_info=backend.eci) c_strtoull = rffi.llexternal( "cppyy_strtoull", [rffi.CCHARP], rffi.ULONGLONG, + threadsafe=threadsafe, compilation_info=backend.eci) c_free = rffi.llexternal( "cppyy_free", [rffi.VOIDP], lltype.Void, + threadsafe=threadsafe, compilation_info=backend.eci) def charp2str_free(charp): @@ -315,12 +368,15 @@ c_charp2stdstring = rffi.llexternal( "cppyy_charp2stdstring", [rffi.CCHARP], C_OBJECT, + threadsafe=threadsafe, compilation_info=backend.eci) c_stdstring2stdstring = rffi.llexternal( "cppyy_stdstring2stdstring", [C_OBJECT], C_OBJECT, + threadsafe=threadsafe, compilation_info=backend.eci) c_free_stdstring = rffi.llexternal( "cppyy_free_stdstring", [C_OBJECT], lltype.Void, + threadsafe=threadsafe, compilation_info=backend.eci) diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py --- a/pypy/module/cppyy/capi/cint_capi.py +++ b/pypy/module/cppyy/capi/cint_capi.py @@ -20,6 +20,8 @@ def identify(): return 'CINT' +threadsafe = False + # force loading in global mode of core libraries, rather than linking with # them as PyPy uses various version of dlopen in various places; note that # this isn't going to fly on Windows (note that locking them in objects and diff --git a/pypy/module/cppyy/capi/reflex_capi.py b/pypy/module/cppyy/capi/reflex_capi.py --- a/pypy/module/cppyy/capi/reflex_capi.py +++ b/pypy/module/cppyy/capi/reflex_capi.py @@ -19,6 +19,8 @@ def identify(): return 'Reflex' +threadsafe='auto' + eci = ExternalCompilationInfo( separate_module_files=[srcpath.join("reflexcwrapper.cxx")], include_dirs=[incpath] + rootincpath, From noreply at buildbot.pypy.org Tue Mar 27 05:09:04 2012 From: noreply at buildbot.pypy.org (ctismer) Date: Tue, 27 Mar 2012 05:09:04 +0200 (CEST) Subject: [pypy-commit] pypy win64-stage1: merge default Message-ID: <20120327030904.815A4820D9@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r54019:3ba2af39c3e3 Date: 2012-03-25 00:49 +0100 
http://bitbucket.org/pypy/pypy/changeset/3ba2af39c3e3/ Log: merge default diff --git a/pypy/rlib/rmmap.py b/pypy/rlib/rmmap.py --- a/pypy/rlib/rmmap.py +++ b/pypy/rlib/rmmap.py @@ -711,9 +711,9 @@ free = c_munmap_safe elif _MS_WINDOWS: - def mmap(fileno, length, flags=0, tagname="", access=_ACCESS_DEFAULT, offset=0): + def mmap(fileno, length, tagname="", access=_ACCESS_DEFAULT, offset=0): # XXX flags is or-ed into access by now. - + flags = 0 # check size boundaries _check_map_size(length) map_size = length diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -484,7 +484,9 @@ 'shl', 'shr', 'sal', 'sar', 'rol', 'ror', 'mul', 'imul', 'div', 'idiv', 'bswap', 'bt', 'rdtsc', 'punpck', 'pshufd', 'pcmp', 'pand', 'psllw', 'pslld', 'psllq', - 'paddq', 'pinsr', 'pmul', 'psrl', 'vmul', + 'paddq', 'pinsr', 'pmul', 'psrl', + # all vectors don't produce pointers + 'v', # sign-extending moves should not produce GC pointers 'cbtw', 'cwtl', 'cwtd', 'cltd', 'cltq', 'cqto', # zero-extending moves should not produce GC pointers From noreply at buildbot.pypy.org Tue Mar 27 05:09:05 2012 From: noreply at buildbot.pypy.org (ctismer) Date: Tue, 27 Mar 2012 05:09:05 +0200 (CEST) Subject: [pypy-commit] pypy default: re-enabled win32 tests to make amaury happy. win64 still disabled Message-ID: <20120327030905.C18C0820D9@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: Changeset: r54020:a1cf137dd2bd Date: 2012-03-27 04:28 +0200 http://bitbucket.org/pypy/pypy/changeset/a1cf137dd2bd/ Log: re-enabled win32 tests to make amaury happy. 
win64 still disabled diff --git a/pypy/translator/c/gcc/test/test_asmgcroot.py b/pypy/translator/c/gcc/test/test_asmgcroot.py --- a/pypy/translator/c/gcc/test/test_asmgcroot.py +++ b/pypy/translator/c/gcc/test/test_asmgcroot.py @@ -7,10 +7,17 @@ from pypy import conftest from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.translator.platform import platform as compiler +from pypy.rlib.rarithmetic import is_emulated_long from pypy.rpython.lltypesystem import lltype, rffi from pypy.rlib.entrypoint import entrypoint, secondary_entrypoints from pypy.rpython.lltypesystem.lloperation import llop +_MSVC = compiler.name == "msvc" +_MINGW = compiler.name == "mingw32" +_WIN32 = _MSVC or _MINGW +_WIN64 = _WIN32 and is_emulated_long +# XXX get rid of 'is_emulated_long' and have a real config here. + class AbstractTestAsmGCRoot: # the asmgcroot gc transformer doesn't generate gc_reload_possibly_moved # instructions: @@ -18,8 +25,8 @@ @classmethod def make_config(cls): - if compiler.name == "msvc": - py.test.skip("all asmgcroot tests disabled for MSVC") + if _MSVC and _WIN64: + py.test.skip("all asmgcroot tests disabled for MSVC X64") from pypy.config.pypyoption import get_pypy_config config = get_pypy_config(translating=True) config.translation.gc = cls.gcpolicy From noreply at buildbot.pypy.org Tue Mar 27 05:09:09 2012 From: noreply at buildbot.pypy.org (ctismer) Date: Tue, 27 Mar 2012 05:09:09 +0200 (CEST) Subject: [pypy-commit] pypy win64-stage1: Merge with default Message-ID: <20120327030909.2015D820D9@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r54021:ceb003ff6d6f Date: 2012-03-27 03:33 +0100 http://bitbucket.org/pypy/pypy/changeset/ceb003ff6d6f/ Log: Merge with default diff --git a/lib-python/modified-2.7/test/test_set.py b/lib-python/modified-2.7/test/test_set.py --- a/lib-python/modified-2.7/test/test_set.py +++ b/lib-python/modified-2.7/test/test_set.py @@ -1568,7 +1568,7 @@ for meth in (s.union, 
s.intersection, s.difference, s.symmetric_difference, s.isdisjoint): for g in (G, I, Ig, L, R): expected = meth(data) - actual = meth(G(data)) + actual = meth(g(data)) if isinstance(expected, bool): self.assertEqual(actual, expected) else: diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py --- a/pypy/interpreter/astcompiler/test/test_astbuilder.py +++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py @@ -10,16 +10,6 @@ from pypy.interpreter.astcompiler import ast, consts -try: - all -except NameError: - def all(iterable): - for x in iterable: - if not x: - return False - return True - - class TestAstBuilder: def setup_class(cls): diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -16,7 +16,7 @@ a[i][i] = 1 return a -def sum(a,axis=None): +def sum(a,axis=None, out=None): '''sum(a, axis=None) Sum of array elements over a given axis. @@ -43,17 +43,17 @@ # TODO: add to doc (once it's implemented): cumsum : Cumulative sum of array elements. 
if not hasattr(a, "sum"): a = _numpypy.array(a) - return a.sum(axis) + return a.sum(axis=axis, out=out) -def min(a, axis=None): +def min(a, axis=None, out=None): if not hasattr(a, "min"): a = _numpypy.array(a) - return a.min(axis) + return a.min(axis=axis, out=out) -def max(a, axis=None): +def max(a, axis=None, out=None): if not hasattr(a, "max"): a = _numpypy.array(a) - return a.max(axis) + return a.max(axis=axis, out=out) def arange(start, stop=None, step=1, dtype=None): '''arange([start], stop[, step], dtype=None) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -62,21 +62,24 @@ return space.wrap(dtype.itemtype.bool(self)) def _binop_impl(ufunc_name): - def impl(self, space, w_other): + def impl(self, space, w_other, w_out=None): from pypy.module.micronumpy import interp_ufuncs - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self, w_other]) + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [self, w_other, w_out]) return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) def _binop_right_impl(ufunc_name): - def impl(self, space, w_other): + def impl(self, space, w_other, w_out=None): from pypy.module.micronumpy import interp_ufuncs - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self]) + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [w_other, self, w_out]) return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name) def _unaryop_impl(ufunc_name): - def impl(self, space): + def impl(self, space, w_out=None): from pypy.module.micronumpy import interp_ufuncs - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self]) + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [self, w_out]) return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) descr_add = _binop_impl("add") diff --git 
a/pypy/module/micronumpy/interp_iter.py b/pypy/module/micronumpy/interp_iter.py --- a/pypy/module/micronumpy/interp_iter.py +++ b/pypy/module/micronumpy/interp_iter.py @@ -269,7 +269,7 @@ def apply_transformations(self, arr, transformations): v = BaseIterator.apply_transformations(self, arr, transformations) - if len(arr.shape) == 1: + if len(arr.shape) == 1 and len(v.res_shape) == 1: return OneDimIterator(self.offset, self.strides[0], self.res_shape[0]) return v diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -83,8 +83,9 @@ return space.wrap(W_NDimArray(shape[:], dtype=dtype)) def _unaryop_impl(ufunc_name): - def impl(self, space): - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self]) + def impl(self, space, w_out=None): + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [self, w_out]) return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) descr_pos = _unaryop_impl("positive") @@ -93,8 +94,9 @@ descr_invert = _unaryop_impl("invert") def _binop_impl(ufunc_name): - def impl(self, space, w_other): - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self, w_other]) + def impl(self, space, w_other, w_out=None): + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [self, w_other, w_out]) return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) descr_add = _binop_impl("add") @@ -124,12 +126,12 @@ return space.newtuple([w_quotient, w_remainder]) def _binop_right_impl(ufunc_name): - def impl(self, space, w_other): + def impl(self, space, w_other, w_out=None): w_other = scalar_w(space, interp_ufuncs.find_dtype_for_scalar(space, w_other, self.find_dtype()), w_other ) - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self]) + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self, w_out]) 
return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name) descr_radd = _binop_right_impl("add") @@ -152,13 +154,21 @@ return space.newtuple([w_quotient, w_remainder]) def _reduce_ufunc_impl(ufunc_name, promote_to_largest=False): - def impl(self, space, w_axis=None): + def impl(self, space, w_axis=None, w_out=None): if space.is_w(w_axis, space.w_None): axis = -1 else: axis = space.int_w(w_axis) + if space.is_w(w_out, space.w_None) or not w_out: + out = None + elif not isinstance(w_out, BaseArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) + else: + out = w_out return getattr(interp_ufuncs.get(space), ufunc_name).reduce(space, - self, True, promote_to_largest, axis) + self, True, promote_to_largest, axis, + False, out) return func_with_new_name(impl, "reduce_%s_impl" % ufunc_name) descr_sum = _reduce_ufunc_impl("add") @@ -213,6 +223,7 @@ def descr_dot(self, space, w_other): other = convert_to_array(space, w_other) if isinstance(other, Scalar): + #Note: w_out is not modified, this is numpy compliant. 
return self.descr_mul(space, other) elif len(self.shape) < 2 and len(other.shape) < 2: w_res = self.descr_mul(space, other) @@ -514,14 +525,14 @@ ) return w_result - def descr_mean(self, space, w_axis=None): + def descr_mean(self, space, w_axis=None, w_out=None): if space.is_w(w_axis, space.w_None): w_axis = space.wrap(-1) w_denom = space.wrap(support.product(self.shape)) else: dim = space.int_w(w_axis) w_denom = space.wrap(self.shape[dim]) - return space.div(self.descr_sum_promote(space, w_axis), w_denom) + return space.div(self.descr_sum_promote(space, w_axis, w_out), w_denom) def descr_var(self, space, w_axis=None): return get_appbridge_cache(space).call_method(space, '_var', self, @@ -714,11 +725,12 @@ """ Class for representing virtual arrays, such as binary ops or ufuncs """ - def __init__(self, name, shape, res_dtype): + def __init__(self, name, shape, res_dtype, out_arg=None): BaseArray.__init__(self, shape) self.forced_result = None self.res_dtype = res_dtype self.name = name + self.res = out_arg self.size = support.product(self.shape) * res_dtype.get_size() def _del_sources(self): @@ -727,13 +739,18 @@ raise NotImplementedError def compute(self): - ra = ResultArray(self, self.shape, self.res_dtype) + ra = ResultArray(self, self.shape, self.res_dtype, self.res) loop.compute(ra) + if self.res: + broadcast_dims = len(self.res.shape) - len(self.shape) + chunks = [Chunk(0,0,0,0)] * broadcast_dims + \ + [Chunk(0, i, 1, i) for i in self.shape] + return Chunks(chunks).apply(self.res) return ra.left def force_if_needed(self): if self.forced_result is None: - self.forced_result = self.compute() + self.forced_result = self.compute().get_concrete() self._del_sources() def get_concrete(self): @@ -773,8 +790,9 @@ class Call1(VirtualArray): - def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, values): - VirtualArray.__init__(self, name, shape, res_dtype) + def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, values, + out_arg=None): + 
VirtualArray.__init__(self, name, shape, res_dtype, out_arg) self.values = values self.size = values.size self.ufunc = ufunc @@ -786,6 +804,12 @@ def create_sig(self): if self.forced_result is not None: return self.forced_result.create_sig() + if self.shape != self.values.shape: + #This happens if out arg is used + return signature.BroadcastUfunc(self.ufunc, self.name, + self.calc_dtype, + self.values.create_sig(), + self.res.create_sig()) return signature.Call1(self.ufunc, self.name, self.calc_dtype, self.values.create_sig()) @@ -793,8 +817,9 @@ """ Intermediate class for performing binary operations. """ - def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, left, right): - VirtualArray.__init__(self, name, shape, res_dtype) + def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, left, right, + out_arg=None): + VirtualArray.__init__(self, name, shape, res_dtype, out_arg) self.ufunc = ufunc self.left = left self.right = right @@ -832,8 +857,13 @@ Call2.__init__(self, None, 'assign', shape, dtype, dtype, res, child) def create_sig(self): - return signature.ResultSignature(self.res_dtype, self.left.create_sig(), - self.right.create_sig()) + if self.left.shape != self.right.shape: + sig = signature.BroadcastResultSignature(self.res_dtype, + self.left.create_sig(), self.right.create_sig()) + else: + sig = signature.ResultSignature(self.res_dtype, + self.left.create_sig(), self.right.create_sig()) + return sig class ToStringArray(Call1): def __init__(self, child): @@ -842,9 +872,9 @@ self.s = StringBuilder(child.size * self.item_size) Call1.__init__(self, None, 'tostring', child.shape, dtype, dtype, child) - self.res = W_NDimArray([1], dtype, 'C') - self.res_casted = rffi.cast(rffi.CArrayPtr(lltype.Char), - self.res.storage) + self.res_str = W_NDimArray([1], dtype, order='C') + self.res_str_casted = rffi.cast(rffi.CArrayPtr(lltype.Char), + self.res_str.storage) def create_sig(self): return signature.ToStringSignature(self.calc_dtype, @@ -950,7 +980,7 
@@ def setitem(self, item, value): self.invalidated() - self.dtype.setitem(self, item, value) + self.dtype.setitem(self, item, value.convert_to(self.dtype)) def calc_strides(self, shape): dtype = self.find_dtype() diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -28,26 +28,38 @@ return self.identity def descr_call(self, space, __args__): + from interp_numarray import BaseArray args_w, kwds_w = __args__.unpack() # it occurs to me that we don't support any datatypes that # require casting, change it later when we do kwds_w.pop('casting', None) w_subok = kwds_w.pop('subok', None) w_out = kwds_w.pop('out', space.w_None) - if ((w_subok is not None and space.is_true(w_subok)) or - not space.is_w(w_out, space.w_None)): + # Setup a default value for out + if space.is_w(w_out, space.w_None): + out = None + else: + out = w_out + if (w_subok is not None and space.is_true(w_subok)): raise OperationError(space.w_NotImplementedError, space.wrap("parameters unsupported")) if kwds_w or len(args_w) < self.argcount: raise OperationError(space.w_ValueError, space.wrap("invalid number of arguments") ) - elif len(args_w) > self.argcount: - # The extra arguments should actually be the output array, but we - # don't support that yet. 
+ elif (len(args_w) > self.argcount and out is not None) or \ + (len(args_w) > self.argcount + 1): raise OperationError(space.w_TypeError, space.wrap("invalid number of arguments") ) + # Override the default out value, if it has been provided in w_wargs + if len(args_w) > self.argcount: + out = args_w[-1] + else: + args_w = args_w[:] + [out] + if out is not None and not isinstance(out, BaseArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) return self.call(space, args_w) @unwrap_spec(skipna=bool, keepdims=bool) @@ -105,28 +117,33 @@ array([[ 1, 5], [ 9, 13]]) """ - if not space.is_w(w_out, space.w_None): - raise OperationError(space.w_NotImplementedError, space.wrap( - "out not supported")) + from pypy.module.micronumpy.interp_numarray import BaseArray if w_axis is None: axis = 0 elif space.is_w(w_axis, space.w_None): axis = -1 else: axis = space.int_w(w_axis) - return self.reduce(space, w_obj, False, False, axis, keepdims) + if space.is_w(w_out, space.w_None): + out = None + elif not isinstance(w_out, BaseArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) + else: + out = w_out + return self.reduce(space, w_obj, False, False, axis, keepdims, out) - def reduce(self, space, w_obj, multidim, promote_to_largest, dim, - keepdims=False): + def reduce(self, space, w_obj, multidim, promote_to_largest, axis, + keepdims=False, out=None): from pypy.module.micronumpy.interp_numarray import convert_to_array, \ - Scalar, ReduceArray + Scalar, ReduceArray, W_NDimArray if self.argcount != 2: raise OperationError(space.w_ValueError, space.wrap("reduce only " "supported for binary functions")) assert isinstance(self, W_Ufunc2) obj = convert_to_array(space, w_obj) - if dim >= len(obj.shape): - raise OperationError(space.w_ValueError, space.wrap("axis(=%d) out of bounds" % dim)) + if axis >= len(obj.shape): + raise OperationError(space.w_ValueError, space.wrap("axis(=%d) out of bounds" % axis)) if 
isinstance(obj, Scalar): raise OperationError(space.w_TypeError, space.wrap("cannot reduce " "on a scalar")) @@ -144,21 +161,55 @@ if self.identity is None and size == 0: raise operationerrfmt(space.w_ValueError, "zero-size array to " "%s.reduce without identity", self.name) - if shapelen > 1 and dim >= 0: - return self.do_axis_reduce(obj, dtype, dim, keepdims) - arr = ReduceArray(self.func, self.name, self.identity, obj, dtype) - return loop.compute(arr) + if shapelen > 1 and axis >= 0: + if keepdims: + shape = obj.shape[:axis] + [1] + obj.shape[axis + 1:] + else: + shape = obj.shape[:axis] + obj.shape[axis + 1:] + if out: + #Test for shape agreement + if len(out.shape) > len(shape): + raise operationerrfmt(space.w_ValueError, + 'output parameter for reduction operation %s' + + ' has too many dimensions', self.name) + elif len(out.shape) < len(shape): + raise operationerrfmt(space.w_ValueError, + 'output parameter for reduction operation %s' + + ' does not have enough dimensions', self.name) + elif out.shape != shape: + raise operationerrfmt(space.w_ValueError, + 'output parameter shape mismatch, expecting [%s]' + + ' , got [%s]', + ",".join([str(x) for x in shape]), + ",".join([str(x) for x in out.shape]), + ) + #Test for dtype agreement, perhaps create an itermediate + #if out.dtype != dtype: + # raise OperationError(space.w_TypeError, space.wrap( + # "mismatched dtypes")) + return self.do_axis_reduce(obj, out.find_dtype(), axis, out) + else: + result = W_NDimArray(shape, dtype) + return self.do_axis_reduce(obj, dtype, axis, result) + if out: + if len(out.shape)>0: + raise operationerrfmt(space.w_ValueError, "output parameter " + "for reduction operation %s has too many" + " dimensions",self.name) + arr = ReduceArray(self.func, self.name, self.identity, obj, + out.find_dtype()) + val = loop.compute(arr) + assert isinstance(out, Scalar) + out.value = val + else: + arr = ReduceArray(self.func, self.name, self.identity, obj, dtype) + val = loop.compute(arr) + 
return val - def do_axis_reduce(self, obj, dtype, dim, keepdims): - from pypy.module.micronumpy.interp_numarray import AxisReduce,\ - W_NDimArray - if keepdims: - shape = obj.shape[:dim] + [1] + obj.shape[dim + 1:] - else: - shape = obj.shape[:dim] + obj.shape[dim + 1:] - result = W_NDimArray(shape, dtype) + def do_axis_reduce(self, obj, dtype, axis, result): + from pypy.module.micronumpy.interp_numarray import AxisReduce arr = AxisReduce(self.func, self.name, self.identity, obj.shape, dtype, - result, obj, dim) + result, obj, axis) loop.compute(arr) return arr.left @@ -176,24 +227,55 @@ self.bool_result = bool_result def call(self, space, args_w): - from pypy.module.micronumpy.interp_numarray import (Call1, - convert_to_array, Scalar) - - [w_obj] = args_w + from pypy.module.micronumpy.interp_numarray import (Call1, BaseArray, + convert_to_array, Scalar, shape_agreement) + if len(args_w)<2: + [w_obj] = args_w + out = None + else: + [w_obj, out] = args_w + if space.is_w(out, space.w_None): + out = None w_obj = convert_to_array(space, w_obj) calc_dtype = find_unaryop_result_dtype(space, w_obj.find_dtype(), promote_to_float=self.promote_to_float, promote_bools=self.promote_bools) - if self.bool_result: + if out: + if not isinstance(out, BaseArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) + res_dtype = out.find_dtype() + elif self.bool_result: res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype else: res_dtype = calc_dtype if isinstance(w_obj, Scalar): - return space.wrap(self.func(calc_dtype, w_obj.value.convert_to(calc_dtype))) - - w_res = Call1(self.func, self.name, w_obj.shape, calc_dtype, res_dtype, - w_obj) + arr = self.func(calc_dtype, w_obj.value.convert_to(calc_dtype)) + if isinstance(out,Scalar): + out.value=arr + elif isinstance(out, BaseArray): + out.fill(space, arr) + else: + out = arr + return space.wrap(out) + if out: + assert isinstance(out, BaseArray) # For translation + broadcast_shape = 
shape_agreement(space, w_obj.shape, out.shape) + if not broadcast_shape or broadcast_shape != out.shape: + raise operationerrfmt(space.w_ValueError, + 'output parameter shape mismatch, could not broadcast [%s]' + + ' to [%s]', + ",".join([str(x) for x in w_obj.shape]), + ",".join([str(x) for x in out.shape]), + ) + w_res = Call1(self.func, self.name, out.shape, calc_dtype, + res_dtype, w_obj, out) + #Force it immediately + w_res.get_concrete() + else: + w_res = Call1(self.func, self.name, w_obj.shape, calc_dtype, + res_dtype, w_obj) w_obj.add_invalidates(w_res) return w_res @@ -212,32 +294,61 @@ def call(self, space, args_w): from pypy.module.micronumpy.interp_numarray import (Call2, - convert_to_array, Scalar, shape_agreement) - - [w_lhs, w_rhs] = args_w + convert_to_array, Scalar, shape_agreement, BaseArray) + if len(args_w)>2: + [w_lhs, w_rhs, w_out] = args_w + else: + [w_lhs, w_rhs] = args_w + w_out = None w_lhs = convert_to_array(space, w_lhs) w_rhs = convert_to_array(space, w_rhs) - calc_dtype = find_binop_result_dtype(space, - w_lhs.find_dtype(), w_rhs.find_dtype(), - int_only=self.int_only, - promote_to_float=self.promote_to_float, - promote_bools=self.promote_bools, - ) + if space.is_w(w_out, space.w_None) or w_out is None: + out = None + calc_dtype = find_binop_result_dtype(space, + w_lhs.find_dtype(), w_rhs.find_dtype(), + int_only=self.int_only, + promote_to_float=self.promote_to_float, + promote_bools=self.promote_bools, + ) + elif not isinstance(w_out, BaseArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) + else: + out = w_out + calc_dtype = out.find_dtype() if self.comparison_func: res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype else: res_dtype = calc_dtype if isinstance(w_lhs, Scalar) and isinstance(w_rhs, Scalar): - return space.wrap(self.func(calc_dtype, + arr = self.func(calc_dtype, w_lhs.value.convert_to(calc_dtype), w_rhs.value.convert_to(calc_dtype) - )) + ) + if isinstance(out,Scalar): + 
out.value=arr + elif isinstance(out, BaseArray): + out.fill(space, arr) + else: + out = arr + return space.wrap(out) new_shape = shape_agreement(space, w_lhs.shape, w_rhs.shape) + # Test correctness of out.shape + if out and out.shape != shape_agreement(space, new_shape, out.shape): + raise operationerrfmt(space.w_ValueError, + 'output parameter shape mismatch, could not broadcast [%s]' + + ' to [%s]', + ",".join([str(x) for x in new_shape]), + ",".join([str(x) for x in out.shape]), + ) w_res = Call2(self.func, self.name, new_shape, calc_dtype, - res_dtype, w_lhs, w_rhs) + res_dtype, w_lhs, w_rhs, out) w_lhs.add_invalidates(w_res) w_rhs.add_invalidates(w_res) + if out: + #out.add_invalidates(w_res) #causes a recursion loop + w_res.get_concrete() return w_res diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -216,13 +216,14 @@ return self.child.eval(frame, arr.child) class Call1(Signature): - _immutable_fields_ = ['unfunc', 'name', 'child', 'dtype'] + _immutable_fields_ = ['unfunc', 'name', 'child', 'res', 'dtype'] - def __init__(self, func, name, dtype, child): + def __init__(self, func, name, dtype, child, res=None): self.unfunc = func self.child = child self.name = name self.dtype = dtype + self.res = res def hash(self): return compute_hash(self.name) ^ intmask(self.child.hash() << 1) @@ -256,6 +257,29 @@ v = self.child.eval(frame, arr.values).convert_to(arr.calc_dtype) return self.unfunc(arr.calc_dtype, v) + +class BroadcastUfunc(Call1): + def _invent_numbering(self, cache, allnumbers): + self.res._invent_numbering(cache, allnumbers) + self.child._invent_numbering(new_cache(), allnumbers) + + def debug_repr(self): + return 'BroadcastUfunc(%s, %s)' % (self.name, self.child.debug_repr()) + + def _create_iter(self, iterlist, arraylist, arr, transforms): + from pypy.module.micronumpy.interp_numarray import Call1 + + assert isinstance(arr, 
Call1) + vtransforms = transforms + [BroadcastTransform(arr.values.shape)] + self.child._create_iter(iterlist, arraylist, arr.values, vtransforms) + self.res._create_iter(iterlist, arraylist, arr.res, transforms) + + def eval(self, frame, arr): + from pypy.module.micronumpy.interp_numarray import Call1 + assert isinstance(arr, Call1) + v = self.child.eval(frame, arr.values).convert_to(arr.calc_dtype) + return self.unfunc(arr.calc_dtype, v) + class Call2(Signature): _immutable_fields_ = ['binfunc', 'name', 'calc_dtype', 'left', 'right'] @@ -316,7 +340,17 @@ assert isinstance(arr, ResultArray) offset = frame.get_final_iter().offset - arr.left.setitem(offset, self.right.eval(frame, arr.right)) + val = self.right.eval(frame, arr.right) + arr.left.setitem(offset, val) + +class BroadcastResultSignature(ResultSignature): + def _create_iter(self, iterlist, arraylist, arr, transforms): + from pypy.module.micronumpy.interp_numarray import ResultArray + + assert isinstance(arr, ResultArray) + rtransforms = transforms + [BroadcastTransform(arr.left.shape)] + self.left._create_iter(iterlist, arraylist, arr.left, transforms) + self.right._create_iter(iterlist, arraylist, arr.right, rtransforms) class ToStringSignature(Call1): def __init__(self, dtype, child): @@ -327,10 +361,10 @@ from pypy.module.micronumpy.interp_numarray import ToStringArray assert isinstance(arr, ToStringArray) - arr.res.setitem(0, self.child.eval(frame, arr.values).convert_to( + arr.res_str.setitem(0, self.child.eval(frame, arr.values).convert_to( self.dtype)) for i in range(arr.item_size): - arr.s.append(arr.res_casted[i]) + arr.s.append(arr.res_str_casted[i]) class BroadcastLeft(Call2): def _invent_numbering(self, cache, allnumbers): @@ -455,6 +489,5 @@ cur = arr.left.getitem(iterator.offset) value = self.binfunc(self.calc_dtype, cur, v) arr.left.setitem(iterator.offset, value) - def debug_repr(self): return 'AxisReduceSig(%s, %s)' % (self.name, self.right.debug_repr()) diff --git 
a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -995,6 +995,10 @@ assert a.sum() == 5 raises(TypeError, 'a.sum(2, 3)') + d = array(0.) + b = a.sum(out=d) + assert b == d + assert isinstance(b, float) def test_reduce_nd(self): from numpypy import arange, array, multiply @@ -1495,8 +1499,6 @@ a = array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14]]) b = a[::2] - print a - print b assert (b == [[1, 2], [5, 6], [9, 10], [13, 14]]).all() c = b + b assert c[1][1] == 12 diff --git a/pypy/module/micronumpy/test/test_outarg.py b/pypy/module/micronumpy/test/test_outarg.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/test/test_outarg.py @@ -0,0 +1,126 @@ +import py +from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest + +class AppTestOutArg(BaseNumpyAppTest): + def test_reduce_out(self): + from numpypy import arange, zeros, array + a = arange(15).reshape(5, 3) + b = arange(12).reshape(4,3) + c = a.sum(0, out=b[1]) + assert (c == [30, 35, 40]).all() + assert (c == b[1]).all() + raises(ValueError, 'a.prod(0, out=arange(10))') + a=arange(12).reshape(3,2,2) + raises(ValueError, 'a.sum(0, out=arange(12).reshape(3,2,2))') + raises(ValueError, 'a.sum(0, out=arange(3))') + c = array([-1, 0, 1]).sum(out=zeros([], dtype=bool)) + #You could argue that this should product False, but + # that would require an itermediate result. Cpython numpy + # gives True. 
+ assert c == True + a = array([[-1, 0, 1], [1, 0, -1]]) + c = a.sum(0, out=zeros((3,), dtype=bool)) + assert (c == [True, False, True]).all() + c = a.sum(1, out=zeros((2,), dtype=bool)) + assert (c == [True, True]).all() + + def test_reduce_intermediary(self): + from numpypy import arange, array + a = arange(15).reshape(5, 3) + b = array(range(3), dtype=bool) + c = a.prod(0, out=b) + assert(b == [False, True, True]).all() + + def test_ufunc_out(self): + from _numpypy import array, negative, zeros, sin + from math import sin as msin + a = array([[1, 2], [3, 4]]) + c = zeros((2,2,2)) + b = negative(a + a, out=c[1]) + #test for view, and also test that forcing out also forces b + assert (c[:, :, 1] == [[0, 0], [-4, -8]]).all() + assert (b == [[-2, -4], [-6, -8]]).all() + #Test broadcast, type promotion + b = negative(3, out=a) + assert (a == -3).all() + c = zeros((2, 2), dtype=float) + b = negative(3, out=c) + assert b.dtype.kind == c.dtype.kind + assert b.shape == c.shape + a = array([1, 2]) + b = sin(a, out=c) + assert(c == [[msin(1), msin(2)]] * 2).all() + b = sin(a, out=c+c) + assert (c == b).all() + + #Test shape agreement + a = zeros((3,4)) + b = zeros((3,5)) + raises(ValueError, 'negative(a, out=b)') + b = zeros((1,4)) + raises(ValueError, 'negative(a, out=b)') + + def test_binfunc_out(self): + from _numpypy import array, add + a = array([[1, 2], [3, 4]]) + out = array([[1, 2], [3, 4]]) + c = add(a, a, out=out) + assert (c == out).all() + assert c.shape == a.shape + assert c.dtype is a.dtype + c[0,0] = 100 + assert out[0, 0] == 100 + out[:] = 100 + raises(ValueError, 'c = add(a, a, out=out[1])') + c = add(a[0], a[1], out=out[1]) + assert (c == out[1]).all() + assert (c == [4, 6]).all() + assert (out[0] == 100).all() + c = add(a[0], a[1], out=out) + assert (c == out[1]).all() + assert (c == out[0]).all() + out = array(16, dtype=int) + b = add(10, 10, out=out) + assert b==out + assert b.dtype == out.dtype + + def test_applevel(self): + from _numpypy import 
array, sum, max, min + a = array([[1, 2], [3, 4]]) + out = array([[0, 0], [0, 0]]) + c = sum(a, axis=0, out=out[0]) + assert (c == [4, 6]).all() + assert (c == out[0]).all() + assert (c != out[1]).all() + c = max(a, axis=1, out=out[0]) + assert (c == [2, 4]).all() + assert (c == out[0]).all() + assert (c != out[1]).all() + + def test_ufunc_cast(self): + from _numpypy import array, negative, add, sum + a = array(16, dtype = int) + c = array(0, dtype = float) + b = negative(a, out=c) + assert b == c + b = add(a, a, out=c) + assert b == c + d = array([16, 16], dtype=int) + b = sum(d, out=c) + assert b == c + try: + from _numpypy import version + v = version.version.split('.') + except: + v = ['1', '6', '0'] # numpypy is api compatable to what version? + if v[0]<'2': + b = negative(c, out=a) + assert b == a + b = add(c, c, out=a) + assert b == a + b = sum(array([16, 16], dtype=float), out=a) + assert b == a + else: + cast_error = raises(TypeError, negative, c, a) + assert str(cast_error.value) == \ + "Cannot cast ufunc negative output from dtype('float64') to dtype('int64') with casting rule 'same_kind'" diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -131,7 +131,7 @@ # bogus. We need to improve the situation somehow. 
self.check_simple_loop({'getinteriorfield_raw': 2, 'setinteriorfield_raw': 1, - 'arraylen_gc': 1, + 'arraylen_gc': 2, 'guard_true': 1, 'int_lt': 1, 'jump': 1, diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -128,3 +128,82 @@ loop, = log.loops_by_filename(self.filepath) ops = loop.ops_by_id('look') assert 'call' not in log.opnames(ops) + + #XXX the following tests only work with strategies enabled + + def test_should_not_create_intobject_with_sets(self): + def main(n): + i = 0 + s = set() + while i < n: + s.add(i) + i += 1 + log = self.run(main, [1000]) + assert log.result == main(1000) + loop, = log.loops_by_filename(self.filepath) + opnames = log.opnames(loop.allops()) + assert opnames.count('new_with_vtable') == 0 + + def test_should_not_create_stringobject_with_sets(self): + def main(n): + i = 0 + s = set() + while i < n: + s.add(str(i)) + i += 1 + log = self.run(main, [1000]) + assert log.result == main(1000) + loop, = log.loops_by_filename(self.filepath) + opnames = log.opnames(loop.allops()) + assert opnames.count('new_with_vtable') == 0 + + def test_should_not_create_intobject_with_lists(self): + def main(n): + i = 0 + l = [] + while i < n: + l.append(i) + i += 1 + log = self.run(main, [1000]) + assert log.result == main(1000) + loop, = log.loops_by_filename(self.filepath) + opnames = log.opnames(loop.allops()) + assert opnames.count('new_with_vtable') == 0 + + def test_should_not_create_stringobject_with_lists(self): + def main(n): + i = 0 + l = [] + while i < n: + l.append(str(i)) + i += 1 + log = self.run(main, [1000]) + assert log.result == main(1000) + loop, = log.loops_by_filename(self.filepath) + opnames = log.opnames(loop.allops()) + assert opnames.count('new_with_vtable') == 0 + + def test_optimized_create_list_from_string(self): + def main(n): + i = 0 + l = [] + 
while i < n: + l = list("abc" * i) + i += 1 + log = self.run(main, [1000]) + assert log.result == main(1000) + loop, = log.loops_by_filename(self.filepath) + opnames = log.opnames(loop.allops()) + assert opnames.count('new_with_vtable') == 0 + + def test_optimized_create_set_from_list(self): + def main(n): + i = 0 + while i < n: + s = set([1,2,3]) + i += 1 + log = self.run(main, [1000]) + assert log.result == main(1000) + loop, = log.loops_by_filename(self.filepath) + opnames = log.opnames(loop.allops()) + assert opnames.count('new_with_vtable') == 0 diff --git a/pypy/objspace/flow/test/test_objspace.py b/pypy/objspace/flow/test/test_objspace.py --- a/pypy/objspace/flow/test/test_objspace.py +++ b/pypy/objspace/flow/test/test_objspace.py @@ -1,6 +1,6 @@ from __future__ import with_statement import new -import py +import py, sys from pypy.objspace.flow.model import Constant, Block, Link, Variable from pypy.objspace.flow.model import mkentrymap, c_last_exception from pypy.interpreter.argument import Arguments @@ -893,6 +893,8 @@ """ Tests code generated by pypy-c compiled with BUILD_LIST_FROM_ARG bytecode """ + if sys.version_info < (2, 7): + py.test.skip("2.7 only test") self.patch_opcodes('BUILD_LIST_FROM_ARG') try: def f(): diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -111,9 +111,15 @@ length = len(data) start, stop, step, slicelength = w_slice.indices4(space, length) assert slicelength >= 0 - newdata = [data[start + i*step] for i in range(slicelength)] + if step == 1 and 0 <= start <= stop: + newdata = data[start:stop] + else: + newdata = _getitem_slice_multistep(data, start, step, slicelength) return W_BytearrayObject(newdata) +def _getitem_slice_multistep(data, start, step, slicelength): + return [data[start + i*step] for i in range(slicelength)] + def contains__Bytearray_Int(space, w_bytearray, w_char): char = space.int_w(w_char) 
if not 0 <= char < 256: diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -127,10 +127,10 @@ def iter(self, w_dict): return ModuleDictIteratorImplementation(self.space, self, w_dict) - def keys(self, w_dict): + def w_keys(self, w_dict): space = self.space - iterator = self.unerase(w_dict.dstorage).iteritems - return [space.wrap(key) for key, cell in iterator()] + l = self.unerase(w_dict.dstorage).keys() + return space.newlist_str(l) def values(self, w_dict): iterator = self.unerase(w_dict.dstorage).itervalues diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -90,9 +90,9 @@ def _add_indirections(): dict_methods = "setitem setitem_str getitem \ getitem_str delitem length \ - clear keys values \ + clear w_keys values \ items iter setdefault \ - popitem".split() + popitem listview_str listview_int".split() def make_method(method): def f(self, *args): @@ -113,7 +113,7 @@ def get_empty_storage(self): raise NotImplementedError - def keys(self, w_dict): + def w_keys(self, w_dict): iterator = self.iter(w_dict) result = [] while 1: @@ -121,7 +121,7 @@ if w_key is not None: result.append(w_key) else: - return result + return self.space.newlist(result) def values(self, w_dict): iterator = self.iter(w_dict) @@ -160,6 +160,11 @@ w_dict.strategy = strategy w_dict.dstorage = storage + def listview_str(self, w_dict): + return None + + def listview_int(self, w_dict): + return None class EmptyDictStrategy(DictStrategy): @@ -371,8 +376,9 @@ self.switch_to_object_strategy(w_dict) return w_dict.getitem(w_key) - def keys(self, w_dict): - return [self.wrap(key) for key in self.unerase(w_dict.dstorage).iterkeys()] + def w_keys(self, w_dict): + l = [self.wrap(key) for key in self.unerase(w_dict.dstorage).iterkeys()] + return self.space.newlist(l) def values(self, 
w_dict): return self.unerase(w_dict.dstorage).values() @@ -425,8 +431,8 @@ def iter(self, w_dict): return ObjectIteratorImplementation(self.space, self, w_dict) - def keys(self, w_dict): - return self.unerase(w_dict.dstorage).keys() + def w_keys(self, w_dict): + return self.space.newlist(self.unerase(w_dict.dstorage).keys()) class StringDictStrategy(AbstractTypedStrategy, DictStrategy): @@ -469,9 +475,15 @@ assert key is not None return self.unerase(w_dict.dstorage).get(key, None) + def listview_str(self, w_dict): + return self.unerase(w_dict.dstorage).keys() + def iter(self, w_dict): return StrIteratorImplementation(self.space, self, w_dict) + def w_keys(self, w_dict): + return self.space.newlist_str(self.listview_str(w_dict)) + class _WrappedIteratorMixin(object): _mixin_ = True @@ -534,6 +546,14 @@ def iter(self, w_dict): return IntIteratorImplementation(self.space, self, w_dict) + def listview_int(self, w_dict): + return self.unerase(w_dict.dstorage).keys() + + def w_keys(self, w_dict): + # XXX there is no space.newlist_int yet + space = self.space + return space.call_function(space.w_list, w_dict) + class IntIteratorImplementation(_WrappedIteratorMixin, IteratorImplementation): pass @@ -688,7 +708,7 @@ return space.newlist(w_self.items()) def dict_keys__DictMulti(space, w_self): - return space.newlist(w_self.keys()) + return w_self.w_keys() def dict_values__DictMulti(space, w_self): return space.newlist(w_self.values()) diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py --- a/pypy/objspace/std/dictproxyobject.py +++ b/pypy/objspace/std/dictproxyobject.py @@ -76,7 +76,7 @@ def keys(self, w_dict): space = self.space - return [space.wrap(key) for key in self.unerase(w_dict.dstorage).dict_w.iterkeys()] + return space.newlist_str(self.unerase(w_dict.dstorage).dict_w.keys()) def values(self, w_dict): return [unwrap_cell(self.space, w_value) for w_value in self.unerase(w_dict.dstorage).dict_w.itervalues()] diff --git 
a/pypy/objspace/std/dicttype.py b/pypy/objspace/std/dicttype.py --- a/pypy/objspace/std/dicttype.py +++ b/pypy/objspace/std/dicttype.py @@ -62,8 +62,14 @@ w_fill = space.w_None if space.is_w(w_type, space.w_dict): w_dict = W_DictMultiObject.allocate_and_init_instance(space, w_type) - for w_key in space.listview(w_keys): - w_dict.setitem(w_key, w_fill) + + strlist = space.listview_str(w_keys) + if strlist is not None: + for key in strlist: + w_dict.setitem_str(key, w_fill) + else: + for w_key in space.listview(w_keys): + w_dict.setitem(w_key, w_fill) else: w_dict = space.call_function(w_type) for w_key in space.listview(w_keys): diff --git a/pypy/objspace/std/frozensettype.py b/pypy/objspace/std/frozensettype.py --- a/pypy/objspace/std/frozensettype.py +++ b/pypy/objspace/std/frozensettype.py @@ -39,13 +39,11 @@ def descr__frozenset__new__(space, w_frozensettype, w_iterable=gateway.NoneNotWrapped): from pypy.objspace.std.setobject import W_FrozensetObject - from pypy.objspace.std.setobject import make_setdata_from_w_iterable if (space.is_w(w_frozensettype, space.w_frozenset) and w_iterable is not None and type(w_iterable) is W_FrozensetObject): return w_iterable w_obj = space.allocate_instance(W_FrozensetObject, w_frozensettype) - data = make_setdata_from_w_iterable(space, w_iterable) - W_FrozensetObject.__init__(w_obj, space, data) + W_FrozensetObject.__init__(w_obj, space, w_iterable) return w_obj frozenset_typedef = StdTypeDef("frozenset", diff --git a/pypy/objspace/std/iterobject.py b/pypy/objspace/std/iterobject.py --- a/pypy/objspace/std/iterobject.py +++ b/pypy/objspace/std/iterobject.py @@ -29,9 +29,8 @@ class W_SeqIterObject(W_AbstractSeqIterObject): """Sequence iterator implementation for general sequences.""" -class W_FastListIterObject(W_AbstractSeqIterObject): - """Sequence iterator specialized for lists, accessing directly their - RPython-level list of wrapped objects. 
+class W_FastListIterObject(W_AbstractSeqIterObject): # XXX still needed + """Sequence iterator specialized for lists. """ class W_FastTupleIterObject(W_AbstractSeqIterObject): diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -139,6 +139,16 @@ new erased object as storage""" self.strategy.init_from_list_w(self, list_w) + def clear(self, space): + """Initializes (or overrides) the listobject as empty.""" + self.space = space + if space.config.objspace.std.withliststrategies: + strategy = space.fromcache(EmptyListStrategy) + else: + strategy = space.fromcache(ObjectListStrategy) + self.strategy = strategy + strategy.clear(self) + def clone(self): """Returns a clone by creating a new listobject with the same strategy and a copy of the storage""" @@ -200,6 +210,11 @@ """ Return the items in the list as unwrapped strings. If the list does not use the list strategy, return None. """ return self.strategy.getitems_str(self) + + def getitems_int(self): + """ Return the items in the list as unwrapped ints. If the list does + not use the list strategy, return None. 
""" + return self.strategy.getitems_int(self) # ___________________________________________________ @@ -300,6 +315,9 @@ def getitems_str(self, w_list): return None + def getitems_int(self, w_list): + return None + def getstorage_copy(self, w_list): raise NotImplementedError @@ -358,6 +376,9 @@ assert len(list_w) == 0 w_list.lstorage = self.erase(None) + def clear(self, w_list): + w_list.lstorage = self.erase(None) + erase, unerase = rerased.new_erasing_pair("empty") erase = staticmethod(erase) unerase = staticmethod(unerase) @@ -516,6 +537,9 @@ raise IndexError return start + i * step + def getitems_int(self, w_list): + return self._getitems_range(w_list, False) + def getitem(self, w_list, i): return self.wrap(self._getitem_unwrapped(w_list, i)) @@ -696,6 +720,7 @@ for i in l: if i == obj: return True + return False return ListStrategy.contains(self, w_list, w_obj) def length(self, w_list): @@ -937,6 +962,9 @@ def init_from_list_w(self, w_list, list_w): w_list.lstorage = self.erase(list_w) + def clear(self, w_list): + w_list.lstorage = self.erase([]) + def contains(self, w_list, w_obj): return ListStrategy.contains(self, w_list, w_obj) @@ -970,6 +998,9 @@ if reverse: l.reverse() + def getitems_int(self, w_list): + return self.unerase(w_list.lstorage) + class FloatListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = 0.0 _applevel_repr = "float" @@ -1027,37 +1058,49 @@ def getitems_str(self, w_list): return self.unerase(w_list.lstorage) - # _______________________________________________________ init_signature = Signature(['sequence'], None, None) init_defaults = [None] def init__List(space, w_list, __args__): - from pypy.objspace.std.tupleobject import W_TupleObject + from pypy.objspace.std.tupleobject import W_AbstractTupleObject # this is on the silly side w_iterable, = __args__.parse_obj( None, 'list', init_signature, init_defaults) - w_list.__init__(space, []) + w_list.clear(space) if w_iterable is not None: - # unfortunately this is duplicating 
space.unpackiterable to avoid - # assigning a new RPython list to 'wrappeditems', which defeats the - # W_FastListIterObject optimization. - if isinstance(w_iterable, W_ListObject): - w_list.extend(w_iterable) - elif isinstance(w_iterable, W_TupleObject): - w_list.extend(W_ListObject(space, w_iterable.wrappeditems[:])) - else: - _init_from_iterable(space, w_list, w_iterable) + if type(w_iterable) is W_ListObject: + w_iterable.copy_into(w_list) + return + elif isinstance(w_iterable, W_AbstractTupleObject): + w_list.__init__(space, w_iterable.getitems_copy()) + return + + intlist = space.listview_int(w_iterable) + if intlist is not None: + w_list.strategy = strategy = space.fromcache(IntegerListStrategy) + # need to copy because intlist can share with w_iterable + w_list.lstorage = strategy.erase(intlist[:]) + return + + strlist = space.listview_str(w_iterable) + if strlist is not None: + w_list.strategy = strategy = space.fromcache(StringListStrategy) + # need to copy because intlist can share with w_iterable + w_list.lstorage = strategy.erase(strlist[:]) + return + + # xxx special hack for speed + from pypy.interpreter.generator import GeneratorIterator + if isinstance(w_iterable, GeneratorIterator): + w_iterable.unpack_into_w(w_list) + return + # /xxx + _init_from_iterable(space, w_list, w_iterable) def _init_from_iterable(space, w_list, w_iterable): # in its own function to make the JIT look into init__List - # xxx special hack for speed - from pypy.interpreter.generator import GeneratorIterator - if isinstance(w_iterable, GeneratorIterator): - w_iterable.unpack_into_w(w_list) - return - # /xxx w_iterator = space.iter(w_iterable) while True: try: diff --git a/pypy/objspace/std/listtype.py b/pypy/objspace/std/listtype.py --- a/pypy/objspace/std/listtype.py +++ b/pypy/objspace/std/listtype.py @@ -43,7 +43,7 @@ def descr__new__(space, w_listtype, __args__): from pypy.objspace.std.listobject import W_ListObject w_obj = space.allocate_instance(W_ListObject, 
w_listtype) - W_ListObject.__init__(w_obj, space, []) + w_obj.clear(space) return w_obj # ____________________________________________________________ diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -694,6 +694,8 @@ self.delitem(w_dict, w_key) return (w_key, w_value) + # XXX could implement a more efficient w_keys based on space.newlist_str + def materialize_r_dict(space, obj, dict_w): map = obj._get_mapdict_map() new_obj = map.materialize_r_dict(space, obj, dict_w) diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -227,10 +227,7 @@ return W_ComplexObject(x.real, x.imag) if isinstance(x, set): - rdict_w = r_dict(self.eq_w, self.hash_w) - for item in x: - rdict_w[self.wrap(item)] = None - res = W_SetObject(self, rdict_w) + res = W_SetObject(self, self.newlist([self.wrap(item) for item in x])) return res if isinstance(x, frozenset): @@ -325,7 +322,7 @@ def newset(self): from pypy.objspace.std.setobject import newset - return W_SetObject(self, newset(self)) + return W_SetObject(self, None) def newslice(self, w_start, w_end, w_step): return W_SliceObject(w_start, w_end, w_step) @@ -403,7 +400,7 @@ def unpackiterable(self, w_obj, expected_length=-1): if isinstance(w_obj, W_AbstractTupleObject): t = w_obj.getitems_copy() - elif isinstance(w_obj, W_ListObject): + elif type(w_obj) is W_ListObject: t = w_obj.getitems_copy() else: return ObjSpace.unpackiterable(self, w_obj, expected_length) @@ -417,7 +414,7 @@ """ if isinstance(w_obj, W_AbstractTupleObject): t = w_obj.tolist() - elif isinstance(w_obj, W_ListObject): + elif type(w_obj) is W_ListObject: if unroll: t = w_obj.getitems_unroll() else: @@ -438,7 +435,7 @@ return self.fixedview(w_obj, expected_length, unroll=True) def listview(self, w_obj, expected_length=-1): - if isinstance(w_obj, W_ListObject): + if type(w_obj) is 
W_ListObject: t = w_obj.getitems() elif isinstance(w_obj, W_AbstractTupleObject): t = w_obj.getitems_copy() @@ -449,8 +446,25 @@ return t def listview_str(self, w_obj): - if isinstance(w_obj, W_ListObject): + # note: uses exact type checking for objects with strategies, + # and isinstance() for others. See test_listobject.test_uses_custom... + if type(w_obj) is W_ListObject: return w_obj.getitems_str() + if type(w_obj) is W_DictMultiObject: + return w_obj.listview_str() + if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: + return w_obj.listview_str() + if isinstance(w_obj, W_StringObject): + return w_obj.listview_str() + return None + + def listview_int(self, w_obj): + if type(w_obj) is W_ListObject: + return w_obj.getitems_int() + if type(w_obj) is W_DictMultiObject: + return w_obj.listview_int() + if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: + return w_obj.listview_int() return None def sliceindices(self, w_slice, w_length): diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -7,6 +7,12 @@ from pypy.interpreter.argument import Signature from pypy.objspace.std.settype import set_typedef as settypedef from pypy.objspace.std.frozensettype import frozenset_typedef as frozensettypedef +from pypy.rlib import rerased +from pypy.rlib.objectmodel import instantiate +from pypy.interpreter.generator import GeneratorIterator +from pypy.objspace.std.listobject import W_ListObject +from pypy.objspace.std.intobject import W_IntObject +from pypy.objspace.std.stringobject import W_StringObject class W_BaseSetObject(W_Object): typedef = None @@ -20,88 +26,859 @@ return True return False - - def __init__(w_self, space, setdata): + def __init__(w_self, space, w_iterable=None): """Initialize the set by taking ownership of 'setdata'.""" - assert setdata is not None - w_self.setdata = setdata + w_self.space = space + 
set_strategy_and_setdata(space, w_self, w_iterable) def __repr__(w_self): """representation for debugging purposes""" - reprlist = [repr(w_item) for w_item in w_self.setdata.keys()] + reprlist = [repr(w_item) for w_item in w_self.getkeys()] return "<%s(%s)>" % (w_self.__class__.__name__, ', '.join(reprlist)) + def from_storage_and_strategy(w_self, storage, strategy): + obj = w_self._newobj(w_self.space, None) + assert isinstance(obj, W_BaseSetObject) + obj.strategy = strategy + obj.sstorage = storage + return obj + _lifeline_ = None def getweakref(self): return self._lifeline_ + def setweakref(self, space, weakreflifeline): self._lifeline_ = weakreflifeline def delweakref(self): self._lifeline_ = None + def switch_to_object_strategy(self, space): + d = self.strategy.getdict_w(self) + self.strategy = strategy = space.fromcache(ObjectSetStrategy) + self.sstorage = strategy.erase(d) + + def switch_to_empty_strategy(self): + self.strategy = strategy = self.space.fromcache(EmptySetStrategy) + self.sstorage = strategy.get_empty_storage() + + # _____________ strategy methods ________________ + + + def clear(self): + """ Removes all elements from the set. """ + self.strategy.clear(self) + + def copy_real(self): + """ Returns a clone of the set. Frozensets storages are also copied.""" + return self.strategy.copy_real(self) + + def length(self): + """ Returns the number of items inside the set. """ + return self.strategy.length(self) + + def add(self, w_key): + """ Adds an element to the set. The element must be wrapped. """ + self.strategy.add(self, w_key) + + def remove(self, w_item): + """ Removes the given element from the set. Element must be wrapped. """ + return self.strategy.remove(self, w_item) + + def getdict_w(self): + """ Returns a dict with all elements of the set. Needed only for switching to ObjectSetStrategy. """ + return self.strategy.getdict_w(self) + + def listview_str(self): + """ If this is a string set return its contents as a list of uwnrapped strings. 
Otherwise return None. """ + return self.strategy.listview_str(self) + + def listview_int(self): + """ If this is an int set return its contents as a list of uwnrapped ints. Otherwise return None. """ + return self.strategy.listview_int(self) + + def get_storage_copy(self): + """ Returns a copy of the storage. Needed when we want to clone all elements from one set and + put them into another. """ + return self.strategy.get_storage_copy(self) + + def getkeys(self): + """ Returns a list of all elements inside the set. Only used in __repr__. Use as less as possible.""" + return self.strategy.getkeys(self) + + def difference(self, w_other): + """ Returns a set with all items that are in this set, but not in w_other. W_other must be a set.""" + return self.strategy.difference(self, w_other) + + def difference_update(self, w_other): + """ As difference but overwrites the sets content with the result. W_other must be a set.""" + self.strategy.difference_update(self, w_other) + + def symmetric_difference(self, w_other): + """ Returns a set with all items that are either in this set or in w_other, but not in both. W_other must be a set. """ + return self.strategy.symmetric_difference(self, w_other) + + def symmetric_difference_update(self, w_other): + """ As symmetric_difference but overwrites the content of the set with the result. W_other must be a set.""" + self.strategy.symmetric_difference_update(self, w_other) + + def intersect(self, w_other): + """ Returns a set with all items that exists in both sets, this set and in w_other. W_other must be a set. """ + return self.strategy.intersect(self, w_other) + + def intersect_update(self, w_other): + """ Keeps only those elements found in both sets, removing all other elements. W_other must be a set.""" + self.strategy.intersect_update(self, w_other) + + def issubset(self, w_other): + """ Checks wether this set is a subset of w_other. W_other must be a set. 
""" + return self.strategy.issubset(self, w_other) + + def isdisjoint(self, w_other): + """ Checks wether this set and the w_other are completly different, i.e. have no equal elements. W_other must be a set.""" + return self.strategy.isdisjoint(self, w_other) + + def update(self, w_other): + """ Appends all elements from the given set to this set. W_other must be a set.""" + self.strategy.update(self, w_other) + + def has_key(self, w_key): + """ Checks wether this set contains the given wrapped key.""" + return self.strategy.has_key(self, w_key) + + def equals(self, w_other): + """ Checks wether this set and the given set are equal, i.e. contain the same elements. W_other must be a set.""" + return self.strategy.equals(self, w_other) + + def iter(self): + """ Returns an iterator of the elements from this set. """ + return self.strategy.iter(self) + + def popitem(self): + """ Removes an arbitrary element from the set. May raise KeyError if set is empty.""" + return self.strategy.popitem(self) + class W_SetObject(W_BaseSetObject): from pypy.objspace.std.settype import set_typedef as typedef - def _newobj(w_self, space, rdict_w): - """Make a new set by taking ownership of 'rdict_w'.""" + def _newobj(w_self, space, w_iterable): + """Make a new set by taking ownership of 'w_iterable'.""" if type(w_self) is W_SetObject: - return W_SetObject(space, rdict_w) + return W_SetObject(space, w_iterable) w_type = space.type(w_self) w_obj = space.allocate_instance(W_SetObject, w_type) - W_SetObject.__init__(w_obj, space, rdict_w) + W_SetObject.__init__(w_obj, space, w_iterable) return w_obj class W_FrozensetObject(W_BaseSetObject): from pypy.objspace.std.frozensettype import frozenset_typedef as typedef hash = 0 - def _newobj(w_self, space, rdict_w): - """Make a new frozenset by taking ownership of 'rdict_w'.""" + def _newobj(w_self, space, w_iterable): + """Make a new frozenset by taking ownership of 'w_iterable'.""" if type(w_self) is W_FrozensetObject: - return 
W_FrozensetObject(space, rdict_w) + return W_FrozensetObject(space, w_iterable) w_type = space.type(w_self) w_obj = space.allocate_instance(W_FrozensetObject, w_type) - W_FrozensetObject.__init__(w_obj, space, rdict_w) + W_FrozensetObject.__init__(w_obj, space, w_iterable) return w_obj registerimplementation(W_BaseSetObject) registerimplementation(W_SetObject) registerimplementation(W_FrozensetObject) -class W_SetIterObject(W_Object): - from pypy.objspace.std.settype import setiter_typedef as typedef +class SetStrategy(object): + def __init__(self, space): + self.space = space - def __init__(w_self, setdata): - w_self.content = content = setdata - w_self.len = len(content) - w_self.pos = 0 - w_self.iterator = w_self.content.iterkeys() + def get_empty_dict(self): + """ Returns an empty dictionary depending on the strategy. Used to initalize a new storage. """ + raise NotImplementedError - def next_entry(w_self): - for w_key in w_self.iterator: + def get_empty_storage(self): + """ Returns an empty storage (erased) object. 
Used to initialize an empty set.""" + raise NotImplementedError + + def listview_str(self, w_set): + return None + + def listview_int(self, w_set): + return None + + #def erase(self, storage): + # raise NotImplementedError + + #def unerase(self, storage): + # raise NotImplementedError + + # __________________ methods called on W_SetObject _________________ + + def clear(self, w_set): + raise NotImplementedError + + def copy_real(self, w_set): + raise NotImplementedError + + def length(self, w_set): + raise NotImplementedError + + def add(self, w_set, w_key): + raise NotImplementedError + + def remove(self, w_set, w_item): + raise NotImplementedError + + def getdict_w(self, w_set): + raise NotImplementedError + + def get_storage_copy(self, w_set): + raise NotImplementedError + + def getkeys(self, w_set): + raise NotImplementedError + + def difference(self, w_set, w_other): + raise NotImplementedError + + def difference_update(self, w_set, w_other): + raise NotImplementedError + + def symmetric_difference(self, w_set, w_other): + raise NotImplementedError + + def symmetric_difference_update(self, w_set, w_other): + raise NotImplementedError + + def intersect(self, w_set, w_other): + raise NotImplementedError + + def intersect_update(self, w_set, w_other): + raise NotImplementedError + + def issubset(self, w_set, w_other): + raise NotImplementedError + + def isdisjoint(self, w_set, w_other): + raise NotImplementedError + + def update(self, w_set, w_other): + raise NotImplementedError + + def has_key(self, w_set, w_key): + raise NotImplementedError + + def equals(self, w_set, w_other): + raise NotImplementedError + + def iter(self, w_set): + raise NotImplementedError + + def popitem(self, w_set): + raise NotImplementedError + +class EmptySetStrategy(SetStrategy): + + erase, unerase = rerased.new_erasing_pair("empty") + erase = staticmethod(erase) + unerase = staticmethod(unerase) + + def get_empty_storage(self): + return self.erase(None) + + def is_correct_type(self, 
w_key): + return False + + def length(self, w_set): + return 0 + + def clear(self, w_set): + pass + + def copy_real(self, w_set): + storage = self.erase(None) + clone = w_set.from_storage_and_strategy(storage, self) + return clone + + def add(self, w_set, w_key): + if type(w_key) is W_IntObject: + strategy = self.space.fromcache(IntegerSetStrategy) + elif type(w_key) is W_StringObject: + strategy = self.space.fromcache(StringSetStrategy) + else: + strategy = self.space.fromcache(ObjectSetStrategy) + w_set.strategy = strategy + w_set.sstorage = strategy.get_empty_storage() + w_set.add(w_key) + + def remove(self, w_set, w_item): + return False + + def getdict_w(self, w_set): + return newset(self.space) + + def get_storage_copy(self, w_set): + return w_set.sstorage + + def getkeys(self, w_set): + return [] + + def has_key(self, w_set, w_key): + return False + + def equals(self, w_set, w_other): + if w_other.strategy is self or w_other.length() == 0: + return True + return False + + def difference(self, w_set, w_other): + return w_set.copy_real() + + def difference_update(self, w_set, w_other): + pass + + def intersect(self, w_set, w_other): + return w_set.copy_real() + + def intersect_update(self, w_set, w_other): + pass + + def isdisjoint(self, w_set, w_other): + return True + + def issubset(self, w_set, w_other): + return True + + def symmetric_difference(self, w_set, w_other): + return w_other.copy_real() + + def symmetric_difference_update(self, w_set, w_other): + w_set.strategy = w_other.strategy + w_set.sstorage = w_other.get_storage_copy() + + def update(self, w_set, w_other): + w_set.strategy = w_other.strategy + w_set.sstorage = w_other.get_storage_copy() + + def iter(self, w_set): + return EmptyIteratorImplementation(self.space, w_set) + + def popitem(self, w_set): + raise OperationError(self.space.w_KeyError, + self.space.wrap('pop from an empty set')) + +class AbstractUnwrappedSetStrategy(object): + _mixin_ = True + + def is_correct_type(self, w_key): + 
""" Checks wether the given wrapped key fits this strategy.""" + raise NotImplementedError + + def unwrap(self, w_item): + """ Returns the unwrapped value of the given wrapped item.""" + raise NotImplementedError + + def wrap(self, item): + """ Returns a wrapped version of the given unwrapped item. """ + raise NotImplementedError + + def get_storage_from_list(self, list_w): + setdata = self.get_empty_dict() + for w_item in list_w: + setdata[self.unwrap(w_item)] = None + return self.erase(setdata) + + def get_storage_from_unwrapped_list(self, items): + setdata = self.get_empty_dict() + for item in items: + setdata[item] = None + return self.erase(setdata) + + def length(self, w_set): + return len(self.unerase(w_set.sstorage)) + + def clear(self, w_set): + w_set.switch_to_empty_strategy() + + def copy_real(self, w_set): + # may be used internally on frozen sets, although frozenset().copy() + # returns self in frozenset_copy__Frozenset. + strategy = w_set.strategy + d = self.unerase(w_set.sstorage) + storage = self.erase(d.copy()) + clone = w_set.from_storage_and_strategy(storage, strategy) + return clone + + def add(self, w_set, w_key): + if self.is_correct_type(w_key): + d = self.unerase(w_set.sstorage) + d[self.unwrap(w_key)] = None + else: + w_set.switch_to_object_strategy(self.space) + w_set.add(w_key) + + def remove(self, w_set, w_item): + from pypy.objspace.std.dictmultiobject import _never_equal_to_string + d = self.unerase(w_set.sstorage) + if not self.is_correct_type(w_item): + #XXX check type of w_item and immediately return False in some cases + w_set.switch_to_object_strategy(self.space) + return w_set.remove(w_item) + + key = self.unwrap(w_item) + try: + del d[key] + return True + except KeyError: + return False + + def getdict_w(self, w_set): + result = newset(self.space) + keys = self.unerase(w_set.sstorage).keys() + for key in keys: + result[self.wrap(key)] = None + return result + + def get_storage_copy(self, w_set): + d = 
self.unerase(w_set.sstorage) + copy = self.erase(d.copy()) + return copy + + def getkeys(self, w_set): + keys = self.unerase(w_set.sstorage).keys() + keys_w = [self.wrap(key) for key in keys] + return keys_w + + def has_key(self, w_set, w_key): + from pypy.objspace.std.dictmultiobject import _never_equal_to_string + if not self.is_correct_type(w_key): + #XXX check type of w_item and immediately return False in some cases + w_set.switch_to_object_strategy(self.space) + return w_set.has_key(w_key) + d = self.unerase(w_set.sstorage) + return self.unwrap(w_key) in d + + def equals(self, w_set, w_other): + if w_set.length() != w_other.length(): + return False + items = self.unerase(w_set.sstorage).keys() + for key in items: + if not w_other.has_key(self.wrap(key)): + return False + return True + + def _difference_wrapped(self, w_set, w_other): + strategy = self.space.fromcache(ObjectSetStrategy) + + d_new = strategy.get_empty_dict() + for obj in self.unerase(w_set.sstorage): + w_item = self.wrap(obj) + if not w_other.has_key(w_item): + d_new[w_item] = None + + return strategy.erase(d_new) + + def _difference_unwrapped(self, w_set, w_other): + iterator = self.unerase(w_set.sstorage).iterkeys() + other_dict = self.unerase(w_other.sstorage) + result_dict = self.get_empty_dict() + for key in iterator: + if key not in other_dict: + result_dict[key] = None + return self.erase(result_dict) + + def _difference_base(self, w_set, w_other): + if self is w_other.strategy: + strategy = w_set.strategy + storage = self._difference_unwrapped(w_set, w_other) + elif not w_set.strategy.may_contain_equal_elements(w_other.strategy): + strategy = w_set.strategy + storage = w_set.sstorage + else: + strategy = self.space.fromcache(ObjectSetStrategy) + storage = self._difference_wrapped(w_set, w_other) + return storage, strategy + + def difference(self, w_set, w_other): + storage, strategy = self._difference_base(w_set, w_other) + w_newset = w_set.from_storage_and_strategy(storage, strategy) + 
return w_newset + + def difference_update(self, w_set, w_other): + storage, strategy = self._difference_base(w_set, w_other) + w_set.strategy = strategy + w_set.sstorage = storage + + def _symmetric_difference_unwrapped(self, w_set, w_other): + d_new = self.get_empty_dict() + d_this = self.unerase(w_set.sstorage) + d_other = self.unerase(w_other.sstorage) + for key in d_other.keys(): + if not key in d_this: + d_new[key] = None + for key in d_this.keys(): + if not key in d_other: + d_new[key] = None + + storage = self.erase(d_new) + return storage + + def _symmetric_difference_wrapped(self, w_set, w_other): + newsetdata = newset(self.space) + for obj in self.unerase(w_set.sstorage): + w_item = self.wrap(obj) + if not w_other.has_key(w_item): + newsetdata[w_item] = None + + w_iterator = w_other.iter() + while True: + w_item = w_iterator.next_entry() + if w_item is None: + break + if not w_set.has_key(w_item): + newsetdata[w_item] = None + + strategy = self.space.fromcache(ObjectSetStrategy) + return strategy.erase(newsetdata) + + def _symmetric_difference_base(self, w_set, w_other): + if self is w_other.strategy: + strategy = w_set.strategy + storage = self._symmetric_difference_unwrapped(w_set, w_other) + else: + strategy = self.space.fromcache(ObjectSetStrategy) + storage = self._symmetric_difference_wrapped(w_set, w_other) + return storage, strategy + + def symmetric_difference(self, w_set, w_other): + storage, strategy = self._symmetric_difference_base(w_set, w_other) + return w_set.from_storage_and_strategy(storage, strategy) + + def symmetric_difference_update(self, w_set, w_other): + storage, strategy = self._symmetric_difference_base(w_set, w_other) + w_set.strategy = strategy + w_set.sstorage = storage + + def _intersect_base(self, w_set, w_other): + if self is w_other.strategy: + strategy = w_set.strategy + storage = strategy._intersect_unwrapped(w_set, w_other) + elif not w_set.strategy.may_contain_equal_elements(w_other.strategy): + strategy = 
self.space.fromcache(EmptySetStrategy) + storage = strategy.get_empty_storage() + else: + strategy = self.space.fromcache(ObjectSetStrategy) + storage = self._intersect_wrapped(w_set, w_other) + return storage, strategy + + def _intersect_wrapped(self, w_set, w_other): + result = newset(self.space) + for key in self.unerase(w_set.sstorage): + w_key = self.wrap(key) + if w_other.has_key(w_key): + result[w_key] = None + + strategy = self.space.fromcache(ObjectSetStrategy) + return strategy.erase(result) + + def _intersect_unwrapped(self, w_set, w_other): + result = self.get_empty_dict() + d_this = self.unerase(w_set.sstorage) + d_other = self.unerase(w_other.sstorage) + for key in d_this: + if key in d_other: + result[key] = None + return self.erase(result) + + def intersect(self, w_set, w_other): + if w_set.length() > w_other.length(): + return w_other.intersect(w_set) + + storage, strategy = self._intersect_base(w_set, w_other) + return w_set.from_storage_and_strategy(storage, strategy) + + def intersect_update(self, w_set, w_other): + if w_set.length() > w_other.length(): + w_intersection = w_other.intersect(w_set) + strategy = w_intersection.strategy + storage = w_intersection.sstorage + else: + storage, strategy = self._intersect_base(w_set, w_other) + w_set.strategy = strategy + w_set.sstorage = storage + + def _issubset_unwrapped(self, w_set, w_other): + d_other = self.unerase(w_other.sstorage) + for item in self.unerase(w_set.sstorage): + if not item in d_other: + return False + return True + + def _issubset_wrapped(self, w_set, w_other): + for obj in self.unerase(w_set.sstorage): + w_item = self.wrap(obj) + if not w_other.has_key(w_item): + return False + return True + + def issubset(self, w_set, w_other): + if w_set.length() == 0: + return True + + if w_set.strategy is w_other.strategy: + return self._issubset_unwrapped(w_set, w_other) + elif not w_set.strategy.may_contain_equal_elements(w_other.strategy): + return False + else: + return 
self._issubset_wrapped(w_set, w_other) + + def _isdisjoint_unwrapped(self, w_set, w_other): + d_set = self.unerase(w_set.sstorage) + d_other = self.unerase(w_other.sstorage) + for key in d_set: + if key in d_other: + return False + return True + + def _isdisjoint_wrapped(self, w_set, w_other): + d = self.unerase(w_set.sstorage) + for key in d: + if w_other.has_key(self.wrap(key)): + return False + return True + + def isdisjoint(self, w_set, w_other): + if w_other.length() == 0: + return True + if w_set.length() > w_other.length(): + return w_other.isdisjoint(w_set) + + if w_set.strategy is w_other.strategy: + return self._isdisjoint_unwrapped(w_set, w_other) + elif not w_set.strategy.may_contain_equal_elements(w_other.strategy): + return True + else: + return self._isdisjoint_wrapped(w_set, w_other) + + def update(self, w_set, w_other): + if self is w_other.strategy: + d_set = self.unerase(w_set.sstorage) + d_other = self.unerase(w_other.sstorage) + d_set.update(d_other) + return + + w_set.switch_to_object_strategy(self.space) + w_set.update(w_other) + + def popitem(self, w_set): + storage = self.unerase(w_set.sstorage) + try: + # this returns a tuple because internally sets are dicts + result = storage.popitem() + except KeyError: + # strategy may still be the same even if dict is empty + raise OperationError(self.space.w_KeyError, + self.space.wrap('pop from an empty set')) + return self.wrap(result[0]) + +class StringSetStrategy(AbstractUnwrappedSetStrategy, SetStrategy): + erase, unerase = rerased.new_erasing_pair("string") + erase = staticmethod(erase) + unerase = staticmethod(unerase) + + def get_empty_storage(self): + return self.erase({}) + + def get_empty_dict(self): + return {} + + def listview_str(self, w_set): + return self.unerase(w_set.sstorage).keys() + + def is_correct_type(self, w_key): + return type(w_key) is W_StringObject + + def may_contain_equal_elements(self, strategy): + if strategy is self.space.fromcache(IntegerSetStrategy): + return False 
+ if strategy is self.space.fromcache(EmptySetStrategy): + return False + return True + + def unwrap(self, w_item): + return self.space.str_w(w_item) + + def wrap(self, item): + return self.space.wrap(item) + + def iter(self, w_set): + return StringIteratorImplementation(self.space, self, w_set) + +class IntegerSetStrategy(AbstractUnwrappedSetStrategy, SetStrategy): + erase, unerase = rerased.new_erasing_pair("integer") + erase = staticmethod(erase) + unerase = staticmethod(unerase) + + def get_empty_storage(self): + return self.erase({}) + + def get_empty_dict(self): + return {} + + def listview_int(self, w_set): + return self.unerase(w_set.sstorage).keys() + + def is_correct_type(self, w_key): + from pypy.objspace.std.intobject import W_IntObject + return type(w_key) is W_IntObject + + def may_contain_equal_elements(self, strategy): + if strategy is self.space.fromcache(StringSetStrategy): + return False + if strategy is self.space.fromcache(EmptySetStrategy): + return False + return True + + def unwrap(self, w_item): + return self.space.int_w(w_item) + + def wrap(self, item): + return self.space.wrap(item) + + def iter(self, w_set): + return IntegerIteratorImplementation(self.space, self, w_set) + +class ObjectSetStrategy(AbstractUnwrappedSetStrategy, SetStrategy): + erase, unerase = rerased.new_erasing_pair("object") + erase = staticmethod(erase) + unerase = staticmethod(unerase) + + def get_empty_storage(self): + return self.erase(self.get_empty_dict()) + + def get_empty_dict(self): + return newset(self.space) + + def is_correct_type(self, w_key): + return True + + def may_contain_equal_elements(self, strategy): + if strategy is self.space.fromcache(EmptySetStrategy): + return False + return True + + def unwrap(self, w_item): + return w_item + + def wrap(self, item): + return item + + def iter(self, w_set): + return RDictIteratorImplementation(self.space, self, w_set) + + def update(self, w_set, w_other): + d_obj = self.unerase(w_set.sstorage) + w_iterator = 
w_other.iter() + while True: + w_item = w_iterator.next_entry() + if w_item is None: + break + d_obj[w_item] = None + +class IteratorImplementation(object): + def __init__(self, space, implementation): + self.space = space + self.setimplementation = implementation + self.len = implementation.length() + self.pos = 0 + + def next(self): + if self.setimplementation is None: + return None + if self.len != self.setimplementation.length(): + self.len = -1 # Make this error state sticky + raise OperationError(self.space.w_RuntimeError, + self.space.wrap("set changed size during iteration")) + # look for the next entry + if self.pos < self.len: + result = self.next_entry() + self.pos += 1 + return result + # no more entries + self.setimplementation = None + return None + + def next_entry(self): + """ Purely abstract method + """ + raise NotImplementedError + + def length(self): + if self.setimplementation is not None: + return self.len - self.pos + return 0 + +class EmptyIteratorImplementation(IteratorImplementation): + def next_entry(self): + return None + + +class StringIteratorImplementation(IteratorImplementation): + def __init__(self, space, strategy, w_set): + IteratorImplementation.__init__(self, space, w_set) + d = strategy.unerase(w_set.sstorage) + self.iterator = d.iterkeys() + + def next_entry(self): + for key in self.iterator: + return self.space.wrap(key) + else: + return None + +class IntegerIteratorImplementation(IteratorImplementation): + #XXX same implementation in dictmultiobject on dictstrategy-branch + def __init__(self, space, strategy, dictimplementation): + IteratorImplementation.__init__(self, space, dictimplementation) + d = strategy.unerase(dictimplementation.sstorage) + self.iterator = d.iterkeys() + + def next_entry(self): + # note that this 'for' loop only runs once, at most + for key in self.iterator: + return self.space.wrap(key) + else: + return None + +class RDictIteratorImplementation(IteratorImplementation): + def __init__(self, space, 
strategy, dictimplementation): + IteratorImplementation.__init__(self, space, dictimplementation) + d = strategy.unerase(dictimplementation.sstorage) + self.iterator = d.iterkeys() + + def next_entry(self): + # note that this 'for' loop only runs once, at most + for w_key in self.iterator: return w_key else: return None +class W_SetIterObject(W_Object): + from pypy.objspace.std.settype import setiter_typedef as typedef + # XXX this class should be killed, and the various + # iterimplementations should be W_Objects directly. + + def __init__(w_self, space, iterimplementation): + w_self.space = space + w_self.iterimplementation = iterimplementation + registerimplementation(W_SetIterObject) def iter__SetIterObject(space, w_setiter): return w_setiter def next__SetIterObject(space, w_setiter): - content = w_setiter.content - if content is not None: - if w_setiter.len != len(content): - w_setiter.len = -1 # Make this error state sticky - raise OperationError(space.w_RuntimeError, - space.wrap("Set changed size during iteration")) - # look for the next entry - w_result = w_setiter.next_entry() - if w_result is not None: - w_setiter.pos += 1 - return w_result - # no more entries - w_setiter.content = None + iterimplementation = w_setiter.iterimplementation + w_key = iterimplementation.next() + if w_key is not None: + return w_key raise OperationError(space.w_StopIteration, space.w_None) # XXX __length_hint__() @@ -116,107 +893,91 @@ def newset(space): return r_dict(space.eq_w, space.hash_w, force_non_null=True) -def make_setdata_from_w_iterable(space, w_iterable=None): - """Return a new r_dict with the content of w_iterable.""" +def set_strategy_and_setdata(space, w_set, w_iterable): + from pypy.objspace.std.intobject import W_IntObject + if w_iterable is None : + w_set.strategy = strategy = space.fromcache(EmptySetStrategy) + w_set.sstorage = strategy.get_empty_storage() + return + if isinstance(w_iterable, W_BaseSetObject): - return w_iterable.setdata.copy() - data = 
newset(space) - if w_iterable is not None: - for w_item in space.listview(w_iterable): - data[w_item] = None - return data + w_set.strategy = w_iterable.strategy + w_set.sstorage = w_iterable.get_storage_copy() + return + + stringlist = space.listview_str(w_iterable) + if stringlist is not None: + strategy = space.fromcache(StringSetStrategy) + w_set.strategy = strategy + w_set.sstorage = strategy.get_storage_from_unwrapped_list(stringlist) + return + + intlist = space.listview_int(w_iterable) + if intlist is not None: + strategy = space.fromcache(IntegerSetStrategy) + w_set.strategy = strategy + w_set.sstorage = strategy.get_storage_from_unwrapped_list(intlist) + return + + iterable_w = space.listview(w_iterable) + + if len(iterable_w) == 0: + w_set.strategy = strategy = space.fromcache(EmptySetStrategy) + w_set.sstorage = strategy.get_empty_storage() + return + + _pick_correct_strategy(space, w_set, iterable_w) + +def _pick_correct_strategy(space, w_set, iterable_w): + # check for integers + for w_item in iterable_w: + if type(w_item) is not W_IntObject: + break + else: + w_set.strategy = space.fromcache(IntegerSetStrategy) + w_set.sstorage = w_set.strategy.get_storage_from_list(iterable_w) + return + + # check for strings + for w_item in iterable_w: + if type(w_item) is not W_StringObject: + break + else: + w_set.strategy = space.fromcache(StringSetStrategy) + w_set.sstorage = w_set.strategy.get_storage_from_list(iterable_w) + return + + w_set.strategy = space.fromcache(ObjectSetStrategy) + w_set.sstorage = w_set.strategy.get_storage_from_list(iterable_w) def _initialize_set(space, w_obj, w_iterable=None): - w_obj.setdata.clear() - if w_iterable is not None: - w_obj.setdata = make_setdata_from_w_iterable(space, w_iterable) + w_obj.clear() + set_strategy_and_setdata(space, w_obj, w_iterable) def _convert_set_to_frozenset(space, w_obj): - if space.isinstance_w(w_obj, space.w_set): - return W_FrozensetObject(space, - make_setdata_from_w_iterable(space, w_obj)) + if 
isinstance(w_obj, W_SetObject): + w_frozen = W_FrozensetObject(space, None) + w_frozen.strategy = w_obj.strategy + w_frozen.sstorage = w_obj.sstorage + return w_frozen + elif space.isinstance_w(w_obj, space.w_set): + w_frz = space.allocate_instance(W_FrozensetObject, space.w_frozenset) + W_FrozensetObject.__init__(w_frz, space, w_obj) + return w_frz else: return None -# helper functions for set operation on dicts - -def _is_eq(ld, rd): - if len(ld) != len(rd): - return False - for w_key in ld: - if w_key not in rd: - return False - return True - -def _difference_dict(space, ld, rd): - result = newset(space) - for w_key in ld: - if w_key not in rd: - result[w_key] = None - return result - -def _difference_dict_update(space, ld, rd): - if ld is rd: - ld.clear() # for the case 'a.difference_update(a)' - else: - for w_key in rd: - try: - del ld[w_key] - except KeyError: - pass - -def _intersection_dict(space, ld, rd): - result = newset(space) - if len(ld) > len(rd): - ld, rd = rd, ld # loop over the smaller dict - for w_key in ld: - if w_key in rd: - result[w_key] = None - return result - -def _isdisjoint_dict(ld, rd): - if len(ld) > len(rd): - ld, rd = rd, ld # loop over the smaller dict - for w_key in ld: - if w_key in rd: - return False - return True - -def _symmetric_difference_dict(space, ld, rd): - result = newset(space) - for w_key in ld: - if w_key not in rd: - result[w_key] = None - for w_key in rd: - if w_key not in ld: - result[w_key] = None - return result - -def _issubset_dict(ldict, rdict): - if len(ldict) > len(rdict): - return False - - for w_key in ldict: - if w_key not in rdict: - return False - return True - - -#end helper functions - def set_update__Set(space, w_left, others_w): """Update a set with the union of itself and another.""" - ld = w_left.setdata for w_other in others_w: if isinstance(w_other, W_BaseSetObject): - ld.update(w_other.setdata) # optimization only + w_left.update(w_other) # optimization only else: for w_key in 
space.listview(w_other): - ld[w_key] = None + w_left.add(w_key) def inplace_or__Set_Set(space, w_left, w_other): - ld, rd = w_left.setdata, w_other.setdata - ld.update(rd) + w_left.update(w_other) return w_left inplace_or__Set_Frozenset = inplace_or__Set_Set @@ -226,10 +987,10 @@ This has no effect if the element is already present. """ - w_left.setdata[w_other] = None + w_left.add(w_other) def set_copy__Set(space, w_set): - return w_set._newobj(space, w_set.setdata.copy()) + return w_set.copy_real() def frozenset_copy__Frozenset(space, w_left): if type(w_left) is W_FrozensetObject: @@ -238,63 +999,51 @@ return set_copy__Set(space, w_left) def set_clear__Set(space, w_left): - w_left.setdata.clear() + w_left.clear() def sub__Set_Set(space, w_left, w_other): - ld, rd = w_left.setdata, w_other.setdata - new_ld = _difference_dict(space, ld, rd) - return w_left._newobj(space, new_ld) + return w_left.difference(w_other) sub__Set_Frozenset = sub__Set_Set sub__Frozenset_Set = sub__Set_Set sub__Frozenset_Frozenset = sub__Set_Set def set_difference__Set(space, w_left, others_w): - result = w_left.setdata - if len(others_w) == 0: - result = result.copy() - for w_other in others_w: - if isinstance(w_other, W_BaseSetObject): - rd = w_other.setdata # optimization only - else: - rd = make_setdata_from_w_iterable(space, w_other) - result = _difference_dict(space, result, rd) - return w_left._newobj(space, result) + result = w_left.copy_real() + set_difference_update__Set(space, result, others_w) + return result frozenset_difference__Frozenset = set_difference__Set def set_difference_update__Set(space, w_left, others_w): - ld = w_left.setdata for w_other in others_w: if isinstance(w_other, W_BaseSetObject): # optimization only - _difference_dict_update(space, ld, w_other.setdata) + w_left.difference_update(w_other) else: - for w_key in space.listview(w_other): - try: - del ld[w_key] - except KeyError: - pass + w_other_as_set = w_left._newobj(space, w_other) + 
w_left.difference_update(w_other_as_set) def inplace_sub__Set_Set(space, w_left, w_other): - ld, rd = w_left.setdata, w_other.setdata - _difference_dict_update(space, ld, rd) + w_left.difference_update(w_other) return w_left inplace_sub__Set_Frozenset = inplace_sub__Set_Set def eq__Set_Set(space, w_left, w_other): # optimization only (the general case is eq__Set_settypedef) - return space.wrap(_is_eq(w_left.setdata, w_other.setdata)) + return space.wrap(w_left.equals(w_other)) eq__Set_Frozenset = eq__Set_Set eq__Frozenset_Frozenset = eq__Set_Set eq__Frozenset_Set = eq__Set_Set def eq__Set_settypedef(space, w_left, w_other): - rd = make_setdata_from_w_iterable(space, w_other) - return space.wrap(_is_eq(w_left.setdata, rd)) + # tested in test_buildinshortcut.py + #XXX do not make new setobject here + w_other_as_set = w_left._newobj(space, w_other) + return space.wrap(w_left.equals(w_other_as_set)) eq__Set_frozensettypedef = eq__Set_settypedef eq__Frozenset_settypedef = eq__Set_settypedef @@ -308,15 +1057,16 @@ eq__Frozenset_ANY = eq__Set_ANY def ne__Set_Set(space, w_left, w_other): - return space.wrap(not _is_eq(w_left.setdata, w_other.setdata)) + return space.wrap(not w_left.equals(w_other)) ne__Set_Frozenset = ne__Set_Set ne__Frozenset_Frozenset = ne__Set_Set ne__Frozenset_Set = ne__Set_Set def ne__Set_settypedef(space, w_left, w_other): - rd = make_setdata_from_w_iterable(space, w_other) - return space.wrap(not _is_eq(w_left.setdata, rd)) + #XXX this is not tested + w_other_as_set = w_left._newobj(space, w_other) + return space.wrap(not w_left.equals(w_other_as_set)) ne__Set_frozensettypedef = ne__Set_settypedef ne__Frozenset_settypedef = ne__Set_settypedef @@ -331,12 +1081,12 @@ def contains__Set_ANY(space, w_left, w_other): try: - return space.newbool(w_other in w_left.setdata) + return space.newbool(w_left.has_key(w_other)) except OperationError, e: if e.match(space, space.w_TypeError): w_f = _convert_set_to_frozenset(space, w_other) if w_f is not None: - 
return space.newbool(w_f in w_left.setdata) + return space.newbool(w_left.has_key(w_f)) raise contains__Frozenset_ANY = contains__Set_ANY @@ -345,19 +1095,23 @@ # optimization only (the general case works too) if space.is_w(w_left, w_other): return space.w_True - ld, rd = w_left.setdata, w_other.setdata - return space.wrap(_issubset_dict(ld, rd)) + if w_left.length() > w_other.length(): + return space.w_False + return space.wrap(w_left.issubset(w_other)) set_issubset__Set_Frozenset = set_issubset__Set_Set frozenset_issubset__Frozenset_Set = set_issubset__Set_Set frozenset_issubset__Frozenset_Frozenset = set_issubset__Set_Set def set_issubset__Set_ANY(space, w_left, w_other): - if space.is_w(w_left, w_other): - return space.w_True + # not checking whether w_left is w_other here, because if that were the + # case the more precise multimethod would have applied. - ld, rd = w_left.setdata, make_setdata_from_w_iterable(space, w_other) - return space.wrap(_issubset_dict(ld, rd)) + w_other_as_set = w_left._newobj(space, w_other) + + if w_left.length() > w_other_as_set.length(): + return space.w_False + return space.wrap(w_left.issubset(w_other_as_set)) frozenset_issubset__Frozenset_ANY = set_issubset__Set_ANY @@ -370,9 +1124,9 @@ # optimization only (the general case works too) if space.is_w(w_left, w_other): return space.w_True - - ld, rd = w_left.setdata, w_other.setdata - return space.wrap(_issubset_dict(rd, ld)) + if w_left.length() < w_other.length(): + return space.w_False + return space.wrap(w_other.issubset(w_left)) set_issuperset__Set_Frozenset = set_issuperset__Set_Set set_issuperset__Frozenset_Set = set_issuperset__Set_Set @@ -382,8 +1136,11 @@ if space.is_w(w_left, w_other): return space.w_True - ld, rd = w_left.setdata, make_setdata_from_w_iterable(space, w_other) - return space.wrap(_issubset_dict(rd, ld)) + w_other_as_set = w_left._newobj(space, w_other) + + if w_left.length() < w_other_as_set.length(): + return space.w_False + return 
space.wrap(w_other_as_set.issubset(w_left)) frozenset_issuperset__Frozenset_ANY = set_issuperset__Set_ANY @@ -395,7 +1152,7 @@ # automatic registration of "lt(x, y)" as "not ge(y, x)" would not give the # correct answer here! def lt__Set_Set(space, w_left, w_other): - if len(w_left.setdata) >= len(w_other.setdata): + if w_left.length() >= w_other.length(): return space.w_False else: return le__Set_Set(space, w_left, w_other) @@ -405,7 +1162,7 @@ lt__Frozenset_Frozenset = lt__Set_Set def gt__Set_Set(space, w_left, w_other): - if len(w_left.setdata) <= len(w_other.setdata): + if w_left.length() <= w_other.length(): return space.w_False else: return ge__Set_Set(space, w_left, w_other) @@ -421,26 +1178,19 @@ Returns True if successfully removed. """ try: - del w_left.setdata[w_item] - return True - except KeyError: - return False + deleted = w_left.remove(w_item) except OperationError, e: if not e.match(space, space.w_TypeError): raise - w_f = _convert_set_to_frozenset(space, w_item) - if w_f is None: - raise + else: + w_f = _convert_set_to_frozenset(space, w_item) + if w_f is None: + raise + deleted = w_left.remove(w_f) - try: - del w_left.setdata[w_f] - return True - except KeyError: - return False - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - return False + if w_left.length() == 0: + w_left.switch_to_empty_strategy() + return deleted def set_discard__Set_ANY(space, w_left, w_item): _discard_from_set(space, w_left, w_item) @@ -454,8 +1204,12 @@ if w_set.hash != 0: return space.wrap(w_set.hash) hash = r_uint(1927868237) - hash *= r_uint(len(w_set.setdata) + 1) - for w_item in w_set.setdata: + hash *= r_uint(w_set.length() + 1) + w_iterator = w_set.iter() + while True: + w_item = w_iterator.next_entry() + if w_item is None: + break h = space.hash_w(w_item) value = (r_uint(h ^ (h << 16) ^ 89869747) * multi) hash = hash ^ value @@ -468,71 +1222,75 @@ return space.wrap(hash) def set_pop__Set(space, w_left): - try: - w_key, _ = 
w_left.setdata.popitem() - except KeyError: - raise OperationError(space.w_KeyError, - space.wrap('pop from an empty set')) - return w_key + return w_left.popitem() def and__Set_Set(space, w_left, w_other): - ld, rd = w_left.setdata, w_other.setdata - new_ld = _intersection_dict(space, ld, rd) - return w_left._newobj(space, new_ld) + new_set = w_left.intersect(w_other) + return new_set and__Set_Frozenset = and__Set_Set and__Frozenset_Set = and__Set_Set and__Frozenset_Frozenset = and__Set_Set -def _intersection_multiple(space, w_left, others_w): - result = w_left.setdata - for w_other in others_w: +def set_intersection__Set(space, w_left, others_w): + #XXX find smarter implementations + others_w = [w_left] + others_w + + # find smallest set in others_w to reduce comparisons + startindex, startlength = 0, -1 + for i in range(len(others_w)): + w_other = others_w[i] + try: + length = space.int_w(space.len(w_other)) + except OperationError, e: + if (e.match(space, space.w_TypeError) or + e.match(space, space.w_AttributeError)): + continue + raise + + if startlength == -1 or length < startlength: + startindex = i + startlength = length + + others_w[startindex], others_w[0] = others_w[0], others_w[startindex] + + result = w_left._newobj(space, others_w[0]) + for i in range(1,len(others_w)): + w_other = others_w[i] if isinstance(w_other, W_BaseSetObject): # optimization only - result = _intersection_dict(space, result, w_other.setdata) + result.intersect_update(w_other) else: - result2 = newset(space) - for w_key in space.listview(w_other): - if w_key in result: - result2[w_key] = None - result = result2 + w_other_as_set = w_left._newobj(space, w_other) + result.intersect_update(w_other_as_set) return result -def set_intersection__Set(space, w_left, others_w): - if len(others_w) == 0: - result = w_left.setdata.copy() - else: - result = _intersection_multiple(space, w_left, others_w) - return w_left._newobj(space, result) - frozenset_intersection__Frozenset = 
set_intersection__Set def set_intersection_update__Set(space, w_left, others_w): - result = _intersection_multiple(space, w_left, others_w) - w_left.setdata = result + result = set_intersection__Set(space, w_left, others_w) + w_left.strategy = result.strategy + w_left.sstorage = result.sstorage + return def inplace_and__Set_Set(space, w_left, w_other): - ld, rd = w_left.setdata, w_other.setdata - new_ld = _intersection_dict(space, ld, rd) - w_left.setdata = new_ld + w_left.intersect_update(w_other) return w_left inplace_and__Set_Frozenset = inplace_and__Set_Set def set_isdisjoint__Set_Set(space, w_left, w_other): # optimization only (the general case works too) - ld, rd = w_left.setdata, w_other.setdata - disjoint = _isdisjoint_dict(ld, rd) - return space.newbool(disjoint) + return space.newbool(w_left.isdisjoint(w_other)) set_isdisjoint__Set_Frozenset = set_isdisjoint__Set_Set set_isdisjoint__Frozenset_Frozenset = set_isdisjoint__Set_Set set_isdisjoint__Frozenset_Set = set_isdisjoint__Set_Set def set_isdisjoint__Set_ANY(space, w_left, w_other): - ld = w_left.setdata + #XXX may be optimized when other strategies are added for w_key in space.listview(w_other): - if w_key in ld: + if w_left.has_key(w_key): return space.w_False return space.w_True @@ -540,9 +1298,8 @@ def set_symmetric_difference__Set_Set(space, w_left, w_other): # optimization only (the general case works too) - ld, rd = w_left.setdata, w_other.setdata - new_ld = _symmetric_difference_dict(space, ld, rd) - return w_left._newobj(space, new_ld) + w_result = w_left.symmetric_difference(w_other) + return w_result set_symmetric_difference__Set_Frozenset = set_symmetric_difference__Set_Set set_symmetric_difference__Frozenset_Set = set_symmetric_difference__Set_Set @@ -556,26 +1313,23 @@ def set_symmetric_difference__Set_ANY(space, w_left, w_other): - ld, rd = w_left.setdata, make_setdata_from_w_iterable(space, w_other) - new_ld = _symmetric_difference_dict(space, ld, rd) - return w_left._newobj(space, 
new_ld) + w_other_as_set = w_left._newobj(space, w_other) + w_result = w_left.symmetric_difference(w_other_as_set) + return w_result frozenset_symmetric_difference__Frozenset_ANY = \ set_symmetric_difference__Set_ANY def set_symmetric_difference_update__Set_Set(space, w_left, w_other): # optimization only (the general case works too) - ld, rd = w_left.setdata, w_other.setdata - new_ld = _symmetric_difference_dict(space, ld, rd) - w_left.setdata = new_ld + w_left.symmetric_difference_update(w_other) set_symmetric_difference_update__Set_Frozenset = \ set_symmetric_difference_update__Set_Set def set_symmetric_difference_update__Set_ANY(space, w_left, w_other): - ld, rd = w_left.setdata, make_setdata_from_w_iterable(space, w_other) - new_ld = _symmetric_difference_dict(space, ld, rd) - w_left.setdata = new_ld + w_other_as_set = w_left._newobj(space, w_other) + w_left.symmetric_difference_update(w_other_as_set) def inplace_xor__Set_Set(space, w_left, w_other): set_symmetric_difference_update__Set_Set(space, w_left, w_other) @@ -584,34 +1338,33 @@ inplace_xor__Set_Frozenset = inplace_xor__Set_Set def or__Set_Set(space, w_left, w_other): - ld, rd = w_left.setdata, w_other.setdata - result = ld.copy() - result.update(rd) - return w_left._newobj(space, result) + w_copy = w_left.copy_real() + w_copy.update(w_other) + return w_copy or__Set_Frozenset = or__Set_Set or__Frozenset_Set = or__Set_Set or__Frozenset_Frozenset = or__Set_Set def set_union__Set(space, w_left, others_w): - result = w_left.setdata.copy() + result = w_left.copy_real() for w_other in others_w: if isinstance(w_other, W_BaseSetObject): - result.update(w_other.setdata) # optimization only + result.update(w_other) # optimization only else: for w_key in space.listview(w_other): - result[w_key] = None - return w_left._newobj(space, result) + result.add(w_key) + return result frozenset_union__Frozenset = set_union__Set def len__Set(space, w_left): - return space.newint(len(w_left.setdata)) + return 
space.newint(w_left.length()) len__Frozenset = len__Set def iter__Set(space, w_left): - return W_SetIterObject(w_left.setdata) + return W_SetIterObject(space, w_left.iter()) iter__Frozenset = iter__Set diff --git a/pypy/objspace/std/settype.py b/pypy/objspace/std/settype.py --- a/pypy/objspace/std/settype.py +++ b/pypy/objspace/std/settype.py @@ -68,7 +68,7 @@ def descr__new__(space, w_settype, __args__): from pypy.objspace.std.setobject import W_SetObject, newset w_obj = space.allocate_instance(W_SetObject, w_settype) - W_SetObject.__init__(w_obj, space, newset(space)) + W_SetObject.__init__(w_obj, space) return w_obj set_typedef = StdTypeDef("set", diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -69,6 +69,14 @@ def str_w(w_self, space): return w_self._value + def listview_str(w_self): + return _create_list_from_string(w_self._value) + +def _create_list_from_string(value): + # need this helper function to allow the jit to look inside and inline + # listview_str + return [s for s in value] + registerimplementation(W_StringObject) W_StringObject.EMPTY = W_StringObject('') diff --git a/pypy/objspace/std/test/test_builtinshortcut.py b/pypy/objspace/std/test/test_builtinshortcut.py --- a/pypy/objspace/std/test/test_builtinshortcut.py +++ b/pypy/objspace/std/test/test_builtinshortcut.py @@ -85,6 +85,20 @@ def setup_class(cls): from pypy import conftest cls.space = conftest.gettestobjspace(**WITH_BUILTINSHORTCUT) + w_fakeint = cls.space.appexec([], """(): + class FakeInt(object): + def __init__(self, value): + self.value = value + def __hash__(self): + return hash(self.value) + + def __eq__(self, other): + if other == self.value: + return True + return False + return FakeInt + """) + cls.w_FakeInt = w_fakeint class AppTestString(test_stringobject.AppTestStringObject): def setup_class(cls): diff --git a/pypy/objspace/std/test/test_dictmultiobject.py 
b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -131,6 +131,45 @@ assert self.space.eq_w(space.call_function(get, w("33")), w(None)) assert self.space.eq_w(space.call_function(get, w("33"), w(44)), w(44)) + def test_fromkeys_fastpath(self): + space = self.space + w = space.wrap + + w_l = self.space.newlist([w("a"),w("b")]) + w_l.getitems = None + w_d = space.call_method(space.w_dict, "fromkeys", w_l) + + assert space.eq_w(w_d.getitem_str("a"), space.w_None) + assert space.eq_w(w_d.getitem_str("b"), space.w_None) + + def test_listview_str_dict(self): + w = self.space.wrap + + w_d = self.space.newdict() + w_d.initialize_content([(w("a"), w(1)), (w("b"), w(2))]) + + assert self.space.listview_str(w_d) == ["a", "b"] + + def test_listview_int_dict(self): + w = self.space.wrap + w_d = self.space.newdict() + w_d.initialize_content([(w(1), w("a")), (w(2), w("b"))]) + + assert self.space.listview_int(w_d) == [1, 2] + + def test_keys_on_string_int_dict(self): + w = self.space.wrap + w_d = self.space.newdict() + w_d.initialize_content([(w(1), w("a")), (w(2), w("b"))]) + + w_l = self.space.call_method(w_d, "keys") + assert sorted(self.space.listview_int(w_l)) == [1,2] + + w_d = self.space.newdict() + w_d.initialize_content([(w("a"), w(1)), (w("b"), w(6))]) + + w_l = self.space.call_method(w_d, "keys") + assert sorted(self.space.listview_str(w_l)) == ["a", "b"] class AppTest_DictObject: def setup_class(cls): @@ -793,7 +832,9 @@ return x == y eq_w = eq def newlist(self, l): - return [] + return l + def newlist_str(self, l): + return l DictObjectCls = W_DictMultiObject def type(self, w_obj): if isinstance(w_obj, FakeString): @@ -933,7 +974,7 @@ def test_keys(self): self.fill_impl() - keys = self.impl.keys() + keys = self.impl.w_keys() # wrapped lists = lists in the fake space keys.sort() assert keys == [self.string, self.string2] self.check_not_devolved() @@ -1011,8 
+1052,8 @@ d.setitem("s", 12) d.delitem(F()) - assert "s" not in d.keys() - assert F() not in d.keys() + assert "s" not in d.w_keys() + assert F() not in d.w_keys() class TestStrDictImplementation(BaseTestRDictImplementation): StrategyClass = StringDictStrategy diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -486,6 +486,14 @@ list.__init__(l, ['a', 'b', 'c']) assert l is l0 assert l == ['a', 'b', 'c'] + list.__init__(l) + assert l == [] + + def test_explicit_new_init_more_cases(self): + for assignment in [[], (), [3], ["foo"]]: + l = [1, 2] + l.__init__(assignment) + assert l == list(assignment) def test_extend_list(self): l = l0 = [1] @@ -1173,6 +1181,20 @@ assert l == [] assert list(g) == [] + def test_uses_custom_iterator(self): + # obscure corner case: space.listview*() must not shortcut subclasses + # of dicts, because the OrderedDict in the stdlib relies on this. + # we extend the use case to lists and sets, i.e. all types that have + # strategies, to avoid surprizes depending on the strategy. 
+ for base, arg in [(list, []), (list, [5]), (list, ['x']), + (set, []), (set, [5]), (set, ['x']), + (dict, []), (dict, [(5,6)]), (dict, [('x',7)])]: + print base, arg + class SubClass(base): + def __iter__(self): + return iter("foobar") + assert list(SubClass(arg)) == ['f', 'o', 'o', 'b', 'a', 'r'] + class AppTestForRangeLists(AppTestW_ListObject): def setup_class(cls): diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -420,7 +420,7 @@ def test_listview_str(self): space = self.space - assert space.listview_str(space.wrap("a")) is None + assert space.listview_str(space.wrap(1)) == None w_l = self.space.newlist([self.space.wrap('a'), self.space.wrap('b')]) assert space.listview_str(w_l) == ["a", "b"] @@ -463,6 +463,44 @@ w_res = listobject.list_pop__List_ANY(space, w_l, space.w_None) # does not crash assert space.unwrap(w_res) == 3 + def test_create_list_from_set(self): + from pypy.objspace.std.setobject import W_SetObject + from pypy.objspace.std.setobject import _initialize_set + + space = self.space + w = space.wrap + + w_l = W_ListObject(space, [space.wrap(1), space.wrap(2), space.wrap(3)]) + + w_set = W_SetObject(self.space) + _initialize_set(self.space, w_set, w_l) + w_set.iter = None # make sure fast path is used + + w_l2 = W_ListObject(space, []) + space.call_method(w_l2, "__init__", w_set) + + w_l2.sort(False) + assert space.eq_w(w_l, w_l2) + + w_l = W_ListObject(space, [space.wrap("a"), space.wrap("b"), space.wrap("c")]) + _initialize_set(self.space, w_set, w_l) + + space.call_method(w_l2, "__init__", w_set) + + w_l2.sort(False) + assert space.eq_w(w_l, w_l2) + + + def test_listview_str_list(self): + space = self.space + w_l = W_ListObject(space, [space.wrap("a"), space.wrap("b")]) + assert self.space.listview_str(w_l) == ["a", "b"] + + def test_listview_int_list(self): + space = self.space + 
w_l = W_ListObject(space, [space.wrap(1), space.wrap(2), space.wrap(3)]) + assert self.space.listview_int(w_l) == [1, 2, 3] + class TestW_ListStrategiesDisabled: def setup_class(cls): diff --git a/pypy/objspace/std/test/test_setobject.py b/pypy/objspace/std/test/test_setobject.py --- a/pypy/objspace/std/test/test_setobject.py +++ b/pypy/objspace/std/test/test_setobject.py @@ -8,12 +8,14 @@ is not too wrong. """ import py.test -from pypy.objspace.std.setobject import W_SetObject, W_FrozensetObject +from pypy.objspace.std.setobject import W_SetObject, W_FrozensetObject, IntegerSetStrategy from pypy.objspace.std.setobject import _initialize_set -from pypy.objspace.std.setobject import newset, make_setdata_from_w_iterable +from pypy.objspace.std.setobject import newset from pypy.objspace.std.setobject import and__Set_Set from pypy.objspace.std.setobject import set_intersection__Set from pypy.objspace.std.setobject import eq__Set_Set +from pypy.conftest import gettestobjspace +from pypy.objspace.std.listobject import W_ListObject letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' @@ -29,12 +31,11 @@ self.false = self.space.w_False def test_and(self): - s = W_SetObject(self.space, newset(self.space)) + s = W_SetObject(self.space) _initialize_set(self.space, s, self.word) - t0 = W_SetObject(self.space, newset(self.space)) + t0 = W_SetObject(self.space) _initialize_set(self.space, t0, self.otherword) - t1 = W_FrozensetObject(self.space, - make_setdata_from_w_iterable(self.space, self.otherword)) + t1 = W_FrozensetObject(self.space, self.otherword) r0 = and__Set_Set(self.space, s, t0) r1 = and__Set_Set(self.space, s, t1) assert eq__Set_Set(self.space, r0, r1) == self.true @@ -42,9 +43,9 @@ assert eq__Set_Set(self.space, r0, sr) == self.true def test_compare(self): - s = W_SetObject(self.space, newset(self.space)) + s = W_SetObject(self.space) _initialize_set(self.space, s, self.word) - t = W_SetObject(self.space, newset(self.space)) + t = 
W_SetObject(self.space) _initialize_set(self.space, t, self.word) assert self.space.eq_w(s,t) u = self.space.wrap(set('simsalabim')) @@ -54,7 +55,247 @@ s = self.space.newset() assert self.space.str_w(self.space.repr(s)) == 'set([])' + def test_intersection_order(self): + # theses tests make sure that intersection is done in the correct order + # (smallest first) + space = self.space + a = W_SetObject(self.space) + _initialize_set(self.space, a, self.space.wrap("abcdefg")) + a.intersect = None + + b = W_SetObject(self.space) + _initialize_set(self.space, b, self.space.wrap("abc")) + + result = set_intersection__Set(space, a, [b]) + assert space.is_true(self.space.eq(result, W_SetObject(space, self.space.wrap("abc")))) + + c = W_SetObject(self.space) + _initialize_set(self.space, c, self.space.wrap("e")) + + d = W_SetObject(self.space) + _initialize_set(self.space, d, self.space.wrap("ab")) + + # if ordering works correct we should start with set e + a.get_storage_copy = None + b.get_storage_copy = None + d.get_storage_copy = None + + result = set_intersection__Set(space, a, [d,c,b]) + assert space.is_true(self.space.eq(result, W_SetObject(space, self.space.wrap("")))) + + def test_create_set_from_list(self): + from pypy.objspace.std.setobject import ObjectSetStrategy, StringSetStrategy + from pypy.objspace.std.floatobject import W_FloatObject + from pypy.objspace.std.model import W_Object + + w = self.space.wrap + intstr = self.space.fromcache(IntegerSetStrategy) + tmp_func = intstr.get_storage_from_list + # test if get_storage_from_list is no longer used + intstr.get_storage_from_list = None + + w_list = W_ListObject(self.space, [w(1), w(2), w(3)]) + w_set = W_SetObject(self.space) + _initialize_set(self.space, w_set, w_list) + assert w_set.strategy is intstr + assert intstr.unerase(w_set.sstorage) == {1:None, 2:None, 3:None} + + w_list = W_ListObject(self.space, [w("1"), w("2"), w("3")]) + w_set = W_SetObject(self.space) + _initialize_set(self.space, w_set, 
w_list) + assert w_set.strategy is self.space.fromcache(StringSetStrategy) + assert w_set.strategy.unerase(w_set.sstorage) == {"1":None, "2":None, "3":None} + + w_list = W_ListObject(self.space, [w("1"), w(2), w("3")]) + w_set = W_SetObject(self.space) + _initialize_set(self.space, w_set, w_list) + assert w_set.strategy is self.space.fromcache(ObjectSetStrategy) + for item in w_set.strategy.unerase(w_set.sstorage): + assert isinstance(item, W_Object) + + w_list = W_ListObject(self.space, [w(1.0), w(2.0), w(3.0)]) + w_set = W_SetObject(self.space) + _initialize_set(self.space, w_set, w_list) + assert w_set.strategy is self.space.fromcache(ObjectSetStrategy) + for item in w_set.strategy.unerase(w_set.sstorage): + assert isinstance(item, W_FloatObject) + + # changed cached object, need to change it back for other tests to pass + intstr.get_storage_from_list = tmp_func + + def test_listview_str_int_on_set(self): + w = self.space.wrap + + w_a = W_SetObject(self.space) + _initialize_set(self.space, w_a, w("abcdefg")) + assert sorted(self.space.listview_str(w_a)) == list("abcdefg") + assert self.space.listview_int(w_a) is None + + w_b = W_SetObject(self.space) + _initialize_set(self.space, w_b, self.space.newlist([w(1),w(2),w(3),w(4),w(5)])) + assert sorted(self.space.listview_int(w_b)) == [1,2,3,4,5] + assert self.space.listview_str(w_b) is None + class AppTestAppSetTest: + + def setup_class(self): + self.space = gettestobjspace() + w_fakeint = self.space.appexec([], """(): + class FakeInt(object): + def __init__(self, value): + self.value = value + def __hash__(self): + return hash(self.value) + + def __eq__(self, other): + if other == self.value: + return True + return False + return FakeInt + """) + self.w_FakeInt = w_fakeint + + def test_fakeint(self): + f1 = self.FakeInt(4) + assert f1 == 4 + assert hash(f1) == hash(4) + + def test_simple(self): + a = set([1,2,3]) + b = set() + b.add(4) + c = a.union(b) + assert c == set([1,2,3,4]) + + def test_generator(self): + 
def foo(): + for i in [1,2,3,4,5]: + yield i + b = set(foo()) + assert b == set([1,2,3,4,5]) + + a = set(x for x in [1,2,3]) + assert a == set([1,2,3]) + + def test_generator2(self): + def foo(): + for i in [1,2,3]: + yield i + class A(set): + pass + a = A([1,2,3,4,5]) + b = a.difference(foo()) + assert b == set([4,5]) + + def test_or(self): + a = set([0,1,2]) + b = a | set([1,2,3]) + assert b == set([0,1,2,3]) + + # test inplace or + a |= set([1,2,3]) + assert a == b + + def test_clear(self): + a = set([1,2,3]) + a.clear() + assert a == set() + + def test_sub(self): + a = set([1,2,3,4,5]) + b = set([2,3,4]) + a - b == [1,5] + a.__sub__(b) == [1,5] + + #inplace sub + a = set([1,2,3,4]) + b = set([1,4]) + a -= b + assert a == set([2,3]) + + def test_issubset(self): + a = set([1,2,3,4]) + b = set([2,3]) + assert b.issubset(a) + c = [1,2,3,4] + assert b.issubset(c) + + a = set([1,2,3,4]) + b = set(['1','2']) + assert not b.issubset(a) + + def test_issuperset(self): + a = set([1,2,3,4]) + b = set([2,3]) + assert a.issuperset(b) + c = [2,3] + assert a.issuperset(c) + + c = [1,1,1,1,1] + assert a.issuperset(c) + assert set([1,1,1,1,1]).issubset(a) + + a = set([1,2,3]) + assert a.issuperset(a) + assert not a.issuperset(set([1,2,3,4,5])) + + def test_inplace_and(test): + a = set([1,2,3,4]) + b = set([0,2,3,5,6]) + a &= b + assert a == set([2,3]) + + def test_discard_remove(self): + a = set([1,2,3,4,5]) + a.remove(1) + assert a == set([2,3,4,5]) + a.discard(2) + assert a == set([3,4,5]) + + raises(KeyError, "a.remove(6)") + + def test_pop(self): + b = set() + raises(KeyError, "b.pop()") + + a = set([1,2,3,4,5]) + for i in xrange(5): + a.pop() + assert a == set() + raises(KeyError, "a.pop()") + + def test_symmetric_difference(self): + a = set([1,2,3]) + b = set([3,4,5]) + c = a.symmetric_difference(b) + assert c == set([1,2,4,5]) + + a = set([1,2,3]) + b = [3,4,5] + c = a.symmetric_difference(b) + assert c == set([1,2,4,5]) + + a = set([1,2,3]) + b = set('abc') + c = 
a.symmetric_difference(b) + assert c == set([1,2,3,'a','b','c']) + + def test_symmetric_difference_update(self): + a = set([1,2,3]) + b = set([3,4,5]) + a.symmetric_difference_update(b) + assert a == set([1,2,4,5]) + + a = set([1,2,3]) + b = [3,4,5] + a.symmetric_difference_update(b) + assert a == set([1,2,4,5]) + + a = set([1,2,3]) + b = set([3,4,5]) + a ^= b + assert a == set([1,2,4,5]) + def test_subtype(self): class subset(set):pass a = subset() @@ -131,6 +372,8 @@ assert (set('abc') != set('abcd')) assert (frozenset('abc') != frozenset('abcd')) assert (frozenset('abc') != set('abcd')) + assert set() != set('abc') + assert set('abc') != set('abd') def test_libpython_equality(self): for thetype in [frozenset, set]: @@ -178,6 +421,9 @@ s1 = set('abc') s1.update('d', 'ef', frozenset('g')) assert s1 == set('abcdefg') + s1 = set() + s1.update(set('abcd')) + assert s1 == set('abcd') def test_recursive_repr(self): class A(object): @@ -330,6 +576,7 @@ assert not set([1,2,5]).isdisjoint(frozenset([4,5,6])) assert not set([1,2,5]).isdisjoint([4,5,6]) assert not set([1,2,5]).isdisjoint((4,5,6)) + assert set([1,2,3]).isdisjoint(set([3.5,4.0])) def test_intersection(self): assert set([1,2,3]).intersection(set([2,3,4])) == set([2,3]) @@ -347,6 +594,35 @@ assert s.intersection() == s assert s.intersection() is not s + def test_intersection_swap(self): + s1 = s3 = set([1,2,3,4,5]) + s2 = set([2,3,6,7]) + s1 &= s2 + assert s1 == set([2,3]) + assert s3 == set([2,3]) + + def test_intersection_generator(self): + def foo(): + for i in range(5): + yield i + + s1 = s2 = set([1,2,3,4,5,6]) + assert s1.intersection(foo()) == set([1,2,3,4]) + s1.intersection_update(foo()) + assert s1 == set([1,2,3,4]) + assert s2 == set([1,2,3,4]) + + def test_intersection_string(self): + s = set([1,2,3]) + o = 'abc' + assert s.intersection(o) == set() + + def test_intersection_float(self): + a = set([1,2,3]) + b = set([3.0,4.0,5.0]) + c = a.intersection(b) + assert c == set([3.0]) + def 
test_difference(self): assert set([1,2,3]).difference(set([2,3,4])) == set([1]) assert set([1,2,3]).difference(frozenset([2,3,4])) == set([1]) @@ -361,6 +637,9 @@ s = set([1,2,3]) assert s.difference() == s assert s.difference() is not s + assert set([1,2,3]).difference(set([2,3,4,'5'])) == set([1]) + assert set([1,2,3,'5']).difference(set([2,3,4])) == set([1,'5']) + assert set().difference(set([1,2,3])) == set() def test_intersection_update(self): s = set([1,2,3,4,7]) @@ -381,3 +660,250 @@ assert s == set([2,3]) s.difference_update(s) assert s == set([]) + + def test_empty_empty(self): + assert set() == set([]) + + def test_empty_difference(self): + e = set() + x = set([1,2,3]) + assert e.difference(x) == set() + assert x.difference(e) == x + + e.difference_update(x) + assert e == set() + x.difference_update(e) + assert x == set([1,2,3]) + + assert e.symmetric_difference(x) == x + assert x.symmetric_difference(e) == x + + e.symmetric_difference_update(e) + assert e == e + e.symmetric_difference_update(x) + assert e == x + + x.symmetric_difference_update(set()) + assert x == set([1,2,3]) + + def test_fastpath_with_strategies(self): + a = set([1,2,3]) + b = set(["a","b","c"]) + assert a.difference(b) == a + assert b.difference(a) == b + + a = set([1,2,3]) + b = set(["a","b","c"]) + assert a.intersection(b) == set() + assert b.intersection(a) == set() + + a = set([1,2,3]) + b = set(["a","b","c"]) + assert not a.issubset(b) + assert not b.issubset(a) + + a = set([1,2,3]) + b = set(["a","b","c"]) + assert a.isdisjoint(b) + assert b.isdisjoint(a) + + def test_empty_intersect(self): + e = set() + x = set([1,2,3]) + assert e.intersection(x) == e + assert x.intersection(e) == e + assert e & x == e + assert x & e == e + + e.intersection_update(x) + assert e == set() + e &= x + assert e == set() + x.intersection_update(e) + assert x == set() + + def test_empty_issuper(self): + e = set() + x = set([1,2,3]) + assert e.issuperset(e) == True + assert e.issuperset(x) == False + 
assert x.issuperset(e) == True + + assert e.issuperset(set()) + assert e.issuperset([]) + + def test_empty_issubset(self): + e = set() + x = set([1,2,3]) + assert e.issubset(e) == True + assert e.issubset(x) == True + assert x.issubset(e) == False + assert e.issubset([]) + + def test_empty_isdisjoint(self): + e = set() + x = set([1,2,3]) + assert e.isdisjoint(e) == True + assert e.isdisjoint(x) == True + assert x.isdisjoint(e) == True + + def test_empty_unhashable(self): + s = set() + raises(TypeError, s.difference, [[]]) + raises(TypeError, s.difference_update, [[]]) + raises(TypeError, s.intersection, [[]]) + raises(TypeError, s.intersection_update, [[]]) + raises(TypeError, s.symmetric_difference, [[]]) + raises(TypeError, s.symmetric_difference_update, [[]]) + raises(TypeError, s.update, [[]]) + + def test_super_with_generator(self): + def foo(): + for i in [1,2,3]: + yield i + set([1,2,3,4,5]).issuperset(foo()) + + def test_isdisjoint_with_generator(self): + def foo(): + for i in [1,2,3]: + yield i + set([1,2,3,4,5]).isdisjoint(foo()) + + def test_fakeint_and_equals(self): + s1 = set([1,2,3,4]) + s2 = set([1,2,self.FakeInt(3), 4]) + assert s1 == s2 + + def test_fakeint_and_discard(self): + # test with object strategy + s = set([1, 2, 'three', 'four']) + s.discard(self.FakeInt(2)) + assert s == set([1, 'three', 'four']) + + s.remove(self.FakeInt(1)) + assert s == set(['three', 'four']) + raises(KeyError, s.remove, self.FakeInt(16)) + + # test with int strategy + s = set([1,2,3,4]) + s.discard(self.FakeInt(4)) + assert s == set([1,2,3]) + s.remove(self.FakeInt(3)) + assert s == set([1,2]) + raises(KeyError, s.remove, self.FakeInt(16)) + + def test_fakeobject_and_has_key(self): + s = set([1,2,3,4,5]) + assert 5 in s + assert self.FakeInt(5) in s + + def test_fakeobject_and_pop(self): + s = set([1,2,3,self.FakeInt(4),5]) + assert s.pop() + assert s.pop() + assert s.pop() + assert s.pop() + assert s.pop() + assert s == set([]) + + def 
test_fakeobject_and_difference(self): + s = set([1,2,'3',4]) + s.difference_update([self.FakeInt(1), self.FakeInt(2)]) + assert s == set(['3',4]) + + s = set([1,2,3,4]) + s.difference_update([self.FakeInt(1), self.FakeInt(2)]) + assert s == set([3,4]) + + def test_frozenset_behavior(self): + s = set([1,2,3,frozenset([4])]) + raises(TypeError, s.difference_update, [1,2,3,set([4])]) + + s = set([1,2,3,frozenset([4])]) + s.discard(set([4])) + assert s == set([1,2,3]) + + def test_discard_unhashable(self): + s = set([1,2,3,4]) + raises(TypeError, s.discard, [1]) + + def test_discard_evil_compare(self): + class Evil(object): + def __init__(self, value): + self.value = value + def __hash__(self): + return hash(self.value) + def __eq__(self, other): + if isinstance(other, frozenset): + raise TypeError + if other == self.value: + return True + return False + s = set([1,2, Evil(frozenset([1]))]) + raises(TypeError, s.discard, set([1])) + + def test_create_set_from_set(self): + # no sharing + x = set([1,2,3]) + y = set(x) + a = x.pop() + assert y == set([1,2,3]) + assert len(x) == 2 + assert x.union(set([a])) == y + + def test_never_change_frozenset(self): + a = frozenset([1,2]) + b = a.copy() + assert a is b + + a = frozenset([1,2]) + b = a.union(set([3,4])) + assert b == set([1,2,3,4]) + assert a == set([1,2]) + + a = frozenset() + b = a.union(set([3,4])) + assert b == set([3,4]) + assert a == set() + + a = frozenset([1,2])#multiple + b = a.union(set([3,4]),[5,6]) + assert b == set([1,2,3,4,5,6]) + assert a == set([1,2]) + + a = frozenset([1,2,3]) + b = a.difference(set([3,4,5])) + assert b == set([1,2]) + assert a == set([1,2,3]) + + a = frozenset([1,2,3])#multiple + b = a.difference(set([3]), [2]) + assert b == set([1]) + assert a == set([1,2,3]) + + a = frozenset([1,2,3]) + b = a.symmetric_difference(set([3,4,5])) + assert b == set([1,2,4,5]) + assert a == set([1,2,3]) + + a = frozenset([1,2,3]) + b = a.intersection(set([3,4,5])) + assert b == set([3]) + assert a == 
set([1,2,3]) + + a = frozenset([1,2,3])#multiple + b = a.intersection(set([2,3,4]), [2]) + assert b == set([2]) + assert a == set([1,2,3]) + + raises(AttributeError, "frozenset().update()") + raises(AttributeError, "frozenset().difference_update()") + raises(AttributeError, "frozenset().symmetric_difference_update()") + raises(AttributeError, "frozenset().intersection_update()") + + def test_intersection_obj(self): + class Obj: + def __getitem__(self, i): + return [5, 3, 4][i] + s = set([10,3,2]).intersection(Obj()) + assert list(s) == [3] diff --git a/pypy/objspace/std/test/test_setstrategies.py b/pypy/objspace/std/test/test_setstrategies.py new file mode 100644 --- /dev/null +++ b/pypy/objspace/std/test/test_setstrategies.py @@ -0,0 +1,107 @@ +from pypy.objspace.std.setobject import W_SetObject +from pypy.objspace.std.setobject import IntegerSetStrategy, ObjectSetStrategy, EmptySetStrategy +from pypy.objspace.std.listobject import W_ListObject + +class TestW_SetStrategies: + + def wrapped(self, l): + return W_ListObject(self.space, [self.space.wrap(x) for x in l]) + + def test_from_list(self): + s = W_SetObject(self.space, self.wrapped([1,2,3,4,5])) + assert s.strategy is self.space.fromcache(IntegerSetStrategy) + + s = W_SetObject(self.space, self.wrapped([1,"two",3,"four",5])) + assert s.strategy is self.space.fromcache(ObjectSetStrategy) + + s = W_SetObject(self.space) + assert s.strategy is self.space.fromcache(EmptySetStrategy) + + s = W_SetObject(self.space, self.wrapped([])) + assert s.strategy is self.space.fromcache(EmptySetStrategy) + + def test_switch_to_object(self): + s = W_SetObject(self.space, self.wrapped([1,2,3,4,5])) + s.add(self.space.wrap("six")) + assert s.strategy is self.space.fromcache(ObjectSetStrategy) + + s1 = W_SetObject(self.space, self.wrapped([1,2,3,4,5])) + s2 = W_SetObject(self.space, self.wrapped(["six", "seven"])) + s1.update(s2) + assert s1.strategy is self.space.fromcache(ObjectSetStrategy) + + def 
test_symmetric_difference(self): + s1 = W_SetObject(self.space, self.wrapped([1,2,3,4,5])) + s2 = W_SetObject(self.space, self.wrapped(["six", "seven"])) + s1.symmetric_difference_update(s2) + assert s1.strategy is self.space.fromcache(ObjectSetStrategy) + + def test_intersection(self): + s1 = W_SetObject(self.space, self.wrapped([1,2,3,4,5])) + s2 = W_SetObject(self.space, self.wrapped([4,5, "six", "seven"])) + s3 = s1.intersect(s2) + skip("for now intersection with ObjectStrategy always results in another ObjectStrategy") + assert s3.strategy is self.space.fromcache(IntegerSetStrategy) + + def test_clear(self): + s1 = W_SetObject(self.space, self.wrapped([1,2,3,4,5])) + s1.clear() + assert s1.strategy is self.space.fromcache(EmptySetStrategy) + + def test_remove(self): + from pypy.objspace.std.setobject import set_remove__Set_ANY + s1 = W_SetObject(self.space, self.wrapped([1])) + set_remove__Set_ANY(self.space, s1, self.space.wrap(1)) + assert s1.strategy is self.space.fromcache(EmptySetStrategy) + + def test_union(self): + from pypy.objspace.std.setobject import set_union__Set + s1 = W_SetObject(self.space, self.wrapped([1,2,3,4,5])) + s2 = W_SetObject(self.space, self.wrapped([4,5,6,7])) + s3 = W_SetObject(self.space, self.wrapped([4,'5','6',7])) + s4 = set_union__Set(self.space, s1, [s2]) + s5 = set_union__Set(self.space, s1, [s3]) + assert s4.strategy is self.space.fromcache(IntegerSetStrategy) + assert s5.strategy is self.space.fromcache(ObjectSetStrategy) + + def test_discard(self): + class FakeInt(object): + def __init__(self, value): + self.value = value + def __hash__(self): + return hash(self.value) + def __eq__(self, other): + if other == self.value: + return True + return False + + from pypy.objspace.std.setobject import set_discard__Set_ANY + + s1 = W_SetObject(self.space, self.wrapped([1,2,3,4,5])) + set_discard__Set_ANY(self.space, s1, self.space.wrap("five")) + skip("currently not supported") + assert s1.strategy is 
self.space.fromcache(IntegerSetStrategy) + + set_discard__Set_ANY(self.space, s1, self.space.wrap(FakeInt(5))) + assert s1.strategy is self.space.fromcache(ObjectSetStrategy) + + def test_has_key(self): + class FakeInt(object): + def __init__(self, value): + self.value = value + def __hash__(self): + return hash(self.value) + def __eq__(self, other): + if other == self.value: + return True + return False + + from pypy.objspace.std.setobject import set_discard__Set_ANY + + s1 = W_SetObject(self.space, self.wrapped([1,2,3,4,5])) + assert not s1.has_key(self.space.wrap("five")) + skip("currently not supported") + assert s1.strategy is self.space.fromcache(IntegerSetStrategy) + + assert s1.has_key(self.space.wrap(FakeInt(2))) + assert s1.strategy is self.space.fromcache(ObjectSetStrategy) diff --git a/pypy/objspace/std/test/test_stringobject.py b/pypy/objspace/std/test/test_stringobject.py --- a/pypy/objspace/std/test/test_stringobject.py +++ b/pypy/objspace/std/test/test_stringobject.py @@ -85,6 +85,10 @@ w_slice = space.newslice(w(1), w_None, w(2)) assert self.space.eq_w(space.getitem(w_str, w_slice), w('el')) + def test_listview_str(self): + w_str = self.space.wrap('abcd') + assert self.space.listview_str(w_str) == list("abcd") + class AppTestStringObject: def test_format_wrongchar(self): diff --git a/pypy/translator/c/gcc/test/test_asmgcroot.py b/pypy/translator/c/gcc/test/test_asmgcroot.py --- a/pypy/translator/c/gcc/test/test_asmgcroot.py +++ b/pypy/translator/c/gcc/test/test_asmgcroot.py @@ -6,10 +6,18 @@ from pypy.annotation.listdef import s_list_of_strings from pypy import conftest from pypy.translator.tool.cbuild import ExternalCompilationInfo +from pypy.translator.platform import platform as compiler +from pypy.rlib.rarithmetic import is_emulated_long from pypy.rpython.lltypesystem import lltype, rffi from pypy.rlib.entrypoint import entrypoint, secondary_entrypoints from pypy.rpython.lltypesystem.lloperation import llop +_MSVC = compiler.name == "msvc" 
+_MINGW = compiler.name == "mingw32" +_WIN32 = _MSVC or _MINGW +_WIN64 = _WIN32 and is_emulated_long +# XXX get rid of 'is_emulated_long' and have a real config here. + class AbstractTestAsmGCRoot: # the asmgcroot gc transformer doesn't generate gc_reload_possibly_moved # instructions: @@ -17,6 +25,8 @@ @classmethod def make_config(cls): + if _MSVC and _WIN64: + py.test.skip("all asmgcroot tests disabled for MSVC X64") from pypy.config.pypyoption import get_pypy_config config = get_pypy_config(translating=True) config.translation.gc = cls.gcpolicy From noreply at buildbot.pypy.org Tue Mar 27 05:09:11 2012 From: noreply at buildbot.pypy.org (ctismer) Date: Tue, 27 Mar 2012 05:09:11 +0200 (CEST) Subject: [pypy-commit] pypy default: merge Message-ID: <20120327030911.9B3D0820D9@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: Changeset: r54022:b7a253cbbe2a Date: 2012-03-27 04:37 +0200 http://bitbucket.org/pypy/pypy/changeset/b7a253cbbe2a/ Log: merge From noreply at buildbot.pypy.org Tue Mar 27 05:09:14 2012 From: noreply at buildbot.pypy.org (ctismer) Date: Tue, 27 Mar 2012 05:09:14 +0200 (CEST) Subject: [pypy-commit] pypy default: Merge with win64-stage1 Message-ID: <20120327030914.03FC1820D9@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: Changeset: r54023:220568adc846 Date: 2012-03-27 03:45 +0100 http://bitbucket.org/pypy/pypy/changeset/220568adc846/ Log: Merge with win64-stage1 From noreply at buildbot.pypy.org Tue Mar 27 05:09:16 2012 From: noreply at buildbot.pypy.org (ctismer) Date: Tue, 27 Mar 2012 05:09:16 +0200 (CEST) Subject: [pypy-commit] pypy win64-stage1: merge Message-ID: <20120327030916.5F657820D9@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r54024:261d04c68c0f Date: 2012-03-27 05:07 +0200 http://bitbucket.org/pypy/pypy/changeset/261d04c68c0f/ Log: merge From noreply at buildbot.pypy.org Tue Mar 27 05:12:58 2012 From: noreply at buildbot.pypy.org (ctismer) Date: Tue, 27 Mar 2012 
05:12:58 +0200 (CEST) Subject: [pypy-commit] pypy default: Merge with win64-stage1 Message-ID: <20120327031258.32CB6820D9@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: Changeset: r54025:a9fc8654a167 Date: 2012-03-27 04:12 +0100 http://bitbucket.org/pypy/pypy/changeset/a9fc8654a167/ Log: Merge with win64-stage1 arghh, had a double head From noreply at buildbot.pypy.org Tue Mar 27 07:28:16 2012 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Mar 2012 07:28:16 +0200 (CEST) Subject: [pypy-commit] pypy default: fix Message-ID: <20120327052816.99B82820D9@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r54026:e36d713562b8 Date: 2012-03-27 01:21 +0200 http://bitbucket.org/pypy/pypy/changeset/e36d713562b8/ Log: fix diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -773,8 +773,9 @@ self.Perform(op, [loc0], loc1) self.xrm.possibly_free_var(op.getarg(0)) else: - loc0 = self.xrm.loc(op.getarg(0)) - loc1 = self.xrm.force_allocate_reg(op.result) + arg0 = op.getarg(0) + loc0 = self.xrm.loc(arg0) + loc1 = self.xrm.force_allocate_reg(op.result, forbidden_vars=[arg0]) self.Perform(op, [loc0], loc1) self.xrm.possibly_free_var(op.getarg(0)) From noreply at buildbot.pypy.org Tue Mar 27 07:28:17 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 27 Mar 2012 07:28:17 +0200 (CEST) Subject: [pypy-commit] pypy float-bytes: Close again. Message-ID: <20120327052817.D163F820D9@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: float-bytes Changeset: r54027:0d19de864ceb Date: 2012-03-27 01:15 -0400 http://bitbucket.org/pypy/pypy/changeset/0d19de864ceb/ Log: Close again. 
From noreply at buildbot.pypy.org Tue Mar 27 07:28:19 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 27 Mar 2012 07:28:19 +0200 (CEST) Subject: [pypy-commit] pypy float-bytes-2: merged default in Message-ID: <20120327052819.2A6D3820D9@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: float-bytes-2 Changeset: r54028:854291ae4aa1 Date: 2012-03-27 01:16 -0400 http://bitbucket.org/pypy/pypy/changeset/854291ae4aa1/ Log: merged default in diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -773,8 +773,9 @@ self.Perform(op, [loc0], loc1) self.xrm.possibly_free_var(op.getarg(0)) else: - loc0 = self.xrm.loc(op.getarg(0)) - loc1 = self.xrm.force_allocate_reg(op.result) + arg0 = op.getarg(0) + loc0 = self.xrm.loc(arg0) + loc1 = self.xrm.force_allocate_reg(op.result, forbidden_vars=[arg0]) self.Perform(op, [loc0], loc1) self.xrm.possibly_free_var(op.getarg(0)) diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -114,9 +114,12 @@ if step == 1 and 0 <= start <= stop: newdata = data[start:stop] else: - newdata = [data[start + i*step] for i in range(slicelength)] + newdata = _getitem_slice_multistep(data, start, step, slicelength) return W_BytearrayObject(newdata) +def _getitem_slice_multistep(data, start, step, slicelength): + return [data[start + i*step] for i in range(slicelength)] + def contains__Bytearray_Int(space, w_bytearray, w_char): char = space.int_w(w_char) if not 0 <= char < 256: From noreply at buildbot.pypy.org Tue Mar 27 07:28:20 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 27 Mar 2012 07:28:20 +0200 (CEST) Subject: [pypy-commit] pypy float-bytes-2: same changes throughout Message-ID: <20120327052820.65A8E820D9@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: float-bytes-2 
Changeset: r54029:ca80ca868587 Date: 2012-03-27 01:17 -0400 http://bitbucket.org/pypy/pypy/changeset/ca80ca868587/ Log: same changes throughout diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -777,7 +777,7 @@ loc0 = self.xrm.loc(arg0) loc1 = self.xrm.force_allocate_reg(op.result, forbidden_vars=[arg0]) self.Perform(op, [loc0], loc1) - self.xrm.possibly_free_var(op.getarg(0)) + self.xrm.possibly_free_var(arg0) def consider_convert_longlong_bytes_to_float(self, op): if longlong.is_64_bit: @@ -786,10 +786,11 @@ self.Perform(op, [loc0], loc1) self.rm.possibly_free_var(op.getarg(0)) else: - loc0 = self.xrm.make_sure_var_in_reg(op.getarg(0)) - loc1 = self.xrm.force_allocate_reg(op.result) + arg0 = op.getarg(0) + loc0 = self.xrm.make_sure_var_in_reg(arg0) + loc1 = self.xrm.force_allocate_reg(op.result, forbidden_vars=[arg0]) self.Perform(op, [loc0], loc1) - self.xrm.possibly_free_var(op.getarg(0)) + self.xrm.possibly_free_var(arg0) def _consider_llong_binop_xx(self, op): # must force both arguments into xmm registers, because we don't From noreply at buildbot.pypy.org Tue Mar 27 07:28:21 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 27 Mar 2012 07:28:21 +0200 (CEST) Subject: [pypy-commit] pypy float-bytes-2: runner test for this Message-ID: <20120327052821.A33C9820D9@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: float-bytes-2 Changeset: r54030:aca32821c9d1 Date: 2012-03-27 01:22 -0400 http://bitbucket.org/pypy/pypy/changeset/aca32821c9d1/ Log: runner test for this diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -27,6 +27,12 @@ def constfloat(x): return ConstFloat(longlong.getfloatstorage(x)) +def boxlonglong(ll): + if longlong.is_64_bit: + return BoxInt(ll) + else: + return BoxFloat(ll) + class 
Runner(object): @@ -1623,6 +1629,11 @@ [boxfloat(2.5)], t).value assert res == longlong2float.float2longlong(2.5) + bytes = longlong2float.float2longlong(2.5) + res = self.execute_operation(rop.CONVERT_LONGLONG_BYTES_TO_FLOAT, + [boxlonglong(res)], 'float').value + assert res == 2.5 + def test_ooops_non_gc(self): x = lltype.malloc(lltype.Struct('x'), flavor='raw') v = heaptracker.adr2int(llmemory.cast_ptr_to_adr(x)) From noreply at buildbot.pypy.org Tue Mar 27 07:28:22 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 27 Mar 2012 07:28:22 +0200 (CEST) Subject: [pypy-commit] pypy float-bytes-2: Merged default in. Message-ID: <20120327052822.F082C820D9@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: float-bytes-2 Changeset: r54031:5939db56ff70 Date: 2012-03-27 01:24 -0400 http://bitbucket.org/pypy/pypy/changeset/5939db56ff70/ Log: Merged default in. diff --git a/pypy/translator/c/gcc/test/test_asmgcroot.py b/pypy/translator/c/gcc/test/test_asmgcroot.py --- a/pypy/translator/c/gcc/test/test_asmgcroot.py +++ b/pypy/translator/c/gcc/test/test_asmgcroot.py @@ -7,10 +7,17 @@ from pypy import conftest from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.translator.platform import platform as compiler +from pypy.rlib.rarithmetic import is_emulated_long from pypy.rpython.lltypesystem import lltype, rffi from pypy.rlib.entrypoint import entrypoint, secondary_entrypoints from pypy.rpython.lltypesystem.lloperation import llop +_MSVC = compiler.name == "msvc" +_MINGW = compiler.name == "mingw32" +_WIN32 = _MSVC or _MINGW +_WIN64 = _WIN32 and is_emulated_long +# XXX get rid of 'is_emulated_long' and have a real config here. 
+ class AbstractTestAsmGCRoot: # the asmgcroot gc transformer doesn't generate gc_reload_possibly_moved # instructions: @@ -18,8 +25,8 @@ @classmethod def make_config(cls): - if compiler.name == "msvc": - py.test.skip("all asmgcroot tests disabled for MSVC") + if _MSVC and _WIN64: + py.test.skip("all asmgcroot tests disabled for MSVC X64") from pypy.config.pypyoption import get_pypy_config config = get_pypy_config(translating=True) config.translation.gc = cls.gcpolicy From noreply at buildbot.pypy.org Tue Mar 27 07:30:36 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 27 Mar 2012 07:30:36 +0200 (CEST) Subject: [pypy-commit] pypy float-bytes-2: fix test for 32-bits Message-ID: <20120327053036.B1D50820D9@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: float-bytes-2 Changeset: r54032:a07c5ce1beca Date: 2012-03-27 05:28 +0000 http://bitbucket.org/pypy/pypy/changeset/a07c5ce1beca/ Log: fix test for 32-bits diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -1632,7 +1632,7 @@ bytes = longlong2float.float2longlong(2.5) res = self.execute_operation(rop.CONVERT_LONGLONG_BYTES_TO_FLOAT, [boxlonglong(res)], 'float').value - assert res == 2.5 + assert longlong.getrealfloat(res) == 2.5 def test_ooops_non_gc(self): x = lltype.malloc(lltype.Struct('x'), flavor='raw') From noreply at buildbot.pypy.org Tue Mar 27 07:35:01 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 27 Mar 2012 07:35:01 +0200 (CEST) Subject: [pypy-commit] pypy float-bytes-2: 32bit fix? Message-ID: <20120327053501.2D22B820D9@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: float-bytes-2 Changeset: r54033:22cf389efeca Date: 2012-03-27 01:34 -0400 http://bitbucket.org/pypy/pypy/changeset/22cf389efeca/ Log: 32bit fix? 
diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py --- a/pypy/jit/backend/test/test_random.py +++ b/pypy/jit/backend/test/test_random.py @@ -328,6 +328,15 @@ def produce_into(self, builder, r): self.put(builder, [r.choice(builder.intvars)]) +class CastLongLongToFloatOperation(AbstractFloatOperation): + def produce_into(self, builder, r): + if longlong.is_64_bit: + self.put(builder, [r.choice(builder.intvars)]) + else: + if not builder.floatvars: + raise CannotProduceOperation + self.put(builder, [r.choice(builder.floatvars)]) + class CastFloatToIntOperation(AbstractFloatOperation): def produce_into(self, builder, r): if not builder.floatvars: @@ -450,7 +459,7 @@ OPERATIONS.append(CastFloatToIntOperation(rop.CAST_FLOAT_TO_INT)) OPERATIONS.append(CastIntToFloatOperation(rop.CAST_INT_TO_FLOAT)) OPERATIONS.append(CastFloatToIntOperation(rop.CONVERT_FLOAT_BYTES_TO_LONGLONG)) -OPERATIONS.append(CastIntToFloatOperation(rop.CONVERT_LONGLONG_BYTES_TO_FLOAT)) +OPERATIONS.append(CastLongLongToFloatOperation(rop.CONVERT_LONGLONG_BYTES_TO_FLOAT)) OperationBuilder.OPERATIONS = OPERATIONS From noreply at buildbot.pypy.org Tue Mar 27 22:02:54 2012 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Mar 2012 22:02:54 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: abstract for my talk at UCT Message-ID: <20120327200254.C2E4F820D9@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r4163:cb0a0f7c6b00 Date: 2012-03-27 22:02 +0200 http://bitbucket.org/pypy/extradoc/changeset/cb0a0f7c6b00/ Log: abstract for my talk at UCT diff --git a/talk/uct2012/abstract.rst b/talk/uct2012/abstract.rst new file mode 100644 --- /dev/null +++ b/talk/uct2012/abstract.rst @@ -0,0 +1,16 @@ +Building fast enough VMs in fast enough time +============================================ + +PyPy is a fast interpreter for the Python programming language. This however, +not the whole description. 
It's also a framework for building efficient +Virtual Machines for dynamic languages with relatively little effort. + +In this talk I would like to walk people through how the unique +infrastructure provided by the PyPy project let's you write efficient +virtual machines with minimal effort. This talk will cover the +architecture of the PyPy project, how to use it in your own VMs as +well as how to hook up an efficient garbage collector and Just In Time +compiler with minimal effort. + +This talk assumes no prior exposure to compiler techniques and assumes +some very basic knowledge of the Python programming language. From noreply at buildbot.pypy.org Tue Mar 27 23:23:56 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 27 Mar 2012 23:23:56 +0200 (CEST) Subject: [pypy-commit] pypy float-bytes-2: Make it work with exceptions. Message-ID: <20120327212356.B3058820D9@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: float-bytes-2 Changeset: r54034:1d2ad3c4d5c9 Date: 2012-03-27 17:23 -0400 http://bitbucket.org/pypy/pypy/changeset/1d2ad3c4d5c9/ Log: Make it work with exceptions. 
diff --git a/pypy/rlib/longlong2float.py b/pypy/rlib/longlong2float.py --- a/pypy/rlib/longlong2float.py +++ b/pypy/rlib/longlong2float.py @@ -87,6 +87,7 @@ def specialize_call(self, hop): [v_float] = hop.inputargs(lltype.Float) + hop.exception_cannot_occur() return hop.genop("convert_float_bytes_to_longlong", [v_float], resulttype=lltype.SignedLongLong) class LongLong2FloatEntry(ExtRegistryEntry): @@ -98,4 +99,5 @@ def specialize_call(self, hop): [v_longlong] = hop.inputargs(lltype.SignedLongLong) + hop.exception_cannot_occur() return hop.genop("convert_longlong_bytes_to_float", [v_longlong], resulttype=lltype.Float) diff --git a/pypy/rlib/test/test_longlong2float.py b/pypy/rlib/test/test_longlong2float.py --- a/pypy/rlib/test/test_longlong2float.py +++ b/pypy/rlib/test/test_longlong2float.py @@ -33,8 +33,15 @@ assert repr(res) == repr(x) def test_interpreted(): + def f(f1): + try: + ll = float2longlong(f1) + return longlong2float(ll) + except Exception: + return 500 + for x in enum_floats(): - res = interpret(fn, [x]) + res = interpret(f, [x]) assert repr(res) == repr(x) # ____________________________________________________________ From noreply at buildbot.pypy.org Tue Mar 27 23:26:45 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 27 Mar 2012 23:26:45 +0200 (CEST) Subject: [pypy-commit] pypy float-bytes-2: Added JVM support. Message-ID: <20120327212645.1E7ED820D9@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: float-bytes-2 Changeset: r54035:4c09a2e34853 Date: 2012-03-27 21:25 +0000 http://bitbucket.org/pypy/pypy/changeset/4c09a2e34853/ Log: Added JVM support. 
diff --git a/pypy/translator/jvm/opcodes.py b/pypy/translator/jvm/opcodes.py --- a/pypy/translator/jvm/opcodes.py +++ b/pypy/translator/jvm/opcodes.py @@ -243,4 +243,5 @@ 'force_cast': [PushAllArgs, CastPrimitive, StoreResult], 'convert_float_bytes_to_longlong': jvm.PYPYDOUBLEBYTESTOLONG, + 'convert_longlong_bytes_to_float': jvm.PYPYLONGBYTESTODOUBLE, }) diff --git a/pypy/translator/jvm/typesystem.py b/pypy/translator/jvm/typesystem.py --- a/pypy/translator/jvm/typesystem.py +++ b/pypy/translator/jvm/typesystem.py @@ -942,6 +942,7 @@ PYPYULONGTODOUBLE = Method.s(jPyPy, 'ulong_to_double', (jLong,), jDouble) PYPYLONGBITWISENEGATE = Method.v(jPyPy, 'long_bitwise_negate', (jLong,), jLong) PYPYDOUBLEBYTESTOLONG = Method.v(jPyPy, 'pypy__float2longlong', (jDouble,), jLong) +PYPYLONGBYTESTODOUBLE = Method.v(jPyPy, 'pypy__longlong2float', (jLong,), jDouble) PYPYSTRTOINT = Method.v(jPyPy, 'str_to_int', (jString,), jInt) PYPYSTRTOUINT = Method.v(jPyPy, 'str_to_uint', (jString,), jInt) PYPYSTRTOLONG = Method.v(jPyPy, 'str_to_long', (jString,), jLong) From noreply at buildbot.pypy.org Tue Mar 27 23:27:57 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 27 Mar 2012 23:27:57 +0200 (CEST) Subject: [pypy-commit] pypy float-bytes-2: Closing branch for merge. Message-ID: <20120327212757.49487820D9@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: float-bytes-2 Changeset: r54036:870f3aa6a7a8 Date: 2012-03-27 17:27 -0400 http://bitbucket.org/pypy/pypy/changeset/870f3aa6a7a8/ Log: Closing branch for merge. From noreply at buildbot.pypy.org Tue Mar 27 23:27:59 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 27 Mar 2012 23:27:59 +0200 (CEST) Subject: [pypy-commit] pypy default: Add a utility to convert longlong's bytes to a double. 
Message-ID: <20120327212759.B817D820D9@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r54037:4d18306a2fb3 Date: 2012-03-27 17:27 -0400 http://bitbucket.org/pypy/pypy/changeset/4d18306a2fb3/ Log: Add a utility to convert longlong's bytes to a double. diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -27,6 +27,12 @@ def constfloat(x): return ConstFloat(longlong.getfloatstorage(x)) +def boxlonglong(ll): + if longlong.is_64_bit: + return BoxInt(ll) + else: + return BoxFloat(ll) + class Runner(object): @@ -1623,6 +1629,11 @@ [boxfloat(2.5)], t).value assert res == longlong2float.float2longlong(2.5) + bytes = longlong2float.float2longlong(2.5) + res = self.execute_operation(rop.CONVERT_LONGLONG_BYTES_TO_FLOAT, + [boxlonglong(res)], 'float').value + assert longlong.getrealfloat(res) == 2.5 + def test_ooops_non_gc(self): x = lltype.malloc(lltype.Struct('x'), flavor='raw') v = heaptracker.adr2int(llmemory.cast_ptr_to_adr(x)) diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py --- a/pypy/jit/backend/test/test_random.py +++ b/pypy/jit/backend/test/test_random.py @@ -328,6 +328,15 @@ def produce_into(self, builder, r): self.put(builder, [r.choice(builder.intvars)]) +class CastLongLongToFloatOperation(AbstractFloatOperation): + def produce_into(self, builder, r): + if longlong.is_64_bit: + self.put(builder, [r.choice(builder.intvars)]) + else: + if not builder.floatvars: + raise CannotProduceOperation + self.put(builder, [r.choice(builder.floatvars)]) + class CastFloatToIntOperation(AbstractFloatOperation): def produce_into(self, builder, r): if not builder.floatvars: @@ -450,6 +459,7 @@ OPERATIONS.append(CastFloatToIntOperation(rop.CAST_FLOAT_TO_INT)) OPERATIONS.append(CastIntToFloatOperation(rop.CAST_INT_TO_FLOAT)) 
OPERATIONS.append(CastFloatToIntOperation(rop.CONVERT_FLOAT_BYTES_TO_LONGLONG)) +OPERATIONS.append(CastLongLongToFloatOperation(rop.CONVERT_LONGLONG_BYTES_TO_FLOAT)) OperationBuilder.OPERATIONS = OPERATIONS diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -1251,6 +1251,15 @@ else: self.mov(loc0, resloc) + def genop_convert_longlong_bytes_to_float(self, op, arglocs, resloc): + loc0, = arglocs + if longlong.is_64_bit: + assert isinstance(resloc, RegLoc) + assert isinstance(loc0, RegLoc) + self.mc.MOVD(resloc, loc0) + else: + self.mov(loc0, resloc) + def genop_guard_int_is_true(self, op, guard_op, guard_token, arglocs, resloc): guard_opnum = guard_op.getopnum() self.mc.CMP(arglocs[0], imm0) diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -773,10 +773,24 @@ self.Perform(op, [loc0], loc1) self.xrm.possibly_free_var(op.getarg(0)) else: - loc0 = self.xrm.loc(op.getarg(0)) + arg0 = op.getarg(0) + loc0 = self.xrm.loc(arg0) + loc1 = self.xrm.force_allocate_reg(op.result, forbidden_vars=[arg0]) + self.Perform(op, [loc0], loc1) + self.xrm.possibly_free_var(arg0) + + def consider_convert_longlong_bytes_to_float(self, op): + if longlong.is_64_bit: + loc0 = self.rm.make_sure_var_in_reg(op.getarg(0)) loc1 = self.xrm.force_allocate_reg(op.result) self.Perform(op, [loc0], loc1) - self.xrm.possibly_free_var(op.getarg(0)) + self.rm.possibly_free_var(op.getarg(0)) + else: + arg0 = op.getarg(0) + loc0 = self.xrm.make_sure_var_in_reg(arg0) + loc1 = self.xrm.force_allocate_reg(op.result, forbidden_vars=[arg0]) + self.Perform(op, [loc0], loc1) + self.xrm.possibly_free_var(arg0) def _consider_llong_binop_xx(self, op): # must force both arguments into xmm registers, because we don't diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py 
--- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -295,6 +295,7 @@ return op rewrite_op_convert_float_bytes_to_longlong = _noop_rewrite + rewrite_op_convert_longlong_bytes_to_float = _noop_rewrite # ---------- # Various kinds of calls diff --git a/pypy/jit/codewriter/test/test_flatten.py b/pypy/jit/codewriter/test/test_flatten.py --- a/pypy/jit/codewriter/test/test_flatten.py +++ b/pypy/jit/codewriter/test/test_flatten.py @@ -968,20 +968,22 @@ int_return %i2 """, transform=True) - def test_convert_float_bytes_to_int(self): - from pypy.rlib.longlong2float import float2longlong + def test_convert_float_bytes(self): + from pypy.rlib.longlong2float import float2longlong, longlong2float def f(x): - return float2longlong(x) + ll = float2longlong(x) + return longlong2float(ll) if longlong.is_64_bit: - result_var = "%i0" - return_op = "int_return" + tmp_var = "%i0" + result_var = "%f1" else: - result_var = "%f1" - return_op = "float_return" + tmp_var = "%f1" + result_var = "%f2" self.encoding_test(f, [25.0], """ - convert_float_bytes_to_longlong %%f0 -> %(result_var)s - %(return_op)s %(result_var)s - """ % {"result_var": result_var, "return_op": return_op}) + convert_float_bytes_to_longlong %%f0 -> %(tmp_var)s + convert_longlong_bytes_to_float %(tmp_var)s -> %(result_var)s + float_return %(result_var)s + """ % {"result_var": result_var, "tmp_var": tmp_var}, transform=True) def check_force_cast(FROM, TO, operations, value): diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -672,6 +672,11 @@ a = longlong.getrealfloat(a) return longlong2float.float2longlong(a) + @arguments(LONGLONG_TYPECODE, returns="f") + def bhimpl_convert_longlong_bytes_to_float(a): + a = longlong2float.longlong2float(a) + return longlong.getfloatstorage(a) + # ---------- # control flow operations diff --git a/pypy/jit/metainterp/pyjitpl.py 
b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -224,6 +224,7 @@ 'float_neg', 'float_abs', 'cast_ptr_to_int', 'cast_int_to_ptr', 'convert_float_bytes_to_longlong', + 'convert_longlong_bytes_to_float', ]: exec py.code.Source(''' @arguments("box") diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -420,6 +420,7 @@ 'CAST_FLOAT_TO_SINGLEFLOAT/1', 'CAST_SINGLEFLOAT_TO_FLOAT/1', 'CONVERT_FLOAT_BYTES_TO_LONGLONG/1', + 'CONVERT_LONGLONG_BYTES_TO_FLOAT/1', # 'INT_LT/2b', 'INT_LE/2b', diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -1,3 +1,4 @@ +import math import sys import py @@ -15,7 +16,7 @@ loop_invariant, elidable, promote, jit_debug, assert_green, AssertGreenFailed, unroll_safe, current_trace_length, look_inside_iff, isconstant, isvirtual, promote_string, set_param, record_known_class) -from pypy.rlib.longlong2float import float2longlong +from pypy.rlib.longlong2float import float2longlong, longlong2float from pypy.rlib.rarithmetic import ovfcheck, is_valid_int from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rpython.ootypesystem import ootype @@ -3795,15 +3796,15 @@ res = self.interp_operations(g, [1]) assert res == 3 - def test_float2longlong(self): + def test_float_bytes(self): def f(n): - return float2longlong(n) + ll = float2longlong(n) + return longlong2float(ll) for x in [2.5, float("nan"), -2.5, float("inf")]: # There are tests elsewhere to verify the correctness of this. 
- expected = float2longlong(x) res = self.interp_operations(f, [x]) - assert longlong.getfloatstorage(res) == expected + assert res == x or math.isnan(x) and math.isnan(res) class TestLLtype(BaseLLtypeTests, LLJitMixin): diff --git a/pypy/rlib/longlong2float.py b/pypy/rlib/longlong2float.py --- a/pypy/rlib/longlong2float.py +++ b/pypy/rlib/longlong2float.py @@ -21,7 +21,7 @@ FLOAT_ARRAY_PTR = lltype.Ptr(lltype.Array(rffi.FLOAT)) # these definitions are used only in tests, when not translated -def longlong2float_emulator(llval): +def longlong2float(llval): with lltype.scoped_alloc(DOUBLE_ARRAY_PTR.TO, 1) as d_array: ll_array = rffi.cast(LONGLONG_ARRAY_PTR, d_array) ll_array[0] = llval @@ -51,12 +51,6 @@ eci = ExternalCompilationInfo(includes=['string.h', 'assert.h'], post_include_bits=[""" -static double pypy__longlong2float(long long x) { - double dd; - assert(sizeof(double) == 8 && sizeof(long long) == 8); - memcpy(&dd, &x, 8); - return dd; -} static float pypy__uint2singlefloat(unsigned int x) { float ff; assert(sizeof(float) == 4 && sizeof(unsigned int) == 4); @@ -71,12 +65,6 @@ } """]) -longlong2float = rffi.llexternal( - "pypy__longlong2float", [rffi.LONGLONG], rffi.DOUBLE, - _callable=longlong2float_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True, sandboxsafe=True, - oo_primitive="pypy__longlong2float") - uint2singlefloat = rffi.llexternal( "pypy__uint2singlefloat", [rffi.UINT], rffi.FLOAT, _callable=uint2singlefloat_emulator, compilation_info=eci, @@ -99,4 +87,17 @@ def specialize_call(self, hop): [v_float] = hop.inputargs(lltype.Float) - return hop.genop("convert_float_bytes_to_longlong", [v_float], resulttype=hop.r_result) + hop.exception_cannot_occur() + return hop.genop("convert_float_bytes_to_longlong", [v_float], resulttype=lltype.SignedLongLong) + +class LongLong2FloatEntry(ExtRegistryEntry): + _about_ = longlong2float + + def compute_result_annotation(self, s_longlong): + assert 
annmodel.SomeInteger(knowntype=r_int64).contains(s_longlong) + return annmodel.SomeFloat() + + def specialize_call(self, hop): + [v_longlong] = hop.inputargs(lltype.SignedLongLong) + hop.exception_cannot_occur() + return hop.genop("convert_longlong_bytes_to_float", [v_longlong], resulttype=lltype.Float) diff --git a/pypy/rlib/test/test_longlong2float.py b/pypy/rlib/test/test_longlong2float.py --- a/pypy/rlib/test/test_longlong2float.py +++ b/pypy/rlib/test/test_longlong2float.py @@ -2,6 +2,7 @@ from pypy.rlib.longlong2float import longlong2float, float2longlong from pypy.rlib.longlong2float import uint2singlefloat, singlefloat2uint from pypy.rlib.rarithmetic import r_singlefloat +from pypy.rpython.test.test_llinterp import interpret def fn(f1): @@ -31,6 +32,18 @@ res = fn2(x) assert repr(res) == repr(x) +def test_interpreted(): + def f(f1): + try: + ll = float2longlong(f1) + return longlong2float(ll) + except Exception: + return 500 + + for x in enum_floats(): + res = interpret(f, [x]) + assert repr(res) == repr(x) + # ____________________________________________________________ def fnsingle(f1): diff --git a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py --- a/pypy/rpython/lltypesystem/lloperation.py +++ b/pypy/rpython/lltypesystem/lloperation.py @@ -350,6 +350,7 @@ 'truncate_longlong_to_int':LLOp(canfold=True), 'force_cast': LLOp(sideeffects=False), # only for rffi.cast() 'convert_float_bytes_to_longlong': LLOp(canfold=True), + 'convert_longlong_bytes_to_float': LLOp(canfold=True), # __________ pointer operations __________ diff --git a/pypy/rpython/lltypesystem/opimpl.py b/pypy/rpython/lltypesystem/opimpl.py --- a/pypy/rpython/lltypesystem/opimpl.py +++ b/pypy/rpython/lltypesystem/opimpl.py @@ -431,6 +431,10 @@ from pypy.rlib.longlong2float import float2longlong return float2longlong(a) +def op_convert_longlong_bytes_to_float(a): + from pypy.rlib.longlong2float import longlong2float + return longlong2float(a) + def 
op_unichar_eq(x, y): assert isinstance(x, unicode) and len(x) == 1 diff --git a/pypy/translator/c/src/float.h b/pypy/translator/c/src/float.h --- a/pypy/translator/c/src/float.h +++ b/pypy/translator/c/src/float.h @@ -43,5 +43,6 @@ #define OP_CAST_FLOAT_TO_LONGLONG(x,r) r = (long long)(x) #define OP_CAST_FLOAT_TO_ULONGLONG(x,r) r = (unsigned long long)(x) #define OP_CONVERT_FLOAT_BYTES_TO_LONGLONG(x,r) memcpy(&r, &x, sizeof(double)) +#define OP_CONVERT_LONGLONG_BYTES_TO_FLOAT(x,r) memcpy(&r, &x, sizeof(long long)) #endif diff --git a/pypy/translator/jvm/opcodes.py b/pypy/translator/jvm/opcodes.py --- a/pypy/translator/jvm/opcodes.py +++ b/pypy/translator/jvm/opcodes.py @@ -243,4 +243,5 @@ 'force_cast': [PushAllArgs, CastPrimitive, StoreResult], 'convert_float_bytes_to_longlong': jvm.PYPYDOUBLEBYTESTOLONG, + 'convert_longlong_bytes_to_float': jvm.PYPYLONGBYTESTODOUBLE, }) diff --git a/pypy/translator/jvm/typesystem.py b/pypy/translator/jvm/typesystem.py --- a/pypy/translator/jvm/typesystem.py +++ b/pypy/translator/jvm/typesystem.py @@ -942,6 +942,7 @@ PYPYULONGTODOUBLE = Method.s(jPyPy, 'ulong_to_double', (jLong,), jDouble) PYPYLONGBITWISENEGATE = Method.v(jPyPy, 'long_bitwise_negate', (jLong,), jLong) PYPYDOUBLEBYTESTOLONG = Method.v(jPyPy, 'pypy__float2longlong', (jDouble,), jLong) +PYPYLONGBYTESTODOUBLE = Method.v(jPyPy, 'pypy__longlong2float', (jLong,), jDouble) PYPYSTRTOINT = Method.v(jPyPy, 'str_to_int', (jString,), jInt) PYPYSTRTOUINT = Method.v(jPyPy, 'str_to_uint', (jString,), jInt) PYPYSTRTOLONG = Method.v(jPyPy, 'str_to_long', (jString,), jLong) From noreply at buildbot.pypy.org Tue Mar 27 23:31:09 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 27 Mar 2012 23:31:09 +0200 (CEST) Subject: [pypy-commit] pypy dynamic-specialized-tuple: Merged default. 
Message-ID: <20120327213109.534CD820D9@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: dynamic-specialized-tuple Changeset: r54038:c5b11dfcf240 Date: 2012-03-27 17:30 -0400 http://bitbucket.org/pypy/pypy/changeset/c5b11dfcf240/ Log: Merged default. diff --git a/lib-python/modified-2.7/site.py b/lib-python/modified-2.7/site.py --- a/lib-python/modified-2.7/site.py +++ b/lib-python/modified-2.7/site.py @@ -550,9 +550,18 @@ "'import usercustomize' failed; use -v for traceback" +def import_builtin_stuff(): + """PyPy specific: pre-import a few built-in modules, because + some programs actually rely on them to be in sys.modules :-(""" + import exceptions + if 'zipimport' in sys.builtin_module_names: + import zipimport + + def main(): global ENABLE_USER_SITE + import_builtin_stuff() abs__file__() known_paths = removeduppaths() if (os.name == "posix" and sys.path and diff --git a/lib-python/modified-2.7/test/test_set.py b/lib-python/modified-2.7/test/test_set.py --- a/lib-python/modified-2.7/test/test_set.py +++ b/lib-python/modified-2.7/test/test_set.py @@ -1568,7 +1568,7 @@ for meth in (s.union, s.intersection, s.difference, s.symmetric_difference, s.isdisjoint): for g in (G, I, Ig, L, R): expected = meth(data) - actual = meth(G(data)) + actual = meth(g(data)) if isinstance(expected, bool): self.assertEqual(actual, expected) else: diff --git a/lib_pypy/_locale.py b/lib_pypy/_locale.py deleted file mode 100644 --- a/lib_pypy/_locale.py +++ /dev/null @@ -1,337 +0,0 @@ -# ctypes implementation of _locale module by Victor Stinner, 2008-03-27 - -# ------------------------------------------------------------ -# Note that we also have our own interp-level implementation -# ------------------------------------------------------------ - -""" -Support for POSIX locales. 
-""" - -from ctypes import (Structure, POINTER, create_string_buffer, - c_ubyte, c_int, c_char_p, c_wchar_p, c_size_t) -from ctypes_support import standard_c_lib as libc -from ctypes_support import get_errno - -# load the platform-specific cache made by running locale.ctc.py -from ctypes_config_cache._locale_cache import * - -try: from __pypy__ import builtinify -except ImportError: builtinify = lambda f: f - - -# Ubuntu Gusty i386 structure -class lconv(Structure): - _fields_ = ( - # Numeric (non-monetary) information. - ("decimal_point", c_char_p), # Decimal point character. - ("thousands_sep", c_char_p), # Thousands separator. - - # Each element is the number of digits in each group; - # elements with higher indices are farther left. - # An element with value CHAR_MAX means that no further grouping is done. - # An element with value 0 means that the previous element is used - # for all groups farther left. */ - ("grouping", c_char_p), - - # Monetary information. - - # First three chars are a currency symbol from ISO 4217. - # Fourth char is the separator. Fifth char is '\0'. - ("int_curr_symbol", c_char_p), - ("currency_symbol", c_char_p), # Local currency symbol. - ("mon_decimal_point", c_char_p), # Decimal point character. - ("mon_thousands_sep", c_char_p), # Thousands separator. - ("mon_grouping", c_char_p), # Like `grouping' element (above). - ("positive_sign", c_char_p), # Sign for positive values. - ("negative_sign", c_char_p), # Sign for negative values. - ("int_frac_digits", c_ubyte), # Int'l fractional digits. - ("frac_digits", c_ubyte), # Local fractional digits. - # 1 if currency_symbol precedes a positive value, 0 if succeeds. - ("p_cs_precedes", c_ubyte), - # 1 iff a space separates currency_symbol from a positive value. - ("p_sep_by_space", c_ubyte), - # 1 if currency_symbol precedes a negative value, 0 if succeeds. - ("n_cs_precedes", c_ubyte), - # 1 iff a space separates currency_symbol from a negative value. 
- ("n_sep_by_space", c_ubyte), - - # Positive and negative sign positions: - # 0 Parentheses surround the quantity and currency_symbol. - # 1 The sign string precedes the quantity and currency_symbol. - # 2 The sign string follows the quantity and currency_symbol. - # 3 The sign string immediately precedes the currency_symbol. - # 4 The sign string immediately follows the currency_symbol. - ("p_sign_posn", c_ubyte), - ("n_sign_posn", c_ubyte), - # 1 if int_curr_symbol precedes a positive value, 0 if succeeds. - ("int_p_cs_precedes", c_ubyte), - # 1 iff a space separates int_curr_symbol from a positive value. - ("int_p_sep_by_space", c_ubyte), - # 1 if int_curr_symbol precedes a negative value, 0 if succeeds. - ("int_n_cs_precedes", c_ubyte), - # 1 iff a space separates int_curr_symbol from a negative value. - ("int_n_sep_by_space", c_ubyte), - # Positive and negative sign positions: - # 0 Parentheses surround the quantity and int_curr_symbol. - # 1 The sign string precedes the quantity and int_curr_symbol. - # 2 The sign string follows the quantity and int_curr_symbol. - # 3 The sign string immediately precedes the int_curr_symbol. - # 4 The sign string immediately follows the int_curr_symbol. 
- ("int_p_sign_posn", c_ubyte), - ("int_n_sign_posn", c_ubyte), - ) - -_setlocale = libc.setlocale -_setlocale.argtypes = (c_int, c_char_p) -_setlocale.restype = c_char_p - -_localeconv = libc.localeconv -_localeconv.argtypes = None -_localeconv.restype = POINTER(lconv) - -_strcoll = libc.strcoll -_strcoll.argtypes = (c_char_p, c_char_p) -_strcoll.restype = c_int - -_wcscoll = libc.wcscoll -_wcscoll.argtypes = (c_wchar_p, c_wchar_p) -_wcscoll.restype = c_int - -_strxfrm = libc.strxfrm -_strxfrm.argtypes = (c_char_p, c_char_p, c_size_t) -_strxfrm.restype = c_size_t - -HAS_LIBINTL = hasattr(libc, 'gettext') -if HAS_LIBINTL: - _gettext = libc.gettext - _gettext.argtypes = (c_char_p,) - _gettext.restype = c_char_p - - _dgettext = libc.dgettext - _dgettext.argtypes = (c_char_p, c_char_p) - _dgettext.restype = c_char_p - - _dcgettext = libc.dcgettext - _dcgettext.argtypes = (c_char_p, c_char_p, c_int) - _dcgettext.restype = c_char_p - - _textdomain = libc.textdomain - _textdomain.argtypes = (c_char_p,) - _textdomain.restype = c_char_p - - _bindtextdomain = libc.bindtextdomain - _bindtextdomain.argtypes = (c_char_p, c_char_p) - _bindtextdomain.restype = c_char_p - - HAS_BIND_TEXTDOMAIN_CODESET = hasattr(libc, 'bindtextdomain_codeset') - if HAS_BIND_TEXTDOMAIN_CODESET: - _bind_textdomain_codeset = libc.bindtextdomain_codeset - _bind_textdomain_codeset.argtypes = (c_char_p, c_char_p) - _bind_textdomain_codeset.restype = c_char_p - -class Error(Exception): - pass - -def fixup_ulcase(): - import string - #import strop - - # create uppercase map string - ul = [] - for c in xrange(256): - c = chr(c) - if c.isupper(): - ul.append(c) - ul = ''.join(ul) - string.uppercase = ul - #strop.uppercase = ul - - # create lowercase string - ul = [] - for c in xrange(256): - c = chr(c) - if c.islower(): - ul.append(c) - ul = ''.join(ul) - string.lowercase = ul - #strop.lowercase = ul - - # create letters string - ul = [] - for c in xrange(256): - c = chr(c) - if c.isalpha(): - ul.append(c) 
- ul = ''.join(ul) - string.letters = ul - - at builtinify -def setlocale(category, locale=None): - "(integer,string=None) -> string. Activates/queries locale processing." - if locale: - # set locale - result = _setlocale(category, locale) - if not result: - raise Error("unsupported locale setting") - - # record changes to LC_CTYPE - if category in (LC_CTYPE, LC_ALL): - fixup_ulcase() - else: - # get locale - result = _setlocale(category, None) - if not result: - raise Error("locale query failed") - return result - -def _copy_grouping(text): - groups = [ ord(group) for group in text ] - if groups: - groups.append(0) - return groups - - at builtinify -def localeconv(): - "() -> dict. Returns numeric and monetary locale-specific parameters." - - # if LC_NUMERIC is different in the C library, use saved value - lp = _localeconv() - l = lp.contents - - # hopefully, the localeconv result survives the C library calls - # involved herein - - # Numeric information - result = { - "decimal_point": l.decimal_point, - "thousands_sep": l.thousands_sep, - "grouping": _copy_grouping(l.grouping), - "int_curr_symbol": l.int_curr_symbol, - "currency_symbol": l.currency_symbol, - "mon_decimal_point": l.mon_decimal_point, - "mon_thousands_sep": l.mon_thousands_sep, - "mon_grouping": _copy_grouping(l.mon_grouping), - "positive_sign": l.positive_sign, - "negative_sign": l.negative_sign, - "int_frac_digits": l.int_frac_digits, - "frac_digits": l.frac_digits, - "p_cs_precedes": l.p_cs_precedes, - "p_sep_by_space": l.p_sep_by_space, - "n_cs_precedes": l.n_cs_precedes, - "n_sep_by_space": l.n_sep_by_space, - "p_sign_posn": l.p_sign_posn, - "n_sign_posn": l.n_sign_posn, - } - return result - - at builtinify -def strcoll(s1, s2): - "string,string -> int. Compares two strings according to the locale." - - # If both arguments are byte strings, use strcoll. - if isinstance(s1, str) and isinstance(s2, str): - return _strcoll(s1, s2) - - # If neither argument is unicode, it's an error. 
- if not isinstance(s1, unicode) and not isinstance(s2, unicode): - raise ValueError("strcoll arguments must be strings") - - # Convert the non-unicode argument to unicode. - s1 = unicode(s1) - s2 = unicode(s2) - - # Collate the strings. - return _wcscoll(s1, s2) - - at builtinify -def strxfrm(s): - "string -> string. Returns a string that behaves for cmp locale-aware." - - # assume no change in size, first - n1 = len(s) + 1 - buf = create_string_buffer(n1) - n2 = _strxfrm(buf, s, n1) + 1 - if n2 > n1: - # more space needed - buf = create_string_buffer(n2) - _strxfrm(buf, s, n2) - return buf.value - - at builtinify -def getdefaultlocale(): - # TODO: Port code from CPython for Windows and Mac OS - raise NotImplementedError() - -if HAS_LANGINFO: - _nl_langinfo = libc.nl_langinfo - _nl_langinfo.argtypes = (nl_item,) - _nl_langinfo.restype = c_char_p - - def nl_langinfo(key): - """nl_langinfo(key) -> string - Return the value for the locale information associated with key.""" - # Check whether this is a supported constant. GNU libc sometimes - # returns numeric values in the char* return value, which would - # crash PyString_FromString. 
- result = _nl_langinfo(key) - if result is not None: - return result - raise ValueError("unsupported langinfo constant") - -if HAS_LIBINTL: - @builtinify - def gettext(msg): - """gettext(msg) -> string - Return translation of msg.""" - return _gettext(msg) - - @builtinify - def dgettext(domain, msg): - """dgettext(domain, msg) -> string - Return translation of msg in domain.""" - return _dgettext(domain, msg) - - @builtinify - def dcgettext(domain, msg, category): - """dcgettext(domain, msg, category) -> string - Return translation of msg in domain and category.""" - return _dcgettext(domain, msg, category) - - @builtinify - def textdomain(domain): - """textdomain(domain) -> string - Set the C library's textdomain to domain, returning the new domain.""" - return _textdomain(domain) - - @builtinify - def bindtextdomain(domain, dir): - """bindtextdomain(domain, dir) -> string - Bind the C library's domain to dir.""" - dirname = _bindtextdomain(domain, dir) - if not dirname: - errno = get_errno() - raise OSError(errno) - return dirname - - if HAS_BIND_TEXTDOMAIN_CODESET: - @builtinify - def bind_textdomain_codeset(domain, codeset): - """bind_textdomain_codeset(domain, codeset) -> string - Bind the C library's domain to codeset.""" - codeset = _bind_textdomain_codeset(domain, codeset) - if codeset: - return codeset - return None - -__all__ = ( - 'Error', - 'setlocale', 'localeconv', 'strxfrm', 'strcoll', -) + ALL_CONSTANTS -if HAS_LIBINTL: - __all__ += ('gettext', 'dgettext', 'dcgettext', 'textdomain', - 'bindtextdomain') - if HAS_BIND_TEXTDOMAIN_CODESET: - __all__ += ('bind_textdomain_codeset',) -if HAS_LANGINFO: - __all__ += ('nl_langinfo',) diff --git a/lib_pypy/array.py b/lib_pypy/array.py deleted file mode 100644 --- a/lib_pypy/array.py +++ /dev/null @@ -1,531 +0,0 @@ -"""This module defines an object type which can efficiently represent -an array of basic values: characters, integers, floating point -numbers. 
Arrays are sequence types and behave very much like lists, -except that the type of objects stored in them is constrained. The -type is specified at object creation time by using a type code, which -is a single character. The following type codes are defined: - - Type code C Type Minimum size in bytes - 'c' character 1 - 'b' signed integer 1 - 'B' unsigned integer 1 - 'u' Unicode character 2 - 'h' signed integer 2 - 'H' unsigned integer 2 - 'i' signed integer 2 - 'I' unsigned integer 2 - 'l' signed integer 4 - 'L' unsigned integer 4 - 'f' floating point 4 - 'd' floating point 8 - -The constructor is: - -array(typecode [, initializer]) -- create a new array -""" - -from struct import calcsize, pack, pack_into, unpack_from -import operator - -# the buffer-like object to use internally: trying from -# various places in order... -try: - import _rawffi # a reasonable implementation based - _RAWARRAY = _rawffi.Array('c') # on raw_malloc, and providing a - def bytebuffer(size): # real address - return _RAWARRAY(size, autofree=True) - def getbufaddress(buf): - return buf.buffer -except ImportError: - try: - from __pypy__ import bytebuffer # a reasonable implementation - def getbufaddress(buf): # compatible with oo backends, - return 0 # but no address - except ImportError: - # not running on PyPy. Fall back to ctypes... - import ctypes - bytebuffer = ctypes.create_string_buffer - def getbufaddress(buf): - voidp = ctypes.cast(ctypes.pointer(buf), ctypes.c_void_p) - return voidp.value - -# ____________________________________________________________ - -TYPECODES = "cbBuhHiIlLfd" - -class array(object): - """array(typecode [, initializer]) -> array - - Return a new array whose items are restricted by typecode, and - initialized from the optional initializer value, which must be a list, - string. or iterable over elements of the appropriate type. - - Arrays represent basic values and behave very much like lists, except - the type of objects stored in them is constrained. 
- - Methods: - - append() -- append a new item to the end of the array - buffer_info() -- return information giving the current memory info - byteswap() -- byteswap all the items of the array - count() -- return number of occurences of an object - extend() -- extend array by appending multiple elements from an iterable - fromfile() -- read items from a file object - fromlist() -- append items from the list - fromstring() -- append items from the string - index() -- return index of first occurence of an object - insert() -- insert a new item into the array at a provided position - pop() -- remove and return item (default last) - read() -- DEPRECATED, use fromfile() - remove() -- remove first occurence of an object - reverse() -- reverse the order of the items in the array - tofile() -- write all items to a file object - tolist() -- return the array converted to an ordinary list - tostring() -- return the array converted to a string - write() -- DEPRECATED, use tofile() - - Attributes: - - typecode -- the typecode character used to create the array - itemsize -- the length in bytes of one array item - """ - __slots__ = ["typecode", "itemsize", "_data", "_descriptor", "__weakref__"] - - def __new__(cls, typecode, initializer=[], **extrakwds): - self = object.__new__(cls) - if cls is array and extrakwds: - raise TypeError("array() does not take keyword arguments") - if not isinstance(typecode, str) or len(typecode) != 1: - raise TypeError( - "array() argument 1 must be char, not %s" % type(typecode)) - if typecode not in TYPECODES: - raise ValueError( - "bad typecode (must be one of %s)" % ', '.join(TYPECODES)) - self._data = bytebuffer(0) - self.typecode = typecode - self.itemsize = calcsize(typecode) - if isinstance(initializer, list): - self.fromlist(initializer) - elif isinstance(initializer, str): - self.fromstring(initializer) - elif isinstance(initializer, unicode) and self.typecode == "u": - self.fromunicode(initializer) - else: - self.extend(initializer) - 
return self - - def _clear(self): - self._data = bytebuffer(0) - - ##### array-specific operations - - def fromfile(self, f, n): - """Read n objects from the file object f and append them to the end of - the array. Also called as read.""" - if not isinstance(f, file): - raise TypeError("arg1 must be open file") - size = self.itemsize * n - item = f.read(size) - if len(item) < size: - raise EOFError("not enough items in file") - self.fromstring(item) - - def fromlist(self, l): - """Append items to array from list.""" - if not isinstance(l, list): - raise TypeError("arg must be list") - self._fromiterable(l) - - def fromstring(self, s): - """Appends items from the string, interpreting it as an array of machine - values, as if it had been read from a file using the fromfile() - method.""" - if isinstance(s, unicode): - s = str(s) - self._frombuffer(s) - - def _frombuffer(self, s): - length = len(s) - if length % self.itemsize != 0: - raise ValueError("string length not a multiple of item size") - boundary = len(self._data) - newdata = bytebuffer(boundary + length) - newdata[:boundary] = self._data - newdata[boundary:] = s - self._data = newdata - - def fromunicode(self, ustr): - """Extends this array with data from the unicode string ustr. The array - must be a type 'u' array; otherwise a ValueError is raised. Use - array.fromstring(ustr.encode(...)) to append Unicode data to an array of - some other type.""" - if not self.typecode == "u": - raise ValueError( - "fromunicode() may only be called on type 'u' arrays") - # XXX the following probable bug is not emulated: - # CPython accepts a non-unicode string or a buffer, and then - # behaves just like fromstring(), except that it strangely truncates - # string arguments at multiples of the unicode byte size. - # Let's only accept unicode arguments for now. 
- if not isinstance(ustr, unicode): - raise TypeError("fromunicode() argument should probably be " - "a unicode string") - # _frombuffer() does the currect thing using - # the buffer behavior of unicode objects - self._frombuffer(buffer(ustr)) - - def tofile(self, f): - """Write all items (as machine values) to the file object f. Also - called as write.""" - if not isinstance(f, file): - raise TypeError("arg must be open file") - f.write(self.tostring()) - - def tolist(self): - """Convert array to an ordinary list with the same items.""" - count = len(self._data) // self.itemsize - return list(unpack_from('%d%s' % (count, self.typecode), self._data)) - - def tostring(self): - return self._data[:] - - def __buffer__(self): - return buffer(self._data) - - def tounicode(self): - """Convert the array to a unicode string. The array must be a type 'u' - array; otherwise a ValueError is raised. Use array.tostring().decode() - to obtain a unicode string from an array of some other type.""" - if self.typecode != "u": - raise ValueError("tounicode() may only be called on type 'u' arrays") - # XXX performance is not too good - return u"".join(self.tolist()) - - def byteswap(self): - """Byteswap all items of the array. If the items in the array are not - 1, 2, 4, or 8 bytes in size, RuntimeError is raised.""" - if self.itemsize not in [1, 2, 4, 8]: - raise RuntimeError("byteswap not supported for this array") - # XXX slowish - itemsize = self.itemsize - bytes = self._data - for start in range(0, len(bytes), itemsize): - stop = start + itemsize - bytes[start:stop] = bytes[start:stop][::-1] - - def buffer_info(self): - """Return a tuple (address, length) giving the current memory address - and the length in items of the buffer used to hold array's contents. The - length should be multiplied by the itemsize attribute to calculate the - buffer length in bytes. 
On PyPy the address might be meaningless - (returned as 0), depending on the available modules.""" - return (getbufaddress(self._data), len(self)) - - read = fromfile - - write = tofile - - ##### general object protocol - - def __repr__(self): - if len(self._data) == 0: - return "array('%s')" % self.typecode - elif self.typecode == "c": - return "array('%s', %s)" % (self.typecode, repr(self.tostring())) - elif self.typecode == "u": - return "array('%s', %s)" % (self.typecode, repr(self.tounicode())) - else: - return "array('%s', %s)" % (self.typecode, repr(self.tolist())) - - def __copy__(self): - a = array(self.typecode) - a._data = bytebuffer(len(self._data)) - a._data[:] = self._data - return a - - def __eq__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) == buffer(other._data) - else: - return self.tolist() == other.tolist() - - def __ne__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) != buffer(other._data) - else: - return self.tolist() != other.tolist() - - def __lt__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) < buffer(other._data) - else: - return self.tolist() < other.tolist() - - def __gt__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) > buffer(other._data) - else: - return self.tolist() > other.tolist() - - def __le__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) <= buffer(other._data) - else: - return self.tolist() <= other.tolist() - - def __ge__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) >= buffer(other._data) - else: - return self.tolist() >= other.tolist() - - 
def __reduce__(self): - dict = getattr(self, '__dict__', None) - data = self.tostring() - if data: - initargs = (self.typecode, data) - else: - initargs = (self.typecode,) - return (type(self), initargs, dict) - - ##### list methods - - def append(self, x): - """Append new value x to the end of the array.""" - self._frombuffer(pack(self.typecode, x)) - - def count(self, x): - """Return number of occurences of x in the array.""" - return operator.countOf(self, x) - - def extend(self, iterable): - """Append items to the end of the array.""" - if isinstance(iterable, array) \ - and not self.typecode == iterable.typecode: - raise TypeError("can only extend with array of same kind") - self._fromiterable(iterable) - - def index(self, x): - """Return index of first occurence of x in the array.""" - return operator.indexOf(self, x) - - def insert(self, i, x): - """Insert a new item x into the array before position i.""" - seqlength = len(self) - if i < 0: - i += seqlength - if i < 0: - i = 0 - elif i > seqlength: - i = seqlength - boundary = i * self.itemsize - data = pack(self.typecode, x) - newdata = bytebuffer(len(self._data) + len(data)) - newdata[:boundary] = self._data[:boundary] - newdata[boundary:boundary+self.itemsize] = data - newdata[boundary+self.itemsize:] = self._data[boundary:] - self._data = newdata - - def pop(self, i=-1): - """Return the i-th element and delete it from the array. 
i defaults to - -1.""" - seqlength = len(self) - if i < 0: - i += seqlength - if not (0 <= i < seqlength): - raise IndexError(i) - boundary = i * self.itemsize - result = unpack_from(self.typecode, self._data, boundary)[0] - newdata = bytebuffer(len(self._data) - self.itemsize) - newdata[:boundary] = self._data[:boundary] - newdata[boundary:] = self._data[boundary+self.itemsize:] - self._data = newdata - return result - - def remove(self, x): - """Remove the first occurence of x in the array.""" - self.pop(self.index(x)) - - def reverse(self): - """Reverse the order of the items in the array.""" - lst = self.tolist() - lst.reverse() - self._clear() - self.fromlist(lst) - - ##### list protocol - - def __len__(self): - return len(self._data) // self.itemsize - - def __add__(self, other): - if not isinstance(other, array): - raise TypeError("can only append array to array") - if self.typecode != other.typecode: - raise TypeError("bad argument type for built-in operation") - return array(self.typecode, buffer(self._data) + buffer(other._data)) - - def __mul__(self, repeat): - return array(self.typecode, buffer(self._data) * repeat) - - __rmul__ = __mul__ - - def __getitem__(self, i): - seqlength = len(self) - if isinstance(i, slice): - start, stop, step = i.indices(seqlength) - if step != 1: - sublist = self.tolist()[i] # fall-back - return array(self.typecode, sublist) - if start < 0: - start = 0 - if stop < start: - stop = start - assert stop <= seqlength - return array(self.typecode, self._data[start * self.itemsize : - stop * self.itemsize]) - else: - if i < 0: - i += seqlength - if self.typecode == 'c': # speed trick - return self._data[i] - if not (0 <= i < seqlength): - raise IndexError(i) - boundary = i * self.itemsize - return unpack_from(self.typecode, self._data, boundary)[0] - - def __getslice__(self, i, j): - return self.__getitem__(slice(i, j)) - - def __setitem__(self, i, x): - if isinstance(i, slice): - if (not isinstance(x, array) - or self.typecode != 
x.typecode): - raise TypeError("can only assign array of same kind" - " to array slice") - seqlength = len(self) - start, stop, step = i.indices(seqlength) - if step != 1: - sublist = self.tolist() # fall-back - sublist[i] = x.tolist() - self._clear() - self.fromlist(sublist) - return - if start < 0: - start = 0 - if stop < start: - stop = start - assert stop <= seqlength - boundary1 = start * self.itemsize - boundary2 = stop * self.itemsize - boundary2new = boundary1 + len(x._data) - if boundary2 == boundary2new: - self._data[boundary1:boundary2] = x._data - else: - newdata = bytebuffer(len(self._data) + boundary2new-boundary2) - newdata[:boundary1] = self._data[:boundary1] - newdata[boundary1:boundary2new] = x._data - newdata[boundary2new:] = self._data[boundary2:] - self._data = newdata - else: - seqlength = len(self) - if i < 0: - i += seqlength - if self.typecode == 'c': # speed trick - self._data[i] = x - return - if not (0 <= i < seqlength): - raise IndexError(i) - boundary = i * self.itemsize - pack_into(self.typecode, self._data, boundary, x) - - def __setslice__(self, i, j, x): - self.__setitem__(slice(i, j), x) - - def __delitem__(self, i): - if isinstance(i, slice): - seqlength = len(self) - start, stop, step = i.indices(seqlength) - if start < 0: - start = 0 - if stop < start: - stop = start - assert stop <= seqlength - if step != 1: - sublist = self.tolist() # fall-back - del sublist[i] - self._clear() - self.fromlist(sublist) - return - dellength = stop - start - boundary1 = start * self.itemsize - boundary2 = stop * self.itemsize - newdata = bytebuffer(len(self._data) - (boundary2-boundary1)) - newdata[:boundary1] = self._data[:boundary1] - newdata[boundary1:] = self._data[boundary2:] - self._data = newdata - else: - seqlength = len(self) - if i < 0: - i += seqlength - if not (0 <= i < seqlength): - raise IndexError(i) - boundary = i * self.itemsize - newdata = bytebuffer(len(self._data) - self.itemsize) - newdata[:boundary] = self._data[:boundary] 
- newdata[boundary:] = self._data[boundary+self.itemsize:] - self._data = newdata - - def __delslice__(self, i, j): - self.__delitem__(slice(i, j)) - - def __contains__(self, item): - for x in self: - if x == item: - return True - return False - - def __iadd__(self, other): - if not isinstance(other, array): - raise TypeError("can only extend array with array") - self.extend(other) - return self - - def __imul__(self, repeat): - newdata = buffer(self._data) * repeat - self._data = bytebuffer(len(newdata)) - self._data[:] = newdata - return self - - def __iter__(self): - p = 0 - typecode = self.typecode - itemsize = self.itemsize - while p < len(self._data): - yield unpack_from(typecode, self._data, p)[0] - p += itemsize - - ##### internal methods - - def _fromiterable(self, iterable): - iterable = tuple(iterable) - n = len(iterable) - boundary = len(self._data) - newdata = bytebuffer(boundary + n * self.itemsize) - newdata[:boundary] = self._data - pack_into('%d%s' % (n, self.typecode), newdata, boundary, *iterable) - self._data = newdata - -ArrayType = array diff --git a/lib_pypy/binascii.py b/lib_pypy/binascii.py deleted file mode 100644 --- a/lib_pypy/binascii.py +++ /dev/null @@ -1,720 +0,0 @@ -"""A pure Python implementation of binascii. - -Rather slow and buggy in corner cases. -PyPy provides an RPython version too. 
-""" - -class Error(Exception): - pass - -class Done(Exception): - pass - -class Incomplete(Exception): - pass - -def a2b_uu(s): - if not s: - return '' - - length = (ord(s[0]) - 0x20) % 64 - - def quadruplets_gen(s): - while s: - try: - yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3]) - except IndexError: - s += ' ' - yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3]) - return - s = s[4:] - - try: - result = [''.join( - [chr((A - 0x20) << 2 | (((B - 0x20) >> 4) & 0x3)), - chr(((B - 0x20) & 0xf) << 4 | (((C - 0x20) >> 2) & 0xf)), - chr(((C - 0x20) & 0x3) << 6 | ((D - 0x20) & 0x3f)) - ]) for A, B, C, D in quadruplets_gen(s[1:].rstrip())] - except ValueError: - raise Error('Illegal char') - result = ''.join(result) - trailingdata = result[length:] - if trailingdata.strip('\x00'): - raise Error('Trailing garbage') - result = result[:length] - if len(result) < length: - result += ((length - len(result)) * '\x00') - return result - - -def b2a_uu(s): - length = len(s) - if length > 45: - raise Error('At most 45 bytes at once') - - def triples_gen(s): - while s: - try: - yield ord(s[0]), ord(s[1]), ord(s[2]) - except IndexError: - s += '\0\0' - yield ord(s[0]), ord(s[1]), ord(s[2]) - return - s = s[3:] - - result = [''.join( - [chr(0x20 + (( A >> 2 ) & 0x3F)), - chr(0x20 + (((A << 4) | ((B >> 4) & 0xF)) & 0x3F)), - chr(0x20 + (((B << 2) | ((C >> 6) & 0x3)) & 0x3F)), - chr(0x20 + (( C ) & 0x3F))]) - for A, B, C in triples_gen(s)] - return chr(ord(' ') + (length & 077)) + ''.join(result) + '\n' - - -table_a2b_base64 = { - 'A': 0, - 'B': 1, - 'C': 2, - 'D': 3, - 'E': 4, - 'F': 5, - 'G': 6, - 'H': 7, - 'I': 8, - 'J': 9, - 'K': 10, - 'L': 11, - 'M': 12, - 'N': 13, - 'O': 14, - 'P': 15, - 'Q': 16, - 'R': 17, - 'S': 18, - 'T': 19, - 'U': 20, - 'V': 21, - 'W': 22, - 'X': 23, - 'Y': 24, - 'Z': 25, - 'a': 26, - 'b': 27, - 'c': 28, - 'd': 29, - 'e': 30, - 'f': 31, - 'g': 32, - 'h': 33, - 'i': 34, - 'j': 35, - 'k': 36, - 'l': 37, - 'm': 38, - 'n': 39, - 'o': 40, - 'p': 41, - 'q': 42, - 
'r': 43, - 's': 44, - 't': 45, - 'u': 46, - 'v': 47, - 'w': 48, - 'x': 49, - 'y': 50, - 'z': 51, - '0': 52, - '1': 53, - '2': 54, - '3': 55, - '4': 56, - '5': 57, - '6': 58, - '7': 59, - '8': 60, - '9': 61, - '+': 62, - '/': 63, - '=': 0, -} - - -def a2b_base64(s): - if not isinstance(s, (str, unicode)): - raise TypeError("expected string or unicode, got %r" % (s,)) - s = s.rstrip() - # clean out all invalid characters, this also strips the final '=' padding - # check for correct padding - - def next_valid_char(s, pos): - for i in range(pos + 1, len(s)): - c = s[i] - if c < '\x7f': - try: - table_a2b_base64[c] - return c - except KeyError: - pass - return None - - quad_pos = 0 - leftbits = 0 - leftchar = 0 - res = [] - for i, c in enumerate(s): - if c > '\x7f' or c == '\n' or c == '\r' or c == ' ': - continue - if c == '=': - if quad_pos < 2 or (quad_pos == 2 and next_valid_char(s, i) != '='): - continue - else: - leftbits = 0 - break - try: - next_c = table_a2b_base64[c] - except KeyError: - continue - quad_pos = (quad_pos + 1) & 0x03 - leftchar = (leftchar << 6) | next_c - leftbits += 6 - if leftbits >= 8: - leftbits -= 8 - res.append((leftchar >> leftbits & 0xff)) - leftchar &= ((1 << leftbits) - 1) - if leftbits != 0: - raise Error('Incorrect padding') - - return ''.join([chr(i) for i in res]) - -table_b2a_base64 = \ -"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" - -def b2a_base64(s): - length = len(s) - final_length = length % 3 - - def triples_gen(s): - while s: - try: - yield ord(s[0]), ord(s[1]), ord(s[2]) - except IndexError: - s += '\0\0' - yield ord(s[0]), ord(s[1]), ord(s[2]) - return - s = s[3:] - - - a = triples_gen(s[ :length - final_length]) - - result = [''.join( - [table_b2a_base64[( A >> 2 ) & 0x3F], - table_b2a_base64[((A << 4) | ((B >> 4) & 0xF)) & 0x3F], - table_b2a_base64[((B << 2) | ((C >> 6) & 0x3)) & 0x3F], - table_b2a_base64[( C ) & 0x3F]]) - for A, B, C in a] - - final = s[length - final_length:] - if final_length == 
0: - snippet = '' - elif final_length == 1: - a = ord(final[0]) - snippet = table_b2a_base64[(a >> 2 ) & 0x3F] + \ - table_b2a_base64[(a << 4 ) & 0x3F] + '==' - else: - a = ord(final[0]) - b = ord(final[1]) - snippet = table_b2a_base64[(a >> 2) & 0x3F] + \ - table_b2a_base64[((a << 4) | (b >> 4) & 0xF) & 0x3F] + \ - table_b2a_base64[(b << 2) & 0x3F] + '=' - return ''.join(result) + snippet + '\n' - -def a2b_qp(s, header=False): - inp = 0 - odata = [] - while inp < len(s): - if s[inp] == '=': - inp += 1 - if inp >= len(s): - break - # Soft line breaks - if (s[inp] == '\n') or (s[inp] == '\r'): - if s[inp] != '\n': - while inp < len(s) and s[inp] != '\n': - inp += 1 - if inp < len(s): - inp += 1 - elif s[inp] == '=': - # broken case from broken python qp - odata.append('=') - inp += 1 - elif s[inp] in hex_numbers and s[inp + 1] in hex_numbers: - ch = chr(int(s[inp:inp+2], 16)) - inp += 2 - odata.append(ch) - else: - odata.append('=') - elif header and s[inp] == '_': - odata.append(' ') - inp += 1 - else: - odata.append(s[inp]) - inp += 1 - return ''.join(odata) - -def b2a_qp(data, quotetabs=False, istext=True, header=False): - """quotetabs=True means that tab and space characters are always - quoted. - istext=False means that \r and \n are treated as regular characters - header=True encodes space characters with '_' and requires - real '_' characters to be quoted. - """ - MAXLINESIZE = 76 - - # See if this string is using CRLF line ends - lf = data.find('\n') - crlf = lf > 0 and data[lf-1] == '\r' - - inp = 0 - linelen = 0 - odata = [] - while inp < len(data): - c = data[inp] - if (c > '~' or - c == '=' or - (header and c == '_') or - (c == '.' 
and linelen == 0 and (inp+1 == len(data) or - data[inp+1] == '\n' or - data[inp+1] == '\r')) or - (not istext and (c == '\r' or c == '\n')) or - ((c == '\t' or c == ' ') and (inp + 1 == len(data))) or - (c <= ' ' and c != '\r' and c != '\n' and - (quotetabs or (not quotetabs and (c != '\t' and c != ' '))))): - linelen += 3 - if linelen >= MAXLINESIZE: - odata.append('=') - if crlf: odata.append('\r') - odata.append('\n') - linelen = 3 - odata.append('=' + two_hex_digits(ord(c))) - inp += 1 - else: - if (istext and - (c == '\n' or (inp+1 < len(data) and c == '\r' and - data[inp+1] == '\n'))): - linelen = 0 - # Protect against whitespace on end of line - if (len(odata) > 0 and - (odata[-1] == ' ' or odata[-1] == '\t')): - ch = ord(odata[-1]) - odata[-1] = '=' - odata.append(two_hex_digits(ch)) - - if crlf: odata.append('\r') - odata.append('\n') - if c == '\r': - inp += 2 - else: - inp += 1 - else: - if (inp + 1 < len(data) and - data[inp+1] != '\n' and - (linelen + 1) >= MAXLINESIZE): - odata.append('=') - if crlf: odata.append('\r') - odata.append('\n') - linelen = 0 - - linelen += 1 - if header and c == ' ': - c = '_' - odata.append(c) - inp += 1 - return ''.join(odata) - -hex_numbers = '0123456789ABCDEF' -def hex(n): - if n == 0: - return '0' - - if n < 0: - n = -n - sign = '-' - else: - sign = '' - arr = [] - - def hex_gen(n): - """ Yield a nibble at a time. 
""" - while n: - yield n % 0x10 - n = n / 0x10 - - for nibble in hex_gen(n): - arr = [hex_numbers[nibble]] + arr - return sign + ''.join(arr) - -def two_hex_digits(n): - return hex_numbers[n / 0x10] + hex_numbers[n % 0x10] - - -def strhex_to_int(s): - i = 0 - for c in s: - i = i * 0x10 + hex_numbers.index(c) - return i - -hqx_encoding = '!"#$%&\'()*+,-012345689 at ABCDEFGHIJKLMNPQRSTUVXYZ[`abcdefhijklmpqr' - -DONE = 0x7f -SKIP = 0x7e -FAIL = 0x7d - -table_a2b_hqx = [ - #^@ ^A ^B ^C ^D ^E ^F ^G - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - #\b \t \n ^K ^L \r ^N ^O - FAIL, FAIL, SKIP, FAIL, FAIL, SKIP, FAIL, FAIL, - #^P ^Q ^R ^S ^T ^U ^V ^W - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - #^X ^Y ^Z ^[ ^\ ^] ^^ ^_ - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - # ! " # $ % & ' - FAIL, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, - #( ) * + , - . / - 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, FAIL, FAIL, - #0 1 2 3 4 5 6 7 - 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, FAIL, - #8 9 : ; < = > ? - 0x14, 0x15, DONE, FAIL, FAIL, FAIL, FAIL, FAIL, - #@ A B C D E F G - 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, - #H I J K L M N O - 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, FAIL, - #P Q R S T U V W - 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, FAIL, - #X Y Z [ \ ] ^ _ - 0x2C, 0x2D, 0x2E, 0x2F, FAIL, FAIL, FAIL, FAIL, - #` a b c d e f g - 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, FAIL, - #h i j k l m n o - 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, FAIL, FAIL, - #p q r s t u v w - 0x3D, 0x3E, 0x3F, FAIL, FAIL, FAIL, FAIL, FAIL, - #x y z { | } ~ ^? 
- FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, -] - -def a2b_hqx(s): - result = [] - - def quadruples_gen(s): - t = [] - for c in s: - res = table_a2b_hqx[ord(c)] - if res == SKIP: - continue - elif res == FAIL: - raise Error('Illegal character') - elif res == DONE: - yield t - raise Done - else: - t.append(res) - if len(t) == 4: - yield t - t = [] - yield t - - done = 0 - try: - for snippet in quadruples_gen(s): - length = len(snippet) - if length == 4: - result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4))) - result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2))) - result.append(chr(((snippet[2] & 0x03) << 6) | (snippet[3]))) - elif length == 3: - result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4))) - result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2))) - elif length == 2: - result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4))) - except Done: - done = 1 - except Error: - raise - return (''.join(result), done) - -def b2a_hqx(s): - result =[] - - def triples_gen(s): - while s: - try: - yield ord(s[0]), ord(s[1]), ord(s[2]) - except IndexError: - yield tuple([ord(c) for c in s]) - s = s[3:] - - for snippet in 
triples_gen(s): - length = len(snippet) - if length == 3: - result.append( - hqx_encoding[(snippet[0] & 0xfc) >> 2]) - result.append(hqx_encoding[ - ((snippet[0] & 0x03) << 4) | ((snippet[1] & 0xf0) >> 4)]) - result.append(hqx_encoding[ - (snippet[1] & 0x0f) << 2 | ((snippet[2] & 0xc0) >> 6)]) - result.append(hqx_encoding[snippet[2] & 0x3f]) - elif length == 2: - result.append( - hqx_encoding[(snippet[0] & 0xfc) >> 2]) - result.append(hqx_encoding[ - ((snippet[0] & 0x03) << 4) | ((snippet[1] & 0xf0) >> 4)]) - result.append(hqx_encoding[ - (snippet[1] & 0x0f) << 2]) - elif length == 1: - result.append( - hqx_encoding[(snippet[0] & 0xfc) >> 2]) - result.append(hqx_encoding[ - ((snippet[0] & 0x03) << 4)]) - return ''.join(result) - -crctab_hqx = [ - 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, - 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, - 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, - 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, - 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, - 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, - 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, - 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, - 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, - 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, - 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12, - 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, - 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, - 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, - 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, - 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, - 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, - 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, - 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 
0xe37f, 0xf35e, - 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, - 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, - 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, - 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, - 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, - 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, - 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, - 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, - 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, - 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, - 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, - 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, - 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0, -] - -def crc_hqx(s, crc): - for c in s: - crc = ((crc << 8) & 0xff00) ^ crctab_hqx[((crc >> 8) & 0xff) ^ ord(c)] - - return crc - -def rlecode_hqx(s): - """ - Run length encoding for binhex4. - The CPython implementation does not do run length encoding - of \x90 characters. This implementation does. - """ - if not s: - return '' - result = [] - prev = s[0] - count = 1 - # Add a dummy character to get the loop to go one extra round. - # The dummy must be different from the last character of s. - # In the same step we remove the first character, which has - # already been stored in prev. - if s[-1] == '!': - s = s[1:] + '?' - else: - s = s[1:] + '!' 
- - for c in s: - if c == prev and count < 255: - count += 1 - else: - if count == 1: - if prev != '\x90': - result.append(prev) - else: - result.extend(['\x90', '\x00']) - elif count < 4: - if prev != '\x90': - result.extend([prev] * count) - else: - result.extend(['\x90', '\x00'] * count) - else: - if prev != '\x90': - result.extend([prev, '\x90', chr(count)]) - else: - result.extend(['\x90', '\x00', '\x90', chr(count)]) - count = 1 - prev = c - - return ''.join(result) - -def rledecode_hqx(s): - s = s.split('\x90') - result = [s[0]] - prev = s[0] - for snippet in s[1:]: - count = ord(snippet[0]) - if count > 0: - result.append(prev[-1] * (count-1)) - prev = snippet - else: - result. append('\x90') - prev = '\x90' - result.append(snippet[1:]) - - return ''.join(result) - -crc_32_tab = [ - 0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L, - 0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L, - 0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L, - 0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL, - 0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L, - 0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L, - 0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L, - 0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL, - 0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L, - 0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL, - 0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L, - 0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L, - 0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L, - 0x01db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x06b6b51fL, - 0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0x0f00f934L, 0x9609a88eL, - 0xe10e9818L, 0x7f6a0dbbL, 0x086d3d2dL, 0x91646c97L, 0xe6635c01L, - 0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL, - 0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 
0x12b7e950L, - 0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L, - 0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L, - 0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL, - 0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L, - 0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L, - 0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL, - 0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L, - 0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L, - 0x03b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x04db2615L, - 0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0x0d6d6a3eL, 0x7a6a5aa8L, - 0xe40ecf0bL, 0x9309ff9dL, 0x0a00ae27L, 0x7d079eb1L, 0xf00f9344L, - 0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL, - 0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL, - 0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L, - 0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L, - 0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL, - 0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL, - 0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L, - 0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL, - 0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L, - 0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL, - 0x026d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x05005713L, - 0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0x0cb61b38L, 0x92d28e9bL, - 0xe5d5be0dL, 0x7cdcefb7L, 0x0bdbdf21L, 0x86d3d2d4L, 0xf1d4e242L, - 0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L, - 0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL, - 0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L, - 0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L, - 0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L, - 0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 
0x47b2cf7fL, 0x30b5ffe9L, - 0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L, - 0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L, - 0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL, - 0x2d02ef8dL -] - -def crc32(s, crc=0): - result = 0 - crc = ~long(crc) & 0xffffffffL - for c in s: - crc = crc_32_tab[(crc ^ long(ord(c))) & 0xffL] ^ (crc >> 8) - #/* Note: (crc >> 8) MUST zero fill on left - - result = crc ^ 0xffffffffL - - if result > 2**31: - result = ((result + 2**31) % 2**32) - 2**31 - - return result - -def b2a_hex(s): - result = [] - for char in s: - c = (ord(char) >> 4) & 0xf - if c > 9: - c = c + ord('a') - 10 - else: - c = c + ord('0') - result.append(chr(c)) - c = ord(char) & 0xf - if c > 9: - c = c + ord('a') - 10 - else: - c = c + ord('0') - result.append(chr(c)) - return ''.join(result) - -hexlify = b2a_hex - -table_hex = [ - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,-1,-1, -1,-1,-1,-1, - -1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1 -] - - -def a2b_hex(t): - result = [] - - def pairs_gen(s): - while s: - try: - yield table_hex[ord(s[0])], table_hex[ord(s[1])] - except IndexError: - if len(s): - raise TypeError('Odd-length string') - return - s = s[2:] - - for a, b in pairs_gen(t): - if a < 0 or b < 0: - raise TypeError('Non-hexadecimal digit found') - result.append(chr((a << 4) + b)) - return ''.join(result) - - -unhexlify = a2b_hex diff --git a/lib_pypy/numpypy/core/numeric.py b/lib_pypy/numpypy/core/numeric.py --- a/lib_pypy/numpypy/core/numeric.py +++ b/lib_pypy/numpypy/core/numeric.py @@ -306,6 +306,125 @@ else: return multiarray.set_string_function(f, repr) +def array_equal(a1, a2): + """ + 
True if two arrays have the same shape and elements, False otherwise. + + Parameters + ---------- + a1, a2 : array_like + Input arrays. + + Returns + ------- + b : bool + Returns True if the arrays are equal. + + See Also + -------- + allclose: Returns True if two arrays are element-wise equal within a + tolerance. + array_equiv: Returns True if input arrays are shape consistent and all + elements equal. + + Examples + -------- + >>> np.array_equal([1, 2], [1, 2]) + True + >>> np.array_equal(np.array([1, 2]), np.array([1, 2])) + True + >>> np.array_equal([1, 2], [1, 2, 3]) + False + >>> np.array_equal([1, 2], [1, 4]) + False + + """ + try: + a1, a2 = asarray(a1), asarray(a2) + except: + return False + if a1.shape != a2.shape: + return False + return bool((a1 == a2).all()) + +def asarray(a, dtype=None, order=None, maskna=None, ownmaskna=False): + """ + Convert the input to an array. + + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to an array. This + includes lists, lists of tuples, tuples, tuples of tuples, tuples + of lists and ndarrays. + dtype : data-type, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F'}, optional + Whether to use row-major ('C') or column-major ('F' for FORTRAN) + memory representation. Defaults to 'C'. + maskna : bool or None, optional + If this is set to True, it forces the array to have an NA mask. + If this is set to False, it forces the array to not have an NA + mask. + ownmaskna : bool, optional + If this is set to True, forces the array to have a mask which + it owns. + + Returns + ------- + out : ndarray + Array interpretation of `a`. No copy is performed if the input + is already an ndarray. If `a` is a subclass of ndarray, a base + class ndarray is returned. + + See Also + -------- + asanyarray : Similar function which passes through subclasses. + ascontiguousarray : Convert input to a contiguous array. 
+ asfarray : Convert input to a floating point ndarray. + asfortranarray : Convert input to an ndarray with column-major + memory order. + asarray_chkfinite : Similar function which checks input for NaNs and Infs. + fromiter : Create an array from an iterator. + fromfunction : Construct an array by executing a function on grid + positions. + + Examples + -------- + Convert a list into an array: + + >>> a = [1, 2] + >>> np.asarray(a) + array([1, 2]) + + Existing arrays are not copied: + + >>> a = np.array([1, 2]) + >>> np.asarray(a) is a + True + + If `dtype` is set, array is copied only if dtype does not match: + + >>> a = np.array([1, 2], dtype=np.float32) + >>> np.asarray(a, dtype=np.float32) is a + True + >>> np.asarray(a, dtype=np.float64) is a + False + + Contrary to `asanyarray`, ndarray subclasses are not passed through: + + >>> issubclass(np.matrix, np.ndarray) + True + >>> a = np.matrix([[1, 2]]) + >>> np.asarray(a) is a + False + >>> np.asanyarray(a) is a + True + + """ + return array(a, dtype, copy=False, order=order, + maskna=maskna, ownmaskna=ownmaskna) + set_string_function(array_str, 0) set_string_function(array_repr, 1) diff --git a/lib_pypy/pypy_test/test_binascii.py b/lib_pypy/pypy_test/test_binascii.py deleted file mode 100644 --- a/lib_pypy/pypy_test/test_binascii.py +++ /dev/null @@ -1,168 +0,0 @@ -from __future__ import absolute_import -import py -from lib_pypy import binascii - -# Create binary test data -data = "The quick brown fox jumps over the lazy dog.\r\n" -# Be slow so we don't depend on other modules -data += "".join(map(chr, xrange(256))) -data += "\r\nHello world.\n" - -def test_exceptions(): - # Check module exceptions - assert issubclass(binascii.Error, Exception) - assert issubclass(binascii.Incomplete, Exception) - -def test_functions(): - # Check presence of all functions - funcs = [] - for suffix in "base64", "hqx", "uu", "hex": - prefixes = ["a2b_", "b2a_"] - if suffix == "hqx": - prefixes.extend(["crc_", "rlecode_", 
"rledecode_"]) - for prefix in prefixes: - name = prefix + suffix - assert callable(getattr(binascii, name)) - py.test.raises(TypeError, getattr(binascii, name)) - for name in ("hexlify", "unhexlify"): - assert callable(getattr(binascii, name)) - py.test.raises(TypeError, getattr(binascii, name)) - -def test_base64valid(): - # Test base64 with valid data - MAX_BASE64 = 57 - lines = [] - for i in range(0, len(data), MAX_BASE64): - b = data[i:i+MAX_BASE64] - a = binascii.b2a_base64(b) - lines.append(a) - res = "" - for line in lines: - b = binascii.a2b_base64(line) - res = res + b - assert res == data - -def test_base64invalid(): - # Test base64 with random invalid characters sprinkled throughout - # (This requires a new version of binascii.) - MAX_BASE64 = 57 - lines = [] - for i in range(0, len(data), MAX_BASE64): - b = data[i:i+MAX_BASE64] - a = binascii.b2a_base64(b) - lines.append(a) - - fillers = "" - valid = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+/" - for i in xrange(256): - c = chr(i) - if c not in valid: - fillers += c - def addnoise(line): - noise = fillers - ratio = len(line) // len(noise) - res = "" - while line and noise: - if len(line) // len(noise) > ratio: - c, line = line[0], line[1:] - else: - c, noise = noise[0], noise[1:] - res += c - return res + noise + line - res = "" - for line in map(addnoise, lines): - b = binascii.a2b_base64(line) - res += b - assert res == data - - # Test base64 with just invalid characters, which should return - # empty strings. TBD: shouldn't it raise an exception instead ? 
- assert binascii.a2b_base64(fillers) == '' - -def test_uu(): - MAX_UU = 45 - lines = [] - for i in range(0, len(data), MAX_UU): - b = data[i:i+MAX_UU] - a = binascii.b2a_uu(b) - lines.append(a) - res = "" - for line in lines: - b = binascii.a2b_uu(line) - res += b - assert res == data - - assert binascii.a2b_uu("\x7f") == "\x00"*31 - assert binascii.a2b_uu("\x80") == "\x00"*32 - assert binascii.a2b_uu("\xff") == "\x00"*31 - py.test.raises(binascii.Error, binascii.a2b_uu, "\xff\x00") - py.test.raises(binascii.Error, binascii.a2b_uu, "!!!!") - - py.test.raises(binascii.Error, binascii.b2a_uu, 46*"!") - -def test_crc32(): - crc = binascii.crc32("Test the CRC-32 of") - crc = binascii.crc32(" this string.", crc) - assert crc == 1571220330 - - crc = binascii.crc32('frotz\n', 0) - assert crc == -372923920 - - py.test.raises(TypeError, binascii.crc32) - -def test_hex(): - # test hexlification - s = '{s\005\000\000\000worldi\002\000\000\000s\005\000\000\000helloi\001\000\000\0000' - t = binascii.b2a_hex(s) - u = binascii.a2b_hex(t) - assert s == u - py.test.raises(TypeError, binascii.a2b_hex, t[:-1]) - py.test.raises(TypeError, binascii.a2b_hex, t[:-1] + 'q') - - # Verify the treatment of Unicode strings - assert binascii.hexlify(unicode('a', 'ascii')) == '61' - -def test_qp(): - # A test for SF bug 534347 (segfaults without the proper fix) - try: - binascii.a2b_qp("", **{1:1}) - except TypeError: - pass - else: - fail("binascii.a2b_qp(**{1:1}) didn't raise TypeError") - assert binascii.a2b_qp("= ") == "= " - assert binascii.a2b_qp("==") == "=" - assert binascii.a2b_qp("=AX") == "=AX" - py.test.raises(TypeError, binascii.b2a_qp, foo="bar") - assert binascii.a2b_qp("=00\r\n=00") == "\x00\r\n\x00" - assert binascii.b2a_qp("\xff\r\n\xff\n\xff") == "=FF\r\n=FF\r\n=FF" - target = "0"*75+"=\r\n=FF\r\n=FF\r\n=FF" - assert binascii.b2a_qp("0"*75+"\xff\r\n\xff\r\n\xff") == target - -def test_empty_string(): - # A test for SF bug #1022953. Make sure SystemError is not raised. 
- for n in ['b2a_qp', 'a2b_hex', 'b2a_base64', 'a2b_uu', 'a2b_qp', - 'b2a_hex', 'unhexlify', 'hexlify', 'crc32', 'b2a_hqx', - 'a2b_hqx', 'a2b_base64', 'rlecode_hqx', 'b2a_uu', - 'rledecode_hqx']: - f = getattr(binascii, n) - f('') - binascii.crc_hqx('', 0) - -def test_qp_bug_case(): - assert binascii.b2a_qp('y'*77, False, False) == 'y'*75 + '=\nyy' - assert binascii.b2a_qp(' '*77, False, False) == ' '*75 + '=\n =20' - assert binascii.b2a_qp('y'*76, False, False) == 'y'*76 - assert binascii.b2a_qp(' '*76, False, False) == ' '*75 + '=\n=20' - -def test_wrong_padding(): - s = 'CSixpLDtKSC/7Liuvsax4iC6uLmwMcijIKHaILzSwd/H0SC8+LCjwLsgv7W/+Mj3IQ' - py.test.raises(binascii.Error, binascii.a2b_base64, s) - -def test_crap_after_padding(): - s = 'xxx=axxxx' - assert binascii.a2b_base64(s) == '\xc7\x1c' - -def test_wrong_args(): - # this should grow as a way longer list - py.test.raises(TypeError, binascii.a2b_base64, 42) diff --git a/lib_pypy/pypy_test/test_locale.py b/lib_pypy/pypy_test/test_locale.py deleted file mode 100644 --- a/lib_pypy/pypy_test/test_locale.py +++ /dev/null @@ -1,79 +0,0 @@ -from __future__ import absolute_import -import py -import sys - -from lib_pypy.ctypes_config_cache import rebuild -rebuild.rebuild_one('locale.ctc.py') - -from lib_pypy import _locale - - -def setup_module(mod): - if sys.platform == 'darwin': - py.test.skip("Locale support on MacOSX is minimal and cannot be tested") - -class TestLocale: - def setup_class(cls): - cls.oldlocale = _locale.setlocale(_locale.LC_NUMERIC) - if sys.platform.startswith("win"): - cls.tloc = "en" - elif sys.platform.startswith("freebsd"): - cls.tloc = "en_US.US-ASCII" - else: - cls.tloc = "en_US.UTF8" - try: - _locale.setlocale(_locale.LC_NUMERIC, cls.tloc) - except _locale.Error: - py.test.skip("test locale %s not supported" % cls.tloc) - - def teardown_class(cls): - _locale.setlocale(_locale.LC_NUMERIC, cls.oldlocale) - - def test_format(self): - py.test.skip("XXX fix or kill me") - - def 
testformat(formatstr, value, grouping = 0, output=None): - if output: - print "%s %% %s =? %s ..." %\ - (repr(formatstr), repr(value), repr(output)), - else: - print "%s %% %s works? ..." % (repr(formatstr), repr(value)), - result = locale.format(formatstr, value, grouping = grouping) - assert result == output - - testformat("%f", 1024, grouping=1, output='1,024.000000') - testformat("%f", 102, grouping=1, output='102.000000') - testformat("%f", -42, grouping=1, output='-42.000000') - testformat("%+f", -42, grouping=1, output='-42.000000') - testformat("%20.f", -42, grouping=1, output=' -42') - testformat("%+10.f", -4200, grouping=1, output=' -4,200') - testformat("%-10.f", 4200, grouping=1, output='4,200 ') - - def test_getpreferredencoding(self): - py.test.skip("XXX fix or kill me") - # Invoke getpreferredencoding to make sure it does not cause exceptions - _locale.getpreferredencoding() - - # Test BSD Rune locale's bug for isctype functions. - def test_bsd_bug(self): - def teststrop(s, method, output): - print "%s.%s() =? %s ..." 
% (repr(s), method, repr(output)), - result = getattr(s, method)() - assert result == output - - oldlocale = _locale.setlocale(_locale.LC_CTYPE) - _locale.setlocale(_locale.LC_CTYPE, self.tloc) - try: - teststrop('\x20', 'isspace', True) - teststrop('\xa0', 'isspace', False) - teststrop('\xa1', 'isspace', False) - teststrop('\xc0', 'isalpha', False) - teststrop('\xc0', 'isalnum', False) - teststrop('\xc0', 'isupper', False) - teststrop('\xc0', 'islower', False) - teststrop('\xec\xa0\xbc', 'split', ['\xec\xa0\xbc']) - teststrop('\xed\x95\xa0', 'strip', '\xed\x95\xa0') - teststrop('\xcc\x85', 'lower', '\xcc\x85') - teststrop('\xed\x95\xa0', 'upper', '\xed\x95\xa0') - finally: - _locale.setlocale(_locale.LC_CTYPE, oldlocale) diff --git a/lib_pypy/pypy_test/test_site_extra.py b/lib_pypy/pypy_test/test_site_extra.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pypy_test/test_site_extra.py @@ -0,0 +1,13 @@ +import sys, os + + +def test_preimported_modules(): + lst = ['__builtin__', '_codecs', '_warnings', 'codecs', 'encodings', + 'exceptions', 'signal', 'sys', 'zipimport'] + g = os.popen("'%s' -c 'import sys; print sorted(sys.modules)'" % + (sys.executable,)) + real_data = g.read() + g.close() + for name in lst: + quoted_name = repr(name) + assert quoted_name in real_data diff --git a/lib_pypy/pypy_test/test_struct_extra.py b/lib_pypy/pypy_test/test_struct_extra.py deleted file mode 100644 --- a/lib_pypy/pypy_test/test_struct_extra.py +++ /dev/null @@ -1,25 +0,0 @@ -from __future__ import absolute_import -from lib_pypy import struct - -def test_simple(): - morezeros = '\x00' * (struct.calcsize('l')-4) - assert struct.pack(': big-endian, std. 
size & alignment - !: same as > - -The remaining chars indicate types of args and must match exactly; -these can be preceded by a decimal repeat count: - x: pad byte (no data); - c:char; - b:signed byte; - B:unsigned byte; - h:short; - H:unsigned short; - i:int; - I:unsigned int; - l:long; - L:unsigned long; - f:float; - d:double. -Special cases (preceding decimal count indicates length): - s:string (array of char); p: pascal string (with count byte). -Special case (only available in native format): - P:an integer type that is wide enough to hold a pointer. -Special case (not in native mode unless 'long long' in platform C): - q:long long; - Q:unsigned long long -Whitespace between formats is ignored. - -The variable struct.error is an exception raised on errors.""" - -import math, sys - -# TODO: XXX Find a way to get information on native sizes and alignments -class StructError(Exception): - pass -error = StructError -def unpack_int(data,index,size,le): - bytes = [ord(b) for b in data[index:index+size]] - if le == 'little': - bytes.reverse() - number = 0L - for b in bytes: - number = number << 8 | b - return int(number) - -def unpack_signed_int(data,index,size,le): - number = unpack_int(data,index,size,le) - max = 2**(size*8) - if number > 2**(size*8 - 1) - 1: - number = int(-1*(max - number)) - return number - -INFINITY = 1e200 * 1e200 -NAN = INFINITY / INFINITY - -def unpack_char(data,index,size,le): - return data[index:index+size] - -def pack_int(number,size,le): - x=number - res=[] - for i in range(size): - res.append(chr(x&0xff)) - x >>= 8 - if le == 'big': - res.reverse() - return ''.join(res) - -def pack_signed_int(number,size,le): - if not isinstance(number, (int,long)): - raise StructError,"argument for i,I,l,L,q,Q,h,H must be integer" - if number > 2**(8*size-1)-1 or number < -1*2**(8*size-1): - raise OverflowError,"Number:%i too large to convert" % number - return pack_int(number,size,le) - -def pack_unsigned_int(number,size,le): - if not 
isinstance(number, (int,long)): - raise StructError,"argument for i,I,l,L,q,Q,h,H must be integer" - if number < 0: - raise TypeError,"can't convert negative long to unsigned" - if number > 2**(8*size)-1: - raise OverflowError,"Number:%i too large to convert" % number - return pack_int(number,size,le) - -def pack_char(char,size,le): - return str(char) - -def isinf(x): - return x != 0.0 and x / 2 == x -def isnan(v): - return v != v*1.0 or (v == 1.0 and v == 2.0) - -def pack_float(x, size, le): - unsigned = float_pack(x, size) - result = [] - for i in range(8): - result.append(chr((unsigned >> (i * 8)) & 0xFF)) - if le == "big": - result.reverse() - return ''.join(result) - -def unpack_float(data, index, size, le): - binary = [data[i] for i in range(index, index + 8)] - if le == "big": - binary.reverse() - unsigned = 0 - for i in range(8): - unsigned |= ord(binary[i]) << (i * 8) - return float_unpack(unsigned, size, le) - -def round_to_nearest(x): - """Python 3 style round: round a float x to the nearest int, but - unlike the builtin Python 2.x round function: - - - return an int, not a float - - do round-half-to-even, not round-half-away-from-zero. - - We assume that x is finite and nonnegative; except wrong results - if you use this for negative x. 
- - """ - int_part = int(x) - frac_part = x - int_part - if frac_part > 0.5 or frac_part == 0.5 and int_part & 1 == 1: - int_part += 1 - return int_part - -def float_unpack(Q, size, le): - """Convert a 32-bit or 64-bit integer created - by float_pack into a Python float.""" - - if size == 8: - MIN_EXP = -1021 # = sys.float_info.min_exp - MAX_EXP = 1024 # = sys.float_info.max_exp - MANT_DIG = 53 # = sys.float_info.mant_dig - BITS = 64 - elif size == 4: - MIN_EXP = -125 # C's FLT_MIN_EXP - MAX_EXP = 128 # FLT_MAX_EXP - MANT_DIG = 24 # FLT_MANT_DIG - BITS = 32 - else: - raise ValueError("invalid size value") - - if Q >> BITS: - raise ValueError("input out of range") - - # extract pieces - sign = Q >> BITS - 1 - exp = (Q & ((1 << BITS - 1) - (1 << MANT_DIG - 1))) >> MANT_DIG - 1 - mant = Q & ((1 << MANT_DIG - 1) - 1) - - if exp == MAX_EXP - MIN_EXP + 2: - # nan or infinity - result = float('nan') if mant else float('inf') - elif exp == 0: - # subnormal or zero - result = math.ldexp(float(mant), MIN_EXP - MANT_DIG) - else: - # normal - mant += 1 << MANT_DIG - 1 - result = math.ldexp(float(mant), exp + MIN_EXP - MANT_DIG - 1) - return -result if sign else result - - -def float_pack(x, size): - """Convert a Python float x into a 64-bit unsigned integer - with the same byte representation.""" - - if size == 8: - MIN_EXP = -1021 # = sys.float_info.min_exp - MAX_EXP = 1024 # = sys.float_info.max_exp - MANT_DIG = 53 # = sys.float_info.mant_dig - BITS = 64 - elif size == 4: - MIN_EXP = -125 # C's FLT_MIN_EXP - MAX_EXP = 128 # FLT_MAX_EXP - MANT_DIG = 24 # FLT_MANT_DIG - BITS = 32 - else: - raise ValueError("invalid size value") - - sign = math.copysign(1.0, x) < 0.0 - if math.isinf(x): - mant = 0 - exp = MAX_EXP - MIN_EXP + 2 - elif math.isnan(x): - mant = 1 << (MANT_DIG-2) # other values possible - exp = MAX_EXP - MIN_EXP + 2 - elif x == 0.0: - mant = 0 - exp = 0 - else: - m, e = math.frexp(abs(x)) # abs(x) == m * 2**e - exp = e - (MIN_EXP - 1) - if exp > 0: - # Normal case. 
- mant = round_to_nearest(m * (1 << MANT_DIG)) - mant -= 1 << MANT_DIG - 1 - else: - # Subnormal case. - if exp + MANT_DIG - 1 >= 0: - mant = round_to_nearest(m * (1 << exp + MANT_DIG - 1)) - else: - mant = 0 - exp = 0 - - # Special case: rounding produced a MANT_DIG-bit mantissa. - assert 0 <= mant <= 1 << MANT_DIG - 1 - if mant == 1 << MANT_DIG - 1: - mant = 0 - exp += 1 - - # Raise on overflow (in some circumstances, may want to return - # infinity instead). - if exp >= MAX_EXP - MIN_EXP + 2: - raise OverflowError("float too large to pack in this format") - - # check constraints - assert 0 <= mant < 1 << MANT_DIG - 1 - assert 0 <= exp <= MAX_EXP - MIN_EXP + 2 - assert 0 <= sign <= 1 - return ((sign << BITS - 1) | (exp << MANT_DIG - 1)) | mant - - -big_endian_format = { - 'x':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None}, - 'b':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'B':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'c':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_char, 'unpack' : unpack_char}, - 's':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None}, - 'p':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None}, - 'h':{ 'size' : 2, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'H':{ 'size' : 2, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'i':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'I':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'l':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'L':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'q':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'Q':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'f':{ 
'size' : 4, 'alignment' : 0, 'pack' : pack_float, 'unpack' : unpack_float}, - 'd':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_float, 'unpack' : unpack_float}, - } -default = big_endian_format -formatmode={ '<' : (default, 'little'), - '>' : (default, 'big'), - '!' : (default, 'big'), - '=' : (default, sys.byteorder), - '@' : (default, sys.byteorder) - } - -def getmode(fmt): - try: - formatdef,endianness = formatmode[fmt[0]] - index = 1 - except KeyError: - formatdef,endianness = formatmode['@'] - index = 0 - return formatdef,endianness,index -def getNum(fmt,i): - num=None - cur = fmt[i] - while ('0'<= cur ) and ( cur <= '9'): - if num == None: - num = int(cur) - else: - num = 10*num + int(cur) - i += 1 - cur = fmt[i] - return num,i - -def calcsize(fmt): - """calcsize(fmt) -> int - Return size of C struct described by format string fmt. - See struct.__doc__ for more on format strings.""" - - formatdef,endianness,i = getmode(fmt) - num = 0 - result = 0 - while i string - Return string containing values v1, v2, ... packed according to fmt. - See struct.__doc__ for more on format strings.""" - formatdef,endianness,i = getmode(fmt) - args = list(args) - n_args = len(args) - result = [] - while i 0: - result += [chr(len(args[0])) + args[0][:num-1] + '\0'*padding] - else: - if num<255: - result += [chr(num-1) + args[0][:num-1]] - else: - result += [chr(255) + args[0][:num-1]] - args.pop(0) - else: - raise StructError,"arg for string format not a string" - - else: - if len(args) < num: - raise StructError,"insufficient arguments to pack" - for var in args[:num]: - result += [format['pack'](var,format['size'],endianness)] - args=args[num:] - num = None - i += 1 - if len(args) != 0: - raise StructError,"too many arguments for pack format" - return ''.join(result) - -def unpack(fmt,data): - """unpack(fmt, string) -> (v1, v2, ...) - Unpack the string, containing packed C structure data, according - to fmt. Requires len(string)==calcsize(fmt). 
- See struct.__doc__ for more on format strings.""" - formatdef,endianness,i = getmode(fmt) - j = 0 - num = 0 - result = [] - length= calcsize(fmt) - if length != len (data): - raise StructError,"unpack str size does not match format" - while i= num: - n = num-1 - result.append(data[j+1:j+n+1]) - j += num - else: - for n in range(num): - result += [format['unpack'](data,j,format['size'],endianness)] - j += format['size'] - - return tuple(result) - -def pack_into(fmt, buf, offset, *args): - data = pack(fmt, *args) - buffer(buf)[offset:offset+len(data)] = data - -def unpack_from(fmt, buf, offset=0): - size = calcsize(fmt) - data = buffer(buf)[offset:offset+size] - if len(data) != size: - raise error("unpack_from requires a buffer of at least %d bytes" - % (size,)) - return unpack(fmt, data) diff --git a/pypy/doc/discussion/win64_todo.txt b/pypy/doc/discussion/win64_todo.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/discussion/win64_todo.txt @@ -0,0 +1,9 @@ +2011-11-04 +ll_os.py has a problem with the file rwin32.py. +Temporarily disabled for the win64_gborg branch. This needs to be +investigated and re-enabled. +Resolved, enabled. + +2011-11-05 +test_typed.py needs explicit tests to ensure that we +handle word sizes right. \ No newline at end of file diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -103,21 +103,13 @@ * A concurrent garbage collector (a lot of work) -Remove the GIL --------------- +STM, a.k.a. "remove the GIL" +---------------------------- -This is a major task that requires lots of thinking. However, few subprojects -can be potentially specified, unless a better plan can be thought out: +Removing the GIL --- or more precisely, a GIL-less thread-less solution --- +is `now work in progress.`__ Contributions welcome. 
-* A thread-aware garbage collector - -* Better RPython primitives for dealing with concurrency - -* JIT passes to remove locks on objects - -* (maybe) implement locking in Python interpreter - -* alternatively, look at Software Transactional Memory +.. __: http://pypy.org/tmdonate.html Introduce new benchmarks ------------------------ diff --git a/pypy/doc/sandbox.rst b/pypy/doc/sandbox.rst --- a/pypy/doc/sandbox.rst +++ b/pypy/doc/sandbox.rst @@ -82,7 +82,10 @@ In pypy/translator/goal:: - ./translate.py --sandbox targetpypystandalone.py + ./translate.py -O2 --sandbox targetpypystandalone.py + +If you don't have a regular PyPy installed, you should, because it's +faster to translate, but you can also run ``python translate.py`` instead. To run it, use the tools in the pypy/translator/sandbox directory:: diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -18,7 +18,8 @@ Edition. Other configurations may work as well. The translation scripts will set up the appropriate environment variables -for the compiler. They will attempt to locate the same compiler version that +for the compiler, so you do not need to run vcvars before translation. +They will attempt to locate the same compiler version that was used to build the Python interpreter doing the translation. Failing that, they will pick the most recent Visual Studio compiler they can find. In addition, the target architecture @@ -26,7 +27,7 @@ using a 32 bit Python and vice versa. **Note:** PyPy is currently not supported for 64 bit Windows, and translation -will be aborted in this case. +will fail in this case. The compiler is all you need to build pypy-c, but it will miss some modules that relies on third-party libraries. See below how to get @@ -57,7 +58,8 @@ install third-party libraries. We chose to install them in the parent directory of the pypy checkout. 
For example, if you installed pypy in ``d:\pypy\trunk\`` (This directory contains a README file), the base -directory is ``d:\pypy``. +directory is ``d:\pypy``. You may choose different values by setting the +INCLUDE, LIB and PATH (for DLLs) The Boehm garbage collector ~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -126,18 +128,54 @@ ------------------------ You can compile pypy with the mingw compiler, using the --cc=mingw32 option; -mingw.exe must be on the PATH. +gcc.exe must be on the PATH. If the -cc flag does not begin with "ming", it should be +the name of a valid gcc-derivative compiler, i.e. x86_64-w64-mingw32-gcc for the 64 bit +compiler creating a 64 bit target. -libffi for the mingw32 compiler +libffi for the mingw compiler ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -To enable the _rawffi (and ctypes) module, you need to compile a mingw32 -version of libffi. I downloaded the `libffi source files`_, and extracted -them in the base directory. Then run:: +To enable the _rawffi (and ctypes) module, you need to compile a mingw +version of libffi. Here is one way to do this, wich should allow you to try +to build for win64 or win32: + +#. Download and unzip a `mingw32 build`_ or `mingw64 build`_, say into c:\mingw +#. If you do not use cygwin, you will need msys to provide make, + autoconf tools and other goodies. + + #. Download and unzip a `msys for mingw`_, say into c:\msys + #. Edit the c:\msys\etc\fstab file to mount c:\mingw + +#. Download and unzip the `libffi source files`_, and extract + them in the base directory. +#. Run c:\msys\msys.bat or a cygwin shell which should make you + feel better since it is a shell prompt with shell tools. +#. From inside the shell, cd to the libffi directory and do:: sh ./configure make cp .libs/libffi-5.dll +If you can't find the dll, and the libtool issued a warning about +"undefined symbols not allowed", you will need to edit the libffi +Makefile in the toplevel directory. 
Add the flag -no-undefined to +the definition of libffi_la_LDFLAGS + +If you wish to experiment with win64, you must run configure with flags:: + + sh ./configure --build=x86_64-w64-mingw32 --host=x86_64-w64-mingw32 + +or such, depending on your mingw64 download. + +hacking on Pypy with the mingw compiler +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Since hacking on Pypy means running tests, you will need a way to specify +the mingw compiler when hacking (as opposed to translating). As of +March 2012, --cc is not a valid option for pytest.py. However if you set an +environment variable CC it will allow you to choose a compiler. + +.. _'mingw32 build': http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win32/Automated%20Builds +.. _`mingw64 build`: http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win64/Automated%20Builds +.. _`msys for mingw`: http://sourceforge.net/projects/mingw-w64/files/External%20binary%20packages%20%28Win64%20hosted%29/MSYS%20%2832-bit%29 .. _`libffi source files`: http://sourceware.org/libffi/ .. _`RPython translation toolchain`: translation.html diff --git a/pypy/doc/you-want-to-help.rst b/pypy/doc/you-want-to-help.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/you-want-to-help.rst @@ -0,0 +1,86 @@ + +You want to help with PyPy, now what? +===================================== + +PyPy is a very large project that has a reputation of being hard to dive into. +Some of this fame is warranted, some of it is purely accidental. There are three +important lessons that everyone willing to contribute should learn: + +* PyPy has layers. There are many pieces of architecture that are very well + separated from each other. More about this below, but often the manifestation + of this is that things are at a different layer than you would expect them + to be. For example if you are looking for the JIT implementation, you will + not find it in the implementation of the Python programming language. 
+ +* Because of the above, we are very serious about Test Driven Development. + It's not only what we believe in, but also that PyPy's architecture is + working very well with TDD in mind and not so well without it. Often + the development means progressing in an unrelated corner, one unittest + at a time; and then flipping a giant switch, bringing it all together. + (It generally works out of the box. If it doesn't, then we didn't + write enough unit tests.) It's worth repeating - PyPy + approach is great if you do TDD, not so great otherwise. + +* PyPy uses an entirely different set of tools - most of them included + in the PyPy repository. There is no Makefile, nor autoconf. More below + +Architecture +============ + +PyPy has layers. The 100 miles view: + +* `RPython`_ is the language in which we write interpreters. Not the entire + PyPy project is written in RPython, only the parts that are compiled in + the translation process. The interesting point is that RPython has no parser, + it's compiled from the live python objects, which make it possible to do + all kinds of metaprogramming during import time. In short, Python is a meta + programming language for RPython. + + The RPython standard library is to be found in the ``rlib`` subdirectory. + +.. _`RPython`: coding-guide.html#RPython + +* The translation toolchain - this is the part that takes care about translating + RPython to flow graphs and then to C. There is more in the `architecture`_ + document written about it. + + It mostly lives in ``rpython``, ``annotator`` and ``objspace/flow``. + +.. _`architecture`: architecture.html + +* Python Interpreter + + xxx + +* Python modules + + xxx + +* Just-in-Time Compiler (JIT): `we have a tracing JIT`_ that traces the + interpreter written in RPython, rather than the user program that it + interprets. As a result it applies to any interpreter, i.e. any + language. 
But getting it to work correctly is not trivial: it + requires a small number of precise "hints" and possibly some small + refactorings of the interpreter. The JIT itself also has several + almost-independent parts: the tracer itself in ``jit/metainterp``, the + optimizer in ``jit/metainterp/optimizer`` that optimizes a list of + residual operations, and the backend in ``jit/backend/`` + that turns it into machine code. Writing a new backend is a + traditional way to get into the project. + +.. _`we have a tracing JIT`: jit/index.html + +* Garbage Collectors (GC): as you can notice if you are used to CPython's + C code, there are no ``Py_INCREF/Py_DECREF`` equivalents in RPython code. + `Garbage collection in PyPy`_ is inserted + during translation. Moreover, this is not reference counting; it is a real + GC written as more RPython code. The best one we have so far is in + ``rpython/memory/gc/minimark.py``. + +.. _`Garbage collection in PyPy`: garbage_collection.html + + +Toolset +======= + +xxx diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py --- a/pypy/interpreter/astcompiler/test/test_astbuilder.py +++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py @@ -10,16 +10,6 @@ from pypy.interpreter.astcompiler import ast, consts -try: - all -except NameError: - def all(iterable): - for x in iterable: - if not x: - return False - return True - - class TestAstBuilder: def setup_class(cls): diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py --- a/pypy/interpreter/test/test_objspace.py +++ b/pypy/interpreter/test/test_objspace.py @@ -312,8 +312,8 @@ mods = space.get_builtinmodule_to_install() assert '__pypy__' in mods # real builtin - assert 'array' not in mods # in lib_pypy - assert 'faked+array' not in mods # in lib_pypy + assert '_functools' not in mods # in lib_pypy + assert 'faked+_functools' not in mods # in lib_pypy assert 'this_doesnt_exist' not in 
mods # not in lib_pypy assert 'faked+this_doesnt_exist' in mods # not in lib_pypy, but in # ALL_BUILTIN_MODULES diff --git a/pypy/interpreter/test/test_zzpickle_and_slow.py b/pypy/interpreter/test/test_zzpickle_and_slow.py --- a/pypy/interpreter/test/test_zzpickle_and_slow.py +++ b/pypy/interpreter/test/test_zzpickle_and_slow.py @@ -75,6 +75,7 @@ class AppTestInterpObjectPickling: pytestmark = py.test.mark.skipif("config.option.runappdirect") def setup_class(cls): + cls.space = gettestobjspace(usemodules=['struct']) _attach_helpers(cls.space) def teardown_class(cls): diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -27,6 +27,12 @@ def constfloat(x): return ConstFloat(longlong.getfloatstorage(x)) +def boxlonglong(ll): + if longlong.is_64_bit: + return BoxInt(ll) + else: + return BoxFloat(ll) + class Runner(object): @@ -1623,6 +1629,11 @@ [boxfloat(2.5)], t).value assert res == longlong2float.float2longlong(2.5) + bytes = longlong2float.float2longlong(2.5) + res = self.execute_operation(rop.CONVERT_LONGLONG_BYTES_TO_FLOAT, + [boxlonglong(res)], 'float').value + assert longlong.getrealfloat(res) == 2.5 + def test_ooops_non_gc(self): x = lltype.malloc(lltype.Struct('x'), flavor='raw') v = heaptracker.adr2int(llmemory.cast_ptr_to_adr(x)) diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py --- a/pypy/jit/backend/test/test_random.py +++ b/pypy/jit/backend/test/test_random.py @@ -328,6 +328,15 @@ def produce_into(self, builder, r): self.put(builder, [r.choice(builder.intvars)]) +class CastLongLongToFloatOperation(AbstractFloatOperation): + def produce_into(self, builder, r): + if longlong.is_64_bit: + self.put(builder, [r.choice(builder.intvars)]) + else: + if not builder.floatvars: + raise CannotProduceOperation + self.put(builder, [r.choice(builder.floatvars)]) + class 
CastFloatToIntOperation(AbstractFloatOperation): def produce_into(self, builder, r): if not builder.floatvars: @@ -450,6 +459,7 @@ OPERATIONS.append(CastFloatToIntOperation(rop.CAST_FLOAT_TO_INT)) OPERATIONS.append(CastIntToFloatOperation(rop.CAST_INT_TO_FLOAT)) OPERATIONS.append(CastFloatToIntOperation(rop.CONVERT_FLOAT_BYTES_TO_LONGLONG)) +OPERATIONS.append(CastLongLongToFloatOperation(rop.CONVERT_LONGLONG_BYTES_TO_FLOAT)) OperationBuilder.OPERATIONS = OPERATIONS diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -1251,6 +1251,15 @@ else: self.mov(loc0, resloc) + def genop_convert_longlong_bytes_to_float(self, op, arglocs, resloc): + loc0, = arglocs + if longlong.is_64_bit: + assert isinstance(resloc, RegLoc) + assert isinstance(loc0, RegLoc) + self.mc.MOVD(resloc, loc0) + else: + self.mov(loc0, resloc) + def genop_guard_int_is_true(self, op, guard_op, guard_token, arglocs, resloc): guard_opnum = guard_op.getopnum() self.mc.CMP(arglocs[0], imm0) diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -773,10 +773,24 @@ self.Perform(op, [loc0], loc1) self.xrm.possibly_free_var(op.getarg(0)) else: - loc0 = self.xrm.loc(op.getarg(0)) + arg0 = op.getarg(0) + loc0 = self.xrm.loc(arg0) + loc1 = self.xrm.force_allocate_reg(op.result, forbidden_vars=[arg0]) + self.Perform(op, [loc0], loc1) + self.xrm.possibly_free_var(arg0) + + def consider_convert_longlong_bytes_to_float(self, op): + if longlong.is_64_bit: + loc0 = self.rm.make_sure_var_in_reg(op.getarg(0)) loc1 = self.xrm.force_allocate_reg(op.result) self.Perform(op, [loc0], loc1) - self.xrm.possibly_free_var(op.getarg(0)) + self.rm.possibly_free_var(op.getarg(0)) + else: + arg0 = op.getarg(0) + loc0 = self.xrm.make_sure_var_in_reg(arg0) + loc1 = self.xrm.force_allocate_reg(op.result, 
forbidden_vars=[arg0]) + self.Perform(op, [loc0], loc1) + self.xrm.possibly_free_var(arg0) def _consider_llong_binop_xx(self, op): # must force both arguments into xmm registers, because we don't diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py --- a/pypy/jit/backend/x86/rx86.py +++ b/pypy/jit/backend/x86/rx86.py @@ -601,7 +601,9 @@ CVTSS2SD_xb = xmminsn('\xF3', rex_nw, '\x0F\x5A', register(1, 8), stack_bp(2)) - # These work on machine sized registers. + # These work on machine sized registers, so MOVD is actually MOVQ + # when running on 64 bits. Note a bug in the Intel documentation: + # http://lists.gnu.org/archive/html/bug-binutils/2007-07/msg00095.html MOVD_rx = xmminsn('\x66', rex_w, '\x0F\x7E', register(2, 8), register(1), '\xC0') MOVD_xr = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), register(2), '\xC0') MOVD_xb = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), stack_bp(2)) diff --git a/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py b/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py --- a/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py +++ b/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py @@ -182,6 +182,12 @@ filename = str(testdir.join(FILENAME % methname)) g = open(inputname, 'w') g.write('\x09.string "%s"\n' % BEGIN_TAG) + # + if instrname == 'MOVD' and self.WORD == 8: + instrname = 'MOVQ' + if argmodes == 'xb': + py.test.skip('"as" uses an undocumented alternate encoding??') + # for args in args_lists: suffix = "" ## all = instr.as_all_suffixes @@ -229,9 +235,6 @@ # movq $xxx, %rax => movl $xxx, %eax suffix = 'l' ops[1] = reduce_to_32bit(ops[1]) - if instrname.lower() == 'movd': - ops[0] = reduce_to_32bit(ops[0]) - ops[1] = reduce_to_32bit(ops[1]) # op = '\t%s%s %s%s' % (instrname.lower(), suffix, ', '.join(ops), following) diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ 
-313,6 +313,7 @@ return op rewrite_op_convert_float_bytes_to_longlong = _noop_rewrite + rewrite_op_convert_longlong_bytes_to_float = _noop_rewrite # ---------- # Various kinds of calls diff --git a/pypy/jit/codewriter/test/test_flatten.py b/pypy/jit/codewriter/test/test_flatten.py --- a/pypy/jit/codewriter/test/test_flatten.py +++ b/pypy/jit/codewriter/test/test_flatten.py @@ -968,14 +968,22 @@ int_return %i2 """, transform=True) - def test_convert_float_bytes_to_int(self): - from pypy.rlib.longlong2float import float2longlong + def test_convert_float_bytes(self): + from pypy.rlib.longlong2float import float2longlong, longlong2float def f(x): - return float2longlong(x) + ll = float2longlong(x) + return longlong2float(ll) + if longlong.is_64_bit: + tmp_var = "%i0" + result_var = "%f1" + else: + tmp_var = "%f1" + result_var = "%f2" self.encoding_test(f, [25.0], """ - convert_float_bytes_to_longlong %f0 -> %i0 - int_return %i0 - """) + convert_float_bytes_to_longlong %%f0 -> %(tmp_var)s + convert_longlong_bytes_to_float %(tmp_var)s -> %(result_var)s + float_return %(result_var)s + """ % {"result_var": result_var, "tmp_var": tmp_var}, transform=True) def check_force_cast(FROM, TO, operations, value): diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -672,6 +672,11 @@ a = longlong.getrealfloat(a) return longlong2float.float2longlong(a) + @arguments(LONGLONG_TYPECODE, returns="f") + def bhimpl_convert_longlong_bytes_to_float(a): + a = longlong2float.longlong2float(a) + return longlong.getfloatstorage(a) + # ---------- # control flow operations diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -224,6 +224,7 @@ 'float_neg', 'float_abs', 'cast_ptr_to_int', 'cast_int_to_ptr', 'convert_float_bytes_to_longlong', + 'convert_longlong_bytes_to_float', ]: exec 
py.code.Source(''' @arguments("box") diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -420,6 +420,7 @@ 'CAST_FLOAT_TO_SINGLEFLOAT/1', 'CAST_SINGLEFLOAT_TO_FLOAT/1', 'CONVERT_FLOAT_BYTES_TO_LONGLONG/1', + 'CONVERT_LONGLONG_BYTES_TO_FLOAT/1', # 'INT_LT/2b', 'INT_LE/2b', diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -1,3 +1,4 @@ +import math import sys import py @@ -15,7 +16,7 @@ loop_invariant, elidable, promote, jit_debug, assert_green, AssertGreenFailed, unroll_safe, current_trace_length, look_inside_iff, isconstant, isvirtual, promote_string, set_param, record_known_class) -from pypy.rlib.longlong2float import float2longlong +from pypy.rlib.longlong2float import float2longlong, longlong2float from pypy.rlib.rarithmetic import ovfcheck, is_valid_int from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rpython.ootypesystem import ootype @@ -3795,15 +3796,15 @@ res = self.interp_operations(g, [1]) assert res == 3 - def test_float2longlong(self): + def test_float_bytes(self): def f(n): - return float2longlong(n) + ll = float2longlong(n) + return longlong2float(ll) for x in [2.5, float("nan"), -2.5, float("inf")]: # There are tests elsewhere to verify the correctness of this. 
- expected = float2longlong(x) res = self.interp_operations(f, [x]) - assert longlong.getfloatstorage(res) == expected + assert res == x or math.isnan(x) and math.isnan(res) class TestLLtype(BaseLLtypeTests, LLJitMixin): diff --git a/pypy/module/__builtin__/interp_memoryview.py b/pypy/module/__builtin__/interp_memoryview.py --- a/pypy/module/__builtin__/interp_memoryview.py +++ b/pypy/module/__builtin__/interp_memoryview.py @@ -69,6 +69,10 @@ return W_MemoryView(buf) def descr_buffer(self, space): + """Note that memoryview() objects in PyPy support buffer(), whereas + not in CPython; but CPython supports passing memoryview() to most + built-in functions that accept buffers, with the notable exception + of the buffer() built-in.""" return space.wrap(self.buf) def descr_tobytes(self, space): diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -16,13 +16,15 @@ appleveldefs = {} interpleveldefs = {} if sys.platform.startswith("linux"): + from pypy.module.__pypy__ import interp_time interpleveldefs["clock_gettime"] = "interp_time.clock_gettime" interpleveldefs["clock_getres"] = "interp_time.clock_getres" for name in [ "CLOCK_REALTIME", "CLOCK_MONOTONIC", "CLOCK_MONOTONIC_RAW", "CLOCK_PROCESS_CPUTIME_ID", "CLOCK_THREAD_CPUTIME_ID" ]: - interpleveldefs[name] = "space.wrap(interp_time.%s)" % name + if getattr(interp_time, name) is not None: + interpleveldefs[name] = "space.wrap(interp_time.%s)" % name class Module(MixedModule): diff --git a/pypy/module/__pypy__/interp_time.py b/pypy/module/__pypy__/interp_time.py --- a/pypy/module/__pypy__/interp_time.py +++ b/pypy/module/__pypy__/interp_time.py @@ -1,3 +1,4 @@ +from __future__ import with_statement import sys from pypy.interpreter.error import exception_from_errno diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ 
b/pypy/module/_ast/test/test_ast.py @@ -1,9 +1,10 @@ import py - +from pypy.conftest import gettestobjspace class AppTestAST: def setup_class(cls): + cls.space = gettestobjspace(usemodules=['struct']) cls.w_ast = cls.space.appexec([], """(): import _ast return _ast""") diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -4,7 +4,7 @@ class AppTestCodecs: def setup_class(cls): - space = gettestobjspace(usemodules=('unicodedata',)) + space = gettestobjspace(usemodules=('unicodedata', 'struct')) cls.space = space def test_register_noncallable(self): diff --git a/pypy/module/_continuation/test/test_zpickle.py b/pypy/module/_continuation/test/test_zpickle.py --- a/pypy/module/_continuation/test/test_zpickle.py +++ b/pypy/module/_continuation/test/test_zpickle.py @@ -106,8 +106,9 @@ version = 0 def setup_class(cls): - cls.space = gettestobjspace(usemodules=('_continuation',), + cls.space = gettestobjspace(usemodules=('_continuation', 'struct'), CALL_METHOD=True) + cls.space.config.translation.continuation = True cls.space.appexec([], """(): global continulet, A, __name__ diff --git a/pypy/module/_hashlib/test/test_hashlib.py b/pypy/module/_hashlib/test/test_hashlib.py --- a/pypy/module/_hashlib/test/test_hashlib.py +++ b/pypy/module/_hashlib/test/test_hashlib.py @@ -3,7 +3,7 @@ class AppTestHashlib: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['_hashlib']) + cls.space = gettestobjspace(usemodules=['_hashlib', 'array', 'struct']) def test_simple(self): import _hashlib diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py --- a/pypy/module/_io/test/test_io.py +++ b/pypy/module/_io/test/test_io.py @@ -158,7 +158,7 @@ class AppTestOpen: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['_io', '_locale']) + cls.space = gettestobjspace(usemodules=['_io', '_locale', 'array', 'struct']) 
tmpfile = udir.join('tmpfile').ensure() cls.w_tmpfile = cls.space.wrap(str(tmpfile)) diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -92,7 +92,8 @@ class AppTestSocketConnection(BaseConnectionTest): def setup_class(cls): - space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal')) + space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal', + 'struct', 'array')) cls.space = space cls.w_connections = space.newlist([]) diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -6,7 +6,7 @@ from pypy.rpython.lltypesystem import lltype, rffi def setup_module(mod): - mod.space = gettestobjspace(usemodules=['_socket', 'array']) + mod.space = gettestobjspace(usemodules=['_socket', 'array', 'struct']) global socket import socket mod.w_socket = space.appexec([], "(): import _socket as m; return m") @@ -372,10 +372,9 @@ def test_socket_connect(self): import _socket, os s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM, 0) - # XXX temporarily we use python.org to test, will have more robust tests - # in the absence of a network connection later when more parts of the - # socket API are implemented. Currently skip the test if there is no - # connection. + # it would be nice to have a test which works even if there is no + # network connection. However, this one is "good enough" for now. Skip + # it if there is no connection. 
try: s.connect(("www.python.org", 80)) except _socket.gaierror, ex: diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -90,7 +90,7 @@ class AppTestConnectedSSL: def setup_class(cls): - space = gettestobjspace(usemodules=('_ssl', '_socket')) + space = gettestobjspace(usemodules=('_ssl', '_socket', 'struct')) cls.space = space def setup_method(self, method): @@ -179,7 +179,7 @@ # to exercise the poll() calls def setup_class(cls): - space = gettestobjspace(usemodules=('_ssl', '_socket')) + space = gettestobjspace(usemodules=('_ssl', '_socket', 'struct')) cls.space = space cls.space.appexec([], """(): import socket; socket.setdefaulttimeout(1) diff --git a/pypy/module/cpyext/test/conftest.py b/pypy/module/cpyext/test/conftest.py --- a/pypy/module/cpyext/test/conftest.py +++ b/pypy/module/cpyext/test/conftest.py @@ -10,7 +10,7 @@ return False def pytest_funcarg__space(request): - return gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi']) + return gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array']) def pytest_funcarg__api(request): return request.cls.api diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -19,7 +19,8 @@ class BaseApiTest(LeakCheckingTest): def setup_class(cls): - cls.space = space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi']) + cls.space = space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', + 'array']) # warm up reference counts: # - the posix module allocates a HCRYPTPROV on Windows diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -1,3 +1,4 @@ +from pypy.conftest import gettestobjspace from 
pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase import py diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -35,7 +35,7 @@ class AppTestApi: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi']) + cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array']) from pypy.rlib.libffi import get_libc_name cls.w_libc = cls.space.wrap(get_libc_name()) @@ -165,8 +165,9 @@ return leaking class AppTestCpythonExtensionBase(LeakCheckingTest): + def setup_class(cls): - cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi']) + cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array']) cls.space.getbuiltinmodule("cpyext") from pypy.module.imp.importing import importhook importhook(cls.space, "os") # warm up reference counts diff --git a/pypy/module/cpyext/test/test_import.py b/pypy/module/cpyext/test/test_import.py --- a/pypy/module/cpyext/test/test_import.py +++ b/pypy/module/cpyext/test/test_import.py @@ -19,7 +19,7 @@ space.wrap('__name__'))) == 'foobar' def test_getmoduledict(self, space, api): - testmod = "binascii" + testmod = "_functools" w_pre_dict = api.PyImport_GetModuleDict() assert not space.is_true(space.contains(w_pre_dict, space.wrap(testmod))) diff --git a/pypy/module/fcntl/test/test_fcntl.py b/pypy/module/fcntl/test/test_fcntl.py --- a/pypy/module/fcntl/test/test_fcntl.py +++ b/pypy/module/fcntl/test/test_fcntl.py @@ -13,7 +13,7 @@ class AppTestFcntl: def setup_class(cls): - space = gettestobjspace(usemodules=('fcntl', 'array')) + space = gettestobjspace(usemodules=('fcntl', 'array', 'struct')) cls.space = space tmpprefix = str(udir.ensure('test_fcntl', dir=1).join('tmp_')) cls.w_tmp = space.wrap(tmpprefix) diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- 
a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -987,6 +987,10 @@ os.environ['LANG'] = oldlang class AppTestImportHooks(object): + + def setup_class(cls): + cls.space = gettestobjspace(usemodules=('struct',)) + def test_meta_path(self): tried_imports = [] class Importer(object): diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py --- a/pypy/module/itertools/test/test_itertools.py +++ b/pypy/module/itertools/test/test_itertools.py @@ -891,7 +891,7 @@ class AppTestItertools27: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['itertools']) + cls.space = gettestobjspace(usemodules=['itertools', 'struct']) if cls.space.is_true(cls.space.appexec([], """(): import sys; return sys.version_info < (2, 7) """)): diff --git a/pypy/module/marshal/test/make_test_marshal.py b/pypy/module/marshal/test/make_test_marshal.py deleted file mode 100644 --- a/pypy/module/marshal/test/make_test_marshal.py +++ /dev/null @@ -1,78 +0,0 @@ - -TESTCASES = """\ - None - False - True - StopIteration - Ellipsis - 42 - -17 - sys.maxint - -1.25 - -1.25 #2 - 2+5j - 2+5j #2 - 42L - -1234567890123456789012345678901234567890L - hello # not interned - "hello" - () - (1, 2) - [] - [3, 4] - {} - {5: 6, 7: 8} - func.func_code - scopefunc.func_code - u'hello' - set() - set([1, 2]) - frozenset() - frozenset([3, 4]) -""".strip().split('\n') - -def readable(s): - for c, repl in ( - ("'", '_quote_'), ('"', '_Quote_'), (':', '_colon_'), ('.', '_dot_'), - ('[', '_list_'), (']', '_tsil_'), ('{', '_dict_'), ('}', '_tcid_'), - ('-', '_minus_'), ('+', '_plus_'), - (',', '_comma_'), ('(', '_brace_'), (')', '_ecarb_') ): - s = s.replace(c, repl) - lis = list(s) - for i, c in enumerate(lis): - if c.isalnum() or c == '_': - continue - lis[i] = '_' - return ''.join(lis) - -print """class AppTestMarshal: -""" -for line in TESTCASES: - line = line.strip() - name = readable(line) - version = '' - extra = '' - if 
line.endswith('#2'): - version = ', 2' - extra = '; assert len(s) in (9, 17)' - src = '''\ - def test_%(name)s(self): - import sys - hello = "he" - hello += "llo" - def func(x): - return lambda y: x+y - scopefunc = func(42) - import marshal, StringIO - case = %(line)s - print "case: %%-30s func=%(name)s" %% (case, ) - s = marshal.dumps(case%(version)s)%(extra)s - x = marshal.loads(s) - assert x == case - f = StringIO.StringIO() - marshal.dump(case, f) - f.seek(0) - x = marshal.load(f) - assert x == case -''' % {'name': name, 'line': line, 'version' : version, 'extra': extra} - print src diff --git a/pypy/module/math/test/test_math.py b/pypy/module/math/test/test_math.py --- a/pypy/module/math/test/test_math.py +++ b/pypy/module/math/test/test_math.py @@ -6,7 +6,7 @@ class AppTestMath: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['math']) + cls.space = gettestobjspace(usemodules=['math', 'struct']) cls.w_cases = cls.space.wrap(test_direct.MathTests.TESTCASES) cls.w_consistent_host = cls.space.wrap(test_direct.consistent_host) diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -16,7 +16,7 @@ a[i][i] = 1 return a -def sum(a,axis=None): +def sum(a,axis=None, out=None): '''sum(a, axis=None) Sum of array elements over a given axis. @@ -43,17 +43,17 @@ # TODO: add to doc (once it's implemented): cumsum : Cumulative sum of array elements. 
if not hasattr(a, "sum"): a = _numpypy.array(a) - return a.sum(axis) + return a.sum(axis=axis, out=out) -def min(a, axis=None): +def min(a, axis=None, out=None): if not hasattr(a, "min"): a = _numpypy.array(a) - return a.min(axis) + return a.min(axis=axis, out=out) -def max(a, axis=None): +def max(a, axis=None, out=None): if not hasattr(a, "max"): a = _numpypy.array(a) - return a.max(axis) + return a.max(axis=axis, out=out) def arange(start, stop=None, step=1, dtype=None): '''arange([start], stop[, step], dtype=None) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -62,21 +62,24 @@ return space.wrap(dtype.itemtype.bool(self)) def _binop_impl(ufunc_name): - def impl(self, space, w_other): + def impl(self, space, w_other, w_out=None): from pypy.module.micronumpy import interp_ufuncs - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self, w_other]) + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [self, w_other, w_out]) return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) def _binop_right_impl(ufunc_name): - def impl(self, space, w_other): + def impl(self, space, w_other, w_out=None): from pypy.module.micronumpy import interp_ufuncs - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self]) + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [w_other, self, w_out]) return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name) def _unaryop_impl(ufunc_name): - def impl(self, space): + def impl(self, space, w_out=None): from pypy.module.micronumpy import interp_ufuncs - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self]) + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [self, w_out]) return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) descr_add = _binop_impl("add") diff --git 
a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -359,6 +359,7 @@ name="int64", char="q", w_box_type=space.gettypefor(interp_boxes.W_Int64Box), + alternate_constructors=[space.w_long], ) self.w_uint64dtype = W_Dtype( types.UInt64(), @@ -386,23 +387,6 @@ alternate_constructors=[space.w_float], aliases=["float"], ) - self.w_longlongdtype = W_Dtype( - types.Int64(), - num=9, - kind=SIGNEDLTR, - name='int64', - char='q', - w_box_type = space.gettypefor(interp_boxes.W_LongLongBox), - alternate_constructors=[space.w_long], - ) - self.w_ulonglongdtype = W_Dtype( - types.UInt64(), - num=10, - kind=UNSIGNEDLTR, - name='uint64', - char='Q', - w_box_type = space.gettypefor(interp_boxes.W_ULongLongBox), - ) self.w_stringdtype = W_Dtype( types.StringType(1), num=18, @@ -435,17 +419,19 @@ self.w_booldtype, self.w_int8dtype, self.w_uint8dtype, self.w_int16dtype, self.w_uint16dtype, self.w_int32dtype, self.w_uint32dtype, self.w_longdtype, self.w_ulongdtype, - self.w_longlongdtype, self.w_ulonglongdtype, + self.w_int64dtype, self.w_uint64dtype, self.w_float32dtype, self.w_float64dtype, self.w_stringdtype, self.w_unicodedtype, self.w_voiddtype, ] - self.dtypes_by_num_bytes = sorted( + self.float_dtypes_by_num_bytes = sorted( (dtype.itemtype.get_element_size(), dtype) - for dtype in self.builtin_dtypes + for dtype in [self.w_float32dtype, self.w_float64dtype] ) self.dtypes_by_name = {} - for dtype in self.builtin_dtypes: + # we reverse, so the stuff with lower numbers override stuff with + # higher numbers + for dtype in reversed(self.builtin_dtypes): self.dtypes_by_name[dtype.name] = dtype can_name = dtype.kind + str(dtype.itemtype.get_element_size()) self.dtypes_by_name[can_name] = dtype @@ -473,7 +459,7 @@ 'LONG': self.w_longdtype, 'UNICODE': self.w_unicodedtype, #'OBJECT', - 'ULONGLONG': self.w_ulonglongdtype, + 'ULONGLONG': self.w_uint64dtype, 'STRING': 
self.w_stringdtype, #'CDOUBLE', #'DATETIME', diff --git a/pypy/module/micronumpy/interp_iter.py b/pypy/module/micronumpy/interp_iter.py --- a/pypy/module/micronumpy/interp_iter.py +++ b/pypy/module/micronumpy/interp_iter.py @@ -269,7 +269,7 @@ def apply_transformations(self, arr, transformations): v = BaseIterator.apply_transformations(self, arr, transformations) - if len(arr.shape) == 1: + if len(arr.shape) == 1 and len(v.res_shape) == 1: return OneDimIterator(self.offset, self.strides[0], self.res_shape[0]) return v diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -83,8 +83,9 @@ return space.wrap(W_NDimArray(shape[:], dtype=dtype)) def _unaryop_impl(ufunc_name): - def impl(self, space): - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self]) + def impl(self, space, w_out=None): + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [self, w_out]) return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) descr_pos = _unaryop_impl("positive") @@ -93,8 +94,9 @@ descr_invert = _unaryop_impl("invert") def _binop_impl(ufunc_name): - def impl(self, space, w_other): - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self, w_other]) + def impl(self, space, w_other, w_out=None): + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [self, w_other, w_out]) return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) descr_add = _binop_impl("add") @@ -124,12 +126,12 @@ return space.newtuple([w_quotient, w_remainder]) def _binop_right_impl(ufunc_name): - def impl(self, space, w_other): + def impl(self, space, w_other, w_out=None): w_other = scalar_w(space, interp_ufuncs.find_dtype_for_scalar(space, w_other, self.find_dtype()), w_other ) - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self]) + return 
getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self, w_out]) return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name) descr_radd = _binop_right_impl("add") @@ -152,13 +154,21 @@ return space.newtuple([w_quotient, w_remainder]) def _reduce_ufunc_impl(ufunc_name, promote_to_largest=False): - def impl(self, space, w_axis=None): + def impl(self, space, w_axis=None, w_out=None): if space.is_w(w_axis, space.w_None): axis = -1 else: axis = space.int_w(w_axis) + if space.is_w(w_out, space.w_None) or not w_out: + out = None + elif not isinstance(w_out, BaseArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) + else: + out = w_out return getattr(interp_ufuncs.get(space), ufunc_name).reduce(space, - self, True, promote_to_largest, axis) + self, True, promote_to_largest, axis, + False, out) return func_with_new_name(impl, "reduce_%s_impl" % ufunc_name) descr_sum = _reduce_ufunc_impl("add") @@ -213,6 +223,7 @@ def descr_dot(self, space, w_other): other = convert_to_array(space, w_other) if isinstance(other, Scalar): + #Note: w_out is not modified, this is numpy compliant. 
return self.descr_mul(space, other) elif len(self.shape) < 2 and len(other.shape) < 2: w_res = self.descr_mul(space, other) @@ -514,14 +525,14 @@ ) return w_result - def descr_mean(self, space, w_axis=None): + def descr_mean(self, space, w_axis=None, w_out=None): if space.is_w(w_axis, space.w_None): w_axis = space.wrap(-1) w_denom = space.wrap(support.product(self.shape)) else: dim = space.int_w(w_axis) w_denom = space.wrap(self.shape[dim]) - return space.div(self.descr_sum_promote(space, w_axis), w_denom) + return space.div(self.descr_sum_promote(space, w_axis, w_out), w_denom) def descr_var(self, space, w_axis=None): return get_appbridge_cache(space).call_method(space, '_var', self, @@ -714,11 +725,12 @@ """ Class for representing virtual arrays, such as binary ops or ufuncs """ - def __init__(self, name, shape, res_dtype): + def __init__(self, name, shape, res_dtype, out_arg=None): BaseArray.__init__(self, shape) self.forced_result = None self.res_dtype = res_dtype self.name = name + self.res = out_arg self.size = support.product(self.shape) * res_dtype.get_size() def _del_sources(self): @@ -727,13 +739,18 @@ raise NotImplementedError def compute(self): - ra = ResultArray(self, self.shape, self.res_dtype) + ra = ResultArray(self, self.shape, self.res_dtype, self.res) loop.compute(ra) + if self.res: + broadcast_dims = len(self.res.shape) - len(self.shape) + chunks = [Chunk(0,0,0,0)] * broadcast_dims + \ + [Chunk(0, i, 1, i) for i in self.shape] + return Chunks(chunks).apply(self.res) return ra.left def force_if_needed(self): if self.forced_result is None: - self.forced_result = self.compute() + self.forced_result = self.compute().get_concrete() self._del_sources() def get_concrete(self): @@ -773,8 +790,9 @@ class Call1(VirtualArray): - def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, values): - VirtualArray.__init__(self, name, shape, res_dtype) + def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, values, + out_arg=None): + 
VirtualArray.__init__(self, name, shape, res_dtype, out_arg) self.values = values self.size = values.size self.ufunc = ufunc @@ -786,6 +804,12 @@ def create_sig(self): if self.forced_result is not None: return self.forced_result.create_sig() + if self.shape != self.values.shape: + #This happens if out arg is used + return signature.BroadcastUfunc(self.ufunc, self.name, + self.calc_dtype, + self.values.create_sig(), + self.res.create_sig()) return signature.Call1(self.ufunc, self.name, self.calc_dtype, self.values.create_sig()) @@ -793,8 +817,9 @@ """ Intermediate class for performing binary operations. """ - def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, left, right): - VirtualArray.__init__(self, name, shape, res_dtype) + def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, left, right, + out_arg=None): + VirtualArray.__init__(self, name, shape, res_dtype, out_arg) self.ufunc = ufunc self.left = left self.right = right @@ -832,8 +857,13 @@ Call2.__init__(self, None, 'assign', shape, dtype, dtype, res, child) def create_sig(self): - return signature.ResultSignature(self.res_dtype, self.left.create_sig(), - self.right.create_sig()) + if self.left.shape != self.right.shape: + sig = signature.BroadcastResultSignature(self.res_dtype, + self.left.create_sig(), self.right.create_sig()) + else: + sig = signature.ResultSignature(self.res_dtype, + self.left.create_sig(), self.right.create_sig()) + return sig class ToStringArray(Call1): def __init__(self, child): @@ -842,9 +872,9 @@ self.s = StringBuilder(child.size * self.item_size) Call1.__init__(self, None, 'tostring', child.shape, dtype, dtype, child) - self.res = W_NDimArray([1], dtype, 'C') - self.res_casted = rffi.cast(rffi.CArrayPtr(lltype.Char), - self.res.storage) + self.res_str = W_NDimArray([1], dtype, order='C') + self.res_str_casted = rffi.cast(rffi.CArrayPtr(lltype.Char), + self.res_str.storage) def create_sig(self): return signature.ToStringSignature(self.calc_dtype, @@ -950,7 +980,7 
@@ def setitem(self, item, value): self.invalidated() - self.dtype.setitem(self, item, value) + self.dtype.setitem(self, item, value.convert_to(self.dtype)) def calc_strides(self, shape): dtype = self.find_dtype() @@ -1125,7 +1155,8 @@ @unwrap_spec(subok=bool, copy=bool, ownmaskna=bool) def array(space, w_item_or_iterable, w_dtype=None, w_order=None, - subok=True, copy=True, w_maskna=None, ownmaskna=False): + subok=True, copy=True, w_maskna=None, ownmaskna=False, + w_ndmin=None): # find scalar if w_maskna is None: w_maskna = space.w_None @@ -1170,8 +1201,13 @@ break if dtype is None: dtype = interp_dtype.get_dtype_cache(space).w_float64dtype + shapelen = len(shape) + if w_ndmin is not None and not space.is_w(w_ndmin, space.w_None): + ndmin = space.int_w(w_ndmin) + if ndmin > shapelen: + shape = [1] * (ndmin - shapelen) + shape + shapelen = ndmin arr = W_NDimArray(shape[:], dtype=dtype, order=order) - shapelen = len(shape) arr_iter = arr.create_iter() # XXX we might want to have a jitdriver here for i in range(len(elems_w)): diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -28,26 +28,38 @@ return self.identity def descr_call(self, space, __args__): + from interp_numarray import BaseArray args_w, kwds_w = __args__.unpack() # it occurs to me that we don't support any datatypes that # require casting, change it later when we do kwds_w.pop('casting', None) w_subok = kwds_w.pop('subok', None) w_out = kwds_w.pop('out', space.w_None) - if ((w_subok is not None and space.is_true(w_subok)) or - not space.is_w(w_out, space.w_None)): + # Setup a default value for out + if space.is_w(w_out, space.w_None): + out = None + else: + out = w_out + if (w_subok is not None and space.is_true(w_subok)): raise OperationError(space.w_NotImplementedError, space.wrap("parameters unsupported")) if kwds_w or len(args_w) < self.argcount: raise 
OperationError(space.w_ValueError, space.wrap("invalid number of arguments") ) - elif len(args_w) > self.argcount: - # The extra arguments should actually be the output array, but we - # don't support that yet. + elif (len(args_w) > self.argcount and out is not None) or \ + (len(args_w) > self.argcount + 1): raise OperationError(space.w_TypeError, space.wrap("invalid number of arguments") ) + # Override the default out value, if it has been provided in w_wargs + if len(args_w) > self.argcount: + out = args_w[-1] + else: + args_w = args_w[:] + [out] + if out is not None and not isinstance(out, BaseArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) return self.call(space, args_w) @unwrap_spec(skipna=bool, keepdims=bool) @@ -105,28 +117,33 @@ array([[ 1, 5], [ 9, 13]]) """ - if not space.is_w(w_out, space.w_None): - raise OperationError(space.w_NotImplementedError, space.wrap( - "out not supported")) + from pypy.module.micronumpy.interp_numarray import BaseArray if w_axis is None: axis = 0 elif space.is_w(w_axis, space.w_None): axis = -1 else: axis = space.int_w(w_axis) - return self.reduce(space, w_obj, False, False, axis, keepdims) + if space.is_w(w_out, space.w_None): + out = None + elif not isinstance(w_out, BaseArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) + else: + out = w_out + return self.reduce(space, w_obj, False, False, axis, keepdims, out) - def reduce(self, space, w_obj, multidim, promote_to_largest, dim, - keepdims=False): + def reduce(self, space, w_obj, multidim, promote_to_largest, axis, + keepdims=False, out=None): from pypy.module.micronumpy.interp_numarray import convert_to_array, \ - Scalar, ReduceArray + Scalar, ReduceArray, W_NDimArray if self.argcount != 2: raise OperationError(space.w_ValueError, space.wrap("reduce only " "supported for binary functions")) assert isinstance(self, W_Ufunc2) obj = convert_to_array(space, w_obj) - if dim >= len(obj.shape): - 
raise OperationError(space.w_ValueError, space.wrap("axis(=%d) out of bounds" % dim)) + if axis >= len(obj.shape): + raise OperationError(space.w_ValueError, space.wrap("axis(=%d) out of bounds" % axis)) if isinstance(obj, Scalar): raise OperationError(space.w_TypeError, space.wrap("cannot reduce " "on a scalar")) @@ -144,21 +161,55 @@ if self.identity is None and size == 0: raise operationerrfmt(space.w_ValueError, "zero-size array to " "%s.reduce without identity", self.name) - if shapelen > 1 and dim >= 0: - return self.do_axis_reduce(obj, dtype, dim, keepdims) - arr = ReduceArray(self.func, self.name, self.identity, obj, dtype) - return loop.compute(arr) + if shapelen > 1 and axis >= 0: + if keepdims: + shape = obj.shape[:axis] + [1] + obj.shape[axis + 1:] + else: + shape = obj.shape[:axis] + obj.shape[axis + 1:] + if out: + #Test for shape agreement + if len(out.shape) > len(shape): + raise operationerrfmt(space.w_ValueError, + 'output parameter for reduction operation %s' + + ' has too many dimensions', self.name) + elif len(out.shape) < len(shape): + raise operationerrfmt(space.w_ValueError, + 'output parameter for reduction operation %s' + + ' does not have enough dimensions', self.name) + elif out.shape != shape: + raise operationerrfmt(space.w_ValueError, + 'output parameter shape mismatch, expecting [%s]' + + ' , got [%s]', + ",".join([str(x) for x in shape]), + ",".join([str(x) for x in out.shape]), + ) + #Test for dtype agreement, perhaps create an itermediate + #if out.dtype != dtype: + # raise OperationError(space.w_TypeError, space.wrap( + # "mismatched dtypes")) + return self.do_axis_reduce(obj, out.find_dtype(), axis, out) + else: + result = W_NDimArray(shape, dtype) + return self.do_axis_reduce(obj, dtype, axis, result) + if out: + if len(out.shape)>0: + raise operationerrfmt(space.w_ValueError, "output parameter " + "for reduction operation %s has too many" + " dimensions",self.name) + arr = ReduceArray(self.func, self.name, self.identity, obj, 
+ out.find_dtype()) + val = loop.compute(arr) + assert isinstance(out, Scalar) + out.value = val + else: + arr = ReduceArray(self.func, self.name, self.identity, obj, dtype) + val = loop.compute(arr) + return val - def do_axis_reduce(self, obj, dtype, dim, keepdims): - from pypy.module.micronumpy.interp_numarray import AxisReduce,\ - W_NDimArray - if keepdims: - shape = obj.shape[:dim] + [1] + obj.shape[dim + 1:] - else: - shape = obj.shape[:dim] + obj.shape[dim + 1:] - result = W_NDimArray(shape, dtype) + def do_axis_reduce(self, obj, dtype, axis, result): + from pypy.module.micronumpy.interp_numarray import AxisReduce arr = AxisReduce(self.func, self.name, self.identity, obj.shape, dtype, - result, obj, dim) + result, obj, axis) loop.compute(arr) return arr.left @@ -176,24 +227,55 @@ self.bool_result = bool_result def call(self, space, args_w): - from pypy.module.micronumpy.interp_numarray import (Call1, - convert_to_array, Scalar) - - [w_obj] = args_w + from pypy.module.micronumpy.interp_numarray import (Call1, BaseArray, + convert_to_array, Scalar, shape_agreement) + if len(args_w)<2: + [w_obj] = args_w + out = None + else: + [w_obj, out] = args_w + if space.is_w(out, space.w_None): + out = None w_obj = convert_to_array(space, w_obj) calc_dtype = find_unaryop_result_dtype(space, w_obj.find_dtype(), promote_to_float=self.promote_to_float, promote_bools=self.promote_bools) - if self.bool_result: + if out: + if not isinstance(out, BaseArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) + res_dtype = out.find_dtype() + elif self.bool_result: res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype else: res_dtype = calc_dtype if isinstance(w_obj, Scalar): - return space.wrap(self.func(calc_dtype, w_obj.value.convert_to(calc_dtype))) - - w_res = Call1(self.func, self.name, w_obj.shape, calc_dtype, res_dtype, - w_obj) + arr = self.func(calc_dtype, w_obj.value.convert_to(calc_dtype)) + if isinstance(out,Scalar): + 
out.value=arr + elif isinstance(out, BaseArray): + out.fill(space, arr) + else: + out = arr + return space.wrap(out) + if out: + assert isinstance(out, BaseArray) # For translation + broadcast_shape = shape_agreement(space, w_obj.shape, out.shape) + if not broadcast_shape or broadcast_shape != out.shape: + raise operationerrfmt(space.w_ValueError, + 'output parameter shape mismatch, could not broadcast [%s]' + + ' to [%s]', + ",".join([str(x) for x in w_obj.shape]), + ",".join([str(x) for x in out.shape]), + ) + w_res = Call1(self.func, self.name, out.shape, calc_dtype, + res_dtype, w_obj, out) + #Force it immediately + w_res.get_concrete() + else: + w_res = Call1(self.func, self.name, w_obj.shape, calc_dtype, + res_dtype, w_obj) w_obj.add_invalidates(w_res) return w_res @@ -212,32 +294,61 @@ def call(self, space, args_w): from pypy.module.micronumpy.interp_numarray import (Call2, - convert_to_array, Scalar, shape_agreement) - - [w_lhs, w_rhs] = args_w + convert_to_array, Scalar, shape_agreement, BaseArray) + if len(args_w)>2: + [w_lhs, w_rhs, w_out] = args_w + else: + [w_lhs, w_rhs] = args_w + w_out = None w_lhs = convert_to_array(space, w_lhs) w_rhs = convert_to_array(space, w_rhs) - calc_dtype = find_binop_result_dtype(space, - w_lhs.find_dtype(), w_rhs.find_dtype(), - int_only=self.int_only, - promote_to_float=self.promote_to_float, - promote_bools=self.promote_bools, - ) + if space.is_w(w_out, space.w_None) or w_out is None: + out = None + calc_dtype = find_binop_result_dtype(space, + w_lhs.find_dtype(), w_rhs.find_dtype(), + int_only=self.int_only, + promote_to_float=self.promote_to_float, + promote_bools=self.promote_bools, + ) + elif not isinstance(w_out, BaseArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) + else: + out = w_out + calc_dtype = out.find_dtype() if self.comparison_func: res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype else: res_dtype = calc_dtype if isinstance(w_lhs, Scalar) and 
isinstance(w_rhs, Scalar): - return space.wrap(self.func(calc_dtype, + arr = self.func(calc_dtype, w_lhs.value.convert_to(calc_dtype), w_rhs.value.convert_to(calc_dtype) - )) + ) + if isinstance(out,Scalar): + out.value=arr + elif isinstance(out, BaseArray): + out.fill(space, arr) + else: + out = arr + return space.wrap(out) new_shape = shape_agreement(space, w_lhs.shape, w_rhs.shape) + # Test correctness of out.shape + if out and out.shape != shape_agreement(space, new_shape, out.shape): + raise operationerrfmt(space.w_ValueError, + 'output parameter shape mismatch, could not broadcast [%s]' + + ' to [%s]', + ",".join([str(x) for x in new_shape]), + ",".join([str(x) for x in out.shape]), + ) w_res = Call2(self.func, self.name, new_shape, calc_dtype, - res_dtype, w_lhs, w_rhs) + res_dtype, w_lhs, w_rhs, out) w_lhs.add_invalidates(w_res) w_rhs.add_invalidates(w_res) + if out: + #out.add_invalidates(w_res) #causes a recursion loop + w_res.get_concrete() return w_res @@ -314,7 +425,7 @@ return dt if dt.num >= 5: return interp_dtype.get_dtype_cache(space).w_float64dtype - for bytes, dtype in interp_dtype.get_dtype_cache(space).dtypes_by_num_bytes: + for bytes, dtype in interp_dtype.get_dtype_cache(space).float_dtypes_by_num_bytes: if (dtype.kind == interp_dtype.FLOATINGLTR and dtype.itemtype.get_element_size() > dt.itemtype.get_element_size()): return dtype diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -216,13 +216,14 @@ return self.child.eval(frame, arr.child) class Call1(Signature): - _immutable_fields_ = ['unfunc', 'name', 'child', 'dtype'] + _immutable_fields_ = ['unfunc', 'name', 'child', 'res', 'dtype'] - def __init__(self, func, name, dtype, child): + def __init__(self, func, name, dtype, child, res=None): self.unfunc = func self.child = child self.name = name self.dtype = dtype + self.res = res def hash(self): return 
compute_hash(self.name) ^ intmask(self.child.hash() << 1) @@ -256,6 +257,29 @@ v = self.child.eval(frame, arr.values).convert_to(arr.calc_dtype) return self.unfunc(arr.calc_dtype, v) + +class BroadcastUfunc(Call1): + def _invent_numbering(self, cache, allnumbers): + self.res._invent_numbering(cache, allnumbers) + self.child._invent_numbering(new_cache(), allnumbers) + + def debug_repr(self): + return 'BroadcastUfunc(%s, %s)' % (self.name, self.child.debug_repr()) + + def _create_iter(self, iterlist, arraylist, arr, transforms): + from pypy.module.micronumpy.interp_numarray import Call1 + + assert isinstance(arr, Call1) + vtransforms = transforms + [BroadcastTransform(arr.values.shape)] + self.child._create_iter(iterlist, arraylist, arr.values, vtransforms) + self.res._create_iter(iterlist, arraylist, arr.res, transforms) + + def eval(self, frame, arr): + from pypy.module.micronumpy.interp_numarray import Call1 + assert isinstance(arr, Call1) + v = self.child.eval(frame, arr.values).convert_to(arr.calc_dtype) + return self.unfunc(arr.calc_dtype, v) + class Call2(Signature): _immutable_fields_ = ['binfunc', 'name', 'calc_dtype', 'left', 'right'] @@ -316,7 +340,17 @@ assert isinstance(arr, ResultArray) offset = frame.get_final_iter().offset - arr.left.setitem(offset, self.right.eval(frame, arr.right)) + val = self.right.eval(frame, arr.right) + arr.left.setitem(offset, val) + +class BroadcastResultSignature(ResultSignature): + def _create_iter(self, iterlist, arraylist, arr, transforms): + from pypy.module.micronumpy.interp_numarray import ResultArray + + assert isinstance(arr, ResultArray) + rtransforms = transforms + [BroadcastTransform(arr.left.shape)] + self.left._create_iter(iterlist, arraylist, arr.left, transforms) + self.right._create_iter(iterlist, arraylist, arr.right, rtransforms) class ToStringSignature(Call1): def __init__(self, dtype, child): @@ -327,10 +361,10 @@ from pypy.module.micronumpy.interp_numarray import ToStringArray assert isinstance(arr, 
ToStringArray) - arr.res.setitem(0, self.child.eval(frame, arr.values).convert_to( + arr.res_str.setitem(0, self.child.eval(frame, arr.values).convert_to( self.dtype)) for i in range(arr.item_size): - arr.s.append(arr.res_casted[i]) + arr.s.append(arr.res_str_casted[i]) class BroadcastLeft(Call2): def _invent_numbering(self, cache, allnumbers): @@ -455,6 +489,5 @@ cur = arr.left.getitem(iterator.offset) value = self.binfunc(self.calc_dtype, cur, v) arr.left.setitem(iterator.offset, value) - def debug_repr(self): return 'AxisReduceSig(%s, %s)' % (self.name, self.right.debug_repr()) diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -302,6 +302,7 @@ else: raises(OverflowError, numpy.int32, 2147483648) raises(OverflowError, numpy.int32, '2147483648') + assert numpy.dtype('int32') is numpy.dtype(numpy.int32) def test_uint32(self): import sys @@ -333,15 +334,11 @@ assert numpy.dtype(numpy.int64).type is numpy.int64 assert numpy.int64(3) == 3 - if sys.maxint >= 2 ** 63 - 1: - assert numpy.int64(9223372036854775807) == 9223372036854775807 - assert numpy.int64('9223372036854775807') == 9223372036854775807 - else: - raises(OverflowError, numpy.int64, 9223372036854775807) - raises(OverflowError, numpy.int64, '9223372036854775807') + assert numpy.int64(9223372036854775807) == 9223372036854775807 + assert numpy.int64(9223372036854775807) == 9223372036854775807 raises(OverflowError, numpy.int64, 9223372036854775808) - raises(OverflowError, numpy.int64, '9223372036854775808') + raises(OverflowError, numpy.int64, 9223372036854775808L) def test_uint64(self): import sys diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -211,6 +211,18 @@ assert a.shape == (3,) assert a.dtype is 
dtype(int) + def test_ndmin(self): + from _numpypy import array + + arr = array([[[1]]], ndmin=1) + assert arr.shape == (1, 1, 1) + + def test_noop_ndmin(self): + from _numpypy import array + + arr = array([1], ndmin=3) + assert arr.shape == (1, 1, 1) + def test_type(self): from _numpypy import array ar = array(range(5)) @@ -983,6 +995,10 @@ assert a.sum() == 5 raises(TypeError, 'a.sum(2, 3)') + d = array(0.) + b = a.sum(out=d) + assert b == d + assert isinstance(b, float) def test_reduce_nd(self): from numpypy import arange, array, multiply @@ -1483,8 +1499,6 @@ a = array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14]]) b = a[::2] - print a - print b assert (b == [[1, 2], [5, 6], [9, 10], [13, 14]]).all() c = b + b assert c[1][1] == 12 diff --git a/pypy/module/micronumpy/test/test_outarg.py b/pypy/module/micronumpy/test/test_outarg.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/test/test_outarg.py @@ -0,0 +1,126 @@ +import py +from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest + +class AppTestOutArg(BaseNumpyAppTest): + def test_reduce_out(self): + from numpypy import arange, zeros, array + a = arange(15).reshape(5, 3) + b = arange(12).reshape(4,3) + c = a.sum(0, out=b[1]) + assert (c == [30, 35, 40]).all() + assert (c == b[1]).all() + raises(ValueError, 'a.prod(0, out=arange(10))') + a=arange(12).reshape(3,2,2) + raises(ValueError, 'a.sum(0, out=arange(12).reshape(3,2,2))') + raises(ValueError, 'a.sum(0, out=arange(3))') + c = array([-1, 0, 1]).sum(out=zeros([], dtype=bool)) + #You could argue that this should product False, but + # that would require an itermediate result. Cpython numpy + # gives True. 
+ assert c == True + a = array([[-1, 0, 1], [1, 0, -1]]) + c = a.sum(0, out=zeros((3,), dtype=bool)) + assert (c == [True, False, True]).all() + c = a.sum(1, out=zeros((2,), dtype=bool)) + assert (c == [True, True]).all() + + def test_reduce_intermediary(self): + from numpypy import arange, array + a = arange(15).reshape(5, 3) + b = array(range(3), dtype=bool) + c = a.prod(0, out=b) + assert(b == [False, True, True]).all() + + def test_ufunc_out(self): + from _numpypy import array, negative, zeros, sin + from math import sin as msin + a = array([[1, 2], [3, 4]]) + c = zeros((2,2,2)) + b = negative(a + a, out=c[1]) + #test for view, and also test that forcing out also forces b + assert (c[:, :, 1] == [[0, 0], [-4, -8]]).all() + assert (b == [[-2, -4], [-6, -8]]).all() + #Test broadcast, type promotion + b = negative(3, out=a) + assert (a == -3).all() + c = zeros((2, 2), dtype=float) + b = negative(3, out=c) + assert b.dtype.kind == c.dtype.kind + assert b.shape == c.shape + a = array([1, 2]) + b = sin(a, out=c) + assert(c == [[msin(1), msin(2)]] * 2).all() + b = sin(a, out=c+c) + assert (c == b).all() + + #Test shape agreement + a = zeros((3,4)) + b = zeros((3,5)) + raises(ValueError, 'negative(a, out=b)') + b = zeros((1,4)) + raises(ValueError, 'negative(a, out=b)') + + def test_binfunc_out(self): + from _numpypy import array, add + a = array([[1, 2], [3, 4]]) + out = array([[1, 2], [3, 4]]) + c = add(a, a, out=out) + assert (c == out).all() + assert c.shape == a.shape + assert c.dtype is a.dtype + c[0,0] = 100 + assert out[0, 0] == 100 + out[:] = 100 + raises(ValueError, 'c = add(a, a, out=out[1])') + c = add(a[0], a[1], out=out[1]) + assert (c == out[1]).all() + assert (c == [4, 6]).all() + assert (out[0] == 100).all() + c = add(a[0], a[1], out=out) + assert (c == out[1]).all() + assert (c == out[0]).all() + out = array(16, dtype=int) + b = add(10, 10, out=out) + assert b==out + assert b.dtype == out.dtype + + def test_applevel(self): + from _numpypy import 
array, sum, max, min + a = array([[1, 2], [3, 4]]) + out = array([[0, 0], [0, 0]]) + c = sum(a, axis=0, out=out[0]) + assert (c == [4, 6]).all() + assert (c == out[0]).all() + assert (c != out[1]).all() + c = max(a, axis=1, out=out[0]) + assert (c == [2, 4]).all() + assert (c == out[0]).all() + assert (c != out[1]).all() + + def test_ufunc_cast(self): + from _numpypy import array, negative, add, sum + a = array(16, dtype = int) + c = array(0, dtype = float) + b = negative(a, out=c) + assert b == c + b = add(a, a, out=c) + assert b == c + d = array([16, 16], dtype=int) + b = sum(d, out=c) + assert b == c + try: + from _numpypy import version + v = version.version.split('.') + except: + v = ['1', '6', '0'] # numpypy is api compatable to what version? + if v[0]<'2': + b = negative(c, out=a) + assert b == a + b = add(c, c, out=a) + assert b == a + b = sum(array([16, 16], dtype=float), out=a) + assert b == a + else: + cast_error = raises(TypeError, negative, c, a) + assert str(cast_error.value) == \ + "Cannot cast ufunc negative output from dtype('float64') to dtype('int64') with casting rule 'same_kind'" diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -197,7 +197,6 @@ def test_signbit(self): from _numpypy import signbit, copysign - import struct assert (signbit([0, 0.0, 1, 1.0, float('inf'), float('nan')]) == [False, False, False, False, False, False]).all() diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -131,7 +131,7 @@ # bogus. We need to improve the situation somehow. 
self.check_simple_loop({'getinteriorfield_raw': 2, 'setinteriorfield_raw': 1, - 'arraylen_gc': 1, + 'arraylen_gc': 2, 'guard_true': 1, 'int_lt': 1, 'jump': 1, diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -500,6 +500,19 @@ BoxType = interp_boxes.W_ULongBox format_code = "L" +def _int64_coerce(self, space, w_item): + try: + return self._base_coerce(space, w_item) + except OperationError, e: + if not e.match(space, space.w_OverflowError): + raise + bigint = space.bigint_w(w_item) + try: + value = bigint.tolonglong() + except OverflowError: + raise OperationError(space.w_OverflowError, space.w_None) + return self.box(value) + class Int64(BaseType, Integer): _attrs_ = () @@ -507,6 +520,8 @@ BoxType = interp_boxes.W_Int64Box format_code = "q" + _coerce = func_with_new_name(_int64_coerce, '_coerce') + class NonNativeInt64(BaseType, NonNativeInteger): _attrs_ = () @@ -514,6 +529,8 @@ BoxType = interp_boxes.W_Int64Box format_code = "q" + _coerce = func_with_new_name(_int64_coerce, '_coerce') + def _uint64_coerce(self, space, w_item): try: return self._base_coerce(space, w_item) diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -14,10 +14,10 @@ def setup_module(mod): if os.name != 'nt': - mod.space = gettestobjspace(usemodules=['posix', 'fcntl']) + mod.space = gettestobjspace(usemodules=['posix', 'fcntl', 'struct']) else: # On windows, os.popen uses the subprocess module - mod.space = gettestobjspace(usemodules=['posix', '_rawffi', 'thread']) + mod.space = gettestobjspace(usemodules=['posix', '_rawffi', 'thread', 'struct']) mod.path = udir.join('posixtestfile.txt') mod.path.write("this is a test") mod.path2 = udir.join('test_posix2-') diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py 
b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -128,3 +128,82 @@ loop, = log.loops_by_filename(self.filepath) ops = loop.ops_by_id('look') assert 'call' not in log.opnames(ops) + + #XXX the following tests only work with strategies enabled + + def test_should_not_create_intobject_with_sets(self): + def main(n): + i = 0 + s = set() + while i < n: + s.add(i) + i += 1 + log = self.run(main, [1000]) + assert log.result == main(1000) + loop, = log.loops_by_filename(self.filepath) + opnames = log.opnames(loop.allops()) + assert opnames.count('new_with_vtable') == 0 + + def test_should_not_create_stringobject_with_sets(self): + def main(n): + i = 0 + s = set() + while i < n: + s.add(str(i)) + i += 1 + log = self.run(main, [1000]) + assert log.result == main(1000) + loop, = log.loops_by_filename(self.filepath) + opnames = log.opnames(loop.allops()) + assert opnames.count('new_with_vtable') == 0 + + def test_should_not_create_intobject_with_lists(self): + def main(n): + i = 0 + l = [] + while i < n: + l.append(i) + i += 1 + log = self.run(main, [1000]) + assert log.result == main(1000) + loop, = log.loops_by_filename(self.filepath) + opnames = log.opnames(loop.allops()) + assert opnames.count('new_with_vtable') == 0 + + def test_should_not_create_stringobject_with_lists(self): + def main(n): + i = 0 + l = [] + while i < n: + l.append(str(i)) + i += 1 + log = self.run(main, [1000]) + assert log.result == main(1000) + loop, = log.loops_by_filename(self.filepath) + opnames = log.opnames(loop.allops()) + assert opnames.count('new_with_vtable') == 0 + + def test_optimized_create_list_from_string(self): + def main(n): + i = 0 + l = [] + while i < n: + l = list("abc" * i) + i += 1 + log = self.run(main, [1000]) + assert log.result == main(1000) + loop, = log.loops_by_filename(self.filepath) + opnames = log.opnames(loop.allops()) + assert 
opnames.count('new_with_vtable') == 0 + + def test_optimized_create_set_from_list(self): + def main(n): + i = 0 + while i < n: + s = set([1,2,3]) + i += 1 + log = self.run(main, [1000]) + assert log.result == main(1000) + loop, = log.loops_by_filename(self.filepath) + opnames = log.opnames(loop.allops()) + assert opnames.count('new_with_vtable') == 0 diff --git a/pypy/module/rctime/test/test_rctime.py b/pypy/module/rctime/test/test_rctime.py --- a/pypy/module/rctime/test/test_rctime.py +++ b/pypy/module/rctime/test/test_rctime.py @@ -3,7 +3,7 @@ class AppTestRCTime: def setup_class(cls): - space = gettestobjspace(usemodules=('rctime',)) + space = gettestobjspace(usemodules=('rctime', 'struct')) cls.space = space def test_attributes(self): diff --git a/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py b/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py --- a/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py +++ b/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py @@ -142,3 +142,39 @@ assert str(b) == "[7 8 9]" b = a[2:1, ] assert str(b) == "[]" + + def test_equal(self): + from _numpypy import array + from numpypy import array_equal + + a = [1, 2, 3] + b = [1, 2, 3] + + assert array_equal(a, b) + assert array_equal(a, array(b)) + assert array_equal(array(a), b) + assert array_equal(array(a), array(b)) + + def test_not_equal(self): + from _numpypy import array + from numpypy import array_equal + + a = [1, 2, 3] + b = [1, 2, 4] + + assert not array_equal(a, b) + assert not array_equal(a, array(b)) + assert not array_equal(array(a), b) + assert not array_equal(array(a), array(b)) + + def test_mismatched_shape(self): + from _numpypy import array + from numpypy import array_equal + + a = [1, 2, 3] + b = [[1, 2, 3], [1, 2, 3]] + + assert not array_equal(a, b) + assert not array_equal(a, array(b)) + assert not array_equal(array(a), b) + assert not array_equal(array(a), array(b)) diff --git a/pypy/module/test_lib_pypy/test_binascii.py 
b/pypy/module/test_lib_pypy/test_binascii.py deleted file mode 100644 --- a/pypy/module/test_lib_pypy/test_binascii.py +++ /dev/null @@ -1,8 +0,0 @@ - -""" Some more binascii.py tests -""" - -class AppTestBinAscii: - def test_incorrect_padding(self): - import binascii - raises(binascii.Error, "'x'.decode('base64')") diff --git a/pypy/module/zipimport/test/test_undocumented.py b/pypy/module/zipimport/test/test_undocumented.py --- a/pypy/module/zipimport/test/test_undocumented.py +++ b/pypy/module/zipimport/test/test_undocumented.py @@ -19,7 +19,7 @@ class AppTestZipImport: def setup_class(cls): - space = gettestobjspace(usemodules=['zipimport', 'rctime']) + space = gettestobjspace(usemodules=['zipimport', 'rctime', 'struct']) cls.space = space cls.w_created_paths = space.wrap(created_paths) diff --git a/pypy/module/zipimport/test/test_zipimport.py b/pypy/module/zipimport/test/test_zipimport.py --- a/pypy/module/zipimport/test/test_zipimport.py +++ b/pypy/module/zipimport/test/test_zipimport.py @@ -47,9 +47,9 @@ """).compile() if cls.compression == ZIP_DEFLATED: - space = gettestobjspace(usemodules=['zipimport', 'zlib', 'rctime']) + space = gettestobjspace(usemodules=['zipimport', 'zlib', 'rctime', 'struct']) else: - space = gettestobjspace(usemodules=['zipimport', 'rctime']) + space = gettestobjspace(usemodules=['zipimport', 'rctime', 'struct']) cls.space = space tmpdir = udir.ensure('zipimport_%s' % cls.__name__, dir=1) diff --git a/pypy/objspace/flow/model.py b/pypy/objspace/flow/model.py --- a/pypy/objspace/flow/model.py +++ b/pypy/objspace/flow/model.py @@ -7,8 +7,7 @@ from pypy.tool.uid import uid, Hashable from pypy.tool.descriptor import roproperty from pypy.tool.sourcetools import PY_IDENTIFIER, nice_repr_for_func -from pypy.tool.identity_dict import identity_dict -from pypy.rlib.rarithmetic import is_valid_int, r_longlong, r_ulonglong +from pypy.rlib.rarithmetic import is_valid_int, r_longlong, r_ulonglong, r_uint """ @@ -546,7 +545,7 @@ for n in 
cases[:len(cases)-has_default]: if is_valid_int(n): continue - if isinstance(n, (r_longlong, r_ulonglong)): + if isinstance(n, (r_longlong, r_ulonglong, r_uint)): continue if isinstance(n, (str, unicode)) and len(n) == 1: continue diff --git a/pypy/objspace/flow/test/test_objspace.py b/pypy/objspace/flow/test/test_objspace.py --- a/pypy/objspace/flow/test/test_objspace.py +++ b/pypy/objspace/flow/test/test_objspace.py @@ -1,6 +1,6 @@ from __future__ import with_statement import new -import py +import py, sys from pypy.objspace.flow.model import Constant, Block, Link, Variable from pypy.objspace.flow.model import mkentrymap, c_last_exception from pypy.interpreter.argument import Arguments @@ -893,6 +893,8 @@ """ Tests code generated by pypy-c compiled with BUILD_LIST_FROM_ARG bytecode """ + if sys.version_info < (2, 7): + py.test.skip("2.7 only test") self.patch_opcodes('BUILD_LIST_FROM_ARG') try: def f(): diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -111,9 +111,15 @@ length = len(data) start, stop, step, slicelength = w_slice.indices4(space, length) assert slicelength >= 0 - newdata = [data[start + i*step] for i in range(slicelength)] + if step == 1 and 0 <= start <= stop: + newdata = data[start:stop] + else: + newdata = _getitem_slice_multistep(data, start, step, slicelength) return W_BytearrayObject(newdata) +def _getitem_slice_multistep(data, start, step, slicelength): + return [data[start + i*step] for i in range(slicelength)] + def contains__Bytearray_Int(space, w_bytearray, w_char): char = space.int_w(w_char) if not 0 <= char < 256: diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -127,10 +127,10 @@ def iter(self, w_dict): return ModuleDictIteratorImplementation(self.space, self, w_dict) - def keys(self, w_dict): + def 
w_keys(self, w_dict): space = self.space - iterator = self.unerase(w_dict.dstorage).iteritems - return [space.wrap(key) for key, cell in iterator()] + l = self.unerase(w_dict.dstorage).keys() + return space.newlist_str(l) def values(self, w_dict): iterator = self.unerase(w_dict.dstorage).itervalues diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -90,9 +90,9 @@ def _add_indirections(): dict_methods = "setitem setitem_str getitem \ getitem_str delitem length \ - clear keys values \ + clear w_keys values \ items iter setdefault \ - popitem".split() + popitem listview_str listview_int".split() def make_method(method): def f(self, *args): @@ -113,7 +113,7 @@ def get_empty_storage(self): raise NotImplementedError - def keys(self, w_dict): + def w_keys(self, w_dict): iterator = self.iter(w_dict) result = [] while 1: @@ -121,7 +121,7 @@ if w_key is not None: result.append(w_key) else: - return result + return self.space.newlist(result) def values(self, w_dict): iterator = self.iter(w_dict) @@ -160,6 +160,11 @@ w_dict.strategy = strategy w_dict.dstorage = storage + def listview_str(self, w_dict): + return None + + def listview_int(self, w_dict): + return None class EmptyDictStrategy(DictStrategy): @@ -371,8 +376,9 @@ self.switch_to_object_strategy(w_dict) return w_dict.getitem(w_key) - def keys(self, w_dict): - return [self.wrap(key) for key in self.unerase(w_dict.dstorage).iterkeys()] + def w_keys(self, w_dict): + l = [self.wrap(key) for key in self.unerase(w_dict.dstorage).iterkeys()] + return self.space.newlist(l) def values(self, w_dict): return self.unerase(w_dict.dstorage).values() @@ -425,8 +431,8 @@ def iter(self, w_dict): return ObjectIteratorImplementation(self.space, self, w_dict) - def keys(self, w_dict): - return self.unerase(w_dict.dstorage).keys() + def w_keys(self, w_dict): + return 
self.space.newlist(self.unerase(w_dict.dstorage).keys()) class StringDictStrategy(AbstractTypedStrategy, DictStrategy): @@ -469,9 +475,15 @@ assert key is not None return self.unerase(w_dict.dstorage).get(key, None) + def listview_str(self, w_dict): + return self.unerase(w_dict.dstorage).keys() + def iter(self, w_dict): return StrIteratorImplementation(self.space, self, w_dict) + def w_keys(self, w_dict): + return self.space.newlist_str(self.listview_str(w_dict)) + class _WrappedIteratorMixin(object): _mixin_ = True @@ -534,6 +546,14 @@ def iter(self, w_dict): return IntIteratorImplementation(self.space, self, w_dict) + def listview_int(self, w_dict): + return self.unerase(w_dict.dstorage).keys() + + def w_keys(self, w_dict): + # XXX there is no space.newlist_int yet + space = self.space + return space.call_function(space.w_list, w_dict) + class IntIteratorImplementation(_WrappedIteratorMixin, IteratorImplementation): pass @@ -688,7 +708,7 @@ return space.newlist(w_self.items()) def dict_keys__DictMulti(space, w_self): - return space.newlist(w_self.keys()) + return w_self.w_keys() def dict_values__DictMulti(space, w_self): return space.newlist(w_self.values()) diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py --- a/pypy/objspace/std/dictproxyobject.py +++ b/pypy/objspace/std/dictproxyobject.py @@ -76,7 +76,7 @@ def keys(self, w_dict): space = self.space - return [space.wrap(key) for key in self.unerase(w_dict.dstorage).dict_w.iterkeys()] + return space.newlist_str(self.unerase(w_dict.dstorage).dict_w.keys()) def values(self, w_dict): return [unwrap_cell(self.space, w_value) for w_value in self.unerase(w_dict.dstorage).dict_w.itervalues()] diff --git a/pypy/objspace/std/dicttype.py b/pypy/objspace/std/dicttype.py --- a/pypy/objspace/std/dicttype.py +++ b/pypy/objspace/std/dicttype.py @@ -62,8 +62,14 @@ w_fill = space.w_None if space.is_w(w_type, space.w_dict): w_dict = W_DictMultiObject.allocate_and_init_instance(space, w_type) 
- for w_key in space.listview(w_keys): - w_dict.setitem(w_key, w_fill) + + strlist = space.listview_str(w_keys) + if strlist is not None: + for key in strlist: + w_dict.setitem_str(key, w_fill) + else: + for w_key in space.listview(w_keys): + w_dict.setitem(w_key, w_fill) else: w_dict = space.call_function(w_type) for w_key in space.listview(w_keys): diff --git a/pypy/objspace/std/frozensettype.py b/pypy/objspace/std/frozensettype.py --- a/pypy/objspace/std/frozensettype.py +++ b/pypy/objspace/std/frozensettype.py @@ -39,13 +39,11 @@ def descr__frozenset__new__(space, w_frozensettype, w_iterable=gateway.NoneNotWrapped): from pypy.objspace.std.setobject import W_FrozensetObject - from pypy.objspace.std.setobject import make_setdata_from_w_iterable if (space.is_w(w_frozensettype, space.w_frozenset) and w_iterable is not None and type(w_iterable) is W_FrozensetObject): return w_iterable w_obj = space.allocate_instance(W_FrozensetObject, w_frozensettype) - data = make_setdata_from_w_iterable(space, w_iterable) - W_FrozensetObject.__init__(w_obj, space, data) + W_FrozensetObject.__init__(w_obj, space, w_iterable) return w_obj frozenset_typedef = StdTypeDef("frozenset", diff --git a/pypy/objspace/std/iterobject.py b/pypy/objspace/std/iterobject.py --- a/pypy/objspace/std/iterobject.py +++ b/pypy/objspace/std/iterobject.py @@ -29,9 +29,8 @@ class W_SeqIterObject(W_AbstractSeqIterObject): """Sequence iterator implementation for general sequences.""" -class W_FastListIterObject(W_AbstractSeqIterObject): - """Sequence iterator specialized for lists, accessing directly their - RPython-level list of wrapped objects. +class W_FastListIterObject(W_AbstractSeqIterObject): # XXX still needed + """Sequence iterator specialized for lists. 
""" class W_ReverseSeqIterObject(W_Object): diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -139,6 +139,16 @@ new erased object as storage""" self.strategy.init_from_list_w(self, list_w) + def clear(self, space): + """Initializes (or overrides) the listobject as empty.""" + self.space = space + if space.config.objspace.std.withliststrategies: + strategy = space.fromcache(EmptyListStrategy) + else: + strategy = space.fromcache(ObjectListStrategy) + self.strategy = strategy + strategy.clear(self) + def clone(self): """Returns a clone by creating a new listobject with the same strategy and a copy of the storage""" @@ -200,6 +210,11 @@ """ Return the items in the list as unwrapped strings. If the list does not use the list strategy, return None. """ return self.strategy.getitems_str(self) + + def getitems_int(self): + """ Return the items in the list as unwrapped ints. If the list does + not use the list strategy, return None. 
""" + return self.strategy.getitems_int(self) # ___________________________________________________ @@ -300,6 +315,9 @@ def getitems_str(self, w_list): return None + def getitems_int(self, w_list): + return None + def getstorage_copy(self, w_list): raise NotImplementedError @@ -358,6 +376,9 @@ assert len(list_w) == 0 w_list.lstorage = self.erase(None) + def clear(self, w_list): + w_list.lstorage = self.erase(None) + erase, unerase = rerased.new_erasing_pair("empty") erase = staticmethod(erase) unerase = staticmethod(unerase) @@ -516,6 +537,9 @@ raise IndexError return start + i * step + def getitems_int(self, w_list): + return self._getitems_range(w_list, False) + def getitem(self, w_list, i): return self.wrap(self._getitem_unwrapped(w_list, i)) @@ -696,6 +720,7 @@ for i in l: if i == obj: return True + return False return ListStrategy.contains(self, w_list, w_obj) def length(self, w_list): @@ -937,6 +962,9 @@ def init_from_list_w(self, w_list, list_w): w_list.lstorage = self.erase(list_w) + def clear(self, w_list): + w_list.lstorage = self.erase([]) + def contains(self, w_list, w_obj): return ListStrategy.contains(self, w_list, w_obj) @@ -970,6 +998,9 @@ if reverse: l.reverse() + def getitems_int(self, w_list): + return self.unerase(w_list.lstorage) + class FloatListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = 0.0 _applevel_repr = "float" @@ -1027,37 +1058,49 @@ def getitems_str(self, w_list): return self.unerase(w_list.lstorage) - # _______________________________________________________ init_signature = Signature(['sequence'], None, None) init_defaults = [None] def init__List(space, w_list, __args__): - from pypy.objspace.std.tupleobject import W_TupleObject + from pypy.objspace.std.tupleobject import W_AbstractTupleObject # this is on the silly side w_iterable, = __args__.parse_obj( None, 'list', init_signature, init_defaults) - w_list.__init__(space, []) + w_list.clear(space) if w_iterable is not None: - # unfortunately this is duplicating 
space.unpackiterable to avoid - # assigning a new RPython list to 'wrappeditems', which defeats the - # W_FastListIterObject optimization. - if isinstance(w_iterable, W_ListObject): - w_list.extend(w_iterable) - elif isinstance(w_iterable, W_TupleObject): - w_list.extend(W_ListObject(space, w_iterable.getitems_copy(space))) - else: - _init_from_iterable(space, w_list, w_iterable) + if type(w_iterable) is W_ListObject: + w_iterable.copy_into(w_list) + return + elif isinstance(w_iterable, W_AbstractTupleObject): + w_list.__init__(space, w_iterable.getitems_copy(space)) + return + + intlist = space.listview_int(w_iterable) + if intlist is not None: + w_list.strategy = strategy = space.fromcache(IntegerListStrategy) + # need to copy because intlist can share with w_iterable + w_list.lstorage = strategy.erase(intlist[:]) + return + + strlist = space.listview_str(w_iterable) + if strlist is not None: + w_list.strategy = strategy = space.fromcache(StringListStrategy) + # need to copy because intlist can share with w_iterable + w_list.lstorage = strategy.erase(strlist[:]) + return + + # xxx special hack for speed + from pypy.interpreter.generator import GeneratorIterator + if isinstance(w_iterable, GeneratorIterator): + w_iterable.unpack_into_w(w_list) + return + # /xxx + _init_from_iterable(space, w_list, w_iterable) def _init_from_iterable(space, w_list, w_iterable): # in its own function to make the JIT look into init__List - # xxx special hack for speed - from pypy.interpreter.generator import GeneratorIterator - if isinstance(w_iterable, GeneratorIterator): - w_iterable.unpack_into_w(w_list) - return - # /xxx w_iterator = space.iter(w_iterable) while True: try: diff --git a/pypy/objspace/std/listtype.py b/pypy/objspace/std/listtype.py --- a/pypy/objspace/std/listtype.py +++ b/pypy/objspace/std/listtype.py @@ -43,7 +43,7 @@ def descr__new__(space, w_listtype, __args__): from pypy.objspace.std.listobject import W_ListObject w_obj = space.allocate_instance(W_ListObject, 
w_listtype) - W_ListObject.__init__(w_obj, space, []) + w_obj.clear(space) return w_obj # ____________________________________________________________ diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -694,6 +694,8 @@ self.delitem(w_dict, w_key) return (w_key, w_value) + # XXX could implement a more efficient w_keys based on space.newlist_str + def materialize_r_dict(space, obj, dict_w): map = obj._get_mapdict_map() new_obj = map.materialize_r_dict(space, obj, dict_w) diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -227,10 +227,7 @@ return W_ComplexObject(x.real, x.imag) if isinstance(x, set): - rdict_w = r_dict(self.eq_w, self.hash_w) - for item in x: - rdict_w[self.wrap(item)] = None - res = W_SetObject(self, rdict_w) + res = W_SetObject(self, self.newlist([self.wrap(item) for item in x])) return res if isinstance(x, frozenset): @@ -325,7 +322,7 @@ def newset(self): from pypy.objspace.std.setobject import newset - return W_SetObject(self, newset(self)) + return W_SetObject(self, None) def newslice(self, w_start, w_end, w_step): return W_SliceObject(w_start, w_end, w_step) @@ -402,8 +399,8 @@ def unpackiterable(self, w_obj, expected_length=-1): if isinstance(w_obj, W_AbstractTupleObject): - t = w_obj.getitems_copy(self) - elif isinstance(w_obj, W_ListObject): + t = w_obj.getitems_copy(space) + elif type(w_obj) is W_ListObject: t = w_obj.getitems_copy() else: return ObjSpace.unpackiterable(self, w_obj, expected_length) @@ -416,8 +413,8 @@ """ Fast paths """ if isinstance(w_obj, W_AbstractTupleObject): - t = w_obj.tolist(self) - elif isinstance(w_obj, W_ListObject): + t = w_obj.tolist(space) + elif type(w_obj) is W_ListObject: if unroll: t = w_obj.getitems_unroll() else: @@ -438,7 +435,7 @@ return self.fixedview(w_obj, expected_length, unroll=True) def listview(self, 
w_obj, expected_length=-1): - if isinstance(w_obj, W_ListObject): + if type(w_obj) is W_ListObject: t = w_obj.getitems() elif isinstance(w_obj, W_AbstractTupleObject): t = w_obj.getitems_copy(self) @@ -449,8 +446,25 @@ return t def listview_str(self, w_obj): - if isinstance(w_obj, W_ListObject): + # note: uses exact type checking for objects with strategies, + # and isinstance() for others. See test_listobject.test_uses_custom... + if type(w_obj) is W_ListObject: return w_obj.getitems_str() + if type(w_obj) is W_DictMultiObject: + return w_obj.listview_str() + if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: + return w_obj.listview_str() + if isinstance(w_obj, W_StringObject): + return w_obj.listview_str() + return None + + def listview_int(self, w_obj): + if type(w_obj) is W_ListObject: + return w_obj.getitems_int() + if type(w_obj) is W_DictMultiObject: + return w_obj.listview_int() + if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: + return w_obj.listview_int() return None def sliceindices(self, w_slice, w_length): diff --git a/pypy/objspace/std/ropeobject.py b/pypy/objspace/std/ropeobject.py --- a/pypy/objspace/std/ropeobject.py +++ b/pypy/objspace/std/ropeobject.py @@ -41,11 +41,6 @@ return w_self return W_RopeObject(w_self._node) - def unicode_w(w_self, space): - # XXX should this use the default encoding? 
- from pypy.objspace.std.unicodetype import plain_str2unicode - return plain_str2unicode(space, w_self._node.flatten_string()) - W_RopeObject.EMPTY = W_RopeObject(rope.LiteralStringNode.EMPTY) W_RopeObject.PREBUILT = [W_RopeObject(rope.LiteralStringNode.PREBUILT[i]) for i in range(256)] diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -7,6 +7,12 @@ from pypy.interpreter.argument import Signature from pypy.objspace.std.settype import set_typedef as settypedef from pypy.objspace.std.frozensettype import frozenset_typedef as frozensettypedef +from pypy.rlib import rerased +from pypy.rlib.objectmodel import instantiate +from pypy.interpreter.generator import GeneratorIterator +from pypy.objspace.std.listobject import W_ListObject +from pypy.objspace.std.intobject import W_IntObject +from pypy.objspace.std.stringobject import W_StringObject class W_BaseSetObject(W_Object): typedef = None @@ -20,88 +26,859 @@ return True return False - - def __init__(w_self, space, setdata): + def __init__(w_self, space, w_iterable=None): """Initialize the set by taking ownership of 'setdata'.""" - assert setdata is not None - w_self.setdata = setdata + w_self.space = space + set_strategy_and_setdata(space, w_self, w_iterable) def __repr__(w_self): """representation for debugging purposes""" - reprlist = [repr(w_item) for w_item in w_self.setdata.keys()] + reprlist = [repr(w_item) for w_item in w_self.getkeys()] return "<%s(%s)>" % (w_self.__class__.__name__, ', '.join(reprlist)) + def from_storage_and_strategy(w_self, storage, strategy): + obj = w_self._newobj(w_self.space, None) + assert isinstance(obj, W_BaseSetObject) + obj.strategy = strategy + obj.sstorage = storage + return obj + _lifeline_ = None def getweakref(self): return self._lifeline_ + def setweakref(self, space, weakreflifeline): self._lifeline_ = weakreflifeline def delweakref(self): self._lifeline_ = None + def 
switch_to_object_strategy(self, space): + d = self.strategy.getdict_w(self) + self.strategy = strategy = space.fromcache(ObjectSetStrategy) + self.sstorage = strategy.erase(d) + + def switch_to_empty_strategy(self): + self.strategy = strategy = self.space.fromcache(EmptySetStrategy) + self.sstorage = strategy.get_empty_storage() + + # _____________ strategy methods ________________ + + + def clear(self): + """ Removes all elements from the set. """ + self.strategy.clear(self) + + def copy_real(self): + """ Returns a clone of the set. Frozensets storages are also copied.""" + return self.strategy.copy_real(self) + + def length(self): + """ Returns the number of items inside the set. """ + return self.strategy.length(self) + + def add(self, w_key): + """ Adds an element to the set. The element must be wrapped. """ + self.strategy.add(self, w_key) + + def remove(self, w_item): + """ Removes the given element from the set. Element must be wrapped. """ + return self.strategy.remove(self, w_item) + + def getdict_w(self): + """ Returns a dict with all elements of the set. Needed only for switching to ObjectSetStrategy. """ + return self.strategy.getdict_w(self) + + def listview_str(self): + """ If this is a string set return its contents as a list of uwnrapped strings. Otherwise return None. """ + return self.strategy.listview_str(self) + + def listview_int(self): + """ If this is an int set return its contents as a list of uwnrapped ints. Otherwise return None. """ + return self.strategy.listview_int(self) + + def get_storage_copy(self): + """ Returns a copy of the storage. Needed when we want to clone all elements from one set and + put them into another. """ + return self.strategy.get_storage_copy(self) + + def getkeys(self): + """ Returns a list of all elements inside the set. Only used in __repr__. 
Use as less as possible.""" + return self.strategy.getkeys(self) + + def difference(self, w_other): + """ Returns a set with all items that are in this set, but not in w_other. W_other must be a set.""" + return self.strategy.difference(self, w_other) + + def difference_update(self, w_other): + """ As difference but overwrites the sets content with the result. W_other must be a set.""" + self.strategy.difference_update(self, w_other) + + def symmetric_difference(self, w_other): + """ Returns a set with all items that are either in this set or in w_other, but not in both. W_other must be a set. """ + return self.strategy.symmetric_difference(self, w_other) + + def symmetric_difference_update(self, w_other): + """ As symmetric_difference but overwrites the content of the set with the result. W_other must be a set.""" + self.strategy.symmetric_difference_update(self, w_other) + + def intersect(self, w_other): + """ Returns a set with all items that exists in both sets, this set and in w_other. W_other must be a set. """ + return self.strategy.intersect(self, w_other) + + def intersect_update(self, w_other): + """ Keeps only those elements found in both sets, removing all other elements. W_other must be a set.""" + self.strategy.intersect_update(self, w_other) + + def issubset(self, w_other): + """ Checks wether this set is a subset of w_other. W_other must be a set. """ + return self.strategy.issubset(self, w_other) + + def isdisjoint(self, w_other): + """ Checks wether this set and the w_other are completly different, i.e. have no equal elements. W_other must be a set.""" + return self.strategy.isdisjoint(self, w_other) + + def update(self, w_other): + """ Appends all elements from the given set to this set. 
W_other must be a set.""" + self.strategy.update(self, w_other) + + def has_key(self, w_key): + """ Checks wether this set contains the given wrapped key.""" + return self.strategy.has_key(self, w_key) + + def equals(self, w_other): + """ Checks wether this set and the given set are equal, i.e. contain the same elements. W_other must be a set.""" + return self.strategy.equals(self, w_other) + + def iter(self): + """ Returns an iterator of the elements from this set. """ + return self.strategy.iter(self) + + def popitem(self): + """ Removes an arbitrary element from the set. May raise KeyError if set is empty.""" + return self.strategy.popitem(self) + class W_SetObject(W_BaseSetObject): from pypy.objspace.std.settype import set_typedef as typedef - def _newobj(w_self, space, rdict_w): - """Make a new set by taking ownership of 'rdict_w'.""" + def _newobj(w_self, space, w_iterable): + """Make a new set by taking ownership of 'w_iterable'.""" if type(w_self) is W_SetObject: - return W_SetObject(space, rdict_w) + return W_SetObject(space, w_iterable) w_type = space.type(w_self) w_obj = space.allocate_instance(W_SetObject, w_type) - W_SetObject.__init__(w_obj, space, rdict_w) + W_SetObject.__init__(w_obj, space, w_iterable) return w_obj class W_FrozensetObject(W_BaseSetObject): from pypy.objspace.std.frozensettype import frozenset_typedef as typedef hash = 0 - def _newobj(w_self, space, rdict_w): - """Make a new frozenset by taking ownership of 'rdict_w'.""" + def _newobj(w_self, space, w_iterable): + """Make a new frozenset by taking ownership of 'w_iterable'.""" if type(w_self) is W_FrozensetObject: - return W_FrozensetObject(space, rdict_w) + return W_FrozensetObject(space, w_iterable) w_type = space.type(w_self) w_obj = space.allocate_instance(W_FrozensetObject, w_type) - W_FrozensetObject.__init__(w_obj, space, rdict_w) + W_FrozensetObject.__init__(w_obj, space, w_iterable) return w_obj registerimplementation(W_BaseSetObject) registerimplementation(W_SetObject) 
registerimplementation(W_FrozensetObject) -class W_SetIterObject(W_Object): - from pypy.objspace.std.settype import setiter_typedef as typedef +class SetStrategy(object): + def __init__(self, space): + self.space = space - def __init__(w_self, setdata): - w_self.content = content = setdata - w_self.len = len(content) - w_self.pos = 0 - w_self.iterator = w_self.content.iterkeys() + def get_empty_dict(self): + """ Returns an empty dictionary depending on the strategy. Used to initalize a new storage. """ + raise NotImplementedError - def next_entry(w_self): - for w_key in w_self.iterator: + def get_empty_storage(self): + """ Returns an empty storage (erased) object. Used to initialize an empty set.""" + raise NotImplementedError + + def listview_str(self, w_set): + return None + + def listview_int(self, w_set): + return None + + #def erase(self, storage): + # raise NotImplementedError + + #def unerase(self, storage): + # raise NotImplementedError + + # __________________ methods called on W_SetObject _________________ + + def clear(self, w_set): + raise NotImplementedError + + def copy_real(self, w_set): + raise NotImplementedError + + def length(self, w_set): + raise NotImplementedError + + def add(self, w_set, w_key): + raise NotImplementedError + + def remove(self, w_set, w_item): + raise NotImplementedError + + def getdict_w(self, w_set): + raise NotImplementedError + + def get_storage_copy(self, w_set): + raise NotImplementedError + + def getkeys(self, w_set): + raise NotImplementedError + + def difference(self, w_set, w_other): + raise NotImplementedError + + def difference_update(self, w_set, w_other): + raise NotImplementedError + + def symmetric_difference(self, w_set, w_other): + raise NotImplementedError + + def symmetric_difference_update(self, w_set, w_other): + raise NotImplementedError + + def intersect(self, w_set, w_other): + raise NotImplementedError + + def intersect_update(self, w_set, w_other): + raise NotImplementedError + + def issubset(self, 
w_set, w_other): + raise NotImplementedError + + def isdisjoint(self, w_set, w_other): + raise NotImplementedError + + def update(self, w_set, w_other): + raise NotImplementedError + + def has_key(self, w_set, w_key): + raise NotImplementedError + + def equals(self, w_set, w_other): + raise NotImplementedError + + def iter(self, w_set): + raise NotImplementedError + + def popitem(self, w_set): + raise NotImplementedError + +class EmptySetStrategy(SetStrategy): + + erase, unerase = rerased.new_erasing_pair("empty") + erase = staticmethod(erase) + unerase = staticmethod(unerase) + + def get_empty_storage(self): + return self.erase(None) + + def is_correct_type(self, w_key): + return False + + def length(self, w_set): + return 0 + + def clear(self, w_set): + pass + + def copy_real(self, w_set): + storage = self.erase(None) + clone = w_set.from_storage_and_strategy(storage, self) + return clone + + def add(self, w_set, w_key): + if type(w_key) is W_IntObject: + strategy = self.space.fromcache(IntegerSetStrategy) + elif type(w_key) is W_StringObject: + strategy = self.space.fromcache(StringSetStrategy) + else: + strategy = self.space.fromcache(ObjectSetStrategy) + w_set.strategy = strategy + w_set.sstorage = strategy.get_empty_storage() + w_set.add(w_key) + + def remove(self, w_set, w_item): + return False + + def getdict_w(self, w_set): + return newset(self.space) + + def get_storage_copy(self, w_set): + return w_set.sstorage + + def getkeys(self, w_set): + return [] + + def has_key(self, w_set, w_key): + return False + + def equals(self, w_set, w_other): + if w_other.strategy is self or w_other.length() == 0: + return True + return False + + def difference(self, w_set, w_other): + return w_set.copy_real() + + def difference_update(self, w_set, w_other): + pass + + def intersect(self, w_set, w_other): + return w_set.copy_real() + + def intersect_update(self, w_set, w_other): + pass + + def isdisjoint(self, w_set, w_other): + return True + + def issubset(self, w_set, 
w_other): + return True + + def symmetric_difference(self, w_set, w_other): + return w_other.copy_real() + + def symmetric_difference_update(self, w_set, w_other): + w_set.strategy = w_other.strategy + w_set.sstorage = w_other.get_storage_copy() + + def update(self, w_set, w_other): + w_set.strategy = w_other.strategy + w_set.sstorage = w_other.get_storage_copy() + + def iter(self, w_set): + return EmptyIteratorImplementation(self.space, w_set) + + def popitem(self, w_set): + raise OperationError(self.space.w_KeyError, + self.space.wrap('pop from an empty set')) + +class AbstractUnwrappedSetStrategy(object): + _mixin_ = True + + def is_correct_type(self, w_key): + """ Checks wether the given wrapped key fits this strategy.""" + raise NotImplementedError + + def unwrap(self, w_item): + """ Returns the unwrapped value of the given wrapped item.""" + raise NotImplementedError + + def wrap(self, item): + """ Returns a wrapped version of the given unwrapped item. """ + raise NotImplementedError + + def get_storage_from_list(self, list_w): + setdata = self.get_empty_dict() + for w_item in list_w: + setdata[self.unwrap(w_item)] = None + return self.erase(setdata) + + def get_storage_from_unwrapped_list(self, items): + setdata = self.get_empty_dict() + for item in items: + setdata[item] = None + return self.erase(setdata) + + def length(self, w_set): + return len(self.unerase(w_set.sstorage)) + + def clear(self, w_set): + w_set.switch_to_empty_strategy() + + def copy_real(self, w_set): + # may be used internally on frozen sets, although frozenset().copy() + # returns self in frozenset_copy__Frozenset. 
+ strategy = w_set.strategy + d = self.unerase(w_set.sstorage) + storage = self.erase(d.copy()) + clone = w_set.from_storage_and_strategy(storage, strategy) + return clone + + def add(self, w_set, w_key): + if self.is_correct_type(w_key): + d = self.unerase(w_set.sstorage) + d[self.unwrap(w_key)] = None + else: + w_set.switch_to_object_strategy(self.space) + w_set.add(w_key) + + def remove(self, w_set, w_item): + from pypy.objspace.std.dictmultiobject import _never_equal_to_string + d = self.unerase(w_set.sstorage) + if not self.is_correct_type(w_item): + #XXX check type of w_item and immediately return False in some cases + w_set.switch_to_object_strategy(self.space) + return w_set.remove(w_item) + + key = self.unwrap(w_item) + try: + del d[key] + return True + except KeyError: + return False + + def getdict_w(self, w_set): + result = newset(self.space) + keys = self.unerase(w_set.sstorage).keys() + for key in keys: + result[self.wrap(key)] = None + return result + + def get_storage_copy(self, w_set): + d = self.unerase(w_set.sstorage) + copy = self.erase(d.copy()) + return copy + + def getkeys(self, w_set): + keys = self.unerase(w_set.sstorage).keys() + keys_w = [self.wrap(key) for key in keys] + return keys_w + + def has_key(self, w_set, w_key): + from pypy.objspace.std.dictmultiobject import _never_equal_to_string + if not self.is_correct_type(w_key): + #XXX check type of w_item and immediately return False in some cases + w_set.switch_to_object_strategy(self.space) + return w_set.has_key(w_key) + d = self.unerase(w_set.sstorage) + return self.unwrap(w_key) in d + + def equals(self, w_set, w_other): + if w_set.length() != w_other.length(): + return False + items = self.unerase(w_set.sstorage).keys() + for key in items: + if not w_other.has_key(self.wrap(key)): + return False + return True + + def _difference_wrapped(self, w_set, w_other): + strategy = self.space.fromcache(ObjectSetStrategy) + + d_new = strategy.get_empty_dict() + for obj in 
self.unerase(w_set.sstorage): + w_item = self.wrap(obj) + if not w_other.has_key(w_item): + d_new[w_item] = None + + return strategy.erase(d_new) + + def _difference_unwrapped(self, w_set, w_other): + iterator = self.unerase(w_set.sstorage).iterkeys() + other_dict = self.unerase(w_other.sstorage) + result_dict = self.get_empty_dict() + for key in iterator: + if key not in other_dict: + result_dict[key] = None + return self.erase(result_dict) + + def _difference_base(self, w_set, w_other): + if self is w_other.strategy: + strategy = w_set.strategy + storage = self._difference_unwrapped(w_set, w_other) + elif not w_set.strategy.may_contain_equal_elements(w_other.strategy): + strategy = w_set.strategy + storage = w_set.sstorage + else: + strategy = self.space.fromcache(ObjectSetStrategy) + storage = self._difference_wrapped(w_set, w_other) + return storage, strategy + + def difference(self, w_set, w_other): + storage, strategy = self._difference_base(w_set, w_other) + w_newset = w_set.from_storage_and_strategy(storage, strategy) + return w_newset + + def difference_update(self, w_set, w_other): + storage, strategy = self._difference_base(w_set, w_other) + w_set.strategy = strategy + w_set.sstorage = storage + + def _symmetric_difference_unwrapped(self, w_set, w_other): + d_new = self.get_empty_dict() + d_this = self.unerase(w_set.sstorage) + d_other = self.unerase(w_other.sstorage) + for key in d_other.keys(): + if not key in d_this: + d_new[key] = None + for key in d_this.keys(): + if not key in d_other: + d_new[key] = None + + storage = self.erase(d_new) + return storage + + def _symmetric_difference_wrapped(self, w_set, w_other): + newsetdata = newset(self.space) + for obj in self.unerase(w_set.sstorage): + w_item = self.wrap(obj) + if not w_other.has_key(w_item): + newsetdata[w_item] = None + + w_iterator = w_other.iter() + while True: + w_item = w_iterator.next_entry() + if w_item is None: + break + if not w_set.has_key(w_item): + newsetdata[w_item] = None + + 
strategy = self.space.fromcache(ObjectSetStrategy) + return strategy.erase(newsetdata) + + def _symmetric_difference_base(self, w_set, w_other): + if self is w_other.strategy: + strategy = w_set.strategy + storage = self._symmetric_difference_unwrapped(w_set, w_other) + else: + strategy = self.space.fromcache(ObjectSetStrategy) + storage = self._symmetric_difference_wrapped(w_set, w_other) + return storage, strategy + + def symmetric_difference(self, w_set, w_other): + storage, strategy = self._symmetric_difference_base(w_set, w_other) + return w_set.from_storage_and_strategy(storage, strategy) + + def symmetric_difference_update(self, w_set, w_other): + storage, strategy = self._symmetric_difference_base(w_set, w_other) + w_set.strategy = strategy + w_set.sstorage = storage + + def _intersect_base(self, w_set, w_other): + if self is w_other.strategy: + strategy = w_set.strategy + storage = strategy._intersect_unwrapped(w_set, w_other) + elif not w_set.strategy.may_contain_equal_elements(w_other.strategy): + strategy = self.space.fromcache(EmptySetStrategy) + storage = strategy.get_empty_storage() + else: + strategy = self.space.fromcache(ObjectSetStrategy) + storage = self._intersect_wrapped(w_set, w_other) + return storage, strategy + + def _intersect_wrapped(self, w_set, w_other): + result = newset(self.space) + for key in self.unerase(w_set.sstorage): + w_key = self.wrap(key) + if w_other.has_key(w_key): + result[w_key] = None + + strategy = self.space.fromcache(ObjectSetStrategy) + return strategy.erase(result) + + def _intersect_unwrapped(self, w_set, w_other): + result = self.get_empty_dict() + d_this = self.unerase(w_set.sstorage) + d_other = self.unerase(w_other.sstorage) + for key in d_this: + if key in d_other: + result[key] = None + return self.erase(result) + + def intersect(self, w_set, w_other): + if w_set.length() > w_other.length(): + return w_other.intersect(w_set) + + storage, strategy = self._intersect_base(w_set, w_other) + return 
w_set.from_storage_and_strategy(storage, strategy) + + def intersect_update(self, w_set, w_other): + if w_set.length() > w_other.length(): + w_intersection = w_other.intersect(w_set) + strategy = w_intersection.strategy + storage = w_intersection.sstorage + else: + storage, strategy = self._intersect_base(w_set, w_other) + w_set.strategy = strategy + w_set.sstorage = storage + + def _issubset_unwrapped(self, w_set, w_other): + d_other = self.unerase(w_other.sstorage) + for item in self.unerase(w_set.sstorage): + if not item in d_other: + return False + return True + + def _issubset_wrapped(self, w_set, w_other): + for obj in self.unerase(w_set.sstorage): + w_item = self.wrap(obj) + if not w_other.has_key(w_item): + return False + return True + + def issubset(self, w_set, w_other): + if w_set.length() == 0: + return True + + if w_set.strategy is w_other.strategy: + return self._issubset_unwrapped(w_set, w_other) + elif not w_set.strategy.may_contain_equal_elements(w_other.strategy): + return False + else: + return self._issubset_wrapped(w_set, w_other) + + def _isdisjoint_unwrapped(self, w_set, w_other): + d_set = self.unerase(w_set.sstorage) + d_other = self.unerase(w_other.sstorage) + for key in d_set: + if key in d_other: + return False + return True + + def _isdisjoint_wrapped(self, w_set, w_other): + d = self.unerase(w_set.sstorage) + for key in d: + if w_other.has_key(self.wrap(key)): + return False + return True + + def isdisjoint(self, w_set, w_other): + if w_other.length() == 0: + return True + if w_set.length() > w_other.length(): + return w_other.isdisjoint(w_set) + + if w_set.strategy is w_other.strategy: + return self._isdisjoint_unwrapped(w_set, w_other) + elif not w_set.strategy.may_contain_equal_elements(w_other.strategy): + return True + else: + return self._isdisjoint_wrapped(w_set, w_other) + + def update(self, w_set, w_other): + if self is w_other.strategy: + d_set = self.unerase(w_set.sstorage) + d_other = self.unerase(w_other.sstorage) + 
d_set.update(d_other) + return + + w_set.switch_to_object_strategy(self.space) + w_set.update(w_other) + + def popitem(self, w_set): + storage = self.unerase(w_set.sstorage) + try: + # this returns a tuple because internally sets are dicts + result = storage.popitem() + except KeyError: + # strategy may still be the same even if dict is empty + raise OperationError(self.space.w_KeyError, + self.space.wrap('pop from an empty set')) + return self.wrap(result[0]) + +class StringSetStrategy(AbstractUnwrappedSetStrategy, SetStrategy): + erase, unerase = rerased.new_erasing_pair("string") + erase = staticmethod(erase) + unerase = staticmethod(unerase) + + def get_empty_storage(self): + return self.erase({}) + + def get_empty_dict(self): + return {} + + def listview_str(self, w_set): + return self.unerase(w_set.sstorage).keys() + + def is_correct_type(self, w_key): + return type(w_key) is W_StringObject + + def may_contain_equal_elements(self, strategy): + if strategy is self.space.fromcache(IntegerSetStrategy): + return False + if strategy is self.space.fromcache(EmptySetStrategy): + return False + return True + + def unwrap(self, w_item): + return self.space.str_w(w_item) + + def wrap(self, item): + return self.space.wrap(item) + + def iter(self, w_set): + return StringIteratorImplementation(self.space, self, w_set) + +class IntegerSetStrategy(AbstractUnwrappedSetStrategy, SetStrategy): + erase, unerase = rerased.new_erasing_pair("integer") + erase = staticmethod(erase) + unerase = staticmethod(unerase) + + def get_empty_storage(self): + return self.erase({}) + + def get_empty_dict(self): + return {} + + def listview_int(self, w_set): + return self.unerase(w_set.sstorage).keys() + + def is_correct_type(self, w_key): + from pypy.objspace.std.intobject import W_IntObject + return type(w_key) is W_IntObject + + def may_contain_equal_elements(self, strategy): + if strategy is self.space.fromcache(StringSetStrategy): + return False + if strategy is 
self.space.fromcache(EmptySetStrategy): + return False + return True + + def unwrap(self, w_item): + return self.space.int_w(w_item) + + def wrap(self, item): + return self.space.wrap(item) + + def iter(self, w_set): + return IntegerIteratorImplementation(self.space, self, w_set) + +class ObjectSetStrategy(AbstractUnwrappedSetStrategy, SetStrategy): + erase, unerase = rerased.new_erasing_pair("object") + erase = staticmethod(erase) + unerase = staticmethod(unerase) + + def get_empty_storage(self): + return self.erase(self.get_empty_dict()) + + def get_empty_dict(self): + return newset(self.space) + + def is_correct_type(self, w_key): + return True + + def may_contain_equal_elements(self, strategy): + if strategy is self.space.fromcache(EmptySetStrategy): + return False + return True + + def unwrap(self, w_item): + return w_item + + def wrap(self, item): + return item + + def iter(self, w_set): + return RDictIteratorImplementation(self.space, self, w_set) + + def update(self, w_set, w_other): + d_obj = self.unerase(w_set.sstorage) + w_iterator = w_other.iter() + while True: + w_item = w_iterator.next_entry() + if w_item is None: + break + d_obj[w_item] = None + +class IteratorImplementation(object): + def __init__(self, space, implementation): + self.space = space + self.setimplementation = implementation + self.len = implementation.length() + self.pos = 0 + + def next(self): + if self.setimplementation is None: + return None + if self.len != self.setimplementation.length(): + self.len = -1 # Make this error state sticky + raise OperationError(self.space.w_RuntimeError, + self.space.wrap("set changed size during iteration")) + # look for the next entry + if self.pos < self.len: + result = self.next_entry() + self.pos += 1 + return result + # no more entries + self.setimplementation = None + return None + + def next_entry(self): + """ Purely abstract method + """ + raise NotImplementedError + + def length(self): + if self.setimplementation is not None: + return 
self.len - self.pos + return 0 + +class EmptyIteratorImplementation(IteratorImplementation): + def next_entry(self): + return None + + +class StringIteratorImplementation(IteratorImplementation): + def __init__(self, space, strategy, w_set): + IteratorImplementation.__init__(self, space, w_set) + d = strategy.unerase(w_set.sstorage) + self.iterator = d.iterkeys() + + def next_entry(self): + for key in self.iterator: + return self.space.wrap(key) + else: + return None + +class IntegerIteratorImplementation(IteratorImplementation): + #XXX same implementation in dictmultiobject on dictstrategy-branch + def __init__(self, space, strategy, dictimplementation): + IteratorImplementation.__init__(self, space, dictimplementation) + d = strategy.unerase(dictimplementation.sstorage) + self.iterator = d.iterkeys() + + def next_entry(self): + # note that this 'for' loop only runs once, at most + for key in self.iterator: + return self.space.wrap(key) + else: + return None + +class RDictIteratorImplementation(IteratorImplementation): + def __init__(self, space, strategy, dictimplementation): + IteratorImplementation.__init__(self, space, dictimplementation) + d = strategy.unerase(dictimplementation.sstorage) + self.iterator = d.iterkeys() + + def next_entry(self): + # note that this 'for' loop only runs once, at most + for w_key in self.iterator: return w_key else: return None +class W_SetIterObject(W_Object): + from pypy.objspace.std.settype import setiter_typedef as typedef + # XXX this class should be killed, and the various + # iterimplementations should be W_Objects directly. 
+ + def __init__(w_self, space, iterimplementation): + w_self.space = space + w_self.iterimplementation = iterimplementation + registerimplementation(W_SetIterObject) def iter__SetIterObject(space, w_setiter): return w_setiter def next__SetIterObject(space, w_setiter): - content = w_setiter.content - if content is not None: - if w_setiter.len != len(content): - w_setiter.len = -1 # Make this error state sticky - raise OperationError(space.w_RuntimeError, - space.wrap("Set changed size during iteration")) - # look for the next entry - w_result = w_setiter.next_entry() - if w_result is not None: - w_setiter.pos += 1 - return w_result - # no more entries - w_setiter.content = None + iterimplementation = w_setiter.iterimplementation + w_key = iterimplementation.next() + if w_key is not None: + return w_key raise OperationError(space.w_StopIteration, space.w_None) # XXX __length_hint__() @@ -116,107 +893,91 @@ def newset(space): return r_dict(space.eq_w, space.hash_w, force_non_null=True) -def make_setdata_from_w_iterable(space, w_iterable=None): - """Return a new r_dict with the content of w_iterable.""" +def set_strategy_and_setdata(space, w_set, w_iterable): + from pypy.objspace.std.intobject import W_IntObject + if w_iterable is None : + w_set.strategy = strategy = space.fromcache(EmptySetStrategy) + w_set.sstorage = strategy.get_empty_storage() + return + if isinstance(w_iterable, W_BaseSetObject): - return w_iterable.setdata.copy() - data = newset(space) - if w_iterable is not None: - for w_item in space.listview(w_iterable): - data[w_item] = None - return data + w_set.strategy = w_iterable.strategy + w_set.sstorage = w_iterable.get_storage_copy() + return + + stringlist = space.listview_str(w_iterable) + if stringlist is not None: + strategy = space.fromcache(StringSetStrategy) + w_set.strategy = strategy + w_set.sstorage = strategy.get_storage_from_unwrapped_list(stringlist) + return + + intlist = space.listview_int(w_iterable) + if intlist is not None: + 
strategy = space.fromcache(IntegerSetStrategy) + w_set.strategy = strategy + w_set.sstorage = strategy.get_storage_from_unwrapped_list(intlist) + return + + iterable_w = space.listview(w_iterable) + + if len(iterable_w) == 0: + w_set.strategy = strategy = space.fromcache(EmptySetStrategy) + w_set.sstorage = strategy.get_empty_storage() + return + + _pick_correct_strategy(space, w_set, iterable_w) + +def _pick_correct_strategy(space, w_set, iterable_w): + # check for integers + for w_item in iterable_w: + if type(w_item) is not W_IntObject: + break + else: + w_set.strategy = space.fromcache(IntegerSetStrategy) + w_set.sstorage = w_set.strategy.get_storage_from_list(iterable_w) + return + + # check for strings + for w_item in iterable_w: + if type(w_item) is not W_StringObject: + break + else: + w_set.strategy = space.fromcache(StringSetStrategy) + w_set.sstorage = w_set.strategy.get_storage_from_list(iterable_w) + return + + w_set.strategy = space.fromcache(ObjectSetStrategy) + w_set.sstorage = w_set.strategy.get_storage_from_list(iterable_w) def _initialize_set(space, w_obj, w_iterable=None): - w_obj.setdata.clear() - if w_iterable is not None: - w_obj.setdata = make_setdata_from_w_iterable(space, w_iterable) + w_obj.clear() + set_strategy_and_setdata(space, w_obj, w_iterable) def _convert_set_to_frozenset(space, w_obj): - if space.isinstance_w(w_obj, space.w_set): - return W_FrozensetObject(space, - make_setdata_from_w_iterable(space, w_obj)) + if isinstance(w_obj, W_SetObject): + w_frozen = W_FrozensetObject(space, None) + w_frozen.strategy = w_obj.strategy + w_frozen.sstorage = w_obj.sstorage + return w_frozen + elif space.isinstance_w(w_obj, space.w_set): + w_frz = space.allocate_instance(W_FrozensetObject, space.w_frozenset) + W_FrozensetObject.__init__(w_frz, space, w_obj) + return w_frz else: return None -# helper functions for set operation on dicts - -def _is_eq(ld, rd): - if len(ld) != len(rd): - return False - for w_key in ld: - if w_key not in rd: - 
return False - return True - -def _difference_dict(space, ld, rd): - result = newset(space) - for w_key in ld: - if w_key not in rd: - result[w_key] = None - return result - -def _difference_dict_update(space, ld, rd): - if ld is rd: - ld.clear() # for the case 'a.difference_update(a)' - else: - for w_key in rd: - try: - del ld[w_key] - except KeyError: - pass - -def _intersection_dict(space, ld, rd): - result = newset(space) - if len(ld) > len(rd): - ld, rd = rd, ld # loop over the smaller dict - for w_key in ld: - if w_key in rd: - result[w_key] = None - return result - -def _isdisjoint_dict(ld, rd): - if len(ld) > len(rd): - ld, rd = rd, ld # loop over the smaller dict - for w_key in ld: - if w_key in rd: - return False - return True - -def _symmetric_difference_dict(space, ld, rd): - result = newset(space) - for w_key in ld: - if w_key not in rd: - result[w_key] = None - for w_key in rd: - if w_key not in ld: - result[w_key] = None - return result - -def _issubset_dict(ldict, rdict): - if len(ldict) > len(rdict): - return False - - for w_key in ldict: - if w_key not in rdict: - return False - return True - - -#end helper functions - def set_update__Set(space, w_left, others_w): """Update a set with the union of itself and another.""" - ld = w_left.setdata for w_other in others_w: if isinstance(w_other, W_BaseSetObject): - ld.update(w_other.setdata) # optimization only + w_left.update(w_other) # optimization only else: for w_key in space.listview(w_other): - ld[w_key] = None + w_left.add(w_key) def inplace_or__Set_Set(space, w_left, w_other): - ld, rd = w_left.setdata, w_other.setdata - ld.update(rd) + w_left.update(w_other) return w_left inplace_or__Set_Frozenset = inplace_or__Set_Set @@ -226,10 +987,10 @@ This has no effect if the element is already present. 
""" - w_left.setdata[w_other] = None + w_left.add(w_other) def set_copy__Set(space, w_set): - return w_set._newobj(space, w_set.setdata.copy()) + return w_set.copy_real() def frozenset_copy__Frozenset(space, w_left): if type(w_left) is W_FrozensetObject: @@ -238,63 +999,51 @@ return set_copy__Set(space, w_left) def set_clear__Set(space, w_left): - w_left.setdata.clear() + w_left.clear() def sub__Set_Set(space, w_left, w_other): - ld, rd = w_left.setdata, w_other.setdata - new_ld = _difference_dict(space, ld, rd) - return w_left._newobj(space, new_ld) + return w_left.difference(w_other) sub__Set_Frozenset = sub__Set_Set sub__Frozenset_Set = sub__Set_Set sub__Frozenset_Frozenset = sub__Set_Set def set_difference__Set(space, w_left, others_w): - result = w_left.setdata - if len(others_w) == 0: - result = result.copy() - for w_other in others_w: - if isinstance(w_other, W_BaseSetObject): - rd = w_other.setdata # optimization only - else: - rd = make_setdata_from_w_iterable(space, w_other) - result = _difference_dict(space, result, rd) - return w_left._newobj(space, result) + result = w_left.copy_real() + set_difference_update__Set(space, result, others_w) + return result frozenset_difference__Frozenset = set_difference__Set def set_difference_update__Set(space, w_left, others_w): - ld = w_left.setdata for w_other in others_w: if isinstance(w_other, W_BaseSetObject): # optimization only - _difference_dict_update(space, ld, w_other.setdata) + w_left.difference_update(w_other) else: - for w_key in space.listview(w_other): - try: - del ld[w_key] - except KeyError: - pass + w_other_as_set = w_left._newobj(space, w_other) + w_left.difference_update(w_other_as_set) def inplace_sub__Set_Set(space, w_left, w_other): - ld, rd = w_left.setdata, w_other.setdata - _difference_dict_update(space, ld, rd) + w_left.difference_update(w_other) return w_left inplace_sub__Set_Frozenset = inplace_sub__Set_Set def eq__Set_Set(space, w_left, w_other): # optimization only (the general case is 
eq__Set_settypedef) - return space.wrap(_is_eq(w_left.setdata, w_other.setdata)) + return space.wrap(w_left.equals(w_other)) eq__Set_Frozenset = eq__Set_Set eq__Frozenset_Frozenset = eq__Set_Set eq__Frozenset_Set = eq__Set_Set def eq__Set_settypedef(space, w_left, w_other): - rd = make_setdata_from_w_iterable(space, w_other) - return space.wrap(_is_eq(w_left.setdata, rd)) + # tested in test_buildinshortcut.py + #XXX do not make new setobject here + w_other_as_set = w_left._newobj(space, w_other) + return space.wrap(w_left.equals(w_other_as_set)) eq__Set_frozensettypedef = eq__Set_settypedef eq__Frozenset_settypedef = eq__Set_settypedef @@ -308,15 +1057,16 @@ eq__Frozenset_ANY = eq__Set_ANY def ne__Set_Set(space, w_left, w_other): - return space.wrap(not _is_eq(w_left.setdata, w_other.setdata)) + return space.wrap(not w_left.equals(w_other)) ne__Set_Frozenset = ne__Set_Set ne__Frozenset_Frozenset = ne__Set_Set ne__Frozenset_Set = ne__Set_Set def ne__Set_settypedef(space, w_left, w_other): - rd = make_setdata_from_w_iterable(space, w_other) - return space.wrap(not _is_eq(w_left.setdata, rd)) + #XXX this is not tested + w_other_as_set = w_left._newobj(space, w_other) + return space.wrap(not w_left.equals(w_other_as_set)) ne__Set_frozensettypedef = ne__Set_settypedef ne__Frozenset_settypedef = ne__Set_settypedef @@ -331,12 +1081,12 @@ def contains__Set_ANY(space, w_left, w_other): try: - return space.newbool(w_other in w_left.setdata) + return space.newbool(w_left.has_key(w_other)) except OperationError, e: if e.match(space, space.w_TypeError): w_f = _convert_set_to_frozenset(space, w_other) if w_f is not None: - return space.newbool(w_f in w_left.setdata) + return space.newbool(w_left.has_key(w_f)) raise contains__Frozenset_ANY = contains__Set_ANY @@ -345,19 +1095,23 @@ # optimization only (the general case works too) if space.is_w(w_left, w_other): return space.w_True - ld, rd = w_left.setdata, w_other.setdata - return space.wrap(_issubset_dict(ld, rd)) + if 
w_left.length() > w_other.length(): + return space.w_False + return space.wrap(w_left.issubset(w_other)) set_issubset__Set_Frozenset = set_issubset__Set_Set frozenset_issubset__Frozenset_Set = set_issubset__Set_Set frozenset_issubset__Frozenset_Frozenset = set_issubset__Set_Set def set_issubset__Set_ANY(space, w_left, w_other): - if space.is_w(w_left, w_other): - return space.w_True + # not checking whether w_left is w_other here, because if that were the + # case the more precise multimethod would have applied. - ld, rd = w_left.setdata, make_setdata_from_w_iterable(space, w_other) - return space.wrap(_issubset_dict(ld, rd)) + w_other_as_set = w_left._newobj(space, w_other) + + if w_left.length() > w_other_as_set.length(): + return space.w_False + return space.wrap(w_left.issubset(w_other_as_set)) frozenset_issubset__Frozenset_ANY = set_issubset__Set_ANY @@ -370,9 +1124,9 @@ # optimization only (the general case works too) if space.is_w(w_left, w_other): return space.w_True - - ld, rd = w_left.setdata, w_other.setdata - return space.wrap(_issubset_dict(rd, ld)) + if w_left.length() < w_other.length(): + return space.w_False + return space.wrap(w_other.issubset(w_left)) set_issuperset__Set_Frozenset = set_issuperset__Set_Set set_issuperset__Frozenset_Set = set_issuperset__Set_Set @@ -382,8 +1136,11 @@ if space.is_w(w_left, w_other): return space.w_True - ld, rd = w_left.setdata, make_setdata_from_w_iterable(space, w_other) - return space.wrap(_issubset_dict(rd, ld)) + w_other_as_set = w_left._newobj(space, w_other) + + if w_left.length() < w_other_as_set.length(): + return space.w_False + return space.wrap(w_other_as_set.issubset(w_left)) frozenset_issuperset__Frozenset_ANY = set_issuperset__Set_ANY @@ -395,7 +1152,7 @@ # automatic registration of "lt(x, y)" as "not ge(y, x)" would not give the # correct answer here! 
def lt__Set_Set(space, w_left, w_other): - if len(w_left.setdata) >= len(w_other.setdata): + if w_left.length() >= w_other.length(): return space.w_False else: return le__Set_Set(space, w_left, w_other) @@ -405,7 +1162,7 @@ lt__Frozenset_Frozenset = lt__Set_Set def gt__Set_Set(space, w_left, w_other): - if len(w_left.setdata) <= len(w_other.setdata): + if w_left.length() <= w_other.length(): return space.w_False else: return ge__Set_Set(space, w_left, w_other) @@ -421,26 +1178,19 @@ Returns True if successfully removed. """ try: - del w_left.setdata[w_item] - return True - except KeyError: - return False + deleted = w_left.remove(w_item) except OperationError, e: if not e.match(space, space.w_TypeError): raise - w_f = _convert_set_to_frozenset(space, w_item) - if w_f is None: - raise + else: + w_f = _convert_set_to_frozenset(space, w_item) + if w_f is None: + raise + deleted = w_left.remove(w_f) - try: - del w_left.setdata[w_f] - return True - except KeyError: - return False - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - return False + if w_left.length() == 0: + w_left.switch_to_empty_strategy() + return deleted def set_discard__Set_ANY(space, w_left, w_item): _discard_from_set(space, w_left, w_item) @@ -454,8 +1204,12 @@ if w_set.hash != 0: return space.wrap(w_set.hash) hash = r_uint(1927868237) - hash *= r_uint(len(w_set.setdata) + 1) - for w_item in w_set.setdata: + hash *= r_uint(w_set.length() + 1) + w_iterator = w_set.iter() + while True: + w_item = w_iterator.next_entry() + if w_item is None: + break h = space.hash_w(w_item) value = (r_uint(h ^ (h << 16) ^ 89869747) * multi) hash = hash ^ value @@ -468,71 +1222,75 @@ return space.wrap(hash) def set_pop__Set(space, w_left): - try: - w_key, _ = w_left.setdata.popitem() - except KeyError: - raise OperationError(space.w_KeyError, - space.wrap('pop from an empty set')) - return w_key + return w_left.popitem() def and__Set_Set(space, w_left, w_other): - ld, rd = w_left.setdata, 
w_other.setdata - new_ld = _intersection_dict(space, ld, rd) - return w_left._newobj(space, new_ld) + new_set = w_left.intersect(w_other) + return new_set and__Set_Frozenset = and__Set_Set and__Frozenset_Set = and__Set_Set and__Frozenset_Frozenset = and__Set_Set -def _intersection_multiple(space, w_left, others_w): - result = w_left.setdata - for w_other in others_w: +def set_intersection__Set(space, w_left, others_w): + #XXX find smarter implementations + others_w = [w_left] + others_w + + # find smallest set in others_w to reduce comparisons + startindex, startlength = 0, -1 + for i in range(len(others_w)): + w_other = others_w[i] + try: + length = space.int_w(space.len(w_other)) + except OperationError, e: + if (e.match(space, space.w_TypeError) or + e.match(space, space.w_AttributeError)): + continue + raise + + if startlength == -1 or length < startlength: + startindex = i + startlength = length + + others_w[startindex], others_w[0] = others_w[0], others_w[startindex] + + result = w_left._newobj(space, others_w[0]) + for i in range(1,len(others_w)): + w_other = others_w[i] if isinstance(w_other, W_BaseSetObject): # optimization only - result = _intersection_dict(space, result, w_other.setdata) + result.intersect_update(w_other) else: - result2 = newset(space) - for w_key in space.listview(w_other): - if w_key in result: - result2[w_key] = None - result = result2 + w_other_as_set = w_left._newobj(space, w_other) + result.intersect_update(w_other_as_set) return result -def set_intersection__Set(space, w_left, others_w): - if len(others_w) == 0: - result = w_left.setdata.copy() - else: - result = _intersection_multiple(space, w_left, others_w) - return w_left._newobj(space, result) - frozenset_intersection__Frozenset = set_intersection__Set def set_intersection_update__Set(space, w_left, others_w): - result = _intersection_multiple(space, w_left, others_w) - w_left.setdata = result + result = set_intersection__Set(space, w_left, others_w) + w_left.strategy = 
result.strategy + w_left.sstorage = result.sstorage + return def inplace_and__Set_Set(space, w_left, w_other): - ld, rd = w_left.setdata, w_other.setdata - new_ld = _intersection_dict(space, ld, rd) - w_left.setdata = new_ld + w_left.intersect_update(w_other) return w_left inplace_and__Set_Frozenset = inplace_and__Set_Set def set_isdisjoint__Set_Set(space, w_left, w_other): # optimization only (the general case works too) - ld, rd = w_left.setdata, w_other.setdata - disjoint = _isdisjoint_dict(ld, rd) - return space.newbool(disjoint) + return space.newbool(w_left.isdisjoint(w_other)) set_isdisjoint__Set_Frozenset = set_isdisjoint__Set_Set set_isdisjoint__Frozenset_Frozenset = set_isdisjoint__Set_Set set_isdisjoint__Frozenset_Set = set_isdisjoint__Set_Set def set_isdisjoint__Set_ANY(space, w_left, w_other): - ld = w_left.setdata + #XXX may be optimized when other strategies are added for w_key in space.listview(w_other): - if w_key in ld: + if w_left.has_key(w_key): return space.w_False return space.w_True @@ -540,9 +1298,8 @@ def set_symmetric_difference__Set_Set(space, w_left, w_other): # optimization only (the general case works too) - ld, rd = w_left.setdata, w_other.setdata - new_ld = _symmetric_difference_dict(space, ld, rd) - return w_left._newobj(space, new_ld) + w_result = w_left.symmetric_difference(w_other) + return w_result set_symmetric_difference__Set_Frozenset = set_symmetric_difference__Set_Set set_symmetric_difference__Frozenset_Set = set_symmetric_difference__Set_Set @@ -556,26 +1313,23 @@ def set_symmetric_difference__Set_ANY(space, w_left, w_other): - ld, rd = w_left.setdata, make_setdata_from_w_iterable(space, w_other) - new_ld = _symmetric_difference_dict(space, ld, rd) - return w_left._newobj(space, new_ld) + w_other_as_set = w_left._newobj(space, w_other) + w_result = w_left.symmetric_difference(w_other_as_set) + return w_result frozenset_symmetric_difference__Frozenset_ANY = \ set_symmetric_difference__Set_ANY def 
set_symmetric_difference_update__Set_Set(space, w_left, w_other): # optimization only (the general case works too) - ld, rd = w_left.setdata, w_other.setdata - new_ld = _symmetric_difference_dict(space, ld, rd) - w_left.setdata = new_ld + w_left.symmetric_difference_update(w_other) set_symmetric_difference_update__Set_Frozenset = \ set_symmetric_difference_update__Set_Set def set_symmetric_difference_update__Set_ANY(space, w_left, w_other): - ld, rd = w_left.setdata, make_setdata_from_w_iterable(space, w_other) - new_ld = _symmetric_difference_dict(space, ld, rd) - w_left.setdata = new_ld + w_other_as_set = w_left._newobj(space, w_other) + w_left.symmetric_difference_update(w_other_as_set) def inplace_xor__Set_Set(space, w_left, w_other): set_symmetric_difference_update__Set_Set(space, w_left, w_other) @@ -584,34 +1338,33 @@ inplace_xor__Set_Frozenset = inplace_xor__Set_Set def or__Set_Set(space, w_left, w_other): - ld, rd = w_left.setdata, w_other.setdata - result = ld.copy() - result.update(rd) - return w_left._newobj(space, result) + w_copy = w_left.copy_real() + w_copy.update(w_other) + return w_copy or__Set_Frozenset = or__Set_Set or__Frozenset_Set = or__Set_Set or__Frozenset_Frozenset = or__Set_Set def set_union__Set(space, w_left, others_w): - result = w_left.setdata.copy() + result = w_left.copy_real() for w_other in others_w: if isinstance(w_other, W_BaseSetObject): - result.update(w_other.setdata) # optimization only + result.update(w_other) # optimization only else: for w_key in space.listview(w_other): - result[w_key] = None - return w_left._newobj(space, result) + result.add(w_key) + return result frozenset_union__Frozenset = set_union__Set def len__Set(space, w_left): - return space.newint(len(w_left.setdata)) + return space.newint(w_left.length()) len__Frozenset = len__Set def iter__Set(space, w_left): - return W_SetIterObject(w_left.setdata) + return W_SetIterObject(space, w_left.iter()) iter__Frozenset = iter__Set diff --git 
a/pypy/objspace/std/settype.py b/pypy/objspace/std/settype.py --- a/pypy/objspace/std/settype.py +++ b/pypy/objspace/std/settype.py @@ -68,7 +68,7 @@ def descr__new__(space, w_settype, __args__): from pypy.objspace.std.setobject import W_SetObject, newset w_obj = space.allocate_instance(W_SetObject, w_settype) - W_SetObject.__init__(w_obj, space, newset(space)) + W_SetObject.__init__(w_obj, space) return w_obj set_typedef = StdTypeDef("set", diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -37,6 +37,20 @@ return None return space.wrap(compute_unique_id(space.str_w(self))) + def unicode_w(w_self, space): + # Use the default encoding. + from pypy.objspace.std.unicodetype import unicode_from_string, \ + decode_object + w_defaultencoding = space.call_function(space.sys.get( + 'getdefaultencoding')) + from pypy.objspace.std.unicodetype import _get_encoding_and_errors, \ + unicode_from_string, decode_object + encoding, errors = _get_encoding_and_errors(space, w_defaultencoding, + space.w_None) + if encoding is None and errors is None: + return space.unicode_w(unicode_from_string(space, w_self)) + return space.unicode_w(decode_object(space, w_self, encoding, errors)) + class W_StringObject(W_AbstractStringObject): from pypy.objspace.std.stringtype import str_typedef as typedef @@ -55,19 +69,13 @@ def str_w(w_self, space): return w_self._value - def unicode_w(w_self, space): - # Use the default encoding. 
- from pypy.objspace.std.unicodetype import unicode_from_string, \ - decode_object - w_defaultencoding = space.call_function(space.sys.get( - 'getdefaultencoding')) - from pypy.objspace.std.unicodetype import _get_encoding_and_errors, \ - unicode_from_string, decode_object - encoding, errors = _get_encoding_and_errors(space, w_defaultencoding, - space.w_None) - if encoding is None and errors is None: - return space.unicode_w(unicode_from_string(space, w_self)) - return space.unicode_w(decode_object(space, w_self, encoding, errors)) + def listview_str(w_self): + return _create_list_from_string(w_self._value) + +def _create_list_from_string(value): + # need this helper function to allow the jit to look inside and inline + # listview_str + return [s for s in value] registerimplementation(W_StringObject) diff --git a/pypy/objspace/std/test/test_builtinshortcut.py b/pypy/objspace/std/test/test_builtinshortcut.py --- a/pypy/objspace/std/test/test_builtinshortcut.py +++ b/pypy/objspace/std/test/test_builtinshortcut.py @@ -85,6 +85,20 @@ def setup_class(cls): from pypy import conftest cls.space = conftest.gettestobjspace(**WITH_BUILTINSHORTCUT) + w_fakeint = cls.space.appexec([], """(): + class FakeInt(object): + def __init__(self, value): + self.value = value + def __hash__(self): + return hash(self.value) + + def __eq__(self, other): + if other == self.value: + return True + return False + return FakeInt + """) + cls.w_FakeInt = w_fakeint class AppTestString(test_stringobject.AppTestStringObject): def setup_class(cls): diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -131,6 +131,45 @@ assert self.space.eq_w(space.call_function(get, w("33")), w(None)) assert self.space.eq_w(space.call_function(get, w("33"), w(44)), w(44)) + def test_fromkeys_fastpath(self): + space = self.space + w = space.wrap + + w_l = 
self.space.newlist([w("a"),w("b")]) + w_l.getitems = None + w_d = space.call_method(space.w_dict, "fromkeys", w_l) + + assert space.eq_w(w_d.getitem_str("a"), space.w_None) + assert space.eq_w(w_d.getitem_str("b"), space.w_None) + + def test_listview_str_dict(self): + w = self.space.wrap + + w_d = self.space.newdict() + w_d.initialize_content([(w("a"), w(1)), (w("b"), w(2))]) + + assert self.space.listview_str(w_d) == ["a", "b"] + + def test_listview_int_dict(self): + w = self.space.wrap + w_d = self.space.newdict() + w_d.initialize_content([(w(1), w("a")), (w(2), w("b"))]) + + assert self.space.listview_int(w_d) == [1, 2] + + def test_keys_on_string_int_dict(self): + w = self.space.wrap + w_d = self.space.newdict() + w_d.initialize_content([(w(1), w("a")), (w(2), w("b"))]) + + w_l = self.space.call_method(w_d, "keys") + assert sorted(self.space.listview_int(w_l)) == [1,2] + + w_d = self.space.newdict() + w_d.initialize_content([(w("a"), w(1)), (w("b"), w(6))]) + + w_l = self.space.call_method(w_d, "keys") + assert sorted(self.space.listview_str(w_l)) == ["a", "b"] class AppTest_DictObject: def setup_class(cls): @@ -793,7 +832,9 @@ return x == y eq_w = eq def newlist(self, l): - return [] + return l + def newlist_str(self, l): + return l DictObjectCls = W_DictMultiObject def type(self, w_obj): if isinstance(w_obj, FakeString): @@ -933,7 +974,7 @@ def test_keys(self): self.fill_impl() - keys = self.impl.keys() + keys = self.impl.w_keys() # wrapped lists = lists in the fake space keys.sort() assert keys == [self.string, self.string2] self.check_not_devolved() @@ -1011,8 +1052,8 @@ d.setitem("s", 12) d.delitem(F()) - assert "s" not in d.keys() - assert F() not in d.keys() + assert "s" not in d.w_keys() + assert F() not in d.w_keys() class TestStrDictImplementation(BaseTestRDictImplementation): StrategyClass = StringDictStrategy diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- 
a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -486,6 +486,14 @@ list.__init__(l, ['a', 'b', 'c']) assert l is l0 assert l == ['a', 'b', 'c'] + list.__init__(l) + assert l == [] + + def test_explicit_new_init_more_cases(self): + for assignment in [[], (), [3], ["foo"]]: + l = [1, 2] + l.__init__(assignment) + assert l == list(assignment) def test_extend_list(self): l = l0 = [1] @@ -1173,6 +1181,20 @@ assert l == [] assert list(g) == [] + def test_uses_custom_iterator(self): + # obscure corner case: space.listview*() must not shortcut subclasses + # of dicts, because the OrderedDict in the stdlib relies on this. + # we extend the use case to lists and sets, i.e. all types that have + # strategies, to avoid surprizes depending on the strategy. + for base, arg in [(list, []), (list, [5]), (list, ['x']), + (set, []), (set, [5]), (set, ['x']), + (dict, []), (dict, [(5,6)]), (dict, [('x',7)])]: + print base, arg + class SubClass(base): + def __iter__(self): + return iter("foobar") + assert list(SubClass(arg)) == ['f', 'o', 'o', 'b', 'a', 'r'] + class AppTestForRangeLists(AppTestW_ListObject): def setup_class(cls): diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -420,7 +420,7 @@ def test_listview_str(self): space = self.space - assert space.listview_str(space.wrap("a")) is None + assert space.listview_str(space.wrap(1)) == None w_l = self.space.newlist([self.space.wrap('a'), self.space.wrap('b')]) assert space.listview_str(w_l) == ["a", "b"] @@ -463,6 +463,44 @@ w_res = listobject.list_pop__List_ANY(space, w_l, space.w_None) # does not crash assert space.unwrap(w_res) == 3 + def test_create_list_from_set(self): + from pypy.objspace.std.setobject import W_SetObject + from pypy.objspace.std.setobject import _initialize_set + + space = self.space + w = space.wrap + 
+ w_l = W_ListObject(space, [space.wrap(1), space.wrap(2), space.wrap(3)]) + + w_set = W_SetObject(self.space) + _initialize_set(self.space, w_set, w_l) + w_set.iter = None # make sure fast path is used + + w_l2 = W_ListObject(space, []) + space.call_method(w_l2, "__init__", w_set) + + w_l2.sort(False) + assert space.eq_w(w_l, w_l2) + + w_l = W_ListObject(space, [space.wrap("a"), space.wrap("b"), space.wrap("c")]) + _initialize_set(self.space, w_set, w_l) + + space.call_method(w_l2, "__init__", w_set) + + w_l2.sort(False) + assert space.eq_w(w_l, w_l2) + + + def test_listview_str_list(self): + space = self.space + w_l = W_ListObject(space, [space.wrap("a"), space.wrap("b")]) + assert self.space.listview_str(w_l) == ["a", "b"] + + def test_listview_int_list(self): + space = self.space + w_l = W_ListObject(space, [space.wrap(1), space.wrap(2), space.wrap(3)]) + assert self.space.listview_int(w_l) == [1, 2, 3] + class TestW_ListStrategiesDisabled: def setup_class(cls): diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py --- a/pypy/objspace/std/test/test_obj.py +++ b/pypy/objspace/std/test/test_obj.py @@ -265,4 +265,7 @@ space = objspace.StdObjSpace() w_a = space.wrap("a") space.type = None + # if it crashes, it means that space._type_isinstance didn't go through + # the fast path, and tries to call type() (which is set to None just + # above) space.isinstance_w(w_a, space.w_str) # does not crash diff --git a/pypy/objspace/std/test/test_setobject.py b/pypy/objspace/std/test/test_setobject.py --- a/pypy/objspace/std/test/test_setobject.py +++ b/pypy/objspace/std/test/test_setobject.py @@ -8,12 +8,14 @@ is not too wrong. 
""" import py.test -from pypy.objspace.std.setobject import W_SetObject, W_FrozensetObject +from pypy.objspace.std.setobject import W_SetObject, W_FrozensetObject, IntegerSetStrategy from pypy.objspace.std.setobject import _initialize_set -from pypy.objspace.std.setobject import newset, make_setdata_from_w_iterable +from pypy.objspace.std.setobject import newset from pypy.objspace.std.setobject import and__Set_Set from pypy.objspace.std.setobject import set_intersection__Set from pypy.objspace.std.setobject import eq__Set_Set +from pypy.conftest import gettestobjspace +from pypy.objspace.std.listobject import W_ListObject letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' @@ -29,12 +31,11 @@ self.false = self.space.w_False def test_and(self): - s = W_SetObject(self.space, newset(self.space)) + s = W_SetObject(self.space) _initialize_set(self.space, s, self.word) - t0 = W_SetObject(self.space, newset(self.space)) + t0 = W_SetObject(self.space) _initialize_set(self.space, t0, self.otherword) - t1 = W_FrozensetObject(self.space, - make_setdata_from_w_iterable(self.space, self.otherword)) + t1 = W_FrozensetObject(self.space, self.otherword) r0 = and__Set_Set(self.space, s, t0) r1 = and__Set_Set(self.space, s, t1) assert eq__Set_Set(self.space, r0, r1) == self.true @@ -42,9 +43,9 @@ assert eq__Set_Set(self.space, r0, sr) == self.true def test_compare(self): - s = W_SetObject(self.space, newset(self.space)) + s = W_SetObject(self.space) _initialize_set(self.space, s, self.word) - t = W_SetObject(self.space, newset(self.space)) + t = W_SetObject(self.space) _initialize_set(self.space, t, self.word) assert self.space.eq_w(s,t) u = self.space.wrap(set('simsalabim')) @@ -54,7 +55,247 @@ s = self.space.newset() assert self.space.str_w(self.space.repr(s)) == 'set([])' + def test_intersection_order(self): + # theses tests make sure that intersection is done in the correct order + # (smallest first) + space = self.space + a = W_SetObject(self.space) + 
_initialize_set(self.space, a, self.space.wrap("abcdefg")) + a.intersect = None + + b = W_SetObject(self.space) + _initialize_set(self.space, b, self.space.wrap("abc")) + + result = set_intersection__Set(space, a, [b]) + assert space.is_true(self.space.eq(result, W_SetObject(space, self.space.wrap("abc")))) + + c = W_SetObject(self.space) + _initialize_set(self.space, c, self.space.wrap("e")) + + d = W_SetObject(self.space) + _initialize_set(self.space, d, self.space.wrap("ab")) + + # if ordering works correct we should start with set e + a.get_storage_copy = None + b.get_storage_copy = None + d.get_storage_copy = None + + result = set_intersection__Set(space, a, [d,c,b]) + assert space.is_true(self.space.eq(result, W_SetObject(space, self.space.wrap("")))) + + def test_create_set_from_list(self): + from pypy.objspace.std.setobject import ObjectSetStrategy, StringSetStrategy + from pypy.objspace.std.floatobject import W_FloatObject + from pypy.objspace.std.model import W_Object + + w = self.space.wrap + intstr = self.space.fromcache(IntegerSetStrategy) + tmp_func = intstr.get_storage_from_list + # test if get_storage_from_list is no longer used + intstr.get_storage_from_list = None + + w_list = W_ListObject(self.space, [w(1), w(2), w(3)]) + w_set = W_SetObject(self.space) + _initialize_set(self.space, w_set, w_list) + assert w_set.strategy is intstr + assert intstr.unerase(w_set.sstorage) == {1:None, 2:None, 3:None} + + w_list = W_ListObject(self.space, [w("1"), w("2"), w("3")]) + w_set = W_SetObject(self.space) + _initialize_set(self.space, w_set, w_list) + assert w_set.strategy is self.space.fromcache(StringSetStrategy) + assert w_set.strategy.unerase(w_set.sstorage) == {"1":None, "2":None, "3":None} + + w_list = W_ListObject(self.space, [w("1"), w(2), w("3")]) + w_set = W_SetObject(self.space) + _initialize_set(self.space, w_set, w_list) + assert w_set.strategy is self.space.fromcache(ObjectSetStrategy) + for item in w_set.strategy.unerase(w_set.sstorage): + 
assert isinstance(item, W_Object) + + w_list = W_ListObject(self.space, [w(1.0), w(2.0), w(3.0)]) + w_set = W_SetObject(self.space) + _initialize_set(self.space, w_set, w_list) + assert w_set.strategy is self.space.fromcache(ObjectSetStrategy) + for item in w_set.strategy.unerase(w_set.sstorage): + assert isinstance(item, W_FloatObject) + + # changed cached object, need to change it back for other tests to pass + intstr.get_storage_from_list = tmp_func + + def test_listview_str_int_on_set(self): + w = self.space.wrap + + w_a = W_SetObject(self.space) + _initialize_set(self.space, w_a, w("abcdefg")) + assert sorted(self.space.listview_str(w_a)) == list("abcdefg") + assert self.space.listview_int(w_a) is None + + w_b = W_SetObject(self.space) + _initialize_set(self.space, w_b, self.space.newlist([w(1),w(2),w(3),w(4),w(5)])) + assert sorted(self.space.listview_int(w_b)) == [1,2,3,4,5] + assert self.space.listview_str(w_b) is None + class AppTestAppSetTest: + + def setup_class(self): + self.space = gettestobjspace() + w_fakeint = self.space.appexec([], """(): + class FakeInt(object): + def __init__(self, value): + self.value = value + def __hash__(self): + return hash(self.value) + + def __eq__(self, other): + if other == self.value: + return True + return False + return FakeInt + """) + self.w_FakeInt = w_fakeint + + def test_fakeint(self): + f1 = self.FakeInt(4) + assert f1 == 4 + assert hash(f1) == hash(4) + + def test_simple(self): + a = set([1,2,3]) + b = set() + b.add(4) + c = a.union(b) + assert c == set([1,2,3,4]) + + def test_generator(self): + def foo(): + for i in [1,2,3,4,5]: + yield i + b = set(foo()) + assert b == set([1,2,3,4,5]) + + a = set(x for x in [1,2,3]) + assert a == set([1,2,3]) + + def test_generator2(self): + def foo(): + for i in [1,2,3]: + yield i + class A(set): + pass + a = A([1,2,3,4,5]) + b = a.difference(foo()) + assert b == set([4,5]) + + def test_or(self): + a = set([0,1,2]) + b = a | set([1,2,3]) + assert b == set([0,1,2,3]) + + # 
test inplace or + a |= set([1,2,3]) + assert a == b + + def test_clear(self): + a = set([1,2,3]) + a.clear() + assert a == set() + + def test_sub(self): + a = set([1,2,3,4,5]) + b = set([2,3,4]) + a - b == [1,5] + a.__sub__(b) == [1,5] + + #inplace sub + a = set([1,2,3,4]) + b = set([1,4]) + a -= b + assert a == set([2,3]) + + def test_issubset(self): + a = set([1,2,3,4]) + b = set([2,3]) + assert b.issubset(a) + c = [1,2,3,4] + assert b.issubset(c) + + a = set([1,2,3,4]) + b = set(['1','2']) + assert not b.issubset(a) + + def test_issuperset(self): + a = set([1,2,3,4]) + b = set([2,3]) + assert a.issuperset(b) + c = [2,3] + assert a.issuperset(c) + + c = [1,1,1,1,1] + assert a.issuperset(c) + assert set([1,1,1,1,1]).issubset(a) + + a = set([1,2,3]) + assert a.issuperset(a) + assert not a.issuperset(set([1,2,3,4,5])) + + def test_inplace_and(test): + a = set([1,2,3,4]) + b = set([0,2,3,5,6]) + a &= b + assert a == set([2,3]) + + def test_discard_remove(self): + a = set([1,2,3,4,5]) + a.remove(1) + assert a == set([2,3,4,5]) + a.discard(2) + assert a == set([3,4,5]) + + raises(KeyError, "a.remove(6)") + + def test_pop(self): + b = set() + raises(KeyError, "b.pop()") + + a = set([1,2,3,4,5]) + for i in xrange(5): + a.pop() + assert a == set() + raises(KeyError, "a.pop()") + + def test_symmetric_difference(self): + a = set([1,2,3]) + b = set([3,4,5]) + c = a.symmetric_difference(b) + assert c == set([1,2,4,5]) + + a = set([1,2,3]) + b = [3,4,5] + c = a.symmetric_difference(b) + assert c == set([1,2,4,5]) + + a = set([1,2,3]) + b = set('abc') + c = a.symmetric_difference(b) + assert c == set([1,2,3,'a','b','c']) + + def test_symmetric_difference_update(self): + a = set([1,2,3]) + b = set([3,4,5]) + a.symmetric_difference_update(b) + assert a == set([1,2,4,5]) + + a = set([1,2,3]) + b = [3,4,5] + a.symmetric_difference_update(b) + assert a == set([1,2,4,5]) + + a = set([1,2,3]) + b = set([3,4,5]) + a ^= b + assert a == set([1,2,4,5]) + def test_subtype(self): class 
subset(set):pass a = subset() @@ -131,6 +372,8 @@ assert (set('abc') != set('abcd')) assert (frozenset('abc') != frozenset('abcd')) assert (frozenset('abc') != set('abcd')) + assert set() != set('abc') + assert set('abc') != set('abd') def test_libpython_equality(self): for thetype in [frozenset, set]: @@ -178,6 +421,9 @@ s1 = set('abc') s1.update('d', 'ef', frozenset('g')) assert s1 == set('abcdefg') + s1 = set() + s1.update(set('abcd')) + assert s1 == set('abcd') def test_recursive_repr(self): class A(object): @@ -330,6 +576,7 @@ assert not set([1,2,5]).isdisjoint(frozenset([4,5,6])) assert not set([1,2,5]).isdisjoint([4,5,6]) assert not set([1,2,5]).isdisjoint((4,5,6)) + assert set([1,2,3]).isdisjoint(set([3.5,4.0])) def test_intersection(self): assert set([1,2,3]).intersection(set([2,3,4])) == set([2,3]) @@ -347,6 +594,35 @@ assert s.intersection() == s assert s.intersection() is not s + def test_intersection_swap(self): + s1 = s3 = set([1,2,3,4,5]) + s2 = set([2,3,6,7]) + s1 &= s2 + assert s1 == set([2,3]) + assert s3 == set([2,3]) + + def test_intersection_generator(self): + def foo(): + for i in range(5): + yield i + + s1 = s2 = set([1,2,3,4,5,6]) + assert s1.intersection(foo()) == set([1,2,3,4]) + s1.intersection_update(foo()) + assert s1 == set([1,2,3,4]) + assert s2 == set([1,2,3,4]) + + def test_intersection_string(self): + s = set([1,2,3]) + o = 'abc' + assert s.intersection(o) == set() + + def test_intersection_float(self): + a = set([1,2,3]) + b = set([3.0,4.0,5.0]) + c = a.intersection(b) + assert c == set([3.0]) + def test_difference(self): assert set([1,2,3]).difference(set([2,3,4])) == set([1]) assert set([1,2,3]).difference(frozenset([2,3,4])) == set([1]) @@ -361,6 +637,9 @@ s = set([1,2,3]) assert s.difference() == s assert s.difference() is not s + assert set([1,2,3]).difference(set([2,3,4,'5'])) == set([1]) + assert set([1,2,3,'5']).difference(set([2,3,4])) == set([1,'5']) + assert set().difference(set([1,2,3])) == set() def 
test_intersection_update(self): s = set([1,2,3,4,7]) @@ -381,3 +660,250 @@ assert s == set([2,3]) s.difference_update(s) assert s == set([]) + + def test_empty_empty(self): + assert set() == set([]) + + def test_empty_difference(self): + e = set() + x = set([1,2,3]) + assert e.difference(x) == set() + assert x.difference(e) == x + + e.difference_update(x) + assert e == set() + x.difference_update(e) + assert x == set([1,2,3]) + + assert e.symmetric_difference(x) == x + assert x.symmetric_difference(e) == x + + e.symmetric_difference_update(e) + assert e == e + e.symmetric_difference_update(x) + assert e == x + + x.symmetric_difference_update(set()) + assert x == set([1,2,3]) + + def test_fastpath_with_strategies(self): + a = set([1,2,3]) + b = set(["a","b","c"]) + assert a.difference(b) == a + assert b.difference(a) == b + + a = set([1,2,3]) + b = set(["a","b","c"]) + assert a.intersection(b) == set() + assert b.intersection(a) == set() + + a = set([1,2,3]) + b = set(["a","b","c"]) + assert not a.issubset(b) + assert not b.issubset(a) + + a = set([1,2,3]) + b = set(["a","b","c"]) + assert a.isdisjoint(b) + assert b.isdisjoint(a) + + def test_empty_intersect(self): + e = set() + x = set([1,2,3]) + assert e.intersection(x) == e + assert x.intersection(e) == e + assert e & x == e + assert x & e == e + + e.intersection_update(x) + assert e == set() + e &= x + assert e == set() + x.intersection_update(e) + assert x == set() + + def test_empty_issuper(self): + e = set() + x = set([1,2,3]) + assert e.issuperset(e) == True + assert e.issuperset(x) == False + assert x.issuperset(e) == True + + assert e.issuperset(set()) + assert e.issuperset([]) + + def test_empty_issubset(self): + e = set() + x = set([1,2,3]) + assert e.issubset(e) == True + assert e.issubset(x) == True + assert x.issubset(e) == False + assert e.issubset([]) + + def test_empty_isdisjoint(self): + e = set() + x = set([1,2,3]) + assert e.isdisjoint(e) == True + assert e.isdisjoint(x) == True + assert 
x.isdisjoint(e) == True + + def test_empty_unhashable(self): + s = set() + raises(TypeError, s.difference, [[]]) + raises(TypeError, s.difference_update, [[]]) + raises(TypeError, s.intersection, [[]]) + raises(TypeError, s.intersection_update, [[]]) + raises(TypeError, s.symmetric_difference, [[]]) + raises(TypeError, s.symmetric_difference_update, [[]]) + raises(TypeError, s.update, [[]]) + + def test_super_with_generator(self): + def foo(): + for i in [1,2,3]: + yield i + set([1,2,3,4,5]).issuperset(foo()) + + def test_isdisjoint_with_generator(self): + def foo(): + for i in [1,2,3]: + yield i + set([1,2,3,4,5]).isdisjoint(foo()) + + def test_fakeint_and_equals(self): + s1 = set([1,2,3,4]) + s2 = set([1,2,self.FakeInt(3), 4]) + assert s1 == s2 + + def test_fakeint_and_discard(self): + # test with object strategy + s = set([1, 2, 'three', 'four']) + s.discard(self.FakeInt(2)) + assert s == set([1, 'three', 'four']) + + s.remove(self.FakeInt(1)) + assert s == set(['three', 'four']) + raises(KeyError, s.remove, self.FakeInt(16)) + + # test with int strategy + s = set([1,2,3,4]) + s.discard(self.FakeInt(4)) + assert s == set([1,2,3]) + s.remove(self.FakeInt(3)) + assert s == set([1,2]) + raises(KeyError, s.remove, self.FakeInt(16)) + + def test_fakeobject_and_has_key(self): + s = set([1,2,3,4,5]) + assert 5 in s + assert self.FakeInt(5) in s + + def test_fakeobject_and_pop(self): + s = set([1,2,3,self.FakeInt(4),5]) + assert s.pop() + assert s.pop() + assert s.pop() + assert s.pop() + assert s.pop() + assert s == set([]) + + def test_fakeobject_and_difference(self): + s = set([1,2,'3',4]) + s.difference_update([self.FakeInt(1), self.FakeInt(2)]) + assert s == set(['3',4]) + + s = set([1,2,3,4]) + s.difference_update([self.FakeInt(1), self.FakeInt(2)]) + assert s == set([3,4]) + + def test_frozenset_behavior(self): + s = set([1,2,3,frozenset([4])]) + raises(TypeError, s.difference_update, [1,2,3,set([4])]) + + s = set([1,2,3,frozenset([4])]) + s.discard(set([4])) + 
assert s == set([1,2,3]) + + def test_discard_unhashable(self): + s = set([1,2,3,4]) + raises(TypeError, s.discard, [1]) + + def test_discard_evil_compare(self): + class Evil(object): + def __init__(self, value): + self.value = value + def __hash__(self): + return hash(self.value) + def __eq__(self, other): + if isinstance(other, frozenset): + raise TypeError + if other == self.value: + return True + return False + s = set([1,2, Evil(frozenset([1]))]) + raises(TypeError, s.discard, set([1])) + + def test_create_set_from_set(self): + # no sharing + x = set([1,2,3]) + y = set(x) + a = x.pop() + assert y == set([1,2,3]) + assert len(x) == 2 + assert x.union(set([a])) == y + + def test_never_change_frozenset(self): + a = frozenset([1,2]) + b = a.copy() + assert a is b + + a = frozenset([1,2]) + b = a.union(set([3,4])) + assert b == set([1,2,3,4]) + assert a == set([1,2]) + + a = frozenset() + b = a.union(set([3,4])) + assert b == set([3,4]) + assert a == set() + + a = frozenset([1,2])#multiple + b = a.union(set([3,4]),[5,6]) + assert b == set([1,2,3,4,5,6]) + assert a == set([1,2]) + + a = frozenset([1,2,3]) + b = a.difference(set([3,4,5])) + assert b == set([1,2]) + assert a == set([1,2,3]) + + a = frozenset([1,2,3])#multiple + b = a.difference(set([3]), [2]) + assert b == set([1]) + assert a == set([1,2,3]) + + a = frozenset([1,2,3]) + b = a.symmetric_difference(set([3,4,5])) + assert b == set([1,2,4,5]) + assert a == set([1,2,3]) + + a = frozenset([1,2,3]) + b = a.intersection(set([3,4,5])) + assert b == set([3]) + assert a == set([1,2,3]) + + a = frozenset([1,2,3])#multiple + b = a.intersection(set([2,3,4]), [2]) + assert b == set([2]) + assert a == set([1,2,3]) + + raises(AttributeError, "frozenset().update()") + raises(AttributeError, "frozenset().difference_update()") + raises(AttributeError, "frozenset().symmetric_difference_update()") + raises(AttributeError, "frozenset().intersection_update()") + + def test_intersection_obj(self): + class Obj: + def 
__getitem__(self, i): + return [5, 3, 4][i] + s = set([10,3,2]).intersection(Obj()) + assert list(s) == [3] diff --git a/pypy/objspace/std/test/test_setstrategies.py b/pypy/objspace/std/test/test_setstrategies.py new file mode 100644 --- /dev/null +++ b/pypy/objspace/std/test/test_setstrategies.py @@ -0,0 +1,107 @@ +from pypy.objspace.std.setobject import W_SetObject +from pypy.objspace.std.setobject import IntegerSetStrategy, ObjectSetStrategy, EmptySetStrategy +from pypy.objspace.std.listobject import W_ListObject + +class TestW_SetStrategies: + + def wrapped(self, l): + return W_ListObject(self.space, [self.space.wrap(x) for x in l]) + + def test_from_list(self): + s = W_SetObject(self.space, self.wrapped([1,2,3,4,5])) + assert s.strategy is self.space.fromcache(IntegerSetStrategy) + + s = W_SetObject(self.space, self.wrapped([1,"two",3,"four",5])) + assert s.strategy is self.space.fromcache(ObjectSetStrategy) + + s = W_SetObject(self.space) + assert s.strategy is self.space.fromcache(EmptySetStrategy) + + s = W_SetObject(self.space, self.wrapped([])) + assert s.strategy is self.space.fromcache(EmptySetStrategy) + + def test_switch_to_object(self): + s = W_SetObject(self.space, self.wrapped([1,2,3,4,5])) + s.add(self.space.wrap("six")) + assert s.strategy is self.space.fromcache(ObjectSetStrategy) + + s1 = W_SetObject(self.space, self.wrapped([1,2,3,4,5])) + s2 = W_SetObject(self.space, self.wrapped(["six", "seven"])) + s1.update(s2) + assert s1.strategy is self.space.fromcache(ObjectSetStrategy) + + def test_symmetric_difference(self): + s1 = W_SetObject(self.space, self.wrapped([1,2,3,4,5])) + s2 = W_SetObject(self.space, self.wrapped(["six", "seven"])) + s1.symmetric_difference_update(s2) + assert s1.strategy is self.space.fromcache(ObjectSetStrategy) + + def test_intersection(self): + s1 = W_SetObject(self.space, self.wrapped([1,2,3,4,5])) + s2 = W_SetObject(self.space, self.wrapped([4,5, "six", "seven"])) + s3 = s1.intersect(s2) + skip("for now intersection 
with ObjectStrategy always results in another ObjectStrategy") + assert s3.strategy is self.space.fromcache(IntegerSetStrategy) + + def test_clear(self): + s1 = W_SetObject(self.space, self.wrapped([1,2,3,4,5])) + s1.clear() + assert s1.strategy is self.space.fromcache(EmptySetStrategy) + + def test_remove(self): + from pypy.objspace.std.setobject import set_remove__Set_ANY + s1 = W_SetObject(self.space, self.wrapped([1])) + set_remove__Set_ANY(self.space, s1, self.space.wrap(1)) + assert s1.strategy is self.space.fromcache(EmptySetStrategy) + + def test_union(self): + from pypy.objspace.std.setobject import set_union__Set + s1 = W_SetObject(self.space, self.wrapped([1,2,3,4,5])) + s2 = W_SetObject(self.space, self.wrapped([4,5,6,7])) + s3 = W_SetObject(self.space, self.wrapped([4,'5','6',7])) + s4 = set_union__Set(self.space, s1, [s2]) + s5 = set_union__Set(self.space, s1, [s3]) + assert s4.strategy is self.space.fromcache(IntegerSetStrategy) + assert s5.strategy is self.space.fromcache(ObjectSetStrategy) + + def test_discard(self): + class FakeInt(object): + def __init__(self, value): + self.value = value + def __hash__(self): + return hash(self.value) + def __eq__(self, other): + if other == self.value: + return True + return False + + from pypy.objspace.std.setobject import set_discard__Set_ANY + + s1 = W_SetObject(self.space, self.wrapped([1,2,3,4,5])) + set_discard__Set_ANY(self.space, s1, self.space.wrap("five")) + skip("currently not supported") + assert s1.strategy is self.space.fromcache(IntegerSetStrategy) + + set_discard__Set_ANY(self.space, s1, self.space.wrap(FakeInt(5))) + assert s1.strategy is self.space.fromcache(ObjectSetStrategy) + + def test_has_key(self): + class FakeInt(object): + def __init__(self, value): + self.value = value + def __hash__(self): + return hash(self.value) + def __eq__(self, other): + if other == self.value: + return True + return False + + from pypy.objspace.std.setobject import set_discard__Set_ANY + + s1 = 
W_SetObject(self.space, self.wrapped([1,2,3,4,5])) + assert not s1.has_key(self.space.wrap("five")) + skip("currently not supported") + assert s1.strategy is self.space.fromcache(IntegerSetStrategy) + + assert s1.has_key(self.space.wrap(FakeInt(2))) + assert s1.strategy is self.space.fromcache(ObjectSetStrategy) diff --git a/pypy/objspace/std/test/test_stringobject.py b/pypy/objspace/std/test/test_stringobject.py --- a/pypy/objspace/std/test/test_stringobject.py +++ b/pypy/objspace/std/test/test_stringobject.py @@ -85,6 +85,10 @@ w_slice = space.newslice(w(1), w_None, w(2)) assert self.space.eq_w(space.getitem(w_str, w_slice), w('el')) + def test_listview_str(self): + w_str = self.space.wrap('abcd') + assert self.space.listview_str(w_str) == list("abcd") + class AppTestStringObject: def test_format_wrongchar(self): diff --git a/pypy/objspace/std/test/test_userobject.py b/pypy/objspace/std/test/test_userobject.py --- a/pypy/objspace/std/test/test_userobject.py +++ b/pypy/objspace/std/test/test_userobject.py @@ -244,8 +244,12 @@ skip("disabled") if self.runappdirect: total = 500000 + def rand(): + import random + return random.randrange(0, 5) else: total = 50 + rand = self.rand # class A(object): hash = None @@ -256,7 +260,7 @@ a = A() a.next = tail.next tail.next = a - for j in range(self.rand()): + for j in range(rand()): any = any.next if any.hash is None: any.hash = hash(any) diff --git a/pypy/rlib/_rffi_stacklet.py b/pypy/rlib/_rffi_stacklet.py --- a/pypy/rlib/_rffi_stacklet.py +++ b/pypy/rlib/_rffi_stacklet.py @@ -14,7 +14,7 @@ includes = ['src/stacklet/stacklet.h'], separate_module_sources = ['#include "src/stacklet/stacklet.c"\n'], ) -if sys.platform == 'win32': +if 'masm' in dir(eci.platform): # Microsoft compiler if is_emulated_long: asmsrc = 'switch_x64_msvc.asm' else: diff --git a/pypy/rlib/_rsocket_rffi.py b/pypy/rlib/_rsocket_rffi.py --- a/pypy/rlib/_rsocket_rffi.py +++ b/pypy/rlib/_rsocket_rffi.py @@ -58,12 +58,12 @@ header_lines = [ '#include ', 
'#include ', + '#include ', # winsock2 defines AF_UNIX, but not sockaddr_un '#undef AF_UNIX', ] if _MSVC: header_lines.extend([ - '#include ', # these types do not exist on microsoft compilers 'typedef int ssize_t;', 'typedef unsigned __int16 uint16_t;', @@ -71,6 +71,7 @@ ]) else: # MINGW includes = ('stdint.h',) + """ header_lines.extend([ '''\ #ifndef _WIN32_WINNT @@ -88,6 +89,7 @@ u_long keepaliveinterval; };''' ]) + """ HEADER = '\n'.join(header_lines) COND_HEADER = '' constants = {} diff --git a/pypy/rlib/clibffi.py b/pypy/rlib/clibffi.py --- a/pypy/rlib/clibffi.py +++ b/pypy/rlib/clibffi.py @@ -114,9 +114,10 @@ ) eci = rffi_platform.configure_external_library( - 'libffi', eci, + 'libffi-5', eci, [dict(prefix='libffi-', include_dir='include', library_dir='.libs'), + dict(prefix=r'c:\mingw64', include_dir='include', library_dir='lib'), ]) else: libffidir = py.path.local(pypydir).join('translator', 'c', 'src', 'libffi_msvc') diff --git a/pypy/rlib/longlong2float.py b/pypy/rlib/longlong2float.py --- a/pypy/rlib/longlong2float.py +++ b/pypy/rlib/longlong2float.py @@ -6,6 +6,7 @@ in which it does not work. 
""" +from __future__ import with_statement from pypy.annotation import model as annmodel from pypy.rlib.rarithmetic import r_int64 from pypy.rpython.lltypesystem import lltype, rffi @@ -20,7 +21,7 @@ FLOAT_ARRAY_PTR = lltype.Ptr(lltype.Array(rffi.FLOAT)) # these definitions are used only in tests, when not translated -def longlong2float_emulator(llval): +def longlong2float(llval): with lltype.scoped_alloc(DOUBLE_ARRAY_PTR.TO, 1) as d_array: ll_array = rffi.cast(LONGLONG_ARRAY_PTR, d_array) ll_array[0] = llval @@ -50,12 +51,6 @@ eci = ExternalCompilationInfo(includes=['string.h', 'assert.h'], post_include_bits=[""" -static double pypy__longlong2float(long long x) { - double dd; - assert(sizeof(double) == 8 && sizeof(long long) == 8); - memcpy(&dd, &x, 8); - return dd; -} static float pypy__uint2singlefloat(unsigned int x) { float ff; assert(sizeof(float) == 4 && sizeof(unsigned int) == 4); @@ -70,12 +65,6 @@ } """]) -longlong2float = rffi.llexternal( - "pypy__longlong2float", [rffi.LONGLONG], rffi.DOUBLE, - _callable=longlong2float_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True, sandboxsafe=True, - oo_primitive="pypy__longlong2float") - uint2singlefloat = rffi.llexternal( "pypy__uint2singlefloat", [rffi.UINT], rffi.FLOAT, _callable=uint2singlefloat_emulator, compilation_info=eci, @@ -98,4 +87,17 @@ def specialize_call(self, hop): [v_float] = hop.inputargs(lltype.Float) - return hop.genop("convert_float_bytes_to_longlong", [v_float], resulttype=hop.r_result) + hop.exception_cannot_occur() + return hop.genop("convert_float_bytes_to_longlong", [v_float], resulttype=lltype.SignedLongLong) + +class LongLong2FloatEntry(ExtRegistryEntry): + _about_ = longlong2float + + def compute_result_annotation(self, s_longlong): + assert annmodel.SomeInteger(knowntype=r_int64).contains(s_longlong) + return annmodel.SomeFloat() + + def specialize_call(self, hop): + [v_longlong] = hop.inputargs(lltype.SignedLongLong) + hop.exception_cannot_occur() + return 
hop.genop("convert_longlong_bytes_to_float", [v_longlong], resulttype=lltype.Float) diff --git a/pypy/rlib/parsing/pypackrat.py b/pypy/rlib/parsing/pypackrat.py --- a/pypy/rlib/parsing/pypackrat.py +++ b/pypy/rlib/parsing/pypackrat.py @@ -1,6 +1,8 @@ from pypy.rlib.parsing.tree import Nonterminal, Symbol -from makepackrat import PackratParser, BacktrackException, Status +from pypy.rlib.parsing.makepackrat import PackratParser, BacktrackException, Status + + class Parser(object): def NAME(self): return self._NAME().result diff --git a/pypy/rlib/rmmap.py b/pypy/rlib/rmmap.py --- a/pypy/rlib/rmmap.py +++ b/pypy/rlib/rmmap.py @@ -711,9 +711,9 @@ free = c_munmap_safe elif _MS_WINDOWS: - def mmap(fileno, length, flags=0, tagname="", access=_ACCESS_DEFAULT, offset=0): + def mmap(fileno, length, tagname="", access=_ACCESS_DEFAULT, offset=0): # XXX flags is or-ed into access by now. - + flags = 0 # check size boundaries _check_map_size(length) map_size = length diff --git a/pypy/rlib/rwin32.py b/pypy/rlib/rwin32.py --- a/pypy/rlib/rwin32.py +++ b/pypy/rlib/rwin32.py @@ -141,6 +141,10 @@ cfile = udir.join('dosmaperr.c') cfile.write(r''' #include + #include + #ifdef __GNUC__ + #define _dosmaperr mingw_dosmaperr + #endif int main() { int i; diff --git a/pypy/rlib/rzipfile.py b/pypy/rlib/rzipfile.py --- a/pypy/rlib/rzipfile.py +++ b/pypy/rlib/rzipfile.py @@ -12,8 +12,7 @@ rzlib = None # XXX hack to get crc32 to work -from pypy.tool.lib_pypy import import_from_lib_pypy -crc_32_tab = import_from_lib_pypy('binascii').crc_32_tab +from pypy.module.binascii.interp_crc32 import crc_32_tab rcrc_32_tab = [r_uint(i) for i in crc_32_tab] diff --git a/pypy/rlib/test/autopath.py b/pypy/rlib/test/autopath.py new file mode 100644 --- /dev/null +++ b/pypy/rlib/test/autopath.py @@ -0,0 +1,131 @@ +""" +self cloning, automatic path configuration + +copy this into any subdirectory of pypy from which scripts need +to be run, typically all of the test subdirs. 
+The idea is that any such script simply issues + + import autopath + +and this will make sure that the parent directory containing "pypy" +is in sys.path. + +If you modify the master "autopath.py" version (in pypy/tool/autopath.py) +you can directly run it which will copy itself on all autopath.py files +it finds under the pypy root directory. + +This module always provides these attributes: + + pypydir pypy root directory path + this_dir directory where this autopath.py resides + +""" + +def __dirinfo(part): + """ return (partdir, this_dir) and insert parent of partdir + into sys.path. If the parent directories don't have the part + an EnvironmentError is raised.""" + + import sys, os + try: + head = this_dir = os.path.realpath(os.path.dirname(__file__)) + except NameError: + head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0])) + + error = None + while head: + partdir = head + head, tail = os.path.split(head) + if tail == part: + checkfile = os.path.join(partdir, os.pardir, 'pypy', '__init__.py') + if not os.path.exists(checkfile): + error = "Cannot find %r" % (os.path.normpath(checkfile),) + break + else: + error = "Cannot find the parent directory %r of the path %r" % ( + partdir, this_dir) + if not error: + # check for bogus end-of-line style (e.g. files checked out on + # Windows and moved to Unix) + f = open(__file__.replace('.pyc', '.py'), 'r') + data = f.read() + f.close() + if data.endswith('\r\n') or data.endswith('\r'): + error = ("Bad end-of-line style in the .py files. Typically " + "caused by a zip file or a checkout done on Windows and " + "moved to Unix or vice-versa.") + if error: + raise EnvironmentError("Invalid source tree - bogus checkout! " + + error) + + pypy_root = os.path.join(head, '') + try: + sys.path.remove(head) + except ValueError: + pass + sys.path.insert(0, head) + + munged = {} + for name, mod in sys.modules.items(): + if '.' 
in name: + continue + fn = getattr(mod, '__file__', None) + if not isinstance(fn, str): + continue + newname = os.path.splitext(os.path.basename(fn))[0] + if not newname.startswith(part + '.'): + continue + path = os.path.join(os.path.dirname(os.path.realpath(fn)), '') + if path.startswith(pypy_root) and newname != part: + modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep) + if newname != '__init__': + modpaths.append(newname) + modpath = '.'.join(modpaths) + if modpath not in sys.modules: + munged[modpath] = mod + + for name, mod in munged.iteritems(): + if name not in sys.modules: + sys.modules[name] = mod + if '.' in name: + prename = name[:name.rfind('.')] + postname = name[len(prename)+1:] + if prename not in sys.modules: + __import__(prename) + if not hasattr(sys.modules[prename], postname): + setattr(sys.modules[prename], postname, mod) + + return partdir, this_dir + +def __clone(): + """ clone master version of autopath.py into all subdirs """ + from os.path import join, walk + if not this_dir.endswith(join('pypy','tool')): + raise EnvironmentError("can only clone master version " + "'%s'" % join(pypydir, 'tool',_myname)) + + + def sync_walker(arg, dirname, fnames): + if _myname in fnames: + fn = join(dirname, _myname) + f = open(fn, 'rwb+') + try: + if f.read() == arg: + print "checkok", fn + else: + print "syncing", fn + f = open(fn, 'w') + f.write(arg) + finally: + f.close() + s = open(join(pypydir, 'tool', _myname), 'rb').read() + walk(pypydir, sync_walker, s) + +_myname = 'autopath.py' + +# set guaranteed attributes + +pypydir, this_dir = __dirinfo('pypy') + +if __name__ == '__main__': + __clone() diff --git a/pypy/rlib/test/test_longlong2float.py b/pypy/rlib/test/test_longlong2float.py --- a/pypy/rlib/test/test_longlong2float.py +++ b/pypy/rlib/test/test_longlong2float.py @@ -2,6 +2,7 @@ from pypy.rlib.longlong2float import longlong2float, float2longlong from pypy.rlib.longlong2float import uint2singlefloat, singlefloat2uint from 
pypy.rlib.rarithmetic import r_singlefloat +from pypy.rpython.test.test_llinterp import interpret def fn(f1): @@ -31,6 +32,18 @@ res = fn2(x) assert repr(res) == repr(x) +def test_interpreted(): + def f(f1): + try: + ll = float2longlong(f1) + return longlong2float(ll) + except Exception: + return 500 + + for x in enum_floats(): + res = interpret(f, [x]) + assert repr(res) == repr(x) + # ____________________________________________________________ def fnsingle(f1): diff --git a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py --- a/pypy/rpython/lltypesystem/lloperation.py +++ b/pypy/rpython/lltypesystem/lloperation.py @@ -350,6 +350,7 @@ 'truncate_longlong_to_int':LLOp(canfold=True), 'force_cast': LLOp(sideeffects=False), # only for rffi.cast() 'convert_float_bytes_to_longlong': LLOp(canfold=True), + 'convert_longlong_bytes_to_float': LLOp(canfold=True), # __________ pointer operations __________ diff --git a/pypy/rpython/lltypesystem/opimpl.py b/pypy/rpython/lltypesystem/opimpl.py --- a/pypy/rpython/lltypesystem/opimpl.py +++ b/pypy/rpython/lltypesystem/opimpl.py @@ -427,6 +427,14 @@ ## assert type(x) is int ## return llmemory.cast_int_to_adr(x) +def op_convert_float_bytes_to_longlong(a): + from pypy.rlib.longlong2float import float2longlong + return float2longlong(a) + +def op_convert_longlong_bytes_to_float(a): + from pypy.rlib.longlong2float import longlong2float + return longlong2float(a) + def op_unichar_eq(x, y): assert isinstance(x, unicode) and len(x) == 1 diff --git a/pypy/rpython/rstr.py b/pypy/rpython/rstr.py --- a/pypy/rpython/rstr.py +++ b/pypy/rpython/rstr.py @@ -165,6 +165,7 @@ v_char = hop.inputarg(rstr.char_repr, arg=1) v_left = hop.inputconst(Bool, left) v_right = hop.inputconst(Bool, right) + hop.exception_is_here() return hop.gendirectcall(self.ll.ll_strip, v_str, v_char, v_left, v_right) def rtype_method_lstrip(self, hop): diff --git a/pypy/rpython/test/test_rstr.py b/pypy/rpython/test/test_rstr.py --- 
a/pypy/rpython/test/test_rstr.py +++ b/pypy/rpython/test/test_rstr.py @@ -637,13 +637,16 @@ def _make_split_test(self, split_fn): const = self.const def fn(i): - s = [const(''), const('0.1.2.4.8'), const('.1.2'), const('1.2.'), const('.1.2.4.')][i] - l = getattr(s, split_fn)(const('.')) - sum = 0 - for num in l: - if len(num): - sum += ord(num[0]) - ord(const('0')[0]) - return sum + len(l) * 100 + try: + s = [const(''), const('0.1.2.4.8'), const('.1.2'), const('1.2.'), const('.1.2.4.')][i] + l = getattr(s, split_fn)(const('.')) + sum = 0 + for num in l: + if len(num): + sum += ord(num[0]) - ord(const('0')[0]) + return sum + len(l) * 100 + except MemoryError: + return 42 return fn def test_split(self): diff --git a/pypy/rpython/tool/rffi_platform.py b/pypy/rpython/tool/rffi_platform.py --- a/pypy/rpython/tool/rffi_platform.py +++ b/pypy/rpython/tool/rffi_platform.py @@ -660,8 +660,8 @@ if isinstance(fieldtype, lltype.FixedSizeArray): size, _ = expected_size_and_sign return lltype.FixedSizeArray(fieldtype.OF, size/_sizeof(fieldtype.OF)) - raise TypeError("conflicting field type %r for %r" % (fieldtype, - fieldname)) + raise TypeError("conflict between translating python and compiler field" + " type %r for %r" % (fieldtype, fieldname)) def expose_value_as_rpython(value): if intmask(value) == value: diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -58,9 +58,13 @@ binaries = [(pypy_c, rename_pypy_c)] # if sys.platform == 'win32': + #Don't include a mscvrXX.dll, users should get their own. + #Instructions are provided on the website. 
+ # Can't rename a DLL: it is always called 'libpypy-c.dll' + for extra in ['libpypy-c.dll', - 'libexpat.dll', 'sqlite3.dll', 'msvcr100.dll', + 'libexpat.dll', 'sqlite3.dll', 'libeay32.dll', 'ssleay32.dll']: p = pypy_c.dirpath().join(extra) if not p.check(): diff --git a/pypy/tool/test/test_lib_pypy.py b/pypy/tool/test/test_lib_pypy.py --- a/pypy/tool/test/test_lib_pypy.py +++ b/pypy/tool/test/test_lib_pypy.py @@ -11,7 +11,7 @@ assert lib_pypy.LIB_PYTHON_MODIFIED.check(dir=1) def test_import_from_lib_pypy(): - binascii = lib_pypy.import_from_lib_pypy('binascii') - assert type(binascii) is type(lib_pypy) - assert binascii.__name__ == 'lib_pypy.binascii' - assert hasattr(binascii, 'crc_32_tab') + _functools = lib_pypy.import_from_lib_pypy('_functools') + assert type(_functools) is type(lib_pypy) + assert _functools.__name__ == 'lib_pypy._functools' + assert hasattr(_functools, 'partial') diff --git a/pypy/translator/c/gcc/test/test_asmgcroot.py b/pypy/translator/c/gcc/test/test_asmgcroot.py --- a/pypy/translator/c/gcc/test/test_asmgcroot.py +++ b/pypy/translator/c/gcc/test/test_asmgcroot.py @@ -6,10 +6,18 @@ from pypy.annotation.listdef import s_list_of_strings from pypy import conftest from pypy.translator.tool.cbuild import ExternalCompilationInfo +from pypy.translator.platform import platform as compiler +from pypy.rlib.rarithmetic import is_emulated_long from pypy.rpython.lltypesystem import lltype, rffi from pypy.rlib.entrypoint import entrypoint, secondary_entrypoints from pypy.rpython.lltypesystem.lloperation import llop +_MSVC = compiler.name == "msvc" +_MINGW = compiler.name == "mingw32" +_WIN32 = _MSVC or _MINGW +_WIN64 = _WIN32 and is_emulated_long +# XXX get rid of 'is_emulated_long' and have a real config here. 
+ class AbstractTestAsmGCRoot: # the asmgcroot gc transformer doesn't generate gc_reload_possibly_moved # instructions: @@ -17,6 +25,8 @@ @classmethod def make_config(cls): + if _MSVC and _WIN64: + py.test.skip("all asmgcroot tests disabled for MSVC X64") from pypy.config.pypyoption import get_pypy_config config = get_pypy_config(translating=True) config.translation.gc = cls.gcpolicy diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -485,6 +485,8 @@ 'bswap', 'bt', 'rdtsc', 'punpck', 'pshufd', 'pcmp', 'pand', 'psllw', 'pslld', 'psllq', 'paddq', 'pinsr', 'pmul', 'psrl', + # all vectors don't produce pointers + 'v', # sign-extending moves should not produce GC pointers 'cbtw', 'cwtl', 'cwtd', 'cltd', 'cltq', 'cqto', # zero-extending moves should not produce GC pointers diff --git a/pypy/translator/c/src/float.h b/pypy/translator/c/src/float.h --- a/pypy/translator/c/src/float.h +++ b/pypy/translator/c/src/float.h @@ -43,5 +43,6 @@ #define OP_CAST_FLOAT_TO_LONGLONG(x,r) r = (long long)(x) #define OP_CAST_FLOAT_TO_ULONGLONG(x,r) r = (unsigned long long)(x) #define OP_CONVERT_FLOAT_BYTES_TO_LONGLONG(x,r) memcpy(&r, &x, sizeof(double)) +#define OP_CONVERT_LONGLONG_BYTES_TO_FLOAT(x,r) memcpy(&r, &x, sizeof(long long)) #endif diff --git a/pypy/translator/c/src/libffi_msvc/win64.asm b/pypy/translator/c/src/libffi_msvc/win64.asm new file mode 100644 --- /dev/null +++ b/pypy/translator/c/src/libffi_msvc/win64.asm @@ -0,0 +1,156 @@ +PUBLIC ffi_call_AMD64 + +EXTRN __chkstk:NEAR +EXTRN ffi_closure_SYSV:NEAR + +_TEXT SEGMENT + +;;; ffi_closure_OUTER will be called with these registers set: +;;; rax points to 'closure' +;;; r11 contains a bit mask that specifies which of the +;;; first four parameters are float or double +;;; +;;; It must move the parameters passed in registers to their stack location, +;;; call ffi_closure_SYSV for the actual work, then 
return the result. +;;; +ffi_closure_OUTER PROC FRAME + ;; save actual arguments to their stack space. + test r11, 1 + jne first_is_float + mov QWORD PTR [rsp+8], rcx + jmp second +first_is_float: + movlpd QWORD PTR [rsp+8], xmm0 + +second: + test r11, 2 + jne second_is_float + mov QWORD PTR [rsp+16], rdx + jmp third +second_is_float: + movlpd QWORD PTR [rsp+16], xmm1 + +third: + test r11, 4 + jne third_is_float + mov QWORD PTR [rsp+24], r8 + jmp forth +third_is_float: + movlpd QWORD PTR [rsp+24], xmm2 + +forth: + test r11, 8 + jne forth_is_float + mov QWORD PTR [rsp+32], r9 + jmp done +forth_is_float: + movlpd QWORD PTR [rsp+32], xmm3 + +done: +.ALLOCSTACK 40 + sub rsp, 40 +.ENDPROLOG + mov rcx, rax ; context is first parameter + mov rdx, rsp ; stack is second parameter + add rdx, 40 ; correct our own area + mov rax, ffi_closure_SYSV + call rax ; call the real closure function + ;; Here, code is missing that handles float return values + add rsp, 40 + movd xmm0, rax ; In case the closure returned a float. 
+ ret 0 +ffi_closure_OUTER ENDP + + +;;; ffi_call_AMD64 + +stack$ = 0 +prepfunc$ = 32 +ecif$ = 40 +bytes$ = 48 +flags$ = 56 +rvalue$ = 64 +fn$ = 72 + +ffi_call_AMD64 PROC FRAME + + mov QWORD PTR [rsp+32], r9 + mov QWORD PTR [rsp+24], r8 + mov QWORD PTR [rsp+16], rdx + mov QWORD PTR [rsp+8], rcx +.PUSHREG rbp + push rbp +.ALLOCSTACK 48 + sub rsp, 48 ; 00000030H +.SETFRAME rbp, 32 + lea rbp, QWORD PTR [rsp+32] +.ENDPROLOG + + mov eax, DWORD PTR bytes$[rbp] + add rax, 15 + and rax, -16 + call __chkstk + sub rsp, rax + lea rax, QWORD PTR [rsp+32] + mov QWORD PTR stack$[rbp], rax + + mov rdx, QWORD PTR ecif$[rbp] + mov rcx, QWORD PTR stack$[rbp] + call QWORD PTR prepfunc$[rbp] + + mov rsp, QWORD PTR stack$[rbp] + + movlpd xmm3, QWORD PTR [rsp+24] + movd r9, xmm3 + + movlpd xmm2, QWORD PTR [rsp+16] + movd r8, xmm2 + + movlpd xmm1, QWORD PTR [rsp+8] + movd rdx, xmm1 + + movlpd xmm0, QWORD PTR [rsp] + movd rcx, xmm0 + + call QWORD PTR fn$[rbp] +ret_int$: + cmp DWORD PTR flags$[rbp], 1 ; FFI_TYPE_INT + jne ret_float$ + + mov rcx, QWORD PTR rvalue$[rbp] + mov DWORD PTR [rcx], eax + jmp SHORT ret_nothing$ + +ret_float$: + cmp DWORD PTR flags$[rbp], 2 ; FFI_TYPE_FLOAT + jne SHORT ret_double$ + + mov rax, QWORD PTR rvalue$[rbp] + movlpd QWORD PTR [rax], xmm0 + jmp SHORT ret_nothing$ + +ret_double$: + cmp DWORD PTR flags$[rbp], 3 ; FFI_TYPE_DOUBLE + jne SHORT ret_int64$ + + mov rax, QWORD PTR rvalue$[rbp] + movlpd QWORD PTR [rax], xmm0 + jmp SHORT ret_nothing$ + +ret_int64$: + cmp DWORD PTR flags$[rbp], 12 ; FFI_TYPE_SINT64 + jne ret_nothing$ + + mov rcx, QWORD PTR rvalue$[rbp] + mov QWORD PTR [rcx], rax + jmp SHORT ret_nothing$ + +ret_nothing$: + xor eax, eax + + lea rsp, QWORD PTR [rbp+16] + pop rbp + ret 0 +ffi_call_AMD64 ENDP +_TEXT ENDS +END diff --git a/pypy/translator/driver.py b/pypy/translator/driver.py --- a/pypy/translator/driver.py +++ b/pypy/translator/driver.py @@ -585,22 +585,6 @@ # task_compile_c = taskdef(task_compile_c, ['source_c'], "Compiling c source") - def 
backend_run(self, backend): - c_entryp = self.c_entryp - standalone = self.standalone - if standalone: - os.system(c_entryp) - else: - runner = self.extra.get('run', lambda f: f()) - runner(c_entryp) - - def task_run_c(self): - self.backend_run('c') - # - task_run_c = taskdef(task_run_c, ['compile_c'], - "Running compiled c source", - idemp=True) - def task_llinterpret_lltype(self): from pypy.rpython.llinterp import LLInterpreter py.log.setconsumer("llinterp operation", None) @@ -710,11 +694,6 @@ shutil.copy(main_exe, '.') self.log.info("Copied to %s" % os.path.join(os.getcwd(), dllname)) - def task_run_cli(self): - pass - task_run_cli = taskdef(task_run_cli, ['compile_cli'], - 'XXX') - def task_source_jvm(self): from pypy.translator.jvm.genjvm import GenJvm from pypy.translator.jvm.node import EntryPoint diff --git a/pypy/translator/goal/translate.py b/pypy/translator/goal/translate.py --- a/pypy/translator/goal/translate.py +++ b/pypy/translator/goal/translate.py @@ -31,7 +31,6 @@ ("backendopt", "do backend optimizations", "--backendopt", ""), ("source", "create source", "-s --source", ""), ("compile", "compile", "-c --compile", " (default goal)"), - ("run", "run the resulting binary", "--run", ""), ("llinterpret", "interpret the rtyped flow graphs", "--llinterpret", ""), ] def goal_options(): @@ -78,7 +77,7 @@ defaultfactory=list), # xxx default goals ['annotate', 'rtype', 'backendopt', 'source', 'compile'] ArbitraryOption("skipped_goals", "XXX", - defaultfactory=lambda: ['run']), + defaultfactory=list), OptionDescription("goal_options", "Goals that should be reached during translation", goal_options()), diff --git a/pypy/translator/jvm/opcodes.py b/pypy/translator/jvm/opcodes.py --- a/pypy/translator/jvm/opcodes.py +++ b/pypy/translator/jvm/opcodes.py @@ -241,4 +241,7 @@ 'cast_ulonglong_to_float': jvm.PYPYULONGTODOUBLE, 'cast_primitive': [PushAllArgs, CastPrimitive, StoreResult], 'force_cast': [PushAllArgs, CastPrimitive, StoreResult], + + 
'convert_float_bytes_to_longlong': jvm.PYPYDOUBLEBYTESTOLONG, + 'convert_longlong_bytes_to_float': jvm.PYPYLONGBYTESTODOUBLE, }) diff --git a/pypy/translator/jvm/typesystem.py b/pypy/translator/jvm/typesystem.py --- a/pypy/translator/jvm/typesystem.py +++ b/pypy/translator/jvm/typesystem.py @@ -941,6 +941,8 @@ PYPYDOUBLETOULONG = Method.s(jPyPy, 'double_to_ulong', (jDouble,), jLong) PYPYULONGTODOUBLE = Method.s(jPyPy, 'ulong_to_double', (jLong,), jDouble) PYPYLONGBITWISENEGATE = Method.v(jPyPy, 'long_bitwise_negate', (jLong,), jLong) +PYPYDOUBLEBYTESTOLONG = Method.v(jPyPy, 'pypy__float2longlong', (jDouble,), jLong) +PYPYLONGBYTESTODOUBLE = Method.v(jPyPy, 'pypy__longlong2float', (jLong,), jDouble) PYPYSTRTOINT = Method.v(jPyPy, 'str_to_int', (jString,), jInt) PYPYSTRTOUINT = Method.v(jPyPy, 'str_to_uint', (jString,), jInt) PYPYSTRTOLONG = Method.v(jPyPy, 'str_to_long', (jString,), jLong) diff --git a/pypy/translator/platform/__init__.py b/pypy/translator/platform/__init__.py --- a/pypy/translator/platform/__init__.py +++ b/pypy/translator/platform/__init__.py @@ -301,6 +301,8 @@ global platform log.msg("Setting platform to %r cc=%s" % (new_platform,cc)) platform = pick_platform(new_platform, cc) + if not platform: + raise ValueError("pick_platform failed") if new_platform == 'host': global host diff --git a/pypy/translator/platform/windows.py b/pypy/translator/platform/windows.py --- a/pypy/translator/platform/windows.py +++ b/pypy/translator/platform/windows.py @@ -7,15 +7,27 @@ from pypy.translator.platform import log, _run_subprocess from pypy.translator.platform import Platform, posix +def _get_compiler_type(cc, x64_flag): + import subprocess + if not cc: + cc = os.environ.get('CC','') + if not cc: + return MsvcPlatform(cc=cc, x64=x64_flag) + elif cc.startswith('mingw'): + return MingwPlatform(cc) + try: + subprocess.check_output([cc, '--version']) + except: + raise ValueError,"Could not find compiler specified by cc option" + \ + " '%s', it must be a valid 
exe file on your path"%cc + return MingwPlatform(cc) + def Windows(cc=None): - if cc == 'mingw32': - return MingwPlatform(cc) - else: - return MsvcPlatform(cc, False) + return _get_compiler_type(cc, False) + +def Windows_x64(cc=None): + return _get_compiler_type(cc, True) -def Windows_x64(cc=None): - return MsvcPlatform(cc, True) - def _get_msvc_env(vsver, x64flag): try: toolsdir = os.environ['VS%sCOMNTOOLS' % vsver] @@ -31,14 +43,16 @@ vcvars = os.path.join(toolsdir, 'vsvars32.bat') import subprocess - popen = subprocess.Popen('"%s" & set' % (vcvars,), + try: + popen = subprocess.Popen('"%s" & set' % (vcvars,), stdout=subprocess.PIPE, stderr=subprocess.PIPE) - stdout, stderr = popen.communicate() - if popen.wait() != 0: - return - + stdout, stderr = popen.communicate() + if popen.wait() != 0: + return None + except: + return None env = {} stdout = stdout.replace("\r\n", "\n") @@ -395,7 +409,9 @@ so_ext = 'dll' def __init__(self, cc=None): - Platform.__init__(self, 'gcc') + if not cc: + cc = 'gcc' + Platform.__init__(self, cc) def _args_for_shared(self, args): return ['-shared'] + args diff --git a/pypy/translator/test/test_driver.py b/pypy/translator/test/test_driver.py --- a/pypy/translator/test/test_driver.py +++ b/pypy/translator/test/test_driver.py @@ -6,7 +6,7 @@ def test_ctr(): td = TranslationDriver() expected = ['annotate', 'backendopt', 'llinterpret', 'rtype', 'source', - 'compile', 'run', 'pyjitpl'] + 'compile', 'pyjitpl'] assert set(td.exposed) == set(expected) assert td.backend_select_goals(['compile_c']) == ['compile_c'] @@ -33,7 +33,6 @@ 'rtype_ootype', 'rtype_lltype', 'source_cli', 'source_c', 'compile_cli', 'compile_c', - 'run_c', 'run_cli', 'compile_jvm', 'source_jvm', 'run_jvm', 'pyjitpl_lltype', 'pyjitpl_ootype'] @@ -50,6 +49,6 @@ 'backendopt_lltype'] expected = ['annotate', 'backendopt', 'llinterpret', 'rtype', 'source_c', - 'compile_c', 'run_c', 'pyjitpl'] + 'compile_c', 'pyjitpl'] assert set(td.exposed) == set(expected) diff --git 
a/pypy/translator/test/test_unsimplify.py b/pypy/translator/test/test_unsimplify.py --- a/pypy/translator/test/test_unsimplify.py +++ b/pypy/translator/test/test_unsimplify.py @@ -78,7 +78,7 @@ return x * 6 def hello_world(): if we_are_translated(): - fd = os.open(tmpfile, os.O_WRONLY | os.O_CREAT, 0) + fd = os.open(tmpfile, os.O_WRONLY | os.O_CREAT, 0644) os.close(fd) graph, t = translate(f, [int], type_system) call_initial_function(t, hello_world) @@ -97,7 +97,7 @@ return x * 6 def goodbye_world(): if we_are_translated(): - fd = os.open(tmpfile, os.O_WRONLY | os.O_CREAT, 0) + fd = os.open(tmpfile, os.O_WRONLY | os.O_CREAT, 0644) os.close(fd) graph, t = translate(f, [int], type_system) call_final_function(t, goodbye_world) From noreply at buildbot.pypy.org Wed Mar 28 08:06:50 2012 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 28 Mar 2012 08:06:50 +0200 (CEST) Subject: [pypy-commit] pypy default: reformat remove tab Message-ID: <20120328060651.00BC4822B2@wyvern.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r54039:ad198fd9857a Date: 2012-03-28 08:06 +0200 http://bitbucket.org/pypy/pypy/changeset/ad198fd9857a/ Log: reformat remove tab diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -28,7 +28,7 @@ return self.identity def descr_call(self, space, __args__): - from interp_numarray import BaseArray + from interp_numarray import BaseArray args_w, kwds_w = __args__.unpack() # it occurs to me that we don't support any datatypes that # require casting, change it later when we do From noreply at buildbot.pypy.org Wed Mar 28 10:53:32 2012 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 28 Mar 2012 10:53:32 +0200 (CEST) Subject: [pypy-commit] pypy default: add a cpyext-related project. 
Message-ID: <20120328085332.5C50C822B2@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r54040:bf0eeb2df227 Date: 2012-03-28 10:52 +0200 http://bitbucket.org/pypy/pypy/changeset/bf0eeb2df227/ Log: add a cpyext-related project. diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -149,6 +149,22 @@ exported. This would give us a one-size-fits-all generic .so file to be imported by any application that wants to load .so files :-) +Optimising cpyext (CPython C-API compatibility layer) +----------------------------------------------------- + +A lot of work has gone into PyPy's implementation of CPython's C-API over +the last years to let it reach a practical level of compatibility, so that +C extensions for CPython work on PyPy without major rewrites. However, +there are still many edges and corner cases where it misbehaves, and it has +not received any substantial optimisation so far. + +The objective of this project is to fix bugs in cpyext and to optimise +several performance critical parts of it, such as the reference counting +support and other heavily used C-API functions. The net result would be to +have CPython extensions run much faster on PyPy than they currently do, or +to make them work at all if they currently don't. A part of this work would +be to get cpyext into a shape where it supports running Cython generated +extensions. .. _`issue tracker`: http://bugs.pypy.org .. 
_`mailing list`: http://mail.python.org/mailman/listinfo/pypy-dev From noreply at buildbot.pypy.org Wed Mar 28 10:53:35 2012 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 28 Mar 2012 10:53:35 +0200 (CEST) Subject: [pypy-commit] pypy default: merge Message-ID: <20120328085335.15378822B2@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r54041:372ef0512d66 Date: 2012-03-28 10:53 +0200 http://bitbucket.org/pypy/pypy/changeset/372ef0512d66/ Log: merge diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -27,6 +27,12 @@ def constfloat(x): return ConstFloat(longlong.getfloatstorage(x)) +def boxlonglong(ll): + if longlong.is_64_bit: + return BoxInt(ll) + else: + return BoxFloat(ll) + class Runner(object): @@ -1623,6 +1629,11 @@ [boxfloat(2.5)], t).value assert res == longlong2float.float2longlong(2.5) + bytes = longlong2float.float2longlong(2.5) + res = self.execute_operation(rop.CONVERT_LONGLONG_BYTES_TO_FLOAT, + [boxlonglong(res)], 'float').value + assert longlong.getrealfloat(res) == 2.5 + def test_ooops_non_gc(self): x = lltype.malloc(lltype.Struct('x'), flavor='raw') v = heaptracker.adr2int(llmemory.cast_ptr_to_adr(x)) diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py --- a/pypy/jit/backend/test/test_random.py +++ b/pypy/jit/backend/test/test_random.py @@ -328,6 +328,15 @@ def produce_into(self, builder, r): self.put(builder, [r.choice(builder.intvars)]) +class CastLongLongToFloatOperation(AbstractFloatOperation): + def produce_into(self, builder, r): + if longlong.is_64_bit: + self.put(builder, [r.choice(builder.intvars)]) + else: + if not builder.floatvars: + raise CannotProduceOperation + self.put(builder, [r.choice(builder.floatvars)]) + class CastFloatToIntOperation(AbstractFloatOperation): def produce_into(self, builder, r): if not builder.floatvars: @@ -450,6 +459,7 
@@ OPERATIONS.append(CastFloatToIntOperation(rop.CAST_FLOAT_TO_INT)) OPERATIONS.append(CastIntToFloatOperation(rop.CAST_INT_TO_FLOAT)) OPERATIONS.append(CastFloatToIntOperation(rop.CONVERT_FLOAT_BYTES_TO_LONGLONG)) +OPERATIONS.append(CastLongLongToFloatOperation(rop.CONVERT_LONGLONG_BYTES_TO_FLOAT)) OperationBuilder.OPERATIONS = OPERATIONS diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -1251,6 +1251,15 @@ else: self.mov(loc0, resloc) + def genop_convert_longlong_bytes_to_float(self, op, arglocs, resloc): + loc0, = arglocs + if longlong.is_64_bit: + assert isinstance(resloc, RegLoc) + assert isinstance(loc0, RegLoc) + self.mc.MOVD(resloc, loc0) + else: + self.mov(loc0, resloc) + def genop_guard_int_is_true(self, op, guard_op, guard_token, arglocs, resloc): guard_opnum = guard_op.getopnum() self.mc.CMP(arglocs[0], imm0) diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -777,7 +777,20 @@ loc0 = self.xrm.loc(arg0) loc1 = self.xrm.force_allocate_reg(op.result, forbidden_vars=[arg0]) self.Perform(op, [loc0], loc1) - self.xrm.possibly_free_var(op.getarg(0)) + self.xrm.possibly_free_var(arg0) + + def consider_convert_longlong_bytes_to_float(self, op): + if longlong.is_64_bit: + loc0 = self.rm.make_sure_var_in_reg(op.getarg(0)) + loc1 = self.xrm.force_allocate_reg(op.result) + self.Perform(op, [loc0], loc1) + self.rm.possibly_free_var(op.getarg(0)) + else: + arg0 = op.getarg(0) + loc0 = self.xrm.make_sure_var_in_reg(arg0) + loc1 = self.xrm.force_allocate_reg(op.result, forbidden_vars=[arg0]) + self.Perform(op, [loc0], loc1) + self.xrm.possibly_free_var(arg0) def _consider_llong_binop_xx(self, op): # must force both arguments into xmm registers, because we don't diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- 
a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -295,6 +295,7 @@ return op rewrite_op_convert_float_bytes_to_longlong = _noop_rewrite + rewrite_op_convert_longlong_bytes_to_float = _noop_rewrite # ---------- # Various kinds of calls diff --git a/pypy/jit/codewriter/test/test_flatten.py b/pypy/jit/codewriter/test/test_flatten.py --- a/pypy/jit/codewriter/test/test_flatten.py +++ b/pypy/jit/codewriter/test/test_flatten.py @@ -968,20 +968,22 @@ int_return %i2 """, transform=True) - def test_convert_float_bytes_to_int(self): - from pypy.rlib.longlong2float import float2longlong + def test_convert_float_bytes(self): + from pypy.rlib.longlong2float import float2longlong, longlong2float def f(x): - return float2longlong(x) + ll = float2longlong(x) + return longlong2float(ll) if longlong.is_64_bit: - result_var = "%i0" - return_op = "int_return" + tmp_var = "%i0" + result_var = "%f1" else: - result_var = "%f1" - return_op = "float_return" + tmp_var = "%f1" + result_var = "%f2" self.encoding_test(f, [25.0], """ - convert_float_bytes_to_longlong %%f0 -> %(result_var)s - %(return_op)s %(result_var)s - """ % {"result_var": result_var, "return_op": return_op}) + convert_float_bytes_to_longlong %%f0 -> %(tmp_var)s + convert_longlong_bytes_to_float %(tmp_var)s -> %(result_var)s + float_return %(result_var)s + """ % {"result_var": result_var, "tmp_var": tmp_var}, transform=True) def check_force_cast(FROM, TO, operations, value): diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -672,6 +672,11 @@ a = longlong.getrealfloat(a) return longlong2float.float2longlong(a) + @arguments(LONGLONG_TYPECODE, returns="f") + def bhimpl_convert_longlong_bytes_to_float(a): + a = longlong2float.longlong2float(a) + return longlong.getfloatstorage(a) + # ---------- # control flow operations diff --git a/pypy/jit/metainterp/pyjitpl.py 
b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -224,6 +224,7 @@ 'float_neg', 'float_abs', 'cast_ptr_to_int', 'cast_int_to_ptr', 'convert_float_bytes_to_longlong', + 'convert_longlong_bytes_to_float', ]: exec py.code.Source(''' @arguments("box") diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -420,6 +420,7 @@ 'CAST_FLOAT_TO_SINGLEFLOAT/1', 'CAST_SINGLEFLOAT_TO_FLOAT/1', 'CONVERT_FLOAT_BYTES_TO_LONGLONG/1', + 'CONVERT_LONGLONG_BYTES_TO_FLOAT/1', # 'INT_LT/2b', 'INT_LE/2b', diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -1,3 +1,4 @@ +import math import sys import py @@ -15,7 +16,7 @@ loop_invariant, elidable, promote, jit_debug, assert_green, AssertGreenFailed, unroll_safe, current_trace_length, look_inside_iff, isconstant, isvirtual, promote_string, set_param, record_known_class) -from pypy.rlib.longlong2float import float2longlong +from pypy.rlib.longlong2float import float2longlong, longlong2float from pypy.rlib.rarithmetic import ovfcheck, is_valid_int from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rpython.ootypesystem import ootype @@ -3795,15 +3796,15 @@ res = self.interp_operations(g, [1]) assert res == 3 - def test_float2longlong(self): + def test_float_bytes(self): def f(n): - return float2longlong(n) + ll = float2longlong(n) + return longlong2float(ll) for x in [2.5, float("nan"), -2.5, float("inf")]: # There are tests elsewhere to verify the correctness of this. 
- expected = float2longlong(x) res = self.interp_operations(f, [x]) - assert longlong.getfloatstorage(res) == expected + assert res == x or math.isnan(x) and math.isnan(res) class TestLLtype(BaseLLtypeTests, LLJitMixin): diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -28,7 +28,7 @@ return self.identity def descr_call(self, space, __args__): - from interp_numarray import BaseArray + from interp_numarray import BaseArray args_w, kwds_w = __args__.unpack() # it occurs to me that we don't support any datatypes that # require casting, change it later when we do diff --git a/pypy/rlib/longlong2float.py b/pypy/rlib/longlong2float.py --- a/pypy/rlib/longlong2float.py +++ b/pypy/rlib/longlong2float.py @@ -21,7 +21,7 @@ FLOAT_ARRAY_PTR = lltype.Ptr(lltype.Array(rffi.FLOAT)) # these definitions are used only in tests, when not translated -def longlong2float_emulator(llval): +def longlong2float(llval): with lltype.scoped_alloc(DOUBLE_ARRAY_PTR.TO, 1) as d_array: ll_array = rffi.cast(LONGLONG_ARRAY_PTR, d_array) ll_array[0] = llval @@ -51,12 +51,6 @@ eci = ExternalCompilationInfo(includes=['string.h', 'assert.h'], post_include_bits=[""" -static double pypy__longlong2float(long long x) { - double dd; - assert(sizeof(double) == 8 && sizeof(long long) == 8); - memcpy(&dd, &x, 8); - return dd; -} static float pypy__uint2singlefloat(unsigned int x) { float ff; assert(sizeof(float) == 4 && sizeof(unsigned int) == 4); @@ -71,12 +65,6 @@ } """]) -longlong2float = rffi.llexternal( - "pypy__longlong2float", [rffi.LONGLONG], rffi.DOUBLE, - _callable=longlong2float_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True, sandboxsafe=True, - oo_primitive="pypy__longlong2float") - uint2singlefloat = rffi.llexternal( "pypy__uint2singlefloat", [rffi.UINT], rffi.FLOAT, _callable=uint2singlefloat_emulator, compilation_info=eci, @@ -99,4 
+87,17 @@ def specialize_call(self, hop): [v_float] = hop.inputargs(lltype.Float) - return hop.genop("convert_float_bytes_to_longlong", [v_float], resulttype=hop.r_result) + hop.exception_cannot_occur() + return hop.genop("convert_float_bytes_to_longlong", [v_float], resulttype=lltype.SignedLongLong) + +class LongLong2FloatEntry(ExtRegistryEntry): + _about_ = longlong2float + + def compute_result_annotation(self, s_longlong): + assert annmodel.SomeInteger(knowntype=r_int64).contains(s_longlong) + return annmodel.SomeFloat() + + def specialize_call(self, hop): + [v_longlong] = hop.inputargs(lltype.SignedLongLong) + hop.exception_cannot_occur() + return hop.genop("convert_longlong_bytes_to_float", [v_longlong], resulttype=lltype.Float) diff --git a/pypy/rlib/test/test_longlong2float.py b/pypy/rlib/test/test_longlong2float.py --- a/pypy/rlib/test/test_longlong2float.py +++ b/pypy/rlib/test/test_longlong2float.py @@ -2,6 +2,7 @@ from pypy.rlib.longlong2float import longlong2float, float2longlong from pypy.rlib.longlong2float import uint2singlefloat, singlefloat2uint from pypy.rlib.rarithmetic import r_singlefloat +from pypy.rpython.test.test_llinterp import interpret def fn(f1): @@ -31,6 +32,18 @@ res = fn2(x) assert repr(res) == repr(x) +def test_interpreted(): + def f(f1): + try: + ll = float2longlong(f1) + return longlong2float(ll) + except Exception: + return 500 + + for x in enum_floats(): + res = interpret(f, [x]) + assert repr(res) == repr(x) + # ____________________________________________________________ def fnsingle(f1): diff --git a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py --- a/pypy/rpython/lltypesystem/lloperation.py +++ b/pypy/rpython/lltypesystem/lloperation.py @@ -350,6 +350,7 @@ 'truncate_longlong_to_int':LLOp(canfold=True), 'force_cast': LLOp(sideeffects=False), # only for rffi.cast() 'convert_float_bytes_to_longlong': LLOp(canfold=True), + 'convert_longlong_bytes_to_float': LLOp(canfold=True), # __________ 
pointer operations __________ diff --git a/pypy/rpython/lltypesystem/opimpl.py b/pypy/rpython/lltypesystem/opimpl.py --- a/pypy/rpython/lltypesystem/opimpl.py +++ b/pypy/rpython/lltypesystem/opimpl.py @@ -431,6 +431,10 @@ from pypy.rlib.longlong2float import float2longlong return float2longlong(a) +def op_convert_longlong_bytes_to_float(a): + from pypy.rlib.longlong2float import longlong2float + return longlong2float(a) + def op_unichar_eq(x, y): assert isinstance(x, unicode) and len(x) == 1 diff --git a/pypy/translator/c/gcc/test/test_asmgcroot.py b/pypy/translator/c/gcc/test/test_asmgcroot.py --- a/pypy/translator/c/gcc/test/test_asmgcroot.py +++ b/pypy/translator/c/gcc/test/test_asmgcroot.py @@ -7,10 +7,17 @@ from pypy import conftest from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.translator.platform import platform as compiler +from pypy.rlib.rarithmetic import is_emulated_long from pypy.rpython.lltypesystem import lltype, rffi from pypy.rlib.entrypoint import entrypoint, secondary_entrypoints from pypy.rpython.lltypesystem.lloperation import llop +_MSVC = compiler.name == "msvc" +_MINGW = compiler.name == "mingw32" +_WIN32 = _MSVC or _MINGW +_WIN64 = _WIN32 and is_emulated_long +# XXX get rid of 'is_emulated_long' and have a real config here. 
+ class AbstractTestAsmGCRoot: # the asmgcroot gc transformer doesn't generate gc_reload_possibly_moved # instructions: @@ -18,8 +25,8 @@ @classmethod def make_config(cls): - if compiler.name == "msvc": - py.test.skip("all asmgcroot tests disabled for MSVC") + if _MSVC and _WIN64: + py.test.skip("all asmgcroot tests disabled for MSVC X64") from pypy.config.pypyoption import get_pypy_config config = get_pypy_config(translating=True) config.translation.gc = cls.gcpolicy diff --git a/pypy/translator/c/src/float.h b/pypy/translator/c/src/float.h --- a/pypy/translator/c/src/float.h +++ b/pypy/translator/c/src/float.h @@ -43,5 +43,6 @@ #define OP_CAST_FLOAT_TO_LONGLONG(x,r) r = (long long)(x) #define OP_CAST_FLOAT_TO_ULONGLONG(x,r) r = (unsigned long long)(x) #define OP_CONVERT_FLOAT_BYTES_TO_LONGLONG(x,r) memcpy(&r, &x, sizeof(double)) +#define OP_CONVERT_LONGLONG_BYTES_TO_FLOAT(x,r) memcpy(&r, &x, sizeof(long long)) #endif diff --git a/pypy/translator/jvm/opcodes.py b/pypy/translator/jvm/opcodes.py --- a/pypy/translator/jvm/opcodes.py +++ b/pypy/translator/jvm/opcodes.py @@ -243,4 +243,5 @@ 'force_cast': [PushAllArgs, CastPrimitive, StoreResult], 'convert_float_bytes_to_longlong': jvm.PYPYDOUBLEBYTESTOLONG, + 'convert_longlong_bytes_to_float': jvm.PYPYLONGBYTESTODOUBLE, }) diff --git a/pypy/translator/jvm/typesystem.py b/pypy/translator/jvm/typesystem.py --- a/pypy/translator/jvm/typesystem.py +++ b/pypy/translator/jvm/typesystem.py @@ -942,6 +942,7 @@ PYPYULONGTODOUBLE = Method.s(jPyPy, 'ulong_to_double', (jLong,), jDouble) PYPYLONGBITWISENEGATE = Method.v(jPyPy, 'long_bitwise_negate', (jLong,), jLong) PYPYDOUBLEBYTESTOLONG = Method.v(jPyPy, 'pypy__float2longlong', (jDouble,), jLong) +PYPYLONGBYTESTODOUBLE = Method.v(jPyPy, 'pypy__longlong2float', (jLong,), jDouble) PYPYSTRTOINT = Method.v(jPyPy, 'str_to_int', (jString,), jInt) PYPYSTRTOUINT = Method.v(jPyPy, 'str_to_uint', (jString,), jInt) PYPYSTRTOLONG = Method.v(jPyPy, 'str_to_long', (jString,), jLong) From 
noreply at buildbot.pypy.org Wed Mar 28 15:24:07 2012 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 28 Mar 2012 15:24:07 +0200 (CEST) Subject: [pypy-commit] pypy default: Remote a few Unimplemented features that have already been Message-ID: <20120328132407.DB6CC822B2@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r54042:aa43421f4125 Date: 2012-03-28 15:23 +0200 http://bitbucket.org/pypy/pypy/changeset/aa43421f4125/ Log: Remote a few Unimplemented features that have already been implemented. diff --git a/pypy/doc/stackless.rst b/pypy/doc/stackless.rst --- a/pypy/doc/stackless.rst +++ b/pypy/doc/stackless.rst @@ -199,17 +199,11 @@ The following features (present in some past Stackless version of PyPy) are for the time being not supported any more: -* Tasklets and channels (currently ``stackless.py`` seems to import, - but you have tasklets on top of coroutines on top of greenlets on - top of continulets on top of stacklets, and it's probably not too - hard to cut two of these levels by adapting ``stackless.py`` to - use directly continulets) - * Coroutines (could be rewritten at app-level) -* Pickling and unpickling continulets (*) - -* Continuing execution of a continulet in a different thread (*) +* Continuing execution of a continulet in a different thread + (but if it is "simple enough", you can pickle it and unpickle it + in the other thread). * Automatic unlimited stack (must be emulated__ so far) @@ -217,15 +211,6 @@ .. __: `recursion depth limit`_ -(*) Pickling, as well as changing threads, could be implemented by using -a "soft" stack switching mode again. We would get either "hard" or -"soft" switches, similarly to Stackless Python 3rd version: you get a -"hard" switch (like now) when the C stack contains non-trivial C frames -to save, and a "soft" switch (like previously) when it contains only -simple calls from Python to Python. 
Soft-switched continulets would -also consume a bit less RAM, and the switch might be a bit faster too -(unsure about that; what is the Stackless Python experience?). - Recursion depth limit +++++++++++++++++++++ From noreply at buildbot.pypy.org Wed Mar 28 19:33:05 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 28 Mar 2012 19:33:05 +0200 (CEST) Subject: [pypy-commit] pypy default: Unroll some functions in numpy correctly. Message-ID: <20120328173305.228B7822B2@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r54043:cf91e948ab75 Date: 2012-03-28 13:32 -0400 http://bitbucket.org/pypy/pypy/changeset/cf91e948ab75/ Log: Unroll some functions in numpy correctly. diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -3,9 +3,11 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec, NoneNotWrapped from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty from pypy.module.micronumpy import interp_boxes, interp_dtype, support, loop +from pypy.rlib import jit from pypy.rlib.rarithmetic import LONG_BIT from pypy.tool.sourcetools import func_with_new_name + class W_Ufunc(Wrappable): _attrs_ = ["name", "promote_to_float", "promote_bools", "identity"] _immutable_fields_ = ["promote_to_float", "promote_bools", "name"] @@ -179,7 +181,7 @@ elif out.shape != shape: raise operationerrfmt(space.w_ValueError, 'output parameter shape mismatch, expecting [%s]' + - ' , got [%s]', + ' , got [%s]', ",".join([str(x) for x in shape]), ",".join([str(x) for x in out.shape]), ) @@ -204,7 +206,7 @@ else: arr = ReduceArray(self.func, self.name, self.identity, obj, dtype) val = loop.compute(arr) - return val + return val def do_axis_reduce(self, obj, dtype, axis, result): from pypy.module.micronumpy.interp_numarray import AxisReduce @@ -253,7 +255,7 @@ if isinstance(w_obj, Scalar): arr = 
self.func(calc_dtype, w_obj.value.convert_to(calc_dtype)) if isinstance(out,Scalar): - out.value=arr + out.value = arr elif isinstance(out, BaseArray): out.fill(space, arr) else: @@ -265,7 +267,7 @@ if not broadcast_shape or broadcast_shape != out.shape: raise operationerrfmt(space.w_ValueError, 'output parameter shape mismatch, could not broadcast [%s]' + - ' to [%s]', + ' to [%s]', ",".join([str(x) for x in w_obj.shape]), ",".join([str(x) for x in out.shape]), ) @@ -292,10 +294,11 @@ self.func = func self.comparison_func = comparison_func + @jit.unroll_safe def call(self, space, args_w): from pypy.module.micronumpy.interp_numarray import (Call2, convert_to_array, Scalar, shape_agreement, BaseArray) - if len(args_w)>2: + if len(args_w) > 2: [w_lhs, w_rhs, w_out] = args_w else: [w_lhs, w_rhs] = args_w @@ -326,7 +329,7 @@ w_rhs.value.convert_to(calc_dtype) ) if isinstance(out,Scalar): - out.value=arr + out.value = arr elif isinstance(out, BaseArray): out.fill(space, arr) else: @@ -337,7 +340,7 @@ if out and out.shape != shape_agreement(space, new_shape, out.shape): raise operationerrfmt(space.w_ValueError, 'output parameter shape mismatch, could not broadcast [%s]' + - ' to [%s]', + ' to [%s]', ",".join([str(x) for x in new_shape]), ",".join([str(x) for x in out.shape]), ) @@ -347,7 +350,6 @@ w_lhs.add_invalidates(w_res) w_rhs.add_invalidates(w_res) if out: - #out.add_invalidates(w_res) #causes a recursion loop w_res.get_concrete() return w_res diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -1,6 +1,7 @@ from pypy.rlib import jit from pypy.interpreter.error import OperationError + at jit.look_inside_iff(lambda chunks: jit.isconstant(len(chunks))) def enumerate_chunks(chunks): result = [] i = -1 @@ -85,9 +86,9 @@ space.isinstance_w(w_item_or_slice, space.w_slice)): raise OperationError(space.w_IndexError, space.wrap('unsupported iterator index')) - + 
start, stop, step, lngth = space.decode_index4(w_item_or_slice, size) - + coords = [0] * len(shape) i = start if order == 'C': diff --git a/pypy/module/micronumpy/support.py b/pypy/module/micronumpy/support.py --- a/pypy/module/micronumpy/support.py +++ b/pypy/module/micronumpy/support.py @@ -1,3 +1,7 @@ +from pypy.rlib import jit + + + at jit.look_inside_iff(lambda s: jit.isconstant(len(s))) def product(s): i = 1 for x in s: From noreply at buildbot.pypy.org Wed Mar 28 19:58:38 2012 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 28 Mar 2012 19:58:38 +0200 (CEST) Subject: [pypy-commit] jitviewer default: count percentage better Message-ID: <20120328175838.75A24822B2@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r197:bc9ef9dbc0a0 Date: 2012-03-28 19:56 +0200 http://bitbucket.org/pypy/jitviewer/changeset/bc9ef9dbc0a0/ Log: count percentage better diff --git a/_jitviewer/app.py b/_jitviewer/app.py --- a/_jitviewer/app.py +++ b/_jitviewer/app.py @@ -127,7 +127,7 @@ op.count = getattr(subloop, 'count', '?') if (hasattr(subloop, 'count') and hasattr(orig_loop, 'count')): - op.percentage = subloop.count / orig_loop.count + op.percentage = int((float(subloop.count) / orig_loop.count)*100) else: op.percentage = '?' 
loop = FunctionHtml.from_operations(ops, self.storage, From noreply at buildbot.pypy.org Wed Mar 28 19:58:40 2012 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 28 Mar 2012 19:58:40 +0200 (CEST) Subject: [pypy-commit] jitviewer default: merge Message-ID: <20120328175840.2F9AA822B2@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r198:4d3c56a6bae2 Date: 2012-03-28 19:57 +0200 http://bitbucket.org/pypy/jitviewer/changeset/4d3c56a6bae2/ Log: merge diff --git a/log.pypylog b/log.pypylog --- a/log.pypylog +++ b/log.pypylog @@ -1,132 +1,416 @@ -[19b74a641544] {jit-backend-dump +[b235450e14d] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb000 +0 4157415641554154415341524151415057565554535251504889E341BBD01BF30041FFD34889DF4883E4F041BB60C4D30041FFD3488D65D8415F415E415D415C5B5DC3 -[19b74a65cbaa] jit-backend-dump} -[19b74a65f370] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165000 +0 4157415641554154415341524151415057565554535251504889E341BBD01BF30041FFD34889DF4883E4F041BB60C4D30041FFD3488D65D8415F415E415D415C5B5DC3 +[b235451eb57] jit-backend-dump} +[b235451fe75] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb043 +0 4157415641554154415341524151415057565554535251504889E341BB801BF30041FFD34889DF4883E4F041BB60C4D30041FFD3488D65D8415F415E415D415C5B5DC3 -[19b74a662820] jit-backend-dump} -[19b74a6679d4] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165043 +0 4157415641554154415341524151415057565554535251504889E341BB801BF30041FFD34889DF4883E4F041BB60C4D30041FFD3488D65D8415F415E415D415C5B5DC3 +[b23545214cd] jit-backend-dump} +[b2354524175] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb086 +0 
4157415641554154415341524151415057565554535251504889E34881EC80000000F20F110424F20F114C2408F20F11542410F20F115C2418F20F11642420F20F116C2428F20F11742430F20F117C2438F2440F11442440F2440F114C2448F2440F11542450F2440F115C2458F2440F11642460F2440F116C2468F2440F11742470F2440F117C247841BBD01BF30041FFD34889DF4883E4F041BB60C4D30041FFD3488D65D8415F415E415D415C5B5DC3 -[19b74a66c738] jit-backend-dump} -[19b74a66e694] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165086 +0 4157415641554154415341524151415057565554535251504889E34881EC80000000F20F110424F20F114C2408F20F11542410F20F115C2418F20F11642420F20F116C2428F20F11742430F20F117C2438F2440F11442440F2440F114C2448F2440F11542450F2440F115C2458F2440F11642460F2440F116C2468F2440F11742470F2440F117C247841BBD01BF30041FFD34889DF4883E4F041BB60C4D30041FFD3488D65D8415F415E415D415C5B5DC3 +[b2354526575] jit-backend-dump} +[b23545272ef] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb137 +0 4157415641554154415341524151415057565554535251504889E34881EC80000000F20F110424F20F114C2408F20F11542410F20F115C2418F20F11642420F20F116C2428F20F11742430F20F117C2438F2440F11442440F2440F114C2448F2440F11542450F2440F115C2458F2440F11642460F2440F116C2468F2440F11742470F2440F117C247841BB801BF30041FFD34889DF4883E4F041BB60C4D30041FFD3488D65D8415F415E415D415C5B5DC3 -[19b74a672cb4] jit-backend-dump} -[19b74a678492] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165137 +0 4157415641554154415341524151415057565554535251504889E34881EC80000000F20F110424F20F114C2408F20F11542410F20F115C2418F20F11642420F20F116C2428F20F11742430F20F117C2438F2440F11442440F2440F114C2448F2440F11542450F2440F115C2458F2440F11642460F2440F116C2468F2440F11742470F2440F117C247841BB801BF30041FFD34889DF4883E4F041BB60C4D30041FFD3488D65D8415F415E415D415C5B5DC3 +[b235452931d] jit-backend-dump} +[b235452c095] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb210 +0 41BBE01AF30041FFD3B803000000488D65D8415F415E415D415C5B5DC3 -[19b74a67a754] 
jit-backend-dump} -[19b74a684618] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165210 +0 41BBE01AF30041FFD3B803000000488D65D8415F415E415D415C5B5DC3 +[b235452cfbb] jit-backend-dump} +[b2354533197] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb22d +0 F20F11442410F20F114C2418F20F11542420F20F115C2428F20F11642430F20F116C2438F20F11742440F20F117C2448F2440F11442450F2440F114C2458F2440F11542460F2440F115C2468F2440F11642470F2440F116C2478F2440F11B42480000000F2440F11BC24880000004829C24C8955B048894D80488975904C8945A04C894DA848897D984889D741BB1096CF0041FFE3 -[19b74a688746] jit-backend-dump} -[19b74a6913aa] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f491416522d +0 F20F11442410F20F114C2418F20F11542420F20F115C2428F20F11642430F20F116C2438F20F11742440F20F117C2448F2440F11442450F2440F114C2458F2440F11542460F2440F115C2468F2440F11642470F2440F116C2478F2440F11B42480000000F2440F11BC24880000004829C24C8955B048894D80488975904C8945A04C894DA848897D984889D741BB1096CF0041FFE3 +[b2354534fd1] jit-backend-dump} +[b235453a431] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb2c2 +0 4C8B55B0488B4D80488B75904C8B45A04C8B4DA8488B7D98F20F10442410F20F104C2418F20F10542420F20F105C2428F20F10642430F20F106C2438F20F10742440F20F107C2448F2440F10442450F2440F104C2458F2440F10542460F2440F105C2468F2440F10642470F2440F106C2478F2440F10B42480000000F2440F10BC24880000004885C07409488B142530255601C349BB10B21B18F07F000041FFE3 -[19b74a69574e] jit-backend-dump} -[19b74a699924] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141652c2 +0 4C8B55B0488B4D80488B75904C8B45A04C8B4DA8488B7D98F20F10442410F20F104C2418F20F10542420F20F105C2428F20F10642430F20F106C2438F20F10742440F20F107C2448F2440F10442450F2440F104C2458F2440F10542460F2440F105C2468F2440F10642470F2440F106C2478F2440F10B42480000000F2440F10BC24880000004885C07409488B142530255601C349BB10521614497F000041FFE3 +[b235453c0ad] jit-backend-dump} +[b235453e3d7] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE 
python -CODE_DUMP @7ff0181bb363 +0 57565251415041514883EC40F20F110424F20F114C2408F20F11542410F20F115C2418F20F11642420F20F116C2428F20F11742430F20F117C24384889E741BBD036A90041FFD3488B0425A046A0024885C0753CF20F107C2438F20F10742430F20F106C2428F20F10642420F20F105C2418F20F10542410F20F104C2408F20F1004244883C44041594158595A5E5FC341BB801BF30041FFD3B8030000004883C478C3 -[19b74a69dc74] jit-backend-dump} -[19b74a69f5f4] {jit-backend-counts -[19b74a69fe64] jit-backend-counts} -[19b74b072b39] {jit-backend -[19b74b882b8a] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165363 +0 57565251415041514883EC40F20F110424F20F114C2408F20F11542410F20F115C2418F20F11642420F20F116C2428F20F11742430F20F117C24384889E741BBD036A90041FFD3488B0425A046A0024885C0753CF20F107C2438F20F10742430F20F106C2428F20F10642420F20F105C2418F20F10542410F20F104C2408F20F1004244883C44041594158595A5E5FC341BB801BF30041FFD3B8030000004883C478C3 +[b23545400b3] jit-backend-dump} +[b2354540e4b] {jit-backend-counts +[b23545411c9] jit-backend-counts} +[b2354a7a4cd] {jit-backend +[b2355001144] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb406 +0 
488B04254045A0024829E0483B0425E03C5101760D49BB63B31B18F07F000041FFD3554889E5534154415541564157488DA50000000049BBF020011BF07F00004D8B3B4983C70149BBF020011BF07F00004D893B4C8B7F504C8B77784C0FB6AF960000004C8B67604C8B97800000004C8B4F584C8B4768498B5810498B5018498B4020498B48284D8B40304889BD70FFFFFF4889B568FFFFFF4C89BD60FFFFFF4C89A558FFFFFF4C898D50FFFFFF48899548FFFFFF48898D40FFFFFF4C898538FFFFFF49BB0821011BF07F00004D8B034983C00149BB0821011BF07F00004D89034983FA030F85000000008138806300000F85000000004C8B50104D85D20F84000000004C8B4008498B4A108139582D03000F85000000004D8B5208498B4A08498B52104D8B52184983F8000F8C000000004D39D00F8D000000004D89C14C0FAFC24989CC4C01C14983C1014C8948084983FD000F85000000004883FB017206813BF82200000F850000000049BB68162E18F07F00004D39DE0F85000000004C8B73084983C6010F8000000000488B1C254845A0024883FB000F8C0000000048898D30FFFFFF49BB2021011BF07F0000498B0B4883C10149BB2021011BF07F000049890B4D39D10F8D000000004C89C94C0FAFCA4C89E34D01CC4883C101488948084D89F14983C6010F80000000004C8B0C254845A0024983F9000F8C000000004C89A530FFFFFF4989C94989DCE993FFFFFF49BB00B01B18F07F000041FFD32944404838354C510C5400585C030400000049BB00B01B18F07F000041FFD34440004838354C0C54585C030500000049BB00B01B18F07F000041FFD3444000284838354C0C54585C030600000049BB00B01B18F07F000041FFD34440002104284838354C0C54585C030700000049BB00B01B18F07F000041FFD3444000212909054838354C0C54585C030800000049BB00B01B18F07F000041FFD34440002109054838354C0C54585C030900000049BB00B01B18F07F000041FFD335444048384C0C54005C05030A00000049BB00B01B18F07F000041FFD344400C48384C005C05030B00000049BB00B01B18F07F000041FFD3444038484C0C005C05030C00000049BB00B01B18F07F000041FFD344400C39484C0005030D00000049BB00B01B18F07F000041FFD34440484C003905030E00000049BB00B01B18F07F000041FFD34440484C003905030F00000049BB00B01B18F07F000041FFD3444000250931484C3961031000000049BB00B01B18F07F000041FFD3444039484C00312507031100000049BB00B01B18F07F000041FFD34440484C0039310707031200000049BB00B01B18F07F000041FFD34440484C00393107070313000000 -[19b74b8b3bc8] 
jit-backend-dump} -[19b74b8b4f7e] {jit-backend-addr -Loop 0 ( #19 FOR_ITER) has address 7ff0181bb43c to 7ff0181bb619 (bootstrap 7ff0181bb406) -[19b74b8b759a] jit-backend-addr} -[19b74b8b897a] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165406 +0 488B04254045A0024829E0483B0425E03C5101760D49BB63531614497F000041FFD3554889E5534154415541564157488DA50000000049BBF0C0FB16497F00004D8B3B4983C70149BBF0C0FB16497F00004D893B4C8B7F504C8B77784C0FB6AF960000004C8B67604C8B97800000004C8B4F584C8B4768498B5810498B50184D8B40204889BD70FFFFFF4889B568FFFFFF4C89BD60FFFFFF4C89A558FFFFFF4C898D50FFFFFF48899548FFFFFF4C898540FFFFFF49BB08C1FB16497F00004D8B034983C00149BB08C1FB16497F00004D89034983FA010F85000000004883FB017206813BF82200000F85000000004983FD000F850000000049BB48B92814497F00004D39DE0F85000000004C8B73084981FE4F0400000F8D000000004983C601488B1C254845A0024883FB000F8C0000000049BB20C1FB16497F0000498B1B4883C30149BB20C1FB16497F000049891B4981FE4F0400000F8D000000004983C601488B1C254845A0024883FB000F8C00000000E9BAFFFFFF49BB00501614497F000041FFD32944404838354C510C5458030400000049BB00501614497F000041FFD344400C4838354C5458030500000049BB00501614497F000041FFD335444048384C0C58030600000049BB00501614497F000041FFD3444038484C0C58030700000049BB00501614497F000041FFD344400C484C030800000049BB00501614497F000041FFD34440484C39030900000049BB00501614497F000041FFD34440484C39030A00000049BB00501614497F000041FFD34440484C39030B00000049BB00501614497F000041FFD34440484C3907030C00000049BB00501614497F000041FFD34440484C3907030D000000 +[b235501e631] jit-backend-dump} +[b235501ef40] {jit-backend-addr +Loop 0 ( #9 LOAD_FAST) has address 7f491416543c to 7f491416557e (bootstrap 7f4914165406) +[b23550204a9] jit-backend-addr} +[b2355021154] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb438 +0 30FFFFFF -[19b74b8ba696] jit-backend-dump} -[19b74b8bb872] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165438 +0 40FFFFFF +[b2355021ebc] jit-backend-dump} +[b23550229de] {jit-backend-dump 
BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb4ed +0 28010000 -[19b74b8bd090] jit-backend-dump} -[19b74b8bdc06] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141654de +0 9C000000 +[b23550234a3] jit-backend-dump} +[b2355023932] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb4f9 +0 3B010000 -[19b74b8bf19c] jit-backend-dump} -[19b74b8bfad8] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141654f0 +0 A7000000 +[b23550242e6] jit-backend-dump} +[b235502472a] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb506 +0 4B010000 -[19b74b8c102c] jit-backend-dump} -[19b74b8c196e] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141654fa +0 B8000000 +[b23550250a8] jit-backend-dump} +[b23550254ef] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb51a +0 55010000 -[19b74b8c2e50] jit-backend-dump} -[19b74b8c3744] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f491416550d +0 BF000000 +[b2355026083] jit-backend-dump} +[b23550265e4] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb534 +0 5B010000 -[19b74b8c4da6] jit-backend-dump} -[19b74b8c57f6] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f491416551e +0 C7000000 +[b23550270c7] jit-backend-dump} +[b23550277cf] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb53d +0 73010000 -[19b74b8c6e04] jit-backend-dump} -[19b74b8c7746] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165534 +0 DF000000 +[b2355028126] jit-backend-dump} +[b2355028573] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb55c +0 74010000 -[19b74b8c8cfa] jit-backend-dump} -[19b74b8c9636] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f491416555f +0 CB000000 +[b2355028ee2] jit-backend-dump} +[b2355029398] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb56e +0 7F010000 -[19b74b8cab78] jit-backend-dump} -[19b74b8cb49c] 
{jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165575 +0 E4000000 +[b2355029d01] jit-backend-dump} +[b235502a970] jit-backend} +[b235502de9e] {jit-log-opt-loop +# Loop 0 ( #9 LOAD_FAST) : loop with 53 ops +[p0, p1] ++84: p2 = getfield_gc(p0, descr=) ++88: p3 = getfield_gc(p0, descr=) ++92: i4 = getfield_gc(p0, descr=) ++100: p5 = getfield_gc(p0, descr=) ++104: i6 = getfield_gc(p0, descr=) ++111: i7 = getfield_gc(p0, descr=) ++115: p8 = getfield_gc(p0, descr=) ++119: p10 = getarrayitem_gc(p8, 0, descr=) ++123: p12 = getarrayitem_gc(p8, 1, descr=) ++127: p14 = getarrayitem_gc(p8, 2, descr=) ++131: p15 = getfield_gc(p0, descr=) ++131: label(p0, p1, p2, p3, i4, p5, i6, i7, p10, p12, p14, descr=TargetToken(139951847702960)) +debug_merge_point(0, ' #9 LOAD_FAST') ++210: guard_value(i6, 1, descr=) [i6, p1, p0, p2, p3, i4, p5, i7, p10, p12, p14] ++220: guard_nonnull_class(p10, ConstClass(W_IntObject), descr=) [p1, p0, p10, p2, p3, i4, p5, p12, p14] ++238: guard_value(i4, 0, descr=) [i4, p1, p0, p2, p3, p5, p10, p14] +debug_merge_point(0, ' #12 LOAD_CONST') ++248: guard_value(p3, ConstPtr(ptr19), descr=) [p1, p0, p3, p2, p5, p10, p14] +debug_merge_point(0, ' #15 COMPARE_OP') ++267: i20 = getfield_gc_pure(p10, descr=) ++271: i22 = int_lt(i20, 1103) +guard_true(i22, descr=) [p1, p0, p10, p2, p5] +debug_merge_point(0, ' #18 POP_JUMP_IF_FALSE') +debug_merge_point(0, ' #21 LOAD_FAST') +debug_merge_point(0, ' #24 LOAD_CONST') +debug_merge_point(0, ' #27 INPLACE_ADD') ++284: i24 = int_add(i20, 1) +debug_merge_point(0, ' #28 STORE_FAST') +debug_merge_point(0, ' #31 JUMP_ABSOLUTE') ++288: guard_not_invalidated(, descr=) [p1, p0, p2, p5, i24] ++288: i26 = getfield_raw(44057928, descr=) ++296: i28 = int_lt(i26, 0) +guard_false(i28, descr=) [p1, p0, p2, p5, i24] +debug_merge_point(0, ' #9 LOAD_FAST') ++306: label(p0, p1, p2, p5, i24, descr=TargetToken(139951847703040)) +debug_merge_point(0, ' #9 LOAD_FAST') +debug_merge_point(0, ' #12 LOAD_CONST') +debug_merge_point(0, ' #15 
COMPARE_OP') ++336: i29 = int_lt(i24, 1103) +guard_true(i29, descr=) [p1, p0, p2, p5, i24] +debug_merge_point(0, ' #18 POP_JUMP_IF_FALSE') +debug_merge_point(0, ' #21 LOAD_FAST') +debug_merge_point(0, ' #24 LOAD_CONST') +debug_merge_point(0, ' #27 INPLACE_ADD') ++349: i30 = int_add(i24, 1) +debug_merge_point(0, ' #28 STORE_FAST') +debug_merge_point(0, ' #31 JUMP_ABSOLUTE') ++353: guard_not_invalidated(, descr=) [p1, p0, p2, p5, i30, None] ++353: i32 = getfield_raw(44057928, descr=) ++361: i33 = int_lt(i32, 0) +guard_false(i33, descr=) [p1, p0, p2, p5, i30, None] +debug_merge_point(0, ' #9 LOAD_FAST') ++371: jump(p0, p1, p2, p5, i30, descr=TargetToken(139951847703040)) ++376: --end of the loop-- +[b23550c78d9] jit-log-opt-loop} +[b2355422029] {jit-backend +[b2355483d2a] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb581 +0 87010000 -[19b74b8cc97e] jit-backend-dump} -[19b74b8cd446] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165686 +0 488B04254045A0024829E0483B0425E03C5101760D49BB63531614497F000041FFD3554889E5534154415541564157488DA50000000049BBD8C0FB16497F00004D8B3B4983C70149BBD8C0FB16497F00004D893B4C8B7F504C8B77784C0FB6AF960000004C8B67604C8B97800000004C8B4F584C8B4768498B5810498B50184D8B40204889B570FFFFFF4C89BD68FFFFFF4C89A560FFFFFF4C898D58FFFFFF48899550FFFFFF4C898548FFFFFF49BB38C1FB16497F00004D8B034983C00149BB38C1FB16497F00004D89034983FA010F85000000004883FB017206813BF82200000F85000000004983FD000F850000000049BB70BB2814497F00004D39DE0F85000000004C8B73084981FE4F0400000F8D000000004C8B6F0849BBA86B2814497F00004D39DD0F85000000004D8B551049BBC06B2814497F00004D39DA0F85000000004889BD40FFFFFF41BB201B8D0041FFD3488B78404C8B68504D85ED0F85000000004C8B68284983FD000F85000000004983C601488B3C254845A0024883FF000F8C0000000049BB50C1FB16497F0000498B3B4883C70149BB50C1FB16497F000049893B4981FE4F0400000F8D000000004983C601488B3C254845A0024883FF000F8C00000000E9BAFFFFFF49BB00501614497F000041FFD329401C443835484D0C5054030E00000049BB00501614497F000041FFD340
1C0C443835485054030F00000049BB00501614497F000041FFD335401C4438480C54031000000049BB00501614497F000041FFD3401C3844480C54031100000049BB00501614497F000041FFD3401C0C4448031200000049BB00501614497F000041FFD3401C3444480C031300000049BB00501614497F000041FFD3401C283444480C031400000049BB00501614497F000041FFD3401C3444480C031500000049BB00501614497F000041FFD34058003444480C1C15031600000049BB00501614497F000041FFD340580044480C1C15031700000049BB00501614497F000041FFD340584448390707031800000049BB00501614497F000041FFD340584448390707031900000049BB00501614497F000041FFD34058444839031A00000049BB00501614497F000041FFD34058444839031B00000049BB00501614497F000041FFD3405844483907031C000000 +[b235548d018] jit-backend-dump} +[b235548dd80] {jit-backend-addr +Loop 1 ( #9 LOAD_FAST) has address 7f49141656bc to 7f4914165854 (bootstrap 7f4914165686) +[b235548eba2] jit-backend-addr} +[b235548f46c] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb58f +0 94010000 -[19b74b8ceca0] jit-backend-dump} -[19b74b8cf9a8] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141656b8 +0 40FFFFFF +[b2355490198] jit-backend-dump} +[b2355490b31] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb5a1 +0 B5010000 -[19b74b8d100a] jit-backend-dump} -[19b74b8d1952] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165757 +0 F9000000 +[b235549d335] jit-backend-dump} +[b235549d962] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb5cf +0 A0010000 -[19b74b8d2ebe] jit-backend-dump} -[19b74b8d37f4] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165769 +0 04010000 +[b235549e4d8] jit-backend-dump} +[b235549ea21] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb5f1 +0 9A010000 -[19b74b8d4d18] jit-backend-dump} -[19b74b8d570e] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165773 +0 15010000 +[b235549f4e3] jit-backend-dump} +[b235549f933] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python 
-CODE_DUMP @7ff0181bb603 +0 BE010000 -[19b74b8d6de8] jit-backend-dump} -[19b74b8d84d4] jit-backend} -[19b74b8de708] {jit-log-opt-loop -# Loop 0 ( #19 FOR_ITER) : loop with 73 ops +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165786 +0 1C010000 +[b23554a02cf] jit-backend-dump} +[b23554a070a] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165797 +0 24010000 +[b23554a108e] jit-backend-dump} +[b23554a15ef] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141657ae +0 24010000 +[b23554a2123] jit-backend-dump} +[b23554a2693] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141657c5 +0 25010000 +[b23554a302f] jit-backend-dump} +[b23554a3623] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141657e6 +0 35010000 +[b23554a4145] jit-backend-dump} +[b23554a46a3] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141657f4 +0 42010000 +[b23554a5186] jit-backend-dump} +[b23554a571d] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE pypy +CODE_DUMP @7f491416580a +0 5F010000 +[b23554a62e4] jit-backend-dump} +[b23554a682a] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165835 +0 4D010000 +[b23554a723e] jit-backend-dump} +[b23554a776f] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE pypy +CODE_DUMP @7f491416584b +0 65010000 +[b23554a815c] jit-backend-dump} +[b23554a8b5b] jit-backend} +[b23554aab05] {jit-log-opt-loop +# Loop 1 ( #9 LOAD_FAST) : loop with 76 ops +[p0, p1] ++84: p2 = getfield_gc(p0, descr=) ++88: p3 = getfield_gc(p0, descr=) ++92: i4 = getfield_gc(p0, descr=) ++100: p5 = getfield_gc(p0, descr=) ++104: i6 = getfield_gc(p0, descr=) ++111: i7 = getfield_gc(p0, descr=) ++115: p8 = getfield_gc(p0, descr=) ++119: p10 = getarrayitem_gc(p8, 0, descr=) ++123: p12 = getarrayitem_gc(p8, 1, descr=) ++127: p14 = getarrayitem_gc(p8, 2, descr=) ++131: p15 = getfield_gc(p0, descr=) ++131: label(p0, p1, p2, p3, i4, p5, i6, i7, p10, p12, p14, 
descr=TargetToken(139951847708240)) +debug_merge_point(0, ' #9 LOAD_FAST') ++203: guard_value(i6, 1, descr=) [i6, p1, p0, p2, p3, i4, p5, i7, p10, p12, p14] ++213: guard_nonnull_class(p10, ConstClass(W_IntObject), descr=) [p1, p0, p10, p2, p3, i4, p5, p12, p14] ++231: guard_value(i4, 0, descr=) [i4, p1, p0, p2, p3, p5, p10, p14] +debug_merge_point(0, ' #12 LOAD_CONST') ++241: guard_value(p3, ConstPtr(ptr19), descr=) [p1, p0, p3, p2, p5, p10, p14] +debug_merge_point(0, ' #15 COMPARE_OP') ++260: i20 = getfield_gc_pure(p10, descr=) ++264: i22 = int_lt(i20, 1103) +guard_true(i22, descr=) [p1, p0, p10, p2, p5] +debug_merge_point(0, ' #18 POP_JUMP_IF_FALSE') +debug_merge_point(0, ' #21 LOAD_GLOBAL') ++277: p23 = getfield_gc(p0, descr=) ++281: guard_value(p23, ConstPtr(ptr24), descr=) [p1, p0, p23, p2, p5, p10] ++300: p25 = getfield_gc(p23, descr=) ++304: guard_value(p25, ConstPtr(ptr26), descr=) [p1, p0, p25, p23, p2, p5, p10] ++323: guard_not_invalidated(, descr=) [p1, p0, p23, p2, p5, p10] +debug_merge_point(0, ' #24 LOAD_FAST') +debug_merge_point(0, ' #27 CALL_FUNCTION') ++323: p28 = call(ConstClass(getexecutioncontext), descr=) ++339: p29 = getfield_gc(p28, descr=) ++343: i30 = force_token() ++343: p31 = getfield_gc(p28, descr=) ++347: guard_isnull(p31, descr=) [p1, p0, p28, p31, p2, p5, p10, p29, i30] ++356: i32 = getfield_gc(p28, descr=) ++360: i33 = int_is_zero(i32) +guard_true(i33, descr=) [p1, p0, p28, p2, p5, p10, p29, i30] +debug_merge_point(1, ' #0 LOAD_FAST') +debug_merge_point(1, ' #3 LOAD_CONST') +debug_merge_point(1, ' #6 BINARY_ADD') ++370: i35 = int_add(i20, 1) +debug_merge_point(1, ' #7 RETURN_VALUE') +debug_merge_point(0, ' #30 STORE_FAST') +debug_merge_point(0, ' #33 JUMP_ABSOLUTE') ++374: guard_not_invalidated(, descr=) [p1, p0, p2, p5, i35, None, None] ++374: i38 = getfield_raw(44057928, descr=) ++382: i40 = int_lt(i38, 0) +guard_false(i40, descr=) [p1, p0, p2, p5, i35, None, None] +debug_merge_point(0, ' #9 LOAD_FAST') ++392: p41 = 
same_as(ConstPtr(ptr26)) ++392: label(p0, p1, p2, p5, i35, descr=TargetToken(139951847708320)) +debug_merge_point(0, ' #9 LOAD_FAST') +debug_merge_point(0, ' #12 LOAD_CONST') +debug_merge_point(0, ' #15 COMPARE_OP') ++422: i42 = int_lt(i35, 1103) +guard_true(i42, descr=) [p1, p0, p2, p5, i35] +debug_merge_point(0, ' #18 POP_JUMP_IF_FALSE') +debug_merge_point(0, ' #21 LOAD_GLOBAL') ++435: guard_not_invalidated(, descr=) [p1, p0, p2, p5, i35] +debug_merge_point(0, ' #24 LOAD_FAST') +debug_merge_point(0, ' #27 CALL_FUNCTION') ++435: i43 = force_token() +debug_merge_point(1, ' #0 LOAD_FAST') +debug_merge_point(1, ' #3 LOAD_CONST') +debug_merge_point(1, ' #6 BINARY_ADD') ++435: i44 = int_add(i35, 1) +debug_merge_point(1, ' #7 RETURN_VALUE') +debug_merge_point(0, ' #30 STORE_FAST') +debug_merge_point(0, ' #33 JUMP_ABSOLUTE') ++439: i45 = getfield_raw(44057928, descr=) ++447: i46 = int_lt(i45, 0) +guard_false(i46, descr=) [p1, p0, p2, p5, i44, None] +debug_merge_point(0, ' #9 LOAD_FAST') ++457: jump(p0, p1, p2, p5, i44, descr=TargetToken(139951847708320)) ++462: --end of the loop-- +[b23554f4407] jit-log-opt-loop} +[b2355508b55] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141657c9 +0 E939010000 +[b235550a5ef] jit-backend-dump} +[b235550aba4] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141657fc +0 E953010000 +[b235550b843] jit-backend-dump} +[b235550bd68] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165839 +0 E95F010000 +[b2355510f73] jit-backend-dump} +[b23557b5993] {jit-backend +[b23558255a5] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141659cc +0 
488B04254045A0024829E0483B0425E03C5101760D49BB63531614497F000041FFD3554889E5534154415541564157488DA50000000049BB68C1FB16497F00004D8B3B4983C70149BB68C1FB16497F00004D893B4C8B7F504C8B77784C0FB6AF960000004C8B67604C8B97800000004C8B4F584C8B4768498B5810498B5018498B4020498B48284D8B40304889BD70FFFFFF4889B568FFFFFF4C89BD60FFFFFF4C89A558FFFFFF4C898D50FFFFFF48899548FFFFFF48898D40FFFFFF4C898538FFFFFF49BB80C1FB16497F00004D8B034983C00149BB80C1FB16497F00004D89034983FA030F85000000008138806300000F85000000004C8B50104D85D20F84000000004C8B4008498B4A108139582D03000F85000000004D8B5208498B4A08498B52104D8B52184983F8000F8C000000004D39D00F8D000000004D89C14C0FAFC24989CC4C01C14983C1014C8948084983FD000F85000000004883FB017206813BF82200000F850000000049BB28BC2814497F00004D39DE0F85000000004C8B73084983C6010F8000000000488B1C254845A0024883FB000F8C0000000048898D30FFFFFF49BB98C1FB16497F0000498B0B4883C10149BB98C1FB16497F000049890B4D39D10F8D000000004C89C94C0FAFCA4C89E34D01CC4883C101488948084D89F14983C6010F80000000004C8B0C254845A0024983F9000F8C000000004C89A530FFFFFF4989C94989DCE993FFFFFF49BB00501614497F000041FFD32944404838354C510C5400585C031D00000049BB00501614497F000041FFD34440004838354C0C54585C031E00000049BB00501614497F000041FFD3444000284838354C0C54585C031F00000049BB00501614497F000041FFD34440002104284838354C0C54585C032000000049BB00501614497F000041FFD3444000212909054838354C0C54585C032100000049BB00501614497F000041FFD34440002109054838354C0C54585C032200000049BB00501614497F000041FFD335444048384C0C54005C05032300000049BB00501614497F000041FFD344400C48384C005C05032400000049BB00501614497F000041FFD3444038484C0C005C05032500000049BB00501614497F000041FFD344400C39484C0005032600000049BB00501614497F000041FFD34440484C003905032700000049BB00501614497F000041FFD34440484C003905032800000049BB00501614497F000041FFD3444000250931484C6139032900000049BB00501614497F000041FFD3444039484C00310725032A00000049BB00501614497F000041FFD34440484C0039310707032B00000049BB00501614497F000041FFD34440484C0039310707032C000000 +[b235582e8eb] 
jit-backend-dump} +[b235582eeef] {jit-backend-addr +Loop 2 ( #19 FOR_ITER) has address 7f4914165a02 to 7f4914165bdf (bootstrap 7f49141659cc) +[b235582fc15] jit-backend-addr} +[b2355830257] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141659fe +0 30FFFFFF +[b2355830f57] jit-backend-dump} +[b2355831627] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165ab3 +0 28010000 +[b2355832055] jit-backend-dump} +[b2355832495] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165abf +0 3B010000 +[b2355832f9b] jit-backend-dump} +[b2355833483] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165acc +0 4B010000 +[b2355833ee5] jit-backend-dump} +[b23558343cb] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165ae0 +0 55010000 +[b2355834d9b] jit-backend-dump} +[b235583538d] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165afa +0 5B010000 +[b2355835ced] jit-backend-dump} +[b23558360cd] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165b03 +0 73010000 +[b2355836949] jit-backend-dump} +[b2355836e33] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165b22 +0 74010000 +[b235583792d] jit-backend-dump} +[b2355837dfb] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165b34 +0 7F010000 +[b2355838777] jit-backend-dump} +[b2355838b41] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165b47 +0 87010000 +[b23558393b9] jit-backend-dump} +[b2355839787] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165b55 +0 94010000 +[b235583a01d] jit-backend-dump} +[b235583a49d] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165b67 +0 B5010000 +[b235583adb7] jit-backend-dump} +[b235583b297] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165b95 +0 A0010000 +[b23558439b9] jit-backend-dump} 
+[b23558440af] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165bb7 +0 9A010000 +[b2355844afd] jit-backend-dump} +[b2355844fdd] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165bc9 +0 BE010000 +[b2355845893] jit-backend-dump} +[b2355846087] jit-backend} +[b2355847e7b] {jit-log-opt-loop +# Loop 2 ( #19 FOR_ITER) : loop with 73 ops [p0, p1] +84: p2 = getfield_gc(p0, descr=) +88: p3 = getfield_gc(p0, descr=) @@ -141,48 +425,48 @@ +131: p16 = getarrayitem_gc(p8, 3, descr=) +135: p18 = getarrayitem_gc(p8, 4, descr=) +139: p19 = getfield_gc(p0, descr=) -+139: label(p0, p1, p2, p3, i4, p5, i6, i7, p10, p12, p14, p16, p18, descr=TargetToken(140669174710784)) ++139: label(p0, p1, p2, p3, i4, p5, i6, i7, p10, p12, p14, p16, p18, descr=TargetToken(139951847709440)) debug_merge_point(0, ' #19 FOR_ITER') -+225: guard_value(i6, 3, descr=) [i6, p1, p0, p2, p3, i4, p5, i7, p10, p12, p14, p16, p18] -+235: guard_class(p14, 38562496, descr=) [p1, p0, p14, p2, p3, i4, p5, p10, p12, p16, p18] ++225: guard_value(i6, 3, descr=) [i6, p1, p0, p2, p3, i4, p5, i7, p10, p12, p14, p16, p18] ++235: guard_class(p14, 38562496, descr=) [p1, p0, p14, p2, p3, i4, p5, p10, p12, p16, p18] +247: p22 = getfield_gc(p14, descr=) -+251: guard_nonnull(p22, descr=) [p1, p0, p14, p22, p2, p3, i4, p5, p10, p12, p16, p18] ++251: guard_nonnull(p22, descr=) [p1, p0, p14, p22, p2, p3, i4, p5, p10, p12, p16, p18] +260: i23 = getfield_gc(p14, descr=) +264: p24 = getfield_gc(p22, descr=) -+268: guard_class(p24, 38745240, descr=) [p1, p0, p14, i23, p24, p22, p2, p3, i4, p5, p10, p12, p16, p18] ++268: guard_class(p24, 38745240, descr=) [p1, p0, p14, i23, p24, p22, p2, p3, i4, p5, p10, p12, p16, p18] +280: p26 = getfield_gc(p22, descr=) +284: i27 = getfield_gc_pure(p26, descr=) +288: i28 = getfield_gc_pure(p26, descr=) +292: i29 = getfield_gc_pure(p26, descr=) +296: i31 = int_lt(i23, 0) -guard_false(i31, descr=) [p1, p0, p14, i23, i29, i28, i27, p2, p3, i4, p5, p10, 
p12, p16, p18] +guard_false(i31, descr=) [p1, p0, p14, i23, i29, i28, i27, p2, p3, i4, p5, p10, p12, p16, p18] +306: i32 = int_ge(i23, i29) -guard_false(i32, descr=) [p1, p0, p14, i23, i28, i27, p2, p3, i4, p5, p10, p12, p16, p18] +guard_false(i32, descr=) [p1, p0, p14, i23, i28, i27, p2, p3, i4, p5, p10, p12, p16, p18] +315: i33 = int_mul(i23, i28) +322: i34 = int_add(i27, i33) +328: i36 = int_add(i23, 1) +332: setfield_gc(p14, i36, descr=) -+336: guard_value(i4, 0, descr=) [i4, p1, p0, p2, p3, p5, p10, p12, p14, p18, i34] ++336: guard_value(i4, 0, descr=) [i4, p1, p0, p2, p3, p5, p10, p12, p14, p18, i34] debug_merge_point(0, ' #22 STORE_FAST') debug_merge_point(0, ' #25 LOAD_FAST') -+346: guard_nonnull_class(p10, ConstClass(W_IntObject), descr=) [p1, p0, p10, p2, p3, p5, p14, p18, i34] ++346: guard_nonnull_class(p10, ConstClass(W_IntObject), descr=) [p1, p0, p10, p2, p3, p5, p14, p18, i34] debug_merge_point(0, ' #28 LOAD_CONST') -+364: guard_value(p3, ConstPtr(ptr39), descr=) [p1, p0, p3, p2, p5, p10, p14, p18, i34] ++364: guard_value(p3, ConstPtr(ptr39), descr=) [p1, p0, p3, p2, p5, p10, p14, p18, i34] debug_merge_point(0, ' #31 INPLACE_ADD') +383: i40 = getfield_gc_pure(p10, descr=) +387: i42 = int_add_ovf(i40, 1) -guard_no_overflow(, descr=) [p1, p0, p10, i42, p2, p5, p14, i34] +guard_no_overflow(, descr=) [p1, p0, p10, i42, p2, p5, p14, i34] debug_merge_point(0, ' #32 STORE_FAST') debug_merge_point(0, ' #35 JUMP_ABSOLUTE') -+397: guard_not_invalidated(, descr=) [p1, p0, p2, p5, p14, i42, i34] ++397: guard_not_invalidated(, descr=) [p1, p0, p2, p5, p14, i42, i34] +397: i44 = getfield_raw(44057928, descr=) +405: i46 = int_lt(i44, 0) -guard_false(i46, descr=) [p1, p0, p2, p5, p14, i42, i34] +guard_false(i46, descr=) [p1, p0, p2, p5, p14, i42, i34] debug_merge_point(0, ' #19 FOR_ITER') -+415: label(p0, p1, p2, p5, i42, i34, p14, i36, i29, i28, i27, descr=TargetToken(140669174710864)) ++415: label(p0, p1, p2, p5, i42, i34, p14, i36, i29, i28, i27, 
descr=TargetToken(139951847709520)) debug_merge_point(0, ' #19 FOR_ITER') +452: i47 = int_ge(i36, i29) -guard_false(i47, descr=) [p1, p0, p14, i36, i28, i27, p2, p5, i42, i34] +guard_false(i47, descr=) [p1, p0, p14, i36, i28, i27, p2, p5, i34, i42] +461: i48 = int_mul(i36, i28) +468: i49 = int_add(i27, i48) +474: i50 = int_add(i36, 1) @@ -192,109 +476,109 @@ debug_merge_point(0, ' #31 INPLACE_ADD') +478: setfield_gc(p14, i50, descr=) +482: i51 = int_add_ovf(i42, 1) -guard_no_overflow(, descr=) [p1, p0, i51, p2, p5, p14, i49, i42, None] +guard_no_overflow(, descr=) [p1, p0, i51, p2, p5, p14, i49, None, i42] debug_merge_point(0, ' #32 STORE_FAST') debug_merge_point(0, ' #35 JUMP_ABSOLUTE') -+495: guard_not_invalidated(, descr=) [p1, p0, p2, p5, p14, i51, i49, None, None] ++495: guard_not_invalidated(, descr=) [p1, p0, p2, p5, p14, i51, i49, None, None] +495: i53 = getfield_raw(44057928, descr=) +503: i54 = int_lt(i53, 0) -guard_false(i54, descr=) [p1, p0, p2, p5, p14, i51, i49, None, None] +guard_false(i54, descr=) [p1, p0, p2, p5, p14, i51, i49, None, None] debug_merge_point(0, ' #19 FOR_ITER') -+513: jump(p0, p1, p2, p5, i51, i49, p14, i50, i29, i28, i27, descr=TargetToken(140669174710864)) ++513: jump(p0, p1, p2, p5, i51, i49, p14, i50, i29, i28, i27, descr=TargetToken(139951847709520)) +531: --end of the loop-- -[19b74ba0bf44] jit-log-opt-loop} -[19b74c18800e] {jit-backend -[19b74c23f902] {jit-backend-dump +[b2355889199] jit-log-opt-loop} +[b2355bbecbf] {jit-backend +[b2355c22b85] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb7e0 +0 
488B04254045A0024829E0483B0425E03C5101760D49BB63B31B18F07F000041FFD3554889E5534154415541564157488DA50000000049BBD820011BF07F00004D8B3B4983C70149BBD820011BF07F00004D893B4C8B7F504C8B77784C0FB6AF960000004C8B67604C8B97800000004C8B4F584C8B4768498B5810498B5018498B40204D8B40284889BD70FFFFFF4889B568FFFFFF4C89BD60FFFFFF4C89A558FFFFFF4C898D50FFFFFF48898548FFFFFF4C898540FFFFFF49BB3821011BF07F00004D8B034983C00149BB3821011BF07F00004D89034983FA020F85000000004883FA017206813AF82200000F85000000004983FD000F850000000049BB20172E18F07F00004D39DE0F85000000004C8B72084981FE102700000F8D0000000049BB00000000000000804D39DE0F84000000004C89F0B90200000048899538FFFFFF48898530FFFFFF489948F7F94889D048C1FA3F41BE020000004921D64C01F04883F8000F85000000004883FB017206813BF82200000F8500000000488B43084883C0010F8000000000488B9D30FFFFFF4883C3014C8B34254845A0024983FE000F8C0000000049BB5021011BF07F00004D8B334983C60149BB5021011BF07F00004D89334881FB102700000F8D0000000049BB00000000000000804C39DB0F840000000048898528FFFFFF4889D8B90200000048898520FFFFFF489948F7F94889D048C1FA3FBB020000004821D34801D84883F8000F8500000000488B8528FFFFFF4883C0010F8000000000488B9D20FFFFFF4883C301488B14254845A0024883FA000F8C00000000E958FFFFFF49BB00B01B18F07F000041FFD32944404838354C510C085458031400000049BB00B01B18F07F000041FFD34440084838354C0C5458031500000049BB00B01B18F07F000041FFD335444048384C0C0858031600000049BB00B01B18F07F000041FFD3444038484C0C0858031700000049BB00B01B18F07F000041FFD3444008484C0C031800000049BB00B01B18F07F000041FFD344400839484C0C031900000049BB00B01B18F07F000041FFD34440484C0C5C01031A00000049BB00B01B18F07F000041FFD344400C484C5C07031B00000049BB00B01B18F07F000041FFD344400C01484C5C07031C00000049BB00B01B18F07F000041FFD34440484C0D0107031D00000049BB00B01B18F07F000041FFD34440484C0D0107031E00000049BB00B01B18F07F000041FFD34440484C0D01031F00000049BB00B01B18F07F000041FFD344400D484C0701032000000049BB00B01B18F07F000041FFD34440484C016965032100000049BB00B01B18F07F000041FFD3444001484C076965032200000049BB00B01B18F07F000041FFD34440484C0D01070707
032300000049BB00B01B18F07F000041FFD34440484C0D010707070324000000 -[19b74c252da2] jit-backend-dump} -[19b74c253a7a] {jit-backend-addr -Loop 1 ( #15 LOAD_FAST) has address 7ff0181bb816 to 7ff0181bba30 (bootstrap 7ff0181bb7e0) -[19b74c2557c0] jit-backend-addr} -[19b74c2565b2] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165da6 +0 488B04254045A0024829E0483B0425E03C5101760D49BB63531614497F000041FFD3554889E5534154415541564157488DA50000000049BBB0C1FB16497F00004D8B3B4983C70149BBB0C1FB16497F00004D893B4C8B7F504C8B77784C0FB6AF960000004C8B67604C8B97800000004C8B4F584C8B4768498B5810498B5018498B40204D8B40284889BD70FFFFFF4889B568FFFFFF4C89BD60FFFFFF4C89A558FFFFFF4C898D50FFFFFF48898548FFFFFF4C898540FFFFFF49BBC8C1FB16497F00004D8B034983C00149BBC8C1FB16497F00004D89034983FA020F85000000004883FA017206813AF82200000F85000000004983FD000F850000000049BBE0BC2814497F00004D39DE0F85000000004C8B72084981FE102700000F8D0000000049BB00000000000000804D39DE0F84000000004C89F0B90200000048899538FFFFFF48898530FFFFFF489948F7F94889D048C1FA3F41BE020000004921D64C01F04883F8000F85000000004883FB017206813BF82200000F8500000000488B43084883C0010F8000000000488B9D30FFFFFF4883C3014C8B34254845A0024983FE000F8C0000000049BBE0C1FB16497F00004D8B334983C60149BBE0C1FB16497F00004D89334881FB102700000F8D0000000049BB00000000000000804C39DB0F840000000048898528FFFFFF4889D8B90200000048898520FFFFFF489948F7F94889D048C1FA3FBB020000004821D34801D84883F8000F8500000000488B8528FFFFFF4883C0010F8000000000488B9D20FFFFFF4883C301488B14254845A0024883FA000F8C00000000E958FFFFFF49BB00501614497F000041FFD32944404838354C510C085458032D00000049BB00501614497F000041FFD34440084838354C0C5458032E00000049BB00501614497F000041FFD335444048384C0C0858032F00000049BB00501614497F000041FFD3444038484C0C0858033000000049BB00501614497F000041FFD3444008484C0C033100000049BB00501614497F000041FFD344400839484C0C033200000049BB00501614497F000041FFD34440484C0C5C01033300000049BB00501614497F000041FFD344400C484C5C07033400000049BB00501614497F000041FFD344400C01484C5C07033500000049B
B00501614497F000041FFD34440484C010D07033600000049BB00501614497F000041FFD34440484C010D07033700000049BB00501614497F000041FFD34440484C010D033800000049BB00501614497F000041FFD344400D484C0107033900000049BB00501614497F000041FFD34440484C016569033A00000049BB00501614497F000041FFD3444001484C076569033B00000049BB00501614497F000041FFD34440484C0D01070707033C00000049BB00501614497F000041FFD34440484C0D01070707033D000000 +[b2355c31b31] jit-backend-dump} +[b2355c3224b] {jit-backend-addr +Loop 3 ( #15 LOAD_FAST) has address 7f4914165ddc to 7f4914165ff6 (bootstrap 7f4914165da6) +[b2355c33115] jit-backend-addr} +[b2355c338c1] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb812 +0 20FFFFFF -[19b74c258028] jit-backend-dump} -[19b74c258d48] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165dd8 +0 20FFFFFF +[b2355c3447f] jit-backend-dump} +[b2355c34b07] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb8bc +0 70010000 -[19b74c25a404] jit-backend-dump} -[19b74c25add0] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165e82 +0 70010000 +[b2355c3543f] jit-backend-dump} +[b2355c3589b] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb8ce +0 7C010000 -[19b74c26f3e6] jit-backend-dump} -[19b74c270370] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165e94 +0 7C010000 +[b2355c36161] jit-backend-dump} +[b2355c36549] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb8d8 +0 8E010000 -[19b74c271f06] jit-backend-dump} -[19b74c272986] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165e9e +0 8E010000 +[b2355c36edf] jit-backend-dump} +[b2355c373a9] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb8eb +0 96010000 -[19b74c273fe2] jit-backend-dump} -[19b74c274a38] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165eb1 +0 96010000 +[b2355c37db7] jit-backend-dump} +[b2355c38291] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE 
python -CODE_DUMP @7ff0181bb8fc +0 9F010000 -[19b74c2760e8] jit-backend-dump} -[19b74c276b26] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165ec2 +0 9F010000 +[b2355c38b0b] jit-backend-dump} +[b2355c38ef3] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb90f +0 A4010000 -[19b74c278188] jit-backend-dump} -[19b74c278aac] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165ed5 +0 A4010000 +[b2355c3976d] jit-backend-dump} +[b2355c39b67] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb947 +0 85010000 -[19b74c279fb2] jit-backend-dump} -[19b74c27a8d6] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165f0d +0 85010000 +[b2355c3a3e1] jit-backend-dump} +[b2355c3a803] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb959 +0 8C010000 -[19b74c27be0c] jit-backend-dump} -[19b74c27c754] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165f1f +0 8C010000 +[b2355c3b393] jit-backend-dump} +[b2355c3b845] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb967 +0 97010000 -[19b74c27ddc8] jit-backend-dump} -[19b74c27ea70] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165f2d +0 97010000 +[b2355c3c245] jit-backend-dump} +[b2355c3c753] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb984 +0 AD010000 -[19b74c27ffee] jit-backend-dump} -[19b74c280954] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165f4a +0 AD010000 +[b2355c3cfdd] jit-backend-dump} +[b2355c3d3bd] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb9af +0 9B010000 -[19b74c281e78] jit-backend-dump} -[19b74c2827c0] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165f75 +0 9B010000 +[b2355c3dc67] jit-backend-dump} +[b2355c3e061] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb9c2 +0 A0010000 -[19b74c283c6c] jit-backend-dump} -[19b74c2845a8] {jit-backend-dump +SYS_EXECUTABLE 
pypy +CODE_DUMP @7f4914165f88 +0 A0010000 +[b2355c3ea79] jit-backend-dump} +[b2355c3ef51] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb9f9 +0 82010000 -[19b74c285ade] jit-backend-dump} -[19b74c28658e] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165fbf +0 82010000 +[b2355c3f941] jit-backend-dump} +[b2355c3fd27] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bba0a +0 8A010000 -[19b74c287c08] jit-backend-dump} -[19b74c288754] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165fd0 +0 8A010000 +[b2355c4068b] jit-backend-dump} +[b2355c40ac9] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bba27 +0 A2010000 -[19b74c289c90] jit-backend-dump} -[19b74c28aede] jit-backend} -[19b74c28e80e] {jit-log-opt-loop -# Loop 1 ( #15 LOAD_FAST) : loop with 92 ops +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165fed +0 A2010000 +[b2355c41369] jit-backend-dump} +[b2355c41b97] jit-backend} +[b2355c43773] {jit-log-opt-loop +# Loop 3 ( #15 LOAD_FAST) : loop with 92 ops [p0, p1] +84: p2 = getfield_gc(p0, descr=) +88: p3 = getfield_gc(p0, descr=) @@ -308,37 +592,37 @@ +127: p14 = getarrayitem_gc(p8, 2, descr=) +131: p16 = getarrayitem_gc(p8, 3, descr=) +135: p17 = getfield_gc(p0, descr=) -+135: label(p0, p1, p2, p3, i4, p5, i6, i7, p10, p12, p14, p16, descr=TargetToken(140669174715984)) ++135: label(p0, p1, p2, p3, i4, p5, i6, i7, p10, p12, p14, p16, descr=TargetToken(139951847710560)) debug_merge_point(0, ' #15 LOAD_FAST') -+214: guard_value(i6, 2, descr=) [i6, p1, p0, p2, p3, i4, p5, i7, p10, p12, p14, p16] -+224: guard_nonnull_class(p12, ConstClass(W_IntObject), descr=) [p1, p0, p12, p2, p3, i4, p5, p10, p14, p16] -+242: guard_value(i4, 0, descr=) [i4, p1, p0, p2, p3, p5, p10, p12, p16] ++214: guard_value(i6, 2, descr=) [i6, p1, p0, p2, p3, i4, p5, i7, p10, p12, p14, p16] ++224: guard_nonnull_class(p12, ConstClass(W_IntObject), descr=) [p1, p0, p12, p2, p3, i4, p5, p10, p14, p16] ++242: 
guard_value(i4, 0, descr=) [i4, p1, p0, p2, p3, p5, p10, p12, p16] debug_merge_point(0, ' #18 LOAD_CONST') -+252: guard_value(p3, ConstPtr(ptr21), descr=) [p1, p0, p3, p2, p5, p10, p12, p16] ++252: guard_value(p3, ConstPtr(ptr21), descr=) [p1, p0, p3, p2, p5, p10, p12, p16] debug_merge_point(0, ' #21 COMPARE_OP') +271: i22 = getfield_gc_pure(p12, descr=) +275: i24 = int_lt(i22, 10000) -guard_true(i24, descr=) [p1, p0, p12, p2, p5, p10] +guard_true(i24, descr=) [p1, p0, p12, p2, p5, p10] debug_merge_point(0, ' #24 POP_JUMP_IF_FALSE') debug_merge_point(0, ' #27 LOAD_FAST') debug_merge_point(0, ' #30 LOAD_CONST') debug_merge_point(0, ' #33 BINARY_MODULO') +288: i26 = int_eq(i22, -9223372036854775808) -guard_false(i26, descr=) [p1, p0, p12, i22, p2, p5, p10] +guard_false(i26, descr=) [p1, p0, p12, i22, p2, p5, p10] +307: i28 = int_mod(i22, 2) +334: i30 = int_rshift(i28, 63) +341: i31 = int_and(2, i30) +350: i32 = int_add(i28, i31) debug_merge_point(0, ' #34 POP_JUMP_IF_FALSE') +353: i33 = int_is_true(i32) -guard_false(i33, descr=) [p1, p0, p2, p5, p10, p12, i32] +guard_false(i33, descr=) [p1, p0, p2, p5, p10, p12, i32] debug_merge_point(0, ' #53 LOAD_FAST') -+363: guard_nonnull_class(p10, ConstClass(W_IntObject), descr=) [p1, p0, p10, p2, p5, p12, None] ++363: guard_nonnull_class(p10, ConstClass(W_IntObject), descr=) [p1, p0, p10, p2, p5, p12, None] debug_merge_point(0, ' #56 LOAD_CONST') debug_merge_point(0, ' #59 INPLACE_ADD') +381: i36 = getfield_gc_pure(p10, descr=) +385: i38 = int_add_ovf(i36, 1) -guard_no_overflow(, descr=) [p1, p0, p10, i38, p2, p5, p12, None] +guard_no_overflow(, descr=) [p1, p0, p10, i38, p2, p5, p12, None] debug_merge_point(0, ' #60 STORE_FAST') debug_merge_point(0, ' #63 LOAD_FAST') debug_merge_point(0, ' #66 LOAD_CONST') @@ -346,35 +630,35 @@ +395: i40 = int_add(i22, 1) debug_merge_point(0, ' #70 STORE_FAST') debug_merge_point(0, ' #73 JUMP_ABSOLUTE') -+406: guard_not_invalidated(, descr=) [p1, p0, p2, p5, i40, i38, None] ++406: 
guard_not_invalidated(, descr=) [p1, p0, p2, p5, i38, i40, None] +406: i42 = getfield_raw(44057928, descr=) +414: i44 = int_lt(i42, 0) -guard_false(i44, descr=) [p1, p0, p2, p5, i40, i38, None] +guard_false(i44, descr=) [p1, p0, p2, p5, i38, i40, None] debug_merge_point(0, ' #15 LOAD_FAST') -+424: label(p0, p1, p2, p5, i38, i40, descr=TargetToken(140669174716064)) ++424: label(p0, p1, p2, p5, i38, i40, descr=TargetToken(139951847710640)) debug_merge_point(0, ' #15 LOAD_FAST') debug_merge_point(0, ' #18 LOAD_CONST') debug_merge_point(0, ' #21 COMPARE_OP') +454: i45 = int_lt(i40, 10000) -guard_true(i45, descr=) [p1, p0, p2, p5, i40, i38] +guard_true(i45, descr=) [p1, p0, p2, p5, i38, i40] debug_merge_point(0, ' #24 POP_JUMP_IF_FALSE') debug_merge_point(0, ' #27 LOAD_FAST') debug_merge_point(0, ' #30 LOAD_CONST') debug_merge_point(0, ' #33 BINARY_MODULO') +467: i46 = int_eq(i40, -9223372036854775808) -guard_false(i46, descr=) [p1, p0, i40, p2, p5, None, i38] +guard_false(i46, descr=) [p1, p0, i40, p2, p5, i38, None] +486: i47 = int_mod(i40, 2) +513: i48 = int_rshift(i47, 63) +520: i49 = int_and(2, i48) +528: i50 = int_add(i47, i49) debug_merge_point(0, ' #34 POP_JUMP_IF_FALSE') +531: i51 = int_is_true(i50) -guard_false(i51, descr=) [p1, p0, p2, p5, i50, i40, i38] +guard_false(i51, descr=) [p1, p0, p2, p5, i50, i38, i40] debug_merge_point(0, ' #53 LOAD_FAST') debug_merge_point(0, ' #56 LOAD_CONST') debug_merge_point(0, ' #59 INPLACE_ADD') +541: i52 = int_add_ovf(i38, 1) -guard_no_overflow(, descr=) [p1, p0, i52, p2, p5, None, i40, i38] +guard_no_overflow(, descr=) [p1, p0, i52, p2, p5, None, i38, i40] debug_merge_point(0, ' #60 STORE_FAST') debug_merge_point(0, ' #63 LOAD_FAST') debug_merge_point(0, ' #66 LOAD_CONST') @@ -382,61 +666,61 @@ +558: i53 = int_add(i40, 1) debug_merge_point(0, ' #70 STORE_FAST') debug_merge_point(0, ' #73 JUMP_ABSOLUTE') -+569: guard_not_invalidated(, descr=) [p1, p0, p2, p5, i53, i52, None, None, None] ++569: guard_not_invalidated(, descr=) 
[p1, p0, p2, p5, i53, i52, None, None, None] +569: i54 = getfield_raw(44057928, descr=) +577: i55 = int_lt(i54, 0) -guard_false(i55, descr=) [p1, p0, p2, p5, i53, i52, None, None, None] +guard_false(i55, descr=) [p1, p0, p2, p5, i53, i52, None, None, None] debug_merge_point(0, ' #15 LOAD_FAST') -+587: jump(p0, p1, p2, p5, i52, i53, descr=TargetToken(140669174716064)) ++587: jump(p0, p1, p2, p5, i52, i53, descr=TargetToken(139951847710640)) +592: --end of the loop-- -[19b74c3260a4] jit-log-opt-loop} -[19b74c4a8048] {jit-backend -[19b74c520144] {jit-backend-dump +[b2355c89905] jit-log-opt-loop} +[b2355d4588f] {jit-backend +[b2355d837a3] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bbbf5 +0 488DA50000000049BB6821011BF07F00004D8B234983C40149BB6821011BF07F00004D89234C8BA558FFFFFF498B54241048C740100000000041813C24388F01000F85000000004D8B6424184983FC020F85000000004885D20F8500000000488B9570FFFFFF4C8B6268488B042530255601488D5020483B142548255601761A49BB2DB21B18F07F000041FFD349BBC2B21B18F07F000041FFD3488914253025560148C700F8220000488B9570FFFFFF40C68295000000014C8B8D60FFFFFFF64204017417504151524889D74C89CE41BBF0C4C50041FFD35A4159584C894A50F6420401741D50524889D749BB68162E18F07F00004C89DE41BBF0C4C50041FFD35A5849BB68162E18F07F00004C895A7840C682960000000048C742600000000048C782800000000200000048C742582A00000041F644240401742641F6442404407518504C89E7BE000000004889C241BB50C2C50041FFD358EB0641804C24FF0149894424104889C24883C01048C700F82200004C8B8D30FFFFFF4C89480841F644240401742841F644240440751A52504C89E7BE010000004889C241BB50C2C50041FFD3585AEB0641804C24FF01498944241849C74424200000000049C74424280000000049C7442430000000004C89720848891425B039720141BBD01BF30041FFD3B801000000488D65D8415F415E415D415C5B5DC349BB00B01B18F07F000041FFD344403048086139032500000049BB00B01B18F07F000041FFD344403148086139032600000049BB00B01B18F07F000041FFD34440084861390327000000 -[19b74c52d9da] jit-backend-dump} -[19b74c52f05a] {jit-backend-addr -bridge out of Guard 16 has address 
7ff0181bbbf5 to 7ff0181bbdee -[19b74c5309bc] jit-backend-addr} -[19b74c53156e] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141661bb +0 488DA50000000049BBF8C1FB16497F00004D8B234983C40149BBF8C1FB16497F00004D89234C8BA558FFFFFF498B54241048C740100000000041813C24388F01000F85000000004D8B6424184983FC020F85000000004885D20F8500000000488B9570FFFFFF4C8B6268488B042530255601488D5020483B142548255601761A49BB2D521614497F000041FFD349BBC2521614497F000041FFD3488914253025560148C700F8220000488B9570FFFFFF40C68295000000014C8B8D60FFFFFFF64204017417415150524889D74C89CE41BBF0C4C50041FFD35A5841594C894A50F6420401741D50524889D749BB28BC2814497F00004C89DE41BBF0C4C50041FFD35A5849BB28BC2814497F00004C895A7840C682960000000048C742600000000048C782800000000200000048C742582A00000041F644240401742641F6442404407518504C89E7BE000000004889C241BB50C2C50041FFD358EB0641804C24FF0149894424104889C24883C01048C700F82200004C8B8D30FFFFFF4C89480841F644240401742841F644240440751A52504C89E7BE010000004889C241BB50C2C50041FFD3585AEB0641804C24FF01498944241849C74424200000000049C74424280000000049C7442430000000004C89720848891425B039720141BBD01BF30041FFD3B801000000488D65D8415F415E415D415C5B5DC349BB00501614497F000041FFD344403048083961033E00000049BB00501614497F000041FFD344403148083961033F00000049BB00501614497F000041FFD34440084839610340000000 +[b2355d89d2b] jit-backend-dump} +[b2355d8a315] {jit-backend-addr +bridge out of Guard 41 has address 7f49141661bb to 7f49141663b4 +[b2355d8af37] jit-backend-addr} +[b2355d8b501] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bbbf8 +0 A0FEFFFF -[19b74c532efa] jit-backend-dump} -[19b74c533b84] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141661be +0 A0FEFFFF +[b2355d8bfaf] jit-backend-dump} +[b2355d8c6cd] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bbc38 +0 B2010000 -[19b74c535276] jit-backend-dump} -[19b74c535c00] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141661fe +0 B2010000 +[b2355d8d16f] 
jit-backend-dump} +[b2355d8d599] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bbc47 +0 BC010000 -[19b74c537178] jit-backend-dump} -[19b74c537aa2] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f491416620d +0 BC010000 +[b2355d8dfb7] jit-backend-dump} +[b2355d8e45f] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bbc50 +0 CC010000 -[19b74c539056] jit-backend-dump} -[19b74c539f32] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914166216 +0 CC010000 +[b2355d8ed83] jit-backend-dump} +[b2355d8f3ab] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb5cf +0 22060000 -[19b74c53b552] jit-backend-dump} -[19b74c53c4b2] jit-backend} -[19b74c53de98] {jit-log-opt-bridge -# bridge out of Guard 16 with 28 ops +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165b95 +0 22060000 +[b2355d8fc45] jit-backend-dump} +[b2355d9035b] jit-backend} +[b2355d90e8b] {jit-log-opt-bridge +# bridge out of Guard 41 with 28 ops [p0, p1, p2, i3, i4, i5, p6, p7, i8, i9] debug_merge_point(0, ' #38 POP_BLOCK') +37: p10 = getfield_gc_pure(p7, descr=) +49: setfield_gc(p2, ConstPtr(ptr11), descr=) -+57: guard_class(p7, 38639224, descr=) [p0, p1, p7, p6, p10, i9, i8] ++57: guard_class(p7, 38639224, descr=) [p0, p1, p7, p6, p10, i9, i8] +71: i13 = getfield_gc_pure(p7, descr=) -+76: guard_value(i13, 2, descr=) [p0, p1, i13, p6, p10, i9, i8] ++76: guard_value(i13, 2, descr=) [p0, p1, i13, p6, p10, i9, i8] debug_merge_point(0, ' #39 LOAD_FAST') debug_merge_point(0, ' #42 RETURN_VALUE') -+86: guard_isnull(p10, descr=) [p0, p1, p10, p6, i9, i8] ++86: guard_isnull(p10, descr=) [p0, p1, p10, p6, i9, i8] +95: p15 = getfield_gc(p1, descr=) +106: p16 = getfield_gc(p1, descr=) p18 = new_with_vtable(ConstClass(W_IntObject)) @@ -449,161 +733,161 @@ +300: setfield_gc(p1, 42, descr=) setarrayitem_gc(p15, 0, p18, descr=) p27 = new_with_vtable(ConstClass(W_IntObject)) -+373: setfield_gc(p27, i9, descr=) ++373: setfield_gc(p27, i8, descr=) 
setarrayitem_gc(p15, 1, p27, descr=) +437: setarrayitem_gc(p15, 2, ConstPtr(ptr30), descr=) +446: setarrayitem_gc(p15, 3, ConstPtr(ptr32), descr=) +455: setarrayitem_gc(p15, 4, ConstPtr(ptr32), descr=) -+464: setfield_gc(p18, i8, descr=) ++464: setfield_gc(p18, i9, descr=) +468: finish(p18, descr=) +505: --end of the loop-- -[19b74c584530] jit-log-opt-bridge} -[19b74cf8883a] {jit-backend -[19b74d4b2a6c] {jit-backend-dump +[b2355db3bdd] jit-log-opt-bridge} +[b2356568dd9] {jit-backend +[b2356807229] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bbe87 +0 488DA50000000049BB8021011BF07F0000498B034883C00149BB8021011BF07F0000498903488B8570FFFFFF4C8B780849BBA8CB2D18F07F00004D39DF0F85000000004D8B771049BBC0CB2D18F07F00004D39DE0F850000000041BB201B8D0041FFD34C8B78404C8B70504D85F60F85000000004C8B70284983FE000F85000000004C8B342500D785014981FE201288010F85000000004C8B34254845A0024983FE000F8C0000000048898518FFFFFF488B042530255601488D9048010000483B142548255601761A49BB2DB21B18F07F000041FFD349BBC2B21B18F07F000041FFD3488914253025560148C700488701004889C24881C09800000048C7008800000048C74008050000004989C64883C03848C700F82200004989C54883C01048C700F82200004989C44883C01048C700806300004989C24883C01848C700783600004989C14883C01848C7008800000048C74008000000004989C04883C01048C700508A010048896808488BBD18FFFFFFF6470401741E4151524152415050574889C641BBF0C4C50041FFD35F584158415A5A415948894740488BB570FFFFFF48896E184C897A3049C74508010000004D896E104D89661849C74110400FA10149BB809CFA1AF07F00004D8959084D894A1049C74208010000004D8956204C89726848C742700200000049BBA8CB2D18F07F00004C895A0848C742581300000048C7828000000003000000C782900000001500000049BB68162E18F07F00004C895A7849BBA09CFA1AF07F00004C895A604C89422848899510FFFFFF48898508FFFFFF48C78578FFFFFF280000004889FE4889D749BB06B41B18F07F000041FFD34883F80174154889C7488BB510FFFFFF41BB4091940041FFD3EB23488B8510FFFFFF48C7401800000000488B0425B039720148C70425B0397201000000004883BD78FFFFFF000F8C0000000048833C25A046A002000F8500000000488BB518FFF
FFF488B56504885D20F8500000000488B5628488BBD10FFFFFF48C74750000000004883FA000F8500000000488B56404C8B47304C0FB6B794000000F6460401741B5750524150564889F74C89C641BBF0C4C50041FFD35E41585A585F4C8946404D85F60F85000000004C8BB508FFFFFF49C74608FDFFFFFF8138F82200000F85000000004C8B7008488BB528FFFFFF4C01F60F8000000000488B8520FFFFFF4883C0010F80000000004C8B34254845A0024983FE000F8C0000000049BB9821011BF07F00004D8B334983C60149BB9821011BF07F00004D89334881F8102700000F8D0000000049BB00000000000000804C39D80F8400000000B90200000048898500FFFFFF489948F7F94889D048C1FA3F41BE020000004921D64C01F04883F8000F85000000004889F04883C6010F8000000000488B8500FFFFFF4883C0014C8B34254845A0024983FE000F8C000000004889C34889F049BB88B91B18F07F000041FFE349BB00B01B18F07F000041FFD344003C484C6965032900000049BB00B01B18F07F000041FFD34400383C484C6965032A00000049BB00B01B18F07F000041FFD344003C484C6965032B00000049BB00B01B18F07F000041FFD344400038484C3C156965032C00000049BB00B01B18F07F000041FFD3444000484C3C156965032D00000049BB00B01B18F07F000041FFD3444000484C3C156965032E00000049BB00B01B18F07F000041FFD344400038484C3C156965032F00000049BB00B01B18F07F000041FFD3444000484C3C156965033000000049BB43B01B18F07F000041FFD344406C700074484C6965032800000049BB43B01B18F07F000041FFD344406C700074484C6965033100000049BB00B01B18F07F000041FFD344401800700874484C6965033200000049BB00B01B18F07F000041FFD34440001C1874484C6965033300000049BB00B01B18F07F000041FFD3444000081C1874484C6965033400000049BB00B01B18F07F000041FFD3444000484C6965033500000049BB00B01B18F07F000041FFD344400019484C6965033600000049BB00B01B18F07F000041FFD3444001484C196907033700000049BB00B01B18F07F000041FFD34440484C01190707033800000049BB00B01B18F07F000041FFD34440484C01190707033900000049BB00B01B18F07F000041FFD34440484C1901033A00000049BB00B01B18F07F000041FFD3444001484C1907033B00000049BB00B01B18F07F000041FFD34440484C011979033C00000049BB00B01B18F07F000041FFD3444019484C070179033D00000049BB00B01B18F07F000041FFD34440484C1901070707033E00000049BB00B01B18F07F000041FFD34440484C1901070707033F000000 
-[19b74d4e0462] jit-backend-dump} -[19b74d4e194a] {jit-backend-addr -bridge out of Guard 33 has address 7ff0181bbe87 to 7ff0181bc2ae -[19b74d4e37e6] jit-backend-addr} -[19b74d4e49c2] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f491416644d +0 488DA50000000049BB10C2FB16497F0000498B034883C00149BB10C2FB16497F0000498903488B8570FFFFFF4C8B780849BBA86B2814497F00004D39DF0F85000000004D8B771049BBC06B2814497F00004D39DE0F850000000041BB201B8D0041FFD34C8B78404C8B70504D85F60F85000000004C8B70284983FE000F85000000004C8B342500D785014981FE201288010F85000000004C8B34254845A0024983FE000F8C0000000048898518FFFFFF488B042530255601488D9048010000483B142548255601761A49BB2D521614497F000041FFD349BBC2521614497F000041FFD3488914253025560148C700488701004889C24881C09800000048C7008800000048C74008050000004989C64883C03848C700F82200004989C54883C01048C700F82200004989C44883C01048C700806300004989C24883C01848C700783600004989C14883C01848C7008800000048C74008000000004989C04883C01048C700508A010048896808488BBD18FFFFFFF6470401741E4150524152415150574889C641BBF0C4C50041FFD35F584159415A5A415848894740488BB570FFFFFF48896E184C897A3049C74508010000004D896E104D89661849C74110400FA10149BB80D2F716497F00004D8959084D894A1049C74208010000004D8956204C89726848C742700200000049BBA86B2814497F00004C895A0848C742581300000048C7828000000003000000C782900000001500000049BB28BC2814497F00004C895A7849BBA0D2F716497F00004C895A604C89422848899510FFFFFF48898508FFFFFF48C78578FFFFFF410000004889FE4889D749BBCC591614497F000041FFD34883F80174154889C7488BB510FFFFFF41BB4091940041FFD3EB23488B8510FFFFFF48C7401800000000488B0425B039720148C70425B0397201000000004883BD78FFFFFF000F8C0000000048833C25A046A002000F8500000000488BB518FFFFFF488B56504885D20F8500000000488B5628488BBD10FFFFFF48C74750000000004883FA000F8500000000488B56404C8B47304C0FB6B794000000F6460401741B4150575256504889F74C89C641BBF0C4C50041FFD3585E5A5F41584C8946404D85F60F85000000004C8BB508FFFFFF49C74608FDFFFFFF8138F82200000F85000000004C8B7008488BB528FFFFFF4C01F60F8000000000488B8520FFFFFF4883C0010F80000000
004C8B34254845A0024983FE000F8C0000000049BB28C2FB16497F00004D8B334983C60149BB28C2FB16497F00004D89334881F8102700000F8D0000000049BB00000000000000804C39D80F8400000000B90200000048898500FFFFFF489948F7F94889D048C1FA3F41BE020000004921D64C01F04883F8000F85000000004889F04883C6010F8000000000488B8500FFFFFF4883C0014C8B34254845A0024983FE000F8C000000004889C34889F049BB4E5F1614497F000041FFE349BB00501614497F000041FFD344003C484C6965034200000049BB00501614497F000041FFD34400383C484C6965034300000049BB00501614497F000041FFD344003C484C6965034400000049BB00501614497F000041FFD344400038484C153C6965034500000049BB00501614497F000041FFD3444000484C153C6965034600000049BB00501614497F000041FFD3444000484C153C6965034700000049BB00501614497F000041FFD344400038484C153C6965034800000049BB00501614497F000041FFD3444000484C153C6965034900000049BB43501614497F000041FFD344406C700074484C6965034100000049BB43501614497F000041FFD344406C700074484C6965034A00000049BB00501614497F000041FFD344401800700874484C6965034B00000049BB00501614497F000041FFD34440001C1874484C6965034C00000049BB00501614497F000041FFD3444000081C1874484C6965034D00000049BB00501614497F000041FFD3444000484C6965034E00000049BB00501614497F000041FFD344400019484C6965034F00000049BB00501614497F000041FFD3444001484C196907035000000049BB00501614497F000041FFD34440484C01190707035100000049BB00501614497F000041FFD34440484C01190707035200000049BB00501614497F000041FFD34440484C0119035300000049BB00501614497F000041FFD3444001484C0719035400000049BB00501614497F000041FFD34440484C017919035500000049BB00501614497F000041FFD3444019484C077901035600000049BB00501614497F000041FFD34440484C1901070707035700000049BB00501614497F000041FFD34440484C19010707070358000000 +[b235681f90f] jit-backend-dump} +[b23568201af] {jit-backend-addr +bridge out of Guard 58 has address 7f491416644d to 7f4914166874 +[b2356821005] jit-backend-addr} +[b2356821755] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bbe8a +0 70FEFFFF -[19b74d4e64d4] jit-backend-dump} -[19b74d4e76b6] {jit-backend-dump 
+SYS_EXECUTABLE pypy +CODE_DUMP @7f4914166450 +0 70FEFFFF +[b23568223f1] jit-backend-dump} +[b2356822c65] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bbec6 +0 E4030000 -[19b74d4e8dc0] jit-backend-dump} -[19b74d4e97e6] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f491416648c +0 E4030000 +[b235682369f] jit-backend-dump} +[b2356823b9b] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bbedd +0 E6030000 -[19b74d4ead88] jit-backend-dump} -[19b74d4eb946] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141664a3 +0 E6030000 +[b235682470d] jit-backend-dump} +[b2356824dab] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bbef7 +0 FF030000 -[19b74d4ece6a] jit-backend-dump} -[19b74d4ed7e8] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141664bd +0 FF030000 +[b2356825801] jit-backend-dump} +[b2356825d0d] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bbf05 +0 0D040000 -[19b74d4eeda2] jit-backend-dump} -[19b74d4ef864] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141664cb +0 0D040000 +[b23568265f9] jit-backend-dump} +[b2356826a35] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bbf1a +0 2E040000 -[19b74d4f0ec6] jit-backend-dump} -[19b74d4f18e6] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141664e0 +0 2E040000 +[b23568272f1] jit-backend-dump} +[b23568276e9] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bbf2c +0 38040000 -[19b74d4f2d6e] jit-backend-dump} -[19b74d4f36aa] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141664f2 +0 38040000 +[b2356827fbf] jit-backend-dump} +[b23568284eb] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc131 +0 4E020000 -[19b74d4f4b98] jit-backend-dump} -[19b74d4f54bc] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141666f7 +0 4E020000 +[b2356828fb5] jit-backend-dump} +[b23568294b7] {jit-backend-dump 
BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc140 +0 5B020000 -[19b74d4f693e] jit-backend-dump} -[19b74d4f7382] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914166706 +0 5B020000 +[b2356829f1d] jit-backend-dump} +[b235682a31d] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc154 +0 63020000 -[19b74d4f89c0] jit-backend-dump} -[19b74d4f9428] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f491416671a +0 63020000 +[b235682abd3] jit-backend-dump} +[b235682afd1] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc171 +0 63020000 -[19b74d4fa96a] jit-backend-dump} -[19b74d4fb27c] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914166737 +0 63020000 +[b235682b891] jit-backend-dump} +[b235682bc7b] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc1af +0 41020000 -[19b74d4fc75e] jit-backend-dump} -[19b74d4fd094] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914166775 +0 41020000 +[b235682c6bb] jit-backend-dump} +[b235682cbc9] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc1ca +0 43020000 -[19b74d4fe516] jit-backend-dump} -[19b74d4fee52] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914166790 +0 43020000 +[b235682d631] jit-backend-dump} +[b235682da4b] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc1de +0 48020000 -[19b74d500574] jit-backend-dump} -[19b74d500fca] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141667a4 +0 48020000 +[b235682e301] jit-backend-dump} +[b235682e6eb] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc1ef +0 51020000 -[19b74d502506] jit-backend-dump} -[19b74d5035ce] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141667b5 +0 51020000 +[b235682efbf] jit-backend-dump} +[b235682f7c5] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc201 +0 73020000 -[19b74d504a86] jit-backend-dump} -[19b74d5053d4] 
{jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141667c7 +0 73020000 +[b2356830083] jit-backend-dump} +[b23568304ad] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc22c +0 62020000 -[19b74d5068b0] jit-backend-dump} -[19b74d5071d4] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141667f2 +0 62020000 +[b2356830ee9] jit-backend-dump} +[b23568313b7] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc23f +0 67020000 -[19b74d508686] jit-backend-dump} -[19b74d5090b8] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914166805 +0 67020000 +[b23568343ff] jit-backend-dump} +[b235683496d] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc26d +0 52020000 -[19b74d50a6b4] jit-backend-dump} -[19b74d50afd8] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914166833 +0 52020000 +[b23568353f9] jit-backend-dump} +[b235683589d] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc27a +0 5E020000 -[19b74d50c490] jit-backend-dump} -[19b74d50ce1a] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914166840 +0 5E020000 +[b2356836179] jit-backend-dump} +[b23568365f5] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc297 +0 76020000 -[19b74d50e308] jit-backend-dump} -[19b74d50f0fa] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f491416685d +0 76020000 +[b2356836ff7] jit-backend-dump} +[b235683759d] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb9f9 +0 8A040000 -[19b74d5105ca] jit-backend-dump} -[19b74d511830] jit-backend} -[19b74d513bdc] {jit-log-opt-bridge -# bridge out of Guard 33 with 138 ops +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165fbf +0 8A040000 +[b235683802b] jit-backend-dump} +[b235683892d] jit-backend} +[b235683997d] {jit-log-opt-bridge +# bridge out of Guard 58 with 138 ops [p0, p1, p2, p3, i4, i5, i6] debug_merge_point(0, ' #37 LOAD_FAST') debug_merge_point(0, ' #40 LOAD_GLOBAL') +37: p7 
= getfield_gc(p1, descr=) -+48: guard_value(p7, ConstPtr(ptr8), descr=) [p0, p1, p7, p2, p3, i5, i6] ++48: guard_value(p7, ConstPtr(ptr8), descr=) [p0, p1, p7, p2, p3, i6, i5] +67: p9 = getfield_gc(p7, descr=) -+71: guard_value(p9, ConstPtr(ptr10), descr=) [p0, p1, p9, p7, p2, p3, i5, i6] -+90: guard_not_invalidated(, descr=) [p0, p1, p7, p2, p3, i5, i6] ++71: guard_value(p9, ConstPtr(ptr10), descr=) [p0, p1, p9, p7, p2, p3, i6, i5] ++90: guard_not_invalidated(, descr=) [p0, p1, p7, p2, p3, i6, i5] debug_merge_point(0, ' #43 CALL_FUNCTION') +90: p12 = call(ConstClass(getexecutioncontext), descr=) +99: p13 = getfield_gc(p12, descr=) +103: i14 = force_token() +103: p15 = getfield_gc(p12, descr=) -+107: guard_isnull(p15, descr=) [p0, p1, p12, p15, p2, p3, p13, i14, i5, i6] ++107: guard_isnull(p15, descr=) [p0, p1, p12, p15, p2, p3, i14, p13, i6, i5] +116: i16 = getfield_gc(p12, descr=) +120: i17 = int_is_zero(i16) -guard_true(i17, descr=) [p0, p1, p12, p2, p3, p13, i14, i5, i6] +guard_true(i17, descr=) [p0, p1, p12, p2, p3, i14, p13, i6, i5] debug_merge_point(1, ' #0 LOAD_CONST') debug_merge_point(1, ' #3 STORE_FAST') debug_merge_point(1, ' #6 SETUP_LOOP') debug_merge_point(1, ' #9 LOAD_GLOBAL') -+130: guard_not_invalidated(, descr=) [p0, p1, p12, p2, p3, p13, i14, i5, i6] ++130: guard_not_invalidated(, descr=) [p0, p1, p12, p2, p3, i14, p13, i6, i5] +130: p19 = getfield_gc(ConstPtr(ptr18), descr=) -+138: guard_value(p19, ConstPtr(ptr20), descr=) [p0, p1, p12, p19, p2, p3, p13, i14, i5, i6] ++138: guard_value(p19, ConstPtr(ptr20), descr=) [p0, p1, p12, p19, p2, p3, i14, p13, i6, i5] debug_merge_point(1, ' #12 LOAD_CONST') debug_merge_point(1, ' #15 CALL_FUNCTION') debug_merge_point(1, ' #18 GET_ITER') @@ -616,7 +900,7 @@ debug_merge_point(1, ' #35 JUMP_ABSOLUTE') +151: i22 = getfield_raw(44057928, descr=) +159: i24 = int_lt(i22, 0) -guard_false(i24, descr=) [p0, p1, p12, p2, p3, p13, i14, i5, i6] +guard_false(i24, descr=) [p0, p1, p12, p2, p3, i14, p13, i6, i5] 
debug_merge_point(1, ' #19 FOR_ITER') +169: i25 = force_token() p27 = new_with_vtable(38637192) @@ -648,64 +932,64 @@ +534: setfield_gc(p27, ConstPtr(ptr53), descr=) +548: setfield_gc(p27, ConstPtr(ptr54), descr=) +562: setfield_gc(p27, p39, descr=) -+566: p55 = call_assembler(p27, p12, descr=) -guard_not_forced(, descr=) [p0, p1, p12, p27, p55, p41, p2, p3, i5, i6] ++566: p55 = call_assembler(p27, p12, descr=) +guard_not_forced(, descr=) [p0, p1, p12, p27, p55, p41, p2, p3, i6, i5] +686: keepalive(p27) -+686: guard_no_exception(, descr=) [p0, p1, p12, p27, p55, p41, p2, p3, i5, i6] ++686: guard_no_exception(, descr=) [p0, p1, p12, p27, p55, p41, p2, p3, i6, i5] +701: p56 = getfield_gc(p12, descr=) -+712: guard_isnull(p56, descr=) [p0, p1, p12, p55, p27, p56, p41, p2, p3, i5, i6] ++712: guard_isnull(p56, descr=) [p0, p1, p12, p55, p27, p56, p41, p2, p3, i6, i5] +721: i57 = getfield_gc(p12, descr=) +725: setfield_gc(p27, ConstPtr(ptr58), descr=) +740: i59 = int_is_true(i57) -guard_false(i59, descr=) [p0, p1, p55, p27, p12, p41, p2, p3, i5, i6] +guard_false(i59, descr=) [p0, p1, p55, p27, p12, p41, p2, p3, i6, i5] +750: p60 = getfield_gc(p12, descr=) +754: p61 = getfield_gc(p27, descr=) +758: i62 = getfield_gc(p27, descr=) setfield_gc(p12, p61, descr=) -+803: guard_false(i62, descr=) [p0, p1, p55, p60, p27, p12, p41, p2, p3, i5, i6] ++803: guard_false(i62, descr=) [p0, p1, p55, p60, p27, p12, p41, p2, p3, i6, i5] debug_merge_point(0, ' #46 INPLACE_ADD') +812: setfield_gc(p41, -3, descr=) -+827: guard_class(p55, ConstClass(W_IntObject), descr=) [p0, p1, p55, p2, p3, i5, i6] ++827: guard_class(p55, ConstClass(W_IntObject), descr=) [p0, p1, p55, p2, p3, i6, i5] +839: i65 = getfield_gc_pure(p55, descr=) -+843: i66 = int_add_ovf(i6, i65) -guard_no_overflow(, descr=) [p0, p1, p55, i66, p2, p3, i5, i6] ++843: i66 = int_add_ovf(i5, i65) +guard_no_overflow(, descr=) [p0, p1, p55, i66, p2, p3, i6, i5] debug_merge_point(0, ' #47 STORE_FAST') debug_merge_point(0, ' #50 
JUMP_FORWARD') debug_merge_point(0, ' #63 LOAD_FAST') debug_merge_point(0, ' #66 LOAD_CONST') debug_merge_point(0, ' #69 INPLACE_ADD') -+859: i68 = int_add_ovf(i5, 1) -guard_no_overflow(, descr=) [p0, p1, i68, p2, p3, i66, i5, None] ++859: i68 = int_add_ovf(i6, 1) +guard_no_overflow(, descr=) [p0, p1, i68, p2, p3, i66, i6, None] debug_merge_point(0, ' #70 STORE_FAST') debug_merge_point(0, ' #73 JUMP_ABSOLUTE') -+876: guard_not_invalidated(, descr=) [p0, p1, p2, p3, i68, i66, None, None] ++876: guard_not_invalidated(, descr=) [p0, p1, p2, p3, i68, i66, None, None] +876: i71 = getfield_raw(44057928, descr=) +884: i73 = int_lt(i71, 0) -guard_false(i73, descr=) [p0, p1, p2, p3, i68, i66, None, None] +guard_false(i73, descr=) [p0, p1, p2, p3, i68, i66, None, None] debug_merge_point(0, ' #15 LOAD_FAST') -+894: label(p1, p0, p2, p3, i66, i68, descr=TargetToken(140669174718064)) ++894: label(p1, p0, p2, p3, i66, i68, descr=TargetToken(139951894596208)) debug_merge_point(0, ' #18 LOAD_CONST') debug_merge_point(0, ' #21 COMPARE_OP') +924: i75 = int_lt(i68, 10000) -guard_true(i75, descr=) [p0, p1, p2, p3, i66, i68] +guard_true(i75, descr=) [p0, p1, p2, p3, i68, i66] debug_merge_point(0, ' #24 POP_JUMP_IF_FALSE') debug_merge_point(0, ' #27 LOAD_FAST') debug_merge_point(0, ' #30 LOAD_CONST') debug_merge_point(0, ' #33 BINARY_MODULO') +937: i77 = int_eq(i68, -9223372036854775808) -guard_false(i77, descr=) [p0, p1, i68, p2, p3, i66, None] +guard_false(i77, descr=) [p0, p1, i68, p2, p3, None, i66] +956: i79 = int_mod(i68, 2) +973: i81 = int_rshift(i79, 63) +980: i82 = int_and(2, i81) +989: i83 = int_add(i79, i82) debug_merge_point(0, ' #34 POP_JUMP_IF_FALSE') +992: i84 = int_is_true(i83) -guard_false(i84, descr=) [p0, p1, p2, p3, i83, i66, i68] +guard_false(i84, descr=) [p0, p1, p2, p3, i83, i68, i66] debug_merge_point(0, ' #53 LOAD_FAST') debug_merge_point(0, ' #56 LOAD_CONST') debug_merge_point(0, ' #59 INPLACE_ADD') +1002: i86 = int_add_ovf(i66, 1) -guard_no_overflow(, descr=) 
[p0, p1, i86, p2, p3, None, i66, i68] +guard_no_overflow(, descr=) [p0, p1, i86, p2, p3, None, i68, i66] debug_merge_point(0, ' #60 STORE_FAST') debug_merge_point(0, ' #63 LOAD_FAST') debug_merge_point(0, ' #66 LOAD_CONST') @@ -713,156 +997,156 @@ +1015: i88 = int_add(i68, 1) debug_merge_point(0, ' #70 STORE_FAST') debug_merge_point(0, ' #73 JUMP_ABSOLUTE') -+1026: guard_not_invalidated(, descr=) [p0, p1, p2, p3, i86, i88, None, None, None] ++1026: guard_not_invalidated(, descr=) [p0, p1, p2, p3, i86, i88, None, None, None] +1026: i90 = getfield_raw(44057928, descr=) +1034: i92 = int_lt(i90, 0) -guard_false(i92, descr=) [p0, p1, p2, p3, i86, i88, None, None, None] +guard_false(i92, descr=) [p0, p1, p2, p3, i86, i88, None, None, None] debug_merge_point(0, ' #15 LOAD_FAST') -+1044: jump(p1, p0, p2, p3, i86, i88, descr=TargetToken(140669174716064)) ++1044: jump(p1, p0, p2, p3, i86, i88, descr=TargetToken(139951847710640)) +1063: --end of the loop-- -[19b74d610f80] jit-log-opt-bridge} -[19b74d82de0e] {jit-backend-dump +[b23568a15d7] jit-log-opt-bridge} +[b2356998697] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bb976 +0 E9A1010000 -[19b74d83255a] jit-backend-dump} -[19b74d8331c0] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165f3c +0 E9A1010000 +[b235699a901] jit-backend-dump} +[b235699ae9b] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bba19 +0 E994010000 -[19b74d834be2] jit-backend-dump} -[19b74d83567a] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914165fdf +0 E994010000 +[b235699bb83] jit-backend-dump} +[b235699c09f] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bbee1 +0 E9FB030000 -[19b74d836ff4] jit-backend-dump} -[19b74d837a3e] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141664a7 +0 E9FB030000 +[b235699cac5] jit-backend-dump} +[b235699ceb3] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bbf09 +0 E923040000 
-[19b74d83903a] jit-backend-dump} -[19b74d839994] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141664cf +0 E923040000 +[b235699d7cd] jit-backend-dump} +[b235699dc07] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc1f3 +0 E966020000 -[19b74d83b03e] jit-backend-dump} -[19b74d83b9b6] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141667b9 +0 E966020000 +[b23569a56f1] jit-backend-dump} +[b23569a5cb9] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc289 +0 E968020000 -[19b74d83d02a] jit-backend-dump} -[19b74df097c2] {jit-backend -[19b74e070346] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f491416684f +0 E968020000 +[b23569a66d5] jit-backend-dump} +[b2356d6a1b4] {jit-backend +[b2356e2c6af] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc5a0 +0 488B04254045A0024829E0483B0425E03C5101760D49BB63B31B18F07F000041FFD3554889E5534154415541564157488DA50000000049BBB021011BF07F00004D8B3B4983C70149BBB021011BF07F00004D893B4C8B7F504C8B77784C0FB6AF960000004C8B67604C8B97800000004C8B4F584C8B4768498B5810498B5018498B4020498B48284889BD70FFFFFF498B78304C89BD68FFFFFF4D8B783848898D60FFFFFF498B48404D8B40484889B558FFFFFF4C89A550FFFFFF4C898D48FFFFFF48899D40FFFFFF48899538FFFFFF48898530FFFFFF4C89BD28FFFFFF48898D20FFFFFF4C898518FFFFFF49BBC821011BF07F00004D8B034983C00149BBC821011BF07F00004D89034983FA050F8500000000813F806300000F85000000004C8B57104D85D20F84000000004C8B4708498B4A108139582D03000F85000000004D8B5208498B4A084D8B7A104D8B52184983F8000F8C000000004D39D00F8D000000004C89C04D0FAFC74889CA4C01C14883C001488947084983FD000F850000000049BBB81A2E18F07F00004D39DE0F85000000004C8BB570FFFFFF4D8B6E0849BBA8CB2D18F07F00004D39DD0F85000000004D8B451049BBC0CB2D18F07F00004D39D80F85000000004C8B2C2500D785014981FD201288010F850000000048899510FFFFFF48898D08FFFFFF48898500FFFFFF4889BDF8FEFFFF4C8995F0FEFFFF4889CF41BBA01FEF0041FFD348833C25A046A002000F85000000004C8B9560FFFFFF498B7A10813FF0CE01000F8500000000498B7A08488B4
F084889CA4883C101488985E8FEFFFF4889BDE0FEFFFF488995D8FEFFFF4889CE41BB9029790041FFD348833C25A046A002000F8500000000488B95E0FEFFFF488B7A104C8B95D8FEFFFF488B8DE8FEFFFFF64704017431F6470440751B57415252514C89D64889CA41BB50C2C50041FFD3595A415A5FEB10415249C1EA074983F2F84C0FAB17415A4A894CD710488B0C254845A0024883F9000F8C0000000049BBE021011BF07F00004D8B334983C60149BBE021011BF07F00004D89334C8BB500FFFFFF4C3BB5F0FEFFFF0F8D000000004D0FAFF7488B8D10FFFFFF4C01F14C8BB500FFFFFF4983C6014C8B95F8FEFFFF4D89720848898D08FFFFFF488995D0FEFFFF4889CF41BBA01FEF0041FFD348833C25A046A002000F85000000004C8B95D0FEFFFF498B52084889D14883C201488985C8FEFFFF48898DC0FEFFFF4C89D74889D641BB9029790041FFD348833C25A046A002000F8500000000488B95D0FEFFFF488B4A104C8B95C0FEFFFF488B85C8FEFFFFF64104017434F6410440751E51415252504889CF4C89D64889C241BB50C2C50041FFD3585A415A59EB10415249C1EA074983F2F84C0FAB11415A4A8944D110488B04254845A0024883F8000F8C000000004C89B500FFFFFFE9CDFEFFFF49BB00B01B18F07F000041FFD3294C404438355055585C60481C64686C034000000049BB00B01B18F07F000041FFD34C401C44383550585C604864686C034100000049BB00B01B18F07F000041FFD34C401C2844383550585C604864686C034200000049BB00B01B18F07F000041FFD34C401C21042844383550585C604864686C034300000049BB00B01B18F07F000041FFD34C401C21293D0544383550585C604864686C034400000049BB00B01B18F07F000041FFD34C401C213D0544383550585C604864686C034500000049BB00B01B18F07F000041FFD3354C40443850585C60481C686C05034600000049BB00B01B18F07F000041FFD34C403844505C60481C686C05034700000049BB00B01B18F07F000041FFD34C383444505C60481C686C05034800000049BB00B01B18F07F000041FFD34C38203444505C60481C686C05034900000049BB00B01B18F07F000041FFD34C383444505C60481C686C05034A00000049BB00B01B18F07F000041FFD34C383444505C60481C686C05034B00000049BB43B01B18F07F000041FFD34C380044505C60487C6C75034C00000049BB00B01B18F07F000041FFD34C381C2844505C607C6C0075034D00000049BB43B01B18F07F000041FFD34C388D018401880144505C60487C6C0775034E00000049BB00B01B18F07F000041FFD34C3844505C60487C6C0775034F00000049BB00B01B18F07F000041FFD34C407C393D7144505C6
0486C75035000000049BB00B01B18F07F000041FFD34C4044505C6048286C0507035100000049BB43B01B18F07F000041FFD34C400044505C60487C6C7507035200000049BB43B01B18F07F000041FFD34C4099019401900144505C60487C6C7507035300000049BB00B01B18F07F000041FFD34C4044505C60487C6C75070354000000 -[19b74e09e4ec] jit-backend-dump} -[19b74e09f902] {jit-backend-addr -Loop 2 ( #13 FOR_ITER) has address 7ff0181bc5d6 to 7ff0181bc9ad (bootstrap 7ff0181bc5a0) -[19b74e0a1c66] jit-backend-addr} -[19b74e0a2caa] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914166b60 +0 488B04254045A0024829E0483B0425E03C5101760D49BB63531614497F000041FFD3554889E5534154415541564157488DA50000000049BB40C2FB16497F00004D8B3B4983C70149BB40C2FB16497F00004D893B4C8B7F504C8B77784C0FB6AF960000004C8B67604C8B97800000004C8B4F584C8B4768498B5810498B5018498B4020498B48284C89BD70FFFFFF4D8B783048898D68FFFFFF498B483848899560FFFFFF498B50404D8B40484889B558FFFFFF4C89A550FFFFFF4C898D48FFFFFF48899D40FFFFFF48898538FFFFFF48898D30FFFFFF48899528FFFFFF4C898520FFFFFF49BB58C2FB16497F00004D8B034983C00149BB58C2FB16497F00004D89034983FA050F850000000041813F806300000F85000000004D8B57104D85D20F84000000004D8B4708498B5210813A582D03000F85000000004D8B5208498B5208498B4A104D8B52184983F8000F8C000000004D39D00F8D000000004C89C04C0FAFC14889D34C01C24883C001498947084983FD000F850000000049BB98BD2814497F00004D39DE0F85000000004C8B770849BBA86B2814497F00004D39DE0F85000000004D8B6E1049BBC06B2814497F00004D39DD0F85000000004C8B342500D785014981FE201288010F850000000048898518FFFFFF4889BD10FFFFFF4C899508FFFFFF48898D00FFFFFF488995F8FEFFFF4889D741BBA01FEF0041FFD348833C25A046A002000F8500000000488B9568FFFFFF488B4A108139F0CE01000F8500000000488B4A084C8B51084C89D74983C2014889BDF0FEFFFF488985E8FEFFFF48898DE0FEFFFF4889CF4C89D641BB9029790041FFD348833C25A046A002000F8500000000488B8DE0FEFFFF488B5110488B85F0FEFFFF4C8B95E8FEFFFFF64204017432F6420440751E51415252504889D74889C64C89D241BB50C2C50041FFD3585A415A59EB0E5048C1E8074883F0F8480FAB02584C8954C2104C8B14254845A0024983FA000F8C0000000049BB70C2FB16497F000
04D8B134983C20149BB70C2FB16497F00004D89134C8B9518FFFFFF4C3B9508FFFFFF0F8D000000004C0FAF9500FFFFFF4889D84C01D34C8B9518FFFFFF4983C2014D895708488985D8FEFFFF4C8995D0FEFFFF48898DC8FEFFFF4889DF41BBA01FEF0041FFD348833C25A046A002000F8500000000488B8DC8FEFFFF4C8B51084C89D24983C201488985C0FEFFFF488995B8FEFFFF4889CF4C89D641BB9029790041FFD348833C25A046A002000F8500000000488B95C8FEFFFF488B4A10488B85B8FEFFFF4C8B95C0FEFFFFF64104017432F6410440751E50524152514889CF4889C64C89D241BB50C2C50041FFD359415A5A58EB0E5048C1E8074883F0F8480FAB01584C8954C1104C8B14254845A0024983FA000F8C0000000048899DF8FEFFFF4C8B9DD0FEFFFF4C899D18FFFFFF488B9DD8FEFFFF4889D1E9B7FEFFFF49BB00501614497F000041FFD3294C1C403835505558485C443C606468035900000049BB00501614497F000041FFD34C1C3C4038355058485C44606468035A00000049BB00501614497F000041FFD34C1C3C284038355058485C44606468035B00000049BB00501614497F000041FFD34C1C3C2108284038355058485C44606468035C00000049BB00501614497F000041FFD34C1C3C212905094038355058485C44606468035D00000049BB00501614497F000041FFD34C1C3C2105094038355058485C44606468035E00000049BB00501614497F000041FFD3354C1C40385058485C443C646809035F00000049BB00501614497F000041FFD34C1C384050485C443C646809036000000049BB00501614497F000041FFD34C1C384050485C443C646809036100000049BB00501614497F000041FFD34C1C34384050485C443C646809036200000049BB00501614497F000041FFD34C1C384050485C443C646809036300000049BB00501614497F000041FFD34C1C384050485C443C646809036400000049BB43501614497F000041FFD34C70004050485C443C687D036500000049BB00501614497F000041FFD34C7004084050485C3C68007D036600000049BB43501614497F000041FFD34C708101840188014050485C443C68077D036700000049BB00501614497F000041FFD34C704050485C443C68077D036800000049BB00501614497F000041FFD34C703C29790D4050485C44687D036900000049BB00501614497F000041FFD34C704050485C443C680D07036A00000049BB43501614497F000041FFD34C70004050485C443C680D07036B00000049BB43501614497F000041FFD34C709D01980194014050485C443C680D07036C00000049BB00501614497F000041FFD34C704050485C443C680D07036D000000 +[b2356e39eb9] 
jit-backend-dump} +[b2356e3a639] {jit-backend-addr +Loop 4 ( #13 FOR_ITER) has address 7f4914166b96 to 7f4914166f7a (bootstrap 7f4914166b60) +[b2356e3b5e7] jit-backend-addr} +[b2356e3bf81] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc5d2 +0 C0FEFFFF -[19b74e0a48d0] jit-backend-dump} -[19b74e0a5a8e] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914166b92 +0 B0FEFFFF +[b2356e45271] jit-backend-dump} +[b2356e45eed] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc6af +0 FA020000 -[19b74e0a72ee] jit-backend-dump} -[19b74e0a7d80] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914166c68 +0 0E030000 +[b2356e46a71] jit-backend-dump} +[b2356e471f7] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc6bb +0 10030000 -[19b74e0a93b8] jit-backend-dump} -[19b74e0a9d0c] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914166c75 +0 23030000 +[b2356e47b97] jit-backend-dump} +[b2356e47f99] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc6c8 +0 23030000 -[19b74e0ab278] jit-backend-dump} -[19b74e0abbfc] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914166c82 +0 36030000 +[b2356e4885b] jit-backend-dump} +[b2356e48c61] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc6dc +0 30030000 -[19b74e0ad36c] jit-backend-dump} -[19b74e0addda] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914166c96 +0 43030000 +[b2356e49505] jit-backend-dump} +[b2356e498e9] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc6f6 +0 39030000 -[19b74e0af3e8] jit-backend-dump} -[19b74e0afd36] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914166cb0 +0 4C030000 +[b2356e4a391] jit-backend-dump} +[b2356e4a879] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc6ff +0 54030000 -[19b74e0b5e1a] jit-backend-dump} -[19b74e0b6972] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914166cb9 
+0 67030000 +[b2356e4b295] jit-backend-dump} +[b2356e4b6a5] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc71e +0 58030000 -[19b74e0b8070] jit-backend-dump} -[19b74e0b8a9c] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914166cd8 +0 6B030000 +[b2356e4bf31] jit-backend-dump} +[b2356e4c315] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc731 +0 65030000 -[19b74e0ba1ca] jit-backend-dump} -[19b74e0babf0] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914166ceb +0 78030000 +[b2356e4cbb9] jit-backend-dump} +[b2356e4cfaf] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc74f +0 65030000 -[19b74e0bc360] jit-backend-dump} -[19b74e0bcd8c] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914166d02 +0 7F030000 +[b2356e4d827] jit-backend-dump} +[b2356e4dd37] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc766 +0 6C030000 -[19b74e0be2da] jit-backend-dump} -[19b74e0bee14] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914166d19 +0 86030000 +[b2356e4e7d9] jit-backend-dump} +[b2356e4ee51] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc77b +0 94030000 -[19b74e0c03c2] jit-backend-dump} -[19b74e0c0d10] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914166d2e +0 AE030000 +[b2356e4f7ed] jit-backend-dump} +[b2356e4fbcd] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc7b9 +0 74030000 -[19b74e0c2240] jit-backend-dump} -[19b74e0c2cc0] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914166d6c +0 8E030000 +[b2356e50461] jit-backend-dump} +[b2356e50831] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc7d0 +0 7A030000 -[19b74e0c4370] jit-backend-dump} -[19b74e0c4d84] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914166d83 +0 94030000 +[b2356e510c9] jit-backend-dump} +[b2356e514c1] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP 
@7ff0181bc80f +0 59030000 -[19b74e0c63e6] jit-backend-dump} -[19b74e0c6de8] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914166dc5 +0 70030000 +[b2356e51de9] jit-backend-dump} +[b2356e522e5] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc876 +0 15030000 -[19b74e0c8348] jit-backend-dump} -[19b74e0c8c78] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914166e2d +0 2B030000 +[b2356e52cef] jit-backend-dump} +[b2356e531b7] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc8a8 +0 00030000 -[19b74e0ca19c] jit-backend-dump} -[19b74e0cab8c] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914166e5f +0 16030000 +[b2356e53a4b] jit-backend-dump} +[b2356e53e9b] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc8f5 +0 EF020000 -[19b74e0cc0f2] jit-backend-dump} -[19b74e0ccb42] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914166eac +0 05030000 +[b2356e54735] jit-backend-dump} +[b2356e54b39] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc933 +0 CF020000 -[19b74e0ce22e] jit-backend-dump} -[19b74e0cec4e] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914166eea +0 E5020000 +[b2356e553c9] jit-backend-dump} +[b2356e558d7] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc99d +0 88020000 -[19b74e0d02a4] jit-backend-dump} -[19b74e0d16ae] jit-backend} -[19b74e0d53ec] {jit-log-opt-loop -# Loop 2 ( #13 FOR_ITER) : loop with 100 ops +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914166f52 +0 A0020000 +[b2356e5635d] jit-backend-dump} +[b2356e57001] jit-backend} +[b2356e5939d] {jit-log-opt-loop +# Loop 4 ( #13 FOR_ITER) : loop with 100 ops [p0, p1] +84: p2 = getfield_gc(p0, descr=) +88: p3 = getfield_gc(p0, descr=) @@ -880,643 +1164,643 @@ +157: p22 = getarrayitem_gc(p8, 6, descr=) +168: p24 = getarrayitem_gc(p8, 7, descr=) +172: p25 = getfield_gc(p0, descr=) -+172: label(p0, p1, p2, p3, i4, p5, i6, i7, p10, p12, p14, p16, p18, 
p20, p22, p24, descr=TargetToken(140669221669808)) ++172: label(p0, p1, p2, p3, i4, p5, i6, i7, p10, p12, p14, p16, p18, p20, p22, p24, descr=TargetToken(139951894599248)) debug_merge_point(0, ' #13 FOR_ITER') -+265: guard_value(i6, 5, descr=) [i6, p1, p0, p2, p3, i4, p5, i7, p10, p12, p14, p16, p18, p20, p22, p24] -+275: guard_class(p18, 38562496, descr=) [p1, p0, p18, p2, p3, i4, p5, p10, p12, p14, p16, p20, p22, p24] -+287: p28 = getfield_gc(p18, descr=) -+291: guard_nonnull(p28, descr=) [p1, p0, p18, p28, p2, p3, i4, p5, p10, p12, p14, p16, p20, p22, p24] -+300: i29 = getfield_gc(p18, descr=) -+304: p30 = getfield_gc(p28, descr=) -+308: guard_class(p30, 38745240, descr=) [p1, p0, p18, i29, p30, p28, p2, p3, i4, p5, p10, p12, p14, p16, p20, p22, p24] -+320: p32 = getfield_gc(p28, descr=) -+324: i33 = getfield_gc_pure(p32, descr=) -+328: i34 = getfield_gc_pure(p32, descr=) -+332: i35 = getfield_gc_pure(p32, descr=) -+336: i37 = int_lt(i29, 0) -guard_false(i37, descr=) [p1, p0, p18, i29, i35, i34, i33, p2, p3, i4, p5, p10, p12, p14, p16, p20, p22, p24] -+346: i38 = int_ge(i29, i35) -guard_false(i38, descr=) [p1, p0, p18, i29, i34, i33, p2, p3, i4, p5, p10, p12, p14, p16, p20, p22, p24] -+355: i39 = int_mul(i29, i34) -+362: i40 = int_add(i33, i39) -+368: i42 = int_add(i29, 1) -+372: setfield_gc(p18, i42, descr=) -+376: guard_value(i4, 0, descr=) [i4, p1, p0, p2, p3, p5, p10, p12, p14, p16, p18, p22, p24, i40] ++258: guard_value(i6, 5, descr=) [i6, p1, p0, p2, p3, i4, p5, i7, p10, p12, p14, p16, p18, p20, p22, p24] ++268: guard_class(p18, 38562496, descr=) [p1, p0, p18, p2, p3, i4, p5, p10, p12, p14, p16, p20, p22, p24] ++281: p28 = getfield_gc(p18, descr=) ++285: guard_nonnull(p28, descr=) [p1, p0, p18, p28, p2, p3, i4, p5, p10, p12, p14, p16, p20, p22, p24] ++294: i29 = getfield_gc(p18, descr=) ++298: p30 = getfield_gc(p28, descr=) ++302: guard_class(p30, 38745240, descr=) [p1, p0, p18, i29, p30, p28, p2, p3, i4, p5, p10, p12, p14, p16, p20, p22, p24] ++314: p32 = 
getfield_gc(p28, descr=) ++318: i33 = getfield_gc_pure(p32, descr=) ++322: i34 = getfield_gc_pure(p32, descr=) ++326: i35 = getfield_gc_pure(p32, descr=) ++330: i37 = int_lt(i29, 0) +guard_false(i37, descr=) [p1, p0, p18, i29, i35, i34, i33, p2, p3, i4, p5, p10, p12, p14, p16, p20, p22, p24] ++340: i38 = int_ge(i29, i35) +guard_false(i38, descr=) [p1, p0, p18, i29, i34, i33, p2, p3, i4, p5, p10, p12, p14, p16, p20, p22, p24] ++349: i39 = int_mul(i29, i34) ++356: i40 = int_add(i33, i39) ++362: i42 = int_add(i29, 1) ++366: setfield_gc(p18, i42, descr=) ++370: guard_value(i4, 0, descr=) [i4, p1, p0, p2, p3, p5, p10, p12, p14, p16, p18, p22, p24, i40] debug_merge_point(0, ' #16 STORE_FAST') debug_merge_point(0, ' #19 LOAD_GLOBAL') -+386: guard_value(p3, ConstPtr(ptr44), descr=) [p1, p0, p3, p2, p5, p12, p14, p16, p18, p22, p24, i40] -+405: p45 = getfield_gc(p0, descr=) -+416: guard_value(p45, ConstPtr(ptr46), descr=) [p1, p0, p45, p2, p5, p12, p14, p16, p18, p22, p24, i40] -+435: p47 = getfield_gc(p45, descr=) -+439: guard_value(p47, ConstPtr(ptr48), descr=) [p1, p0, p47, p45, p2, p5, p12, p14, p16, p18, p22, p24, i40] -+458: guard_not_invalidated(, descr=) [p1, p0, p45, p2, p5, p12, p14, p16, p18, p22, p24, i40] -+458: p50 = getfield_gc(ConstPtr(ptr49), descr=) -+466: guard_value(p50, ConstPtr(ptr51), descr=) [p1, p0, p50, p2, p5, p12, p14, p16, p18, p22, p24, i40] ++380: guard_value(p3, ConstPtr(ptr44), descr=) [p1, p0, p3, p2, p5, p12, p14, p16, p18, p22, p24, i40] ++399: p45 = getfield_gc(p0, descr=) ++403: guard_value(p45, ConstPtr(ptr46), descr=) [p1, p0, p45, p2, p5, p12, p14, p16, p18, p22, p24, i40] ++422: p47 = getfield_gc(p45, descr=) ++426: guard_value(p47, ConstPtr(ptr48), descr=) [p1, p0, p47, p45, p2, p5, p12, p14, p16, p18, p22, p24, i40] ++445: guard_not_invalidated(, descr=) [p1, p0, p45, p2, p5, p12, p14, p16, p18, p22, p24, i40] ++445: p50 = getfield_gc(ConstPtr(ptr49), descr=) ++453: guard_value(p50, ConstPtr(ptr51), descr=) [p1, p0, p50, p2, p5, 
p12, p14, p16, p18, p22, p24, i40] debug_merge_point(0, ' #22 LOAD_FAST') debug_merge_point(0, ' #25 CALL_FUNCTION') -+479: p53 = call(ConstClass(ll_int_str__IntegerR_SignedConst_Signed), i40, descr=) -+526: guard_no_exception(, descr=) [p1, p0, p53, p2, p5, p12, p14, p16, p18, p24, i40] ++466: p53 = call(ConstClass(ll_int_str__IntegerR_SignedConst_Signed), i40, descr=) ++513: guard_no_exception(, descr=) [p1, p0, p53, p2, p5, p12, p14, p16, p18, p24, i40] debug_merge_point(0, ' #28 LIST_APPEND') -+541: p54 = getfield_gc(p16, descr=) -+552: guard_class(p54, 38655536, descr=) [p1, p0, p54, p16, p2, p5, p12, p14, p18, p24, p53, i40] -+564: p56 = getfield_gc(p16, descr=) -+568: i57 = getfield_gc(p56, descr=) -+572: i59 = int_add(i57, 1) -+579: p60 = getfield_gc(p56, descr=) -+579: i61 = arraylen_gc(p60, descr=) -+579: call(ConstClass(_ll_list_resize_ge_trampoline__v575___simple_call__function__), p56, i59, descr=) -+612: guard_no_exception(, descr=) [p1, p0, i57, p53, p56, p2, p5, p12, p14, p16, p18, p24, None, i40] -+627: p64 = getfield_gc(p56, descr=) ++528: p54 = getfield_gc(p16, descr=) ++539: guard_class(p54, 38655536, descr=) [p1, p0, p54, p16, p2, p5, p12, p14, p18, p24, p53, i40] ++551: p56 = getfield_gc(p16, descr=) ++555: i57 = getfield_gc(p56, descr=) ++559: i59 = int_add(i57, 1) ++566: p60 = getfield_gc(p56, descr=) ++566: i61 = arraylen_gc(p60, descr=) ++566: call(ConstClass(_ll_list_resize_ge_trampoline__v575___simple_call__function__), p56, i59, descr=) ++602: guard_no_exception(, descr=) [p1, p0, i57, p53, p56, p2, p5, p12, p14, p16, p18, p24, None, i40] ++617: p64 = getfield_gc(p56, descr=) setarrayitem_gc(p64, i57, p53, descr=) debug_merge_point(0, ' #31 JUMP_ABSOLUTE') -+712: i66 = getfield_raw(44057928, descr=) -+720: i68 = int_lt(i66, 0) -guard_false(i68, descr=) [p1, p0, p2, p5, p12, p14, p16, p18, p24, None, i40] ++703: i66 = getfield_raw(44057928, descr=) ++711: i68 = int_lt(i66, 0) +guard_false(i68, descr=) [p1, p0, p2, p5, p12, p14, p16, p18, 
p24, None, i40] debug_merge_point(0, ' #13 FOR_ITER') -+730: p69 = same_as(ConstPtr(ptr48)) -+730: label(p0, p1, p2, p5, i40, p12, p14, p16, p18, p24, i42, i35, i34, i33, p56, descr=TargetToken(140669221669888)) ++721: p69 = same_as(ConstPtr(ptr48)) ++721: label(p0, p1, p2, p5, i40, p12, p14, p16, p18, p24, i42, i35, i34, i33, p56, descr=TargetToken(139951894599328)) debug_merge_point(0, ' #13 FOR_ITER') -+760: i70 = int_ge(i42, i35) -guard_false(i70, descr=) [p1, p0, p18, i42, i34, i33, p2, p5, p12, p14, p16, p24, i40] -+780: i71 = int_mul(i42, i34) -+784: i72 = int_add(i33, i71) -+794: i73 = int_add(i42, 1) ++751: i70 = int_ge(i42, i35) +guard_false(i70, descr=) [p1, p0, p18, i42, i34, i33, p2, p5, p12, p14, p16, p24, i40] ++771: i71 = int_mul(i42, i34) ++779: i72 = int_add(i33, i71) ++785: i73 = int_add(i42, 1) debug_merge_point(0, ' #16 STORE_FAST') debug_merge_point(0, ' #19 LOAD_GLOBAL') -+805: setfield_gc(p18, i73, descr=) -+816: guard_not_invalidated(, descr=) [p1, p0, p2, p5, p12, p14, p16, p18, p24, i72, None] ++796: setfield_gc(p18, i73, descr=) ++800: guard_not_invalidated(, descr=) [p1, p0, p2, p5, p12, p14, p16, p18, p24, i72, None] debug_merge_point(0, ' #22 LOAD_FAST') debug_merge_point(0, ' #25 CALL_FUNCTION') -+816: p74 = call(ConstClass(ll_int_str__IntegerR_SignedConst_Signed), i72, descr=) -+842: guard_no_exception(, descr=) [p1, p0, p74, p2, p5, p12, p14, p16, p18, p24, i72, None] ++800: p74 = call(ConstClass(ll_int_str__IntegerR_SignedConst_Signed), i72, descr=) ++833: guard_no_exception(, descr=) [p1, p0, p74, p2, p5, p12, p14, p16, p18, p24, i72, None] debug_merge_point(0, ' #28 LIST_APPEND') -+857: i75 = getfield_gc(p56, descr=) -+868: i76 = int_add(i75, 1) -+875: p77 = getfield_gc(p56, descr=) -+875: i78 = arraylen_gc(p77, descr=) -+875: call(ConstClass(_ll_list_resize_ge_trampoline__v575___simple_call__function__), p56, i76, descr=) -+904: guard_no_exception(, descr=) [p1, p0, i75, p74, p56, p2, p5, p12, p14, p16, p18, p24, i72, None] 
-+919: p79 = getfield_gc(p56, descr=) ++848: i75 = getfield_gc(p56, descr=) ++859: i76 = int_add(i75, 1) ++866: p77 = getfield_gc(p56, descr=) ++866: i78 = arraylen_gc(p77, descr=) ++866: call(ConstClass(_ll_list_resize_ge_trampoline__v575___simple_call__function__), p56, i76, descr=) ++895: guard_no_exception(, descr=) [p1, p0, i75, p74, p56, p2, p5, p12, p14, p16, p18, p24, i72, None] ++910: p79 = getfield_gc(p56, descr=) setarrayitem_gc(p79, i75, p74, descr=) debug_merge_point(0, ' #31 JUMP_ABSOLUTE') -+1007: i80 = getfield_raw(44057928, descr=) -+1015: i81 = int_lt(i80, 0) -guard_false(i81, descr=) [p1, p0, p2, p5, p12, p14, p16, p18, p24, i72, None] ++996: i80 = getfield_raw(44057928, descr=) ++1004: i81 = int_lt(i80, 0) +guard_false(i81, descr=) [p1, p0, p2, p5, p12, p14, p16, p18, p24, i72, None] debug_merge_point(0, ' #13 FOR_ITER') -+1025: jump(p0, p1, p2, p5, i72, p12, p14, p16, p18, p24, i73, i35, i34, i33, p56, descr=TargetToken(140669221669888)) -+1037: --end of the loop-- -[19b74e1b18e0] jit-log-opt-loop} -[19b74eab5a20] {jit-backend -[19b74eae2108] {jit-backend-dump ++1014: jump(p0, p1, p2, p5, i72, p12, p14, p16, p18, p24, i73, i35, i34, i33, p56, descr=TargetToken(139951894599328)) ++1050: --end of the loop-- +[b2356ec920e] jit-log-opt-loop} +[b235731c717] {jit-backend +[b2357338c53] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bcc46 +0 488B04254045A0024829E0483B0425E03C5101760D49BB63B31B18F07F000041FFD3554889E5534154415541564157488DA50000000049BBF821011BF07F00004D8B3B4983C70149BBF821011BF07F00004D893B4C8B7E404D0FB67C3F184983FF330F85000000004989FF4883C70148897E1848C74620000000004C897E28B80100000048890425D0D1550141BBD01BF30041FFD3B802000000488D65D8415F415E415D415C5B5DC349BB00B01B18F07F000041FFD31D180355000000 -[19b74eae9cb0] jit-backend-dump} -[19b74eaea97c] {jit-backend-addr -Loop 3 (re StrLiteralSearch at 11/51 [17, 8, 3, 1, 1, 1, 1, 51, 0, 19, 51, 1]) has address 7ff0181bcc7c to 7ff0181bccef (bootstrap 7ff0181bcc46) 
-[19b74eaec6b0] jit-backend-addr} -[19b74eaed3f4] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167213 +0 488B04254045A0024829E0483B0425E03C5101760D49BB63531614497F000041FFD3554889E5534154415541564157488DA50000000049BB88C2FB16497F00004D8B3B4983C70149BB88C2FB16497F00004D893B4C8B7E404D0FB67C3F184983FF330F85000000004989FF4883C70148897E1848C74620000000004C897E28B80100000048890425D0D1550141BBD01BF30041FFD3B802000000488D65D8415F415E415D415C5B5DC349BB00501614497F000041FFD31D18036E000000 +[b235733cd55] jit-backend-dump} +[b235733d271] {jit-backend-addr +Loop 5 (re StrLiteralSearch at 11/51 [17, 8, 3, 1, 1, 1, 1, 51, 0, 19, 51, 1]) has address 7f4914167249 to 7f49141672bc (bootstrap 7f4914167213) +[b235733de81] jit-backend-addr} +[b235733e473] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bcc78 +0 70FFFFFF -[19b74eaeec1e] jit-backend-dump} -[19b74eaef7dc] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167245 +0 70FFFFFF +[b235733ef61] jit-backend-dump} +[b235733f6d1] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bccaa +0 41000000 -[19b74eaf0e74] jit-backend-dump} -[19b74eaf1d38] jit-backend} -[19b74eaf5b18] {jit-log-opt-loop -# Loop 3 (re StrLiteralSearch at 11/51 [17, 8, 3, 1, 1, 1, 1, 51, 0, 19, 51, 1]) : entry bridge with 10 ops +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167277 +0 41000000 +[b235733ffb7] jit-backend-dump} +[b23573406b3] jit-backend} +[b2357342487] {jit-log-opt-loop +# Loop 5 (re StrLiteralSearch at 11/51 [17, 8, 3, 1, 1, 1, 1, 51, 0, 19, 51, 1]) : entry bridge with 10 ops [i0, p1] debug_merge_point(0, 're StrLiteralSearch at 11/51 [17. 8. 3. 1. 1. 1. 1. 51. 0. 19. 51. 
1]') +84: p2 = getfield_gc(p1, descr=) +88: i3 = strgetitem(p2, i0) +94: i5 = int_eq(i3, 51) -guard_true(i5, descr=) [i0, p1] +guard_true(i5, descr=) [i0, p1] +104: i7 = int_add(i0, 1) +111: setfield_gc(p1, i7, descr=) +115: setfield_gc(p1, ConstPtr(ptr8), descr=) +123: setfield_gc(p1, i0, descr=) +127: finish(1, descr=) +169: --end of the loop-- -[19b74eb165f0] jit-log-opt-loop} -[19b74f266f5c] {jit-backend -[19b74f2961fa] {jit-backend-dump +[b2357354d2b] jit-log-opt-loop} +[b23577c8a9f] {jit-backend +[b23577deaef] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bcd03 +0 488DA50000000049BB1022011BF07F00004D8B3B4983C70149BB1022011BF07F00004D893B4883C7014C8B7E084C39FF0F8D000000004C8B76404D0FB6743E184983FE330F84000000004883C7014C39FF0F8C00000000B80000000048890425D0D1550141BBD01BF30041FFD3B802000000488D65D8415F415E415D415C5B5DC349BB00B01B18F07F000041FFD31D18035600000049BB00B01B18F07F000041FFD31D18035700000049BB00B01B18F07F000041FFD31D180358000000 -[19b74f29dd12] jit-backend-dump} -[19b74f29e99c] {jit-backend-addr -bridge out of Guard 85 has address 7ff0181bcd03 to 7ff0181bcd84 -[19b74f2a03dc] jit-backend-addr} -[19b74f2a114a] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141672d0 +0 488DA50000000049BBA0C2FB16497F00004D8B3B4983C70149BBA0C2FB16497F00004D893B4883C7014C8B7E084C39FF0F8D000000004C8B76404D0FB6743E184983FE330F84000000004883C7014C39FF0F8C00000000B80000000048890425D0D1550141BBD01BF30041FFD3B802000000488D65D8415F415E415D415C5B5DC349BB00501614497F000041FFD31D18036F00000049BB00501614497F000041FFD31D18037000000049BB00501614497F000041FFD31D180371000000 +[b23577e2449] jit-backend-dump} +[b23577e2951] {jit-backend-addr +bridge out of Guard 110 has address 7f49141672d0 to 7f4914167351 +[b23577e34f3] jit-backend-addr} +[b23577e3a33] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bcd06 +0 70FFFFFF -[19b74f2a2c20] jit-backend-dump} -[19b74f2a388c] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP 
@7f49141672d3 +0 70FFFFFF +[b23577e44c1] jit-backend-dump} +[b23577e4b31] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bcd35 +0 4B000000 -[19b74f2a4ed6] jit-backend-dump} -[19b74f2a5878] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167302 +0 4B000000 +[b23577e54d9] jit-backend-dump} +[b23577e58e7] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bcd49 +0 4B000000 -[19b74f2a6d54] jit-backend-dump} -[19b74f2a7798] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167316 +0 4B000000 +[b23577e620b] jit-backend-dump} +[b23577e65f9] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bcd56 +0 52000000 -[19b74f2a8de8] jit-backend-dump} -[19b74f2a9a3c] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167323 +0 52000000 +[b23577e6ee5] jit-backend-dump} +[b23577e74ab] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bccaa +0 55000000 -[19b74f2aafe4] jit-backend-dump} -[19b74f2abf92] jit-backend} -[19b74f2ad546] {jit-log-opt-bridge -# bridge out of Guard 85 with 13 ops +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167277 +0 55000000 +[b23577e7dd7] jit-backend-dump} +[b23577e8497] jit-backend} +[b23577e8eb1] {jit-log-opt-bridge +# bridge out of Guard 110 with 13 ops [i0, p1] +37: i3 = int_add(i0, 1) +41: i4 = getfield_gc_pure(p1, descr=) +45: i5 = int_lt(i3, i4) -guard_true(i5, descr=) [i3, p1] +guard_true(i5, descr=) [i3, p1] debug_merge_point(0, 're StrLiteralSearch at 11/51 [17. 8. 3. 1. 1. 1. 1. 51. 0. 19. 51. 
1]') +54: p6 = getfield_gc(p1, descr=) +58: i7 = strgetitem(p6, i3) +64: i9 = int_eq(i7, 51) -guard_false(i9, descr=) [i3, p1] +guard_false(i9, descr=) [i3, p1] +74: i11 = int_add(i3, 1) +78: i12 = int_lt(i11, i4) -guard_false(i12, descr=) [i11, p1] +guard_false(i12, descr=) [i11, p1] +87: finish(0, descr=) +129: --end of the loop-- -[19b74f2c70b8] jit-log-opt-bridge} -[19b74f9ae658] {jit-backend -[19b74f9d0d20] {jit-backend-dump +[b23577f427d] jit-log-opt-bridge} +[b2357ae4bf9] {jit-backend +[b2357af5e29] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bcdc0 +0 488DA50000000049BB2822011BF07F00004D8B3B4983C70149BB2822011BF07F00004D893B4C8B7E404D0FB67C3F184983FF330F84000000004883C7014C8B7E084C39FF0F8C00000000B80000000048890425D0D1550141BBD01BF30041FFD3B802000000488D65D8415F415E415D415C5B5DC349BB00B01B18F07F000041FFD31D18035900000049BB00B01B18F07F000041FFD31D18035A000000 -[19b74f9d77d6] jit-backend-dump} -[19b74f9d8352] {jit-backend-addr -bridge out of Guard 88 has address 7ff0181bcdc0 to 7ff0181bce34 -[19b74f9d9924] jit-backend-addr} -[19b74f9da500] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f491416738d +0 488DA50000000049BBB8C2FB16497F00004D8B3B4983C70149BBB8C2FB16497F00004D893B4C8B7E404D0FB67C3F184983FF330F84000000004883C7014C8B7E084C39FF0F8C00000000B80000000048890425D0D1550141BBD01BF30041FFD3B802000000488D65D8415F415E415D415C5B5DC349BB00501614497F000041FFD31D18037200000049BB00501614497F000041FFD31D180373000000 +[b2357af91d7] jit-backend-dump} +[b2357af9671] {jit-backend-addr +bridge out of Guard 113 has address 7f491416738d to 7f4914167401 +[b2357af9fe3] jit-backend-addr} +[b2357afa547] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bcdc3 +0 70FFFFFF -[19b74f9dbdfc] jit-backend-dump} -[19b74f9dca92] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167390 +0 70FFFFFF +[b2357afaff9] jit-backend-dump} +[b2357afb599] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bcdf5 
+0 3B000000 -[19b74f9de1f0] jit-backend-dump} -[19b74f9dec04] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141673c2 +0 3B000000 +[b2357afc05f] jit-backend-dump} +[b2357afc493] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bce06 +0 3E000000 -[19b74f9e027e] jit-backend-dump} -[19b74f9e0e18] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141673d3 +0 3E000000 +[b2357afcf2f] jit-backend-dump} +[b2357afd4b9] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bcd56 +0 66000000 -[19b74f9e2324] jit-backend-dump} -[19b74f9e308c] jit-backend} -[19b74f9e42e6] {jit-log-opt-bridge -# bridge out of Guard 88 with 10 ops +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167323 +0 66000000 +[b2357b04fa1] jit-backend-dump} +[b2357b0571b] jit-backend} +[b2357b0611d] {jit-log-opt-bridge +# bridge out of Guard 113 with 10 ops [i0, p1] debug_merge_point(0, 're StrLiteralSearch at 11/51 [17. 8. 3. 1. 1. 1. 1. 51. 0. 19. 51. 1]') +37: p2 = getfield_gc(p1, descr=) +41: i3 = strgetitem(p2, i0) +47: i5 = int_eq(i3, 51) -guard_false(i5, descr=) [i0, p1] +guard_false(i5, descr=) [i0, p1] +57: i7 = int_add(i0, 1) +61: i8 = getfield_gc_pure(p1, descr=) +65: i9 = int_lt(i7, i8) -guard_false(i9, descr=) [i7, p1] +guard_false(i9, descr=) [i7, p1] +74: finish(0, descr=) +116: --end of the loop-- -[19b74fa083e8] jit-log-opt-bridge} -[19b75013d436] {jit-backend -[19b75015049e] {jit-backend-dump +[b2357b0f3f9] jit-log-opt-bridge} +[b2357e450c3] {jit-backend +[b2357e4ebbd] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bce5c +0 488DA50000000049BB4022011BF07F0000498B334883C60149BB4022011BF07F0000498933B80000000048890425D0D1550141BBD01BF30041FFD3B802000000488D65D8415F415E415D415C5B5DC3 -[19b75015543c] jit-backend-dump} -[19b750155e80] {jit-backend-addr -bridge out of Guard 86 has address 7ff0181bce5c to 7ff0181bceab -[19b750157272] jit-backend-addr} -[19b750157db8] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP 
@7f4914167429 +0 488DA50000000049BBD0C2FB16497F0000498B334883C60149BBD0C2FB16497F0000498933B80000000048890425D0D1550141BBD01BF30041FFD3B802000000488D65D8415F415E415D415C5B5DC3 +[b2357e51471] jit-backend-dump} +[b2357e518f1] {jit-backend-addr +bridge out of Guard 111 has address 7f4914167429 to 7f4914167478 +[b2357e521b1] jit-backend-addr} +[b2357e526f9] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bce5f +0 70FFFFFF -[19b75015985e] jit-backend-dump} -[19b75015a566] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f491416742c +0 70FFFFFF +[b2357e53241] jit-backend-dump} +[b2357e538b1] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bcd35 +0 23010000 -[19b75015bc76] jit-backend-dump} -[19b75015c996] jit-backend} -[19b75015db8a] {jit-log-opt-bridge -# bridge out of Guard 86 with 1 ops +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167302 +0 23010000 +[b2357e54323] jit-backend-dump} +[b2357e54939] jit-backend} +[b2357e5514d] {jit-log-opt-bridge +# bridge out of Guard 111 with 1 ops [i0, p1] +37: finish(0, descr=) +79: --end of the loop-- -[19b750163242] jit-log-opt-bridge} -[19b751eb804a] {jit-backend -[19b7521ede4c] {jit-backend-dump +[b2357e57999] jit-log-opt-bridge} +[b2358dd6121] {jit-backend +[b2358f664ff] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd093 +0 
488B04254045A0024829E0483B0425E03C5101760D49BB63B31B18F07F000041FFD3554889E5534154415541564157488DA50000000049BB5822011BF07F00004D8B3B4983C70149BB5822011BF07F00004D893B4C8B7F504C8B77784C0FB6AF960000004C8B67604C8B97800000004C8B4F584C8B4768498B5810498B5018498B4020498B48284C89BD70FFFFFF4D8B783048899D68FFFFFF498B58384889BD60FFFFFF498B78404D8B40484889B558FFFFFF4C89A550FFFFFF4C898D48FFFFFF48899540FFFFFF48898538FFFFFF4C89BD30FFFFFF48899D28FFFFFF4889BD20FFFFFF4C898518FFFFFF49BB7022011BF07F00004D8B034983C00149BB7022011BF07F00004D89034983FA040F85000000008139806300000F85000000004C8B51104D85D20F84000000004C8B4108498B7A10813FF0CE01000F85000000004D8B5208498B7A084939F80F83000000004D8B52104F8B54C2104D85D20F84000000004983C0014C8941084983FD000F850000000049BBB81A2E18F07F00004D39DE0F85000000004C8BB560FFFFFF4D8B6E0849BBA8CB2D18F07F00004D39DD0F85000000004D8B451049BBC0CB2D18F07F00004D39D80F850000000049BB58C53018F07F00004D8B2B49BB60C53018F07F00004D39DD0F850000000048898D10FFFFFF4C899508FFFFFF41BB201B8D0041FFD34C8B5040488B48504885C90F8500000000488B48284883F9000F850000000049BBB0D73018F07F0000498B0B4883F9000F8F00000000488B0C2500D785014881F9201288010F850000000049BB88C53018F07F0000498B0B813910E001000F850000000049BB80C53018F07F0000498B0B48898500FFFFFF488B042530255601488D5040483B142548255601761A49BB2DB21B18F07F000041FFD349BBC2B21B18F07F000041FFD3488914253025560148C7008800000048C74008030000004889C24883C02848C700508A0100488968084C8BAD00FFFFFF41F6450401741950524152514C89EF4889C641BBF0C4C50041FFD359415A5A584989454049896E1848C7421060CE830149BBA0453018F07F00004C895A1849BBA0523018F07F00004C895A2048898DF8FEFFFF4C8995F0FEFFFF488995E8FEFFFF488985E0FEFFFF48C78578FFFFFF5B0000004889D741BB3036920041FFD34883BD78FFFFFF000F8C0000000048833C25A046A002000F8500000000488985D8FEFFFF488B042530255601488D5010483B142548255601761A49BB2DB21B18F07F000041FFD349BBC2B21B18F07F000041FFD3488914253025560148C700E0300000488B9560FFFFFF48896A184C8BADE8FEFFFF4C896808488985D0FEFFFF48C78578FFFFFF5C000000488BBDF8FEFFFF4889C6488B95D8FEFFFF41
BBA02E790041FFD34883BD78FFFFFF000F8C0000000048833C25A046A002000F85000000004889C249BB00000000000000804C21D84883F8000F8500000000488B85F8FEFFFF488B4018486BD218488B5410184883FA017206813AB0EB03000F85000000004881FAC02C72010F8400000000488B8500FFFFFF4C8B68504D85ED0F85000000004C8B68284983FD000F85000000004C8BADE0FEFFFF49C74508FDFFFFFF4C8BAD08FFFFFF4D8B751049BBFFFFFFFFFFFFFF7F4D39DE0F8D000000004C8B5210488B4A184D8B42104983F8110F85000000004D8B42204C89C74983E0014983F8000F8400000000498B7A384883FF010F8F00000000498B7A184883C7014D8B44FA104983F8130F85000000004989F84883C701498B7CFA104983C0024983FE000F8E000000004983F80B0F85000000004883FF330F850000000049BB10CBFC1AF07F00004D39DA0F8500000000488995C8FEFFFF488B042530255601488D5060483B142548255601761A49BB2DB21B18F07F000041FFD349BBC2B21B18F07F000041FFD3488914253025560148C700D00001004889C24883C04848C700508A0100488968084C8B9500FFFFFF41F6420401741951525041524C89D74889C641BBF0C4C50041FFD3415A585A5949894240488BBD60FFFFFF48896F1849BB10CBFC1AF07F00004C895A3848894A104C8972084C896A40488985C0FEFFFF488995B8FEFFFF48C78578FFFFFF5D000000BF000000004889D649BB46CC1B18F07F000041FFD34883F80274134889C7BE0000000041BB7053950041FFD3EB08488B0425D0D155014883BD78FFFFFF000F8C0000000048833C25A046A002000F85000000004885C00F8500000000488B8500FFFFFF488B78504885FF0F8500000000488B78284883FF000F8500000000488B95F0FEFFFFF640040174155750524889C74889D641BBF0C4C50041FFD35A585F488950404C8B95C0FEFFFF49C74208FDFFFFFF4C8B14254845A0024983FA000F8C0000000049BB8822011BF07F00004D8B134983C20149BB8822011BF07F00004D89134C8B9510FFFFFF4D8B6A104D85ED0F84000000004D8B7208498B4D108139F0CE01000F85000000004D8B6D08498B4D084939CE0F83000000004D8B6D104F8B6CF5104D85ED0F84000000004983C601488B8D60FFFFFF4C8B41084D89720849BBA8CB2D18F07F00004D39D80F85000000004D8B701049BBC0CB2D18F07F00004D39DE0F850000000049BB58C53018F07F00004D8B0349BB60C53018F07F00004D39D80F85000000004883FF000F850000000049BBB0D73018F07F0000498B3B4883FF000F8F00000000488B3C2500D785014881FF201288010F850000000049BB88C53018F07F0000498B3B813F10E001000F
850000000049BB80C53018F07F0000498B3B488985B0FEFFFF488995A8FEFFFF488B042530255601488D5040483B142548255601761A49BB2DB21B18F07F000041FFD349BBC2B21B18F07F000041FFD3488914253025560148C7008800000048C74008030000004889C24883C02848C700508A0100488968084C8B85B0FEFFFF41F6400401741F50415252515741504C89C74889C641BBF0C4C50041FFD341585F595A415A58498940404889691848C7421060CE830149BBA0453018F07F00004C895A1849BBA0523018F07F00004C895A204889BDA0FEFFFF48899598FEFFFF48898590FEFFFF4C89AD08FFFFFF48C78578FFFFFF5E0000004889D741BB3036920041FFD34883BD78FFFFFF000F8C0000000048833C25A046A002000F850000000048898588FEFFFF488B042530255601488D5010483B142548255601761A49BB2DB21B18F07F000041FFD349BBC2B21B18F07F000041FFD3488914253025560148C700E0300000488B9560FFFFFF48896A184C8BAD98FEFFFF4C89680848898580FEFFFF48C78578FFFFFF5F000000488BBDA0FEFFFF4889C6488B9588FEFFFF41BBA02E790041FFD34883BD78FFFFFF000F8C0000000048833C25A046A002000F85000000004889C249BB00000000000000804C21D84883F8000F8500000000488B85A0FEFFFF488B4018486BD218488B5410184883FA017206813AB0EB03000F85000000004881FAC02C72010F8400000000488B85B0FEFFFF4C8B68504D85ED0F85000000004C8B68284983FD000F85000000004C8BAD90FEFFFF49C74508FDFFFFFF4C8BAD08FFFFFF4D8B551049BBFFFFFFFFFFFFFF7F4D39DA0F8D00000000488B4A10488B7A184C8B41104983F8110F85000000004C8B41204D89C64983E0014983F8000F84000000004C8B71384983FE010F8F000000004C8B71184983C6014E8B44F1104983F8130F85000000004D89F04983C6014E8B74F1104983C0024983FA000F8E000000004983F80B0F85000000004983FE330F850000000049BB10CBFC1AF07F00004C39D90F850000000048899578FEFFFF488B042530255601488D5060483B142548255601761A49BB2DB21B18F07F000041FFD349BBC2B21B18F07F000041FFD3488914253025560148C700D00001004889C24883C04848C700508A010048896808488B8DB0FEFFFFF6410401741B5741525052514889CF4889C641BBF0C4C50041FFD3595A58415A5F488941404C8BB560FFFFFF49896E1849BB10CBFC1AF07F00004C895A3848897A104C8952084C896A4048899570FEFFFF48898568FEFFFF48C78578FFFFFF60000000BF000000004889D649BB46CC1B18F07F000041FFD34883F80274134889C7BE0000000041BB7053950041FFD3EB08488B0425
D0D155014883BD78FFFFFF000F8C0000000048833C25A046A002000F85000000004885C00F8500000000488B85B0FEFFFF4C8B68504D85ED0F85000000004C8B68284983FD000F85000000004C8BB5A8FEFFFFF64004017411504889C74C89F641BBF0C4C50041FFD3584C897040488B9568FEFFFF48C74208FDFFFFFF488B14254845A0024883FA000F8C000000004C89EF4C89F2E96EFAFFFF49BB00B01B18F07F000041FFD3294C48403835505544585C046064686C036100000049BB00B01B18F07F000041FFD34C48044038355044585C6064686C036200000049BB00B01B18F07F000041FFD34C4804284038355044585C6064686C036300000049BB00B01B18F07F000041FFD34C4804211C284038355044585C6064686C036400000049BB00B01B18F07F000041FFD34C4804211D284038355044585C6064686C036500000049BB00B01B18F07F000041FFD34C480421284038355044585C6064686C036600000049BB00B01B18F07F000041FFD3354C4840385044585C0464686C28036700000049BB00B01B18F07F000041FFD34C4838405044580464686C28036800000049BB00B01B18F07F000041FFD34C3834405044580464686C28036900000049BB00B01B18F07F000041FFD34C382034405044580464686C28036A00000049BB00B01B18F07F000041FFD34C3834405044580464686C28036B00000049BB00B01B18F07F000041FFD34C3834405044580464686C28036C00000049BB00B01B18F07F000041FFD34C3800044050445870152874036D00000049BB00B01B18F07F000041FFD34C38004050445870152874036E00000049BB00B01B18F07F000041FFD34C38004050445870152874036F00000049BB00B01B18F07F000041FFD34C3800054050445870152874037000000049BB00B01B18F07F000041FFD34C380004405044587015152874037100000049BB00B01B18F07F000041FFD34C380004405044587015152874037200000049BB43B01B18F07F000041FFD34C48787C0188014050445870157484018001035B00000049BB43B01B18F07F000041FFD34C48787C0188014050445870157484018001037300000049BB43B01B18F07F000041FFD34C48789001017C8801405044587015748001035C00000049BB43B01B18F07F000041FFD34C48789001017C8801405044587015748001037400000049BB00B01B18F07F000041FFD34C48789001097C8801405044587015748001037500000049BB00B01B18F07F000041FFD34C48789001088801405044587015748001037600000049BB00B01B18F07F000041FFD34C48788801405044587008900115748001037700000049BB00B01B18F07F000041FFD34C48000834880140504458700790011574
8001037800000049BB00B01B18F07F000041FFD34C4800088801405044587007900115748001037900000049BB00B01B18F07F000041FFD34C48004050445870080715748001037A00000049BB00B01B18F07F000041FFD34C480008344050445870070715078001037B00000049BB00B01B18F07F000041FFD34C4800084050445870390528070715348001037C00000049BB00B01B18F07F000041FFD34C4800081D4050445870390528070715348001037D00000049BB00B01B18F07F000041FFD34C4800084050445870390528070715348001037E00000049BB00B01B18F07F000041FFD34C4800081D4050445870390528070715348001037F00000049BB00B01B18F07F000041FFD34C4800081D214050445870390528070715348001038000000049BB00B01B18F07F000041FFD34C4800081D21284050445870390507070715348001038100000049BB00B01B18F07F000041FFD34C4800081D284050445870390507070715348001038200000049BB00B01B18F07F000041FFD34C480008284050445870390507070715348001038300000049BB43B01B18F07F000041FFD34C48789C0194010198014050445870748001035D00000049BB43B01B18F07F000041FFD34C48789C0194010198014050445870748001038400000049BB00B01B18F07F000041FFD34C48789C01940198014050445870748001038500000049BB00B01B18F07F000041FFD34C48001C98014050445870748001038600000049BB00B01B18F07F000041FFD34C480098014050445870748001038700000049BB00B01B18F07F000041FFD34C4840504458707407038800000049BB00B01B18F07F000041FFD34C4840504458707407038900000049BB00B01B18F07F000041FFD34C4828344050445874038A00000049BB00B01B18F07F000041FFD34C48283904344050445874038B00000049BB00B01B18F07F000041FFD34C48283905344050445874038C00000049BB00B01B18F07F000041FFD34C482839344050445874038D00000049BB00B01B18F07F000041FFD34C042040504458283407038E00000049BB00B01B18F07F000041FFD34C04382040504458283407038F00000049BB00B01B18F07F000041FFD34C042040504458283407039000000049BB00B01B18F07F000041FFD34C042040504458283407039100000049BB00B01B18F07F000041FFD34C0400405044582808153407039200000049BB00B01B18F07F000041FFD34C04001D405044582808153407039300000049BB00B01B18F07F000041FFD34C04001C40504458281508153407039400000049BB00B01B18F07F000041FFD34C04001C40504458281508153407039500000049BB43B01B18F07F000041FFD34C48A001A8
0101B0014050445870AC01A4011574035E00000049BB43B01B18F07F000041FFD34C48A001A80101B0014050445870AC01A4011574039600000049BB43B01B18F07F000041FFD34C48A001B80101A801B0014050445870A4011574035F00000049BB43B01B18F07F000041FFD34C48A001B80101A801B0014050445870A4011574039700000049BB00B01B18F07F000041FFD34C48A001B80109A801B0014050445870A4011574039800000049BB00B01B18F07F000041FFD34C48A001B80108B0014050445870A4011574039900000049BB00B01B18F07F000041FFD34C48A001B001405044587008B801A4011574039A00000049BB00B01B18F07F000041FFD34C48000834B001405044587007B801A4011574039B00000049BB00B01B18F07F000041FFD34C480008B001405044587007B801A4011574039C00000049BB00B01B18F07F000041FFD34C480040504458700807A4011574039D00000049BB00B01B18F07F000041FFD34C4800083440504458700707A4011507039E00000049BB00B01B18F07F000041FFD34C4800084050445870291D040707A4011534039F00000049BB00B01B18F07F000041FFD34C480008394050445870291D040707A401153403A000000049BB00B01B18F07F000041FFD34C4800084050445870291D040707A401153403A100000049BB00B01B18F07F000041FFD34C480008394050445870291D040707A401153403A200000049BB00B01B18F07F000041FFD34C48000839214050445870291D040707A401153403A300000049BB00B01B18F07F000041FFD34C4800083921044050445870291D070707A401153403A400000049BB00B01B18F07F000041FFD34C48000839044050445870291D070707A401153403A500000049BB00B01B18F07F000041FFD34C480008044050445870291D070707A401153403A600000049BB43B01B18F07F000041FFD34C48A001C001BC0101C401405044587074A401036000000049BB43B01B18F07F000041FFD34C48A001C001BC0101C401405044587074A40103A700000049BB00B01B18F07F000041FFD34C48A001C001BC01C401405044587074A40103A800000049BB00B01B18F07F000041FFD34C480034C401405044587074A40103A900000049BB00B01B18F07F000041FFD34C4800C401405044587074A40103AA00000049BB00B01B18F07F000041FFD34C484050445870740703AB00000049BB00B01B18F07F000041FFD34C484050445870740703AC000000 -[19b752258484] jit-backend-dump} -[19b752259e10] {jit-backend-addr -Loop 4 ( #44 FOR_ITER) has address 7ff0181bd0c9 to 7ff0181bdce5 (bootstrap 7ff0181bd093) -[19b75225c912] 
jit-backend-addr} -[19b75225db5a] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f491416765f +0 488B04254045A0024829E0483B0425E03C5101760D49BB63531614497F000041FFD3554889E5534154415541564157488DA50000000049BBE8C2FB16497F00004D8B3B4983C70149BBE8C2FB16497F00004D893B4C8B7F504C8B77784C0FB6AF960000004C8B67604C8B97800000004C8B4F584C8B4768498B5810498B5018498B4020498B48284C89BD70FFFFFF4D8B783048899D68FFFFFF498B58384889BD60FFFFFF498B78404D8B40484889B558FFFFFF4C89A550FFFFFF4C898D48FFFFFF48899540FFFFFF48898538FFFFFF4C89BD30FFFFFF48899D28FFFFFF4889BD20FFFFFF4C898518FFFFFF49BB00C3FB16497F00004D8B034983C00149BB00C3FB16497F00004D89034983FA040F85000000008139806300000F85000000004C8B51104D85D20F84000000004C8B4108498B7A10813FF0CE01000F85000000004D8B5208498B7A084939F80F83000000004D8B52104F8B54C2104D85D20F84000000004983C0014C8941084983FD000F850000000049BB98BD2814497F00004D39DE0F85000000004C8BB560FFFFFF4D8B6E0849BBA86B2814497F00004D39DD0F85000000004D8B451049BBC06B2814497F00004D39D80F850000000049BBE8822B14497F00004D8B2B49BBF0822B14497F00004D39DD0F850000000048898D10FFFFFF4C899508FFFFFF41BB201B8D0041FFD34C8B5040488B48504885C90F8500000000488B48284883F9000F850000000049BB40952B14497F0000498B0B4883F9000F8F00000000488B0C2500D785014881F9201288010F850000000049BB18832B14497F0000498B0B813910E001000F850000000049BB10832B14497F0000498B0B48898500FFFFFF488B042530255601488D5040483B142548255601761A49BB2D521614497F000041FFD349BBC2521614497F000041FFD3488914253025560148C7008800000048C74008030000004889C24883C02848C700508A0100488968084C8BAD00FFFFFF41F6450401741951505241524C89EF4889C641BBF0C4C50041FFD3415A5A58594989454049896E1848C7421060CE830149BB60E82A14497F00004C895A1849BBB0F32A14497F00004C895A204C8995F8FEFFFF488995F0FEFFFF488985E8FEFFFF48898DE0FEFFFF48C78578FFFFFF740000004889D741BB3036920041FFD34883BD78FFFFFF000F8C0000000048833C25A046A002000F8500000000488985D8FEFFFF488B042530255601488D5010483B142548255601761A49BB2D521614497F000041FFD349BBC2521614497F000041FFD3488914253025560148C700E0300000488B9560FFFFFF4
8896A184C8BADF0FEFFFF4C896808488985D0FEFFFF48C78578FFFFFF75000000488BBDE0FEFFFF4889C6488B95D8FEFFFF41BBA02E790041FFD34883BD78FFFFFF000F8C0000000048833C25A046A002000F85000000004889C249BB00000000000000804C21D84883F8000F8500000000488B85E0FEFFFF488B4018486BD218488B5410184883FA017206813AB0EB03000F85000000004881FAC02C72010F8400000000488B8500FFFFFF4C8B68504D85ED0F85000000004C8B68284983FD000F85000000004C8BADE8FEFFFF49C74508FDFFFFFF4C8BAD08FFFFFF498B4D1049BBFFFFFFFFFFFFFF7F4C39D90F8D000000004C8B72104C8B52184D8B46104983F8110F85000000004D8B46204C89C74983E0014983F8000F8400000000498B7E384883FF010F8F00000000498B7E184883C7014D8B44FE104983F8130F85000000004989F84883C701498B7CFE104983C0024883F9000F8E000000004983F80B0F85000000004883FF330F850000000049BB7081F916497F00004D39DE0F8500000000488995C8FEFFFF488B042530255601488D5060483B142548255601761A49BB2D521614497F000041FFD349BBC2521614497F000041FFD3488914253025560148C700D00001004889C24883C04848C700508A0100488968084C8BB500FFFFFF41F6460401741952514152504C89F74889C641BBF0C4C50041FFD358415A595A49894640488BBD60FFFFFF48896F1849BB7081F916497F00004C895A384C89521048894A084C896A40488985C0FEFFFF488995B8FEFFFF48C78578FFFFFF76000000BF000000004889D649BB13721614497F000041FFD34883F80274134889C7BE0000000041BB7053950041FFD3EB08488B0425D0D155014883BD78FFFFFF000F8C0000000048833C25A046A002000F85000000004885C00F8500000000488B8500FFFFFF4C8B70504D85F60F85000000004C8B70284983FE000F8500000000488B95F8FEFFFFF6400401741350524889C74889D641BBF0C4C50041FFD35A5848895040488BBDC0FEFFFF48C74708FDFFFFFF488B3C254845A0024883FF000F8C0000000049BB18C3FB16497F0000498B3B4883C70149BB18C3FB16497F000049893B488BBD10FFFFFF4C8B6F104D85ED0F8400000000488B4F084D8B551041813AF0CE01000F85000000004D8B6D084D8B55084C39D10F83000000004D8B6D104D8B6CCD104D85ED0F84000000004883C1014C8B9560FFFFFF4D8B420848894F0849BBA86B2814497F00004D39D80F8500000000498B481049BBC06B2814497F00004C39D90F850000000049BBE8822B14497F00004D8B0349BBF0822B14497F00004D39D80F85000000004983FE000F850000000049BB40952B14497F00004D8B33498
3FE000F8F000000004C8B342500D785014981FE201288010F850000000049BB18832B14497F00004D8B3341813E10E001000F850000000049BB10832B14497F00004D8B33488985B0FEFFFF488995A8FEFFFF488B042530255601488D5040483B142548255601761A49BB2D521614497F000041FFD349BBC2521614497F000041FFD3488914253025560148C7008800000048C74008030000004889C24883C02848C700508A0100488968084C8B85B0FEFFFF41F6400401741D415050524152574C89C74889C641BBF0C4C50041FFD35F415A5A5841584989404049896A1848C7421060CE830149BB60E82A14497F00004C895A1849BBB0F32A14497F00004C895A204C89AD08FFFFFF488995A0FEFFFF4C89B598FEFFFF48898590FEFFFF48C78578FFFFFF770000004889D741BB3036920041FFD34883BD78FFFFFF000F8C0000000048833C25A046A002000F850000000048898588FEFFFF488B042530255601488D5010483B142548255601761A49BB2D521614497F000041FFD349BBC2521614497F000041FFD3488914253025560148C700E0300000488B9560FFFFFF48896A184C8B85A0FEFFFF4C89400848898580FEFFFF48C78578FFFFFF78000000488BBD98FEFFFF4889C6488B9588FEFFFF41BBA02E790041FFD34883BD78FFFFFF000F8C0000000048833C25A046A002000F85000000004889C249BB00000000000000804C21D84883F8000F8500000000488B8598FEFFFF488B4018486BD218488B5410184883FA017206813AB0EB03000F85000000004881FAC02C72010F8400000000488B85B0FEFFFF4C8B40504D85C00F85000000004C8B40284983F8000F85000000004C8B8590FEFFFF49C74008FDFFFFFF4C8B8508FFFFFF4D8B701049BBFFFFFFFFFFFFFF7F4D39DE0F8D000000004C8B5210488B7A184D8B6A104983FD110F85000000004D8B6A204C89E94983E5014983FD000F8400000000498B4A384883F9010F8F00000000498B4A184883C1014D8B6CCA104983FD130F85000000004989CD4883C101498B4CCA104983C5024983FE000F8E000000004983FD0B0F85000000004883F9330F850000000049BB7081F916497F00004D39DA0F850000000048899578FEFFFF488B042530255601488D5060483B142548255601761A49BB2D521614497F000041FFD349BBC2521614497F000041FFD3488914253025560148C700D00001004889C24883C04848C700508A0100488968084C8B95B0FEFFFF41F6420401741D415250415052574C89D74889C641BBF0C4C50041FFD35F5A415858415A49894240488B8D60FFFFFF4889691849BB7081F916497F00004C895A3848897A104C8972084C89424048899570FEFFFF48898568FEFFFF48C78578FFFFFF79000
000BF000000004889D649BB13721614497F000041FFD34883F80274134889C7BE0000000041BB7053950041FFD3EB08488B0425D0D155014883BD78FFFFFF000F8C0000000048833C25A046A002000F85000000004885C00F8500000000488B85B0FEFFFF4C8B50504D85D20F85000000004C8B50284983FA000F8500000000488B8DA8FEFFFFF64004017417505141524889C74889CE41BBF0C4C50041FFD3415A5958488948404C8B8568FEFFFF49C74008FDFFFFFF4C8B04254845A0024983F8000F8C000000004D89D64889CAE965FAFFFF49BB00501614497F000041FFD3294C48403835505544585C046064686C037A00000049BB00501614497F000041FFD34C48044038355044585C6064686C037B00000049BB00501614497F000041FFD34C4804284038355044585C6064686C037C00000049BB00501614497F000041FFD34C4804211C284038355044585C6064686C037D00000049BB00501614497F000041FFD34C4804211D284038355044585C6064686C037E00000049BB00501614497F000041FFD34C480421284038355044585C6064686C037F00000049BB00501614497F000041FFD3354C4840385044585C0464686C28038000000049BB00501614497F000041FFD34C4838405044580464686C28038100000049BB00501614497F000041FFD34C3834405044580464686C28038200000049BB00501614497F000041FFD34C382034405044580464686C28038300000049BB00501614497F000041FFD34C3834405044580464686C28038400000049BB00501614497F000041FFD34C3834405044580464686C28038500000049BB00501614497F000041FFD34C3800044050445870152874038600000049BB00501614497F000041FFD34C38004050445870152874038700000049BB00501614497F000041FFD34C38004050445870152874038800000049BB00501614497F000041FFD34C3800054050445870152874038900000049BB00501614497F000041FFD34C380004405044587015152874038A00000049BB00501614497F000041FFD34C380004405044587015152874038B00000049BB43501614497F000041FFD34C48788801018401405044587015747C8001037400000049BB43501614497F000041FFD34C48788801018401405044587015747C8001038C00000049BB43501614497F000041FFD34C487890010188018401405044587074157C037500000049BB43501614497F000041FFD34C487890010188018401405044587074157C038D00000049BB00501614497F000041FFD34C487890010988018401405044587074157C038E00000049BB00501614497F000041FFD34C48789001088401405044587074157C038F00000049BB00501614497F0
00041FFD34C48788401405044587008900174157C039000000049BB00501614497F000041FFD34C480008348401405044587007900174157C039100000049BB00501614497F000041FFD34C4800088401405044587007900174157C039200000049BB00501614497F000041FFD34C48004050445870080774157C039300000049BB00501614497F000041FFD34C480008344050445870070707157C039400000049BB00501614497F000041FFD34C4800084050445870380529070734157C039500000049BB00501614497F000041FFD34C4800081D4050445870380529070734157C039600000049BB00501614497F000041FFD34C4800084050445870380529070734157C039700000049BB00501614497F000041FFD34C4800081D4050445870380529070734157C039800000049BB00501614497F000041FFD34C4800081D214050445870380529070734157C039900000049BB00501614497F000041FFD34C4800081D21384050445870070529070734157C039A00000049BB00501614497F000041FFD34C4800081D384050445870070529070734157C039B00000049BB00501614497F000041FFD34C480008384050445870070529070734157C039C00000049BB43501614497F000041FFD34C48789C0194010198014050445870747C037600000049BB43501614497F000041FFD34C48789C0194010198014050445870747C039D00000049BB00501614497F000041FFD34C48789C01940198014050445870747C039E00000049BB00501614497F000041FFD34C48003898014050445870747C039F00000049BB00501614497F000041FFD34C480098014050445870747C03A000000049BB00501614497F000041FFD34C484050445870740703A100000049BB00501614497F000041FFD34C484050445870740703A200000049BB00501614497F000041FFD34C481C34405044587403A300000049BB00501614497F000041FFD34C481C052834405044587403A400000049BB00501614497F000041FFD34C481C052934405044587403A500000049BB00501614497F000041FFD34C481C0534405044587403A600000049BB00501614497F000041FFD34C2820405044581C340703A700000049BB00501614497F000041FFD34C280420405044581C340703A800000049BB00501614497F000041FFD34C2820405044581C340703A900000049BB00501614497F000041FFD34C2820405044581C340703AA00000049BB00501614497F000041FFD34C2800405044581C1508340703AB00000049BB00501614497F000041FFD34C280039405044581C1508340703AC00000049BB00501614497F000041FFD34C280038405044581C151508340703AD00000049BB00501614497F000041F
FD34C280038405044581C151508340703AE00000049BB43501614497F000041FFD34C48A001AC0101B001405044587015A80174A401037700000049BB43501614497F000041FFD34C48A001AC0101B001405044587015A80174A40103AF00000049BB43501614497F000041FFD34C48A001B80101AC01B00140504458701574A401037800000049BB43501614497F000041FFD34C48A001B80101AC01B00140504458701574A40103B000000049BB00501614497F000041FFD34C48A001B80109AC01B00140504458701574A40103B100000049BB00501614497F000041FFD34C48A001B80108B00140504458701574A40103B200000049BB00501614497F000041FFD34C48A001B001405044587008B8011574A40103B300000049BB00501614497F000041FFD34C48000820B001405044587007B8011574A40103B400000049BB00501614497F000041FFD34C480008B001405044587007B8011574A40103B500000049BB00501614497F000041FFD34C4800405044587008071574A40103B600000049BB00501614497F000041FFD34C48000820405044587007071507A40103B700000049BB00501614497F000041FFD34C480008405044587028391D07071520A40103B800000049BB00501614497F000041FFD34C48000805405044587028391D07071520A40103B900000049BB00501614497F000041FFD34C480008405044587028391D07071520A40103BA00000049BB00501614497F000041FFD34C48000805405044587028391D07071520A40103BB00000049BB00501614497F000041FFD34C4800080535405044587028391D07071520A40103BC00000049BB00501614497F000041FFD34C480008053528405044587007391D07071520A40103BD00000049BB00501614497F000041FFD34C4800080528405044587007391D07071520A40103BE00000049BB00501614497F000041FFD34C48000828405044587007391D07071520A40103BF00000049BB43501614497F000041FFD34C48A001C001BC0101C401405044587074A401037900000049BB43501614497F000041FFD34C48A001C001BC0101C401405044587074A40103C000000049BB00501614497F000041FFD34C48A001C001BC01C401405044587074A40103C100000049BB00501614497F000041FFD34C480028C401405044587074A40103C200000049BB00501614497F000041FFD34C4800C401405044587074A40103C300000049BB00501614497F000041FFD34C484050445870740703C400000049BB00501614497F000041FFD34C484050445870740703C5000000 +[b2358fa1b0f] jit-backend-dump} +[b2358fa27d3] {jit-backend-addr +Loop 6 ( #44 FOR_ITER) has address 
7f4914167695 to 7f49141682b8 (bootstrap 7f491416765f) +[b2358fa3cd9] jit-backend-addr} +[b2358fa4b27] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd0c5 +0 E0FDFFFF -[19b75225f936] jit-backend-dump} -[19b752260bcc] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167691 +0 E0FDFFFF +[b2358fa5821] jit-backend-dump} +[b2358fa6345] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd1a2 +0 3F0B0000 -[19b75226231e] jit-backend-dump} -[19b752262de0] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f491416776e +0 460B0000 +[b2358fa6dcf] jit-backend-dump} +[b2358fa733b] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd1ae +0 550B0000 -[19b752264478] jit-backend-dump} -[19b752264f58] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f491416777a +0 5C0B0000 +[b2358fa7ef9] jit-backend-dump} +[b2358fa83dd] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd1bb +0 680B0000 -[19b7522665c0] jit-backend-dump} -[19b752266fec] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167787 +0 6F0B0000 +[b2358fa8cc1] jit-backend-dump} +[b2358fa9113] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd1cf +0 750B0000 -[19b7522685f4] jit-backend-dump} -[19b752268f30] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f491416779b +0 7C0B0000 +[b2358fa99f1] jit-backend-dump} +[b2358fa9e0d] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd1e0 +0 870B0000 -[19b75226a424] jit-backend-dump} -[19b75226ad3c] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141677ac +0 8E0B0000 +[b2358faa831] jit-backend-dump} +[b2358faad29] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd1f2 +0 980B0000 -[19b75226c272] jit-backend-dump} -[19b75226cbf6] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141677be +0 9F0B0000 +[b2358fab7a7] jit-backend-dump} +[b2358fabbdd] {jit-backend-dump BACKEND 
x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd204 +0 A80B0000 -[19b75226e1d4] jit-backend-dump} -[19b75226ebee] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141677d0 +0 AF0B0000 +[b2358fac48f] jit-backend-dump} +[b2358fac89d] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd217 +0 B50B0000 -[19b7522701f0] jit-backend-dump} -[19b752270b62] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141677e3 +0 BC0B0000 +[b2358fad177] jit-backend-dump} +[b2358fad585] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd235 +0 B50B0000 -[19b752272032] jit-backend-dump} -[19b75227297a] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167801 +0 BC0B0000 +[b2358fade29] jit-backend-dump} +[b2358fae311] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd24c +0 BC0B0000 -[19b752273ebc] jit-backend-dump} -[19b752274b88] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167818 +0 C30B0000 +[b2358faed4f] jit-backend-dump} +[b2358faf4e5] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd26c +0 D90B0000 -[19b752276100] jit-backend-dump} -[19b752276b56] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167838 +0 E00B0000 +[b2358faffa5] jit-backend-dump} +[b2358fb04bb] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd294 +0 CF0B0000 -[19b75227824e] jit-backend-dump} -[19b752278c56] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167860 +0 D60B0000 +[b2358fb0f21] jit-backend-dump} +[b2358fb133f] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd2a2 +0 DF0B0000 -[19b75227a15c] jit-backend-dump} -[19b75227ab88] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f491416786e +0 E60B0000 +[b2358fb1bd3] jit-backend-dump} +[b2358fb2063] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd2b9 +0 020C0000 -[19b75227c09a] jit-backend-dump} -[19b75227c9c4] 
{jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167885 +0 090C0000 +[b2358fb2b6d] jit-backend-dump} +[b2358fb30a9] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd2ce +0 0B0C0000 -[19b75227dedc] jit-backend-dump} -[19b75227e7dc] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f491416789a +0 120C0000 +[b2358fb3aab] jit-backend-dump} +[b2358fb3f99] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd2e7 +0 110C0000 -[19b75227fef2] jit-backend-dump} -[19b7522808ee] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141678b3 +0 180C0000 +[b2358fb48fd] jit-backend-dump} +[b2358fb4e5d] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd3e8 +0 2F0B0000 -[19b752281e6c] jit-backend-dump} -[19b75228285c] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141679b4 +0 360B0000 +[b2358fb57cd] jit-backend-dump} +[b2358fb7e23] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd3f7 +0 440B0000 -[19b752283d74] jit-backend-dump} -[19b752284692] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141679c3 +0 4B0B0000 +[b2358fb888f] jit-backend-dump} +[b2358fb8dd1] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd48d +0 D20A0000 -[19b752285b8c] jit-backend-dump} -[19b7522864bc] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167a59 +0 D90A0000 +[b2358fb9863] jit-backend-dump} +[b2358fb9d85] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd49c +0 E70A0000 -[19b7522879fe] jit-backend-dump} -[19b7522884e4] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167a68 +0 EE0A0000 +[b2358fba723] jit-backend-dump} +[b2358fbab2f] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd4b6 +0 F10A0000 -[19b75228dac4] jit-backend-dump} -[19b75228e700] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167a82 +0 F80A0000 +[b2358fbb3d1] jit-backend-dump} +[b2358fbb7df] 
{jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd4dc +0 EF0A0000 -[19b75228fee2] jit-backend-dump} -[19b7522908f0] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167aa8 +0 F60A0000 +[b2358fbc1ed] jit-backend-dump} +[b2358fbc5e7] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd4e9 +0 050B0000 -[19b752291ee0] jit-backend-dump} -[19b7522928f4] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167ab5 +0 0B0B0000 +[b2358fbcfc7] jit-backend-dump} +[b2358fbd525] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd4fd +0 140B0000 -[19b752293ede] jit-backend-dump} -[19b7522948e6] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167ac9 +0 190B0000 +[b2358fbdf0b] jit-backend-dump} +[b2358fbe347] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd50b +0 2B0B0000 -[19b752295ee2] jit-backend-dump} -[19b7522969b6] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167ad7 +0 2F0B0000 +[b2358fbebe3] jit-backend-dump} +[b2358fbf07f] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd538 +0 420B0000 -[19b752297fdc] jit-backend-dump} -[19b7522988f4] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167b04 +0 440B0000 +[b2358fbf92d] jit-backend-dump} +[b2358fbfd27] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd54e +0 4E0B0000 -[19b752299e0c] jit-backend-dump} -[19b75229a75a] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167b1a +0 4F0B0000 +[b2358fc05bd] jit-backend-dump} +[b2358fc0ac3] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd563 +0 5D0B0000 -[19b75229bc9c] jit-backend-dump} -[19b75229c5de] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167b2f +0 5D0B0000 +[b2358fc1591] jit-backend-dump} +[b2358fc1a93] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd571 +0 740B0000 -[19b75229daea] jit-backend-dump} 
-[19b75229e5b2] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167b3d +0 730B0000 +[b2358fc248b] jit-backend-dump} +[b2358fc2989] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd588 +0 810B0000 -[19b75229fc14] jit-backend-dump} -[19b7522a0628] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167b54 +0 7F0B0000 +[b2358fc3235] jit-backend-dump} +[b2358fc3631] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd5a2 +0 8C0B0000 -[19b7522a1be8] jit-backend-dump} -[19b7522a250c] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167b6e +0 890B0000 +[b2358fc3ed9] jit-backend-dump} +[b2358fc42e3] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd5ac +0 A80B0000 -[19b7522a3a54] jit-backend-dump} -[19b7522a43a2] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167b78 +0 A40B0000 +[b2358fc4b7f] jit-backend-dump} +[b2358fc509d] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd5b6 +0 C50B0000 -[19b7522a58a2] jit-backend-dump} -[19b7522a61e4] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167b82 +0 C00B0000 +[b2358fc5a95] jit-backend-dump} +[b2358fc5f9d] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd5c9 +0 D80B0000 -[19b7522a7900] jit-backend-dump} -[19b7522a830e] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167b95 +0 D20B0000 +[b2358fc6849] jit-backend-dump} +[b2358fc6c51] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd6ce +0 F80A0000 -[19b7522a98d4] jit-backend-dump} -[19b7522aa26a] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167c9a +0 F10A0000 +[b2358fc74fb] jit-backend-dump} +[b2358fc78f9] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd6dd +0 0D0B0000 -[19b7522ab866] jit-backend-dump} -[19b7522ac1f0] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167ca9 +0 050B0000 +[b2358fc8193] jit-backend-dump} 
+[b2358fc8599] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd6e6 +0 280B0000 -[19b7522ad6f0] jit-backend-dump} -[19b7522adfe4] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167cb2 +0 1F0B0000 +[b2358fc8fc3] jit-backend-dump} +[b2358fc94d7] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd6fa +0 370B0000 -[19b7522af520] jit-backend-dump} -[19b7522affd6] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167cc6 +0 2D0B0000 +[b2358fc9f9d] jit-backend-dump} +[b2358fca493] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd708 +0 490B0000 -[19b7522b163e] jit-backend-dump} -[19b7522b20b2] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167cd4 +0 3E0B0000 +[b2358fcad39] jit-backend-dump} +[b2358fcb1b7] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd74f +0 3C0B0000 -[19b7522b36b4] jit-backend-dump} -[19b7522b3ff0] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167d19 +0 320B0000 +[b2358fcba51] jit-backend-dump} +[b2358fcbe91] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd781 +0 250B0000 -[19b7522b550e] jit-backend-dump} -[19b7522b5e26] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167d4b +0 1B0B0000 +[b2358fcc739] jit-backend-dump} +[b2358fccb3f] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd795 +0 2C0B0000 -[19b7522b737a] jit-backend-dump} -[19b7522b7c98] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167d60 +0 210B0000 +[b2358fcd54f] jit-backend-dump} +[b2358fcd95b] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd7a6 +0 380B0000 -[19b7522b91aa] jit-backend-dump} -[19b7522b9c0c] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167d71 +0 2D0B0000 +[b2358fce207] jit-backend-dump} +[b2358fce607] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd7b8 +0 430B0000 -[19b7522bb32e] 
jit-backend-dump} -[19b7522bbca6] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167d83 +0 380B0000 +[b2358fceed9] jit-backend-dump} +[b2358fcf2e9] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd7de +0 390B0000 -[19b7522bd1c4] jit-backend-dump} -[19b7522bdaf4] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167da9 +0 2E0B0000 +[b2358fcfb91] jit-backend-dump} +[b2358fcffb1] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd7f5 +0 3E0B0000 -[19b7522bf04e] jit-backend-dump} -[19b7522bfd1a] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167dc0 +0 330B0000 +[b2358fd0b81] jit-backend-dump} +[b2358fd1255] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd815 +0 570B0000 -[19b7522c125c] jit-backend-dump} -[19b7522c1cee] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167de0 +0 4C0B0000 +[b2358fd1b17] jit-backend-dump} +[b2358fd3ebf] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd81f +0 690B0000 -[19b7522c3248] jit-backend-dump} -[19b7522c3b8a] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167dea +0 5E0B0000 +[b2358fd4a01] jit-backend-dump} +[b2358fd4f2b] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd836 +0 700B0000 -[19b7522c50ae] jit-backend-dump} -[19b7522c59d2] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167e01 +0 650B0000 +[b2358fd59c7] jit-backend-dump} +[b2358fd5ebb] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd84b +0 7A0B0000 -[19b7522c6f26] jit-backend-dump} -[19b7522cbb4c] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167e16 +0 6F0B0000 +[b2358fd6873] jit-backend-dump} +[b2358fd6d67] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd864 +0 810B0000 -[19b7522cd4fc] jit-backend-dump} -[19b7522ce03c] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167e30 +0 750B0000 +[b2358fd7789] 
jit-backend-dump} +[b2358fd7ca7] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd972 +0 930A0000 -[19b7522cf632] jit-backend-dump} -[19b7522d006a] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167f3c +0 890A0000 +[b2358fd8569] jit-backend-dump} +[b2358fd8985] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd981 +0 AA0A0000 -[19b7522d168a] jit-backend-dump} -[19b7522d20a4] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167f4b +0 A00A0000 +[b2358fd922f] jit-backend-dump} +[b2358fd9641] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bda17 +0 3A0A0000 -[19b7522d364c] jit-backend-dump} -[19b7522d3fa0] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167fe1 +0 300A0000 +[b2358fd9f11] jit-backend-dump} +[b2358fda321] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bda26 +0 510A0000 -[19b7522d5488] jit-backend-dump} -[19b7522d5de8] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167ff0 +0 470A0000 +[b2358fdacef] jit-backend-dump} +[b2358fdb217] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bda40 +0 5D0A0000 -[19b7522d7300] jit-backend-dump} -[19b7522d7c84] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f491416800a +0 530A0000 +[b2358fdbc6d] jit-backend-dump} +[b2358fdc15d] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bda66 +0 5D0A0000 -[19b7522d92b6] jit-backend-dump} -[19b7522d9d12] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914168030 +0 530A0000 +[b2358fdca07] jit-backend-dump} +[b2358fdce11] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bda73 +0 740A0000 -[19b7522db2ea] jit-backend-dump} -[19b7522dbc38] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f491416803d +0 6A0A0000 +[b2358fdd7a3] jit-backend-dump} +[b2358fddb85] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bda87 +0 840A0000 
-[19b7522dd1a4] jit-backend-dump} -[19b7522ddb04] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914168051 +0 7A0A0000 +[b2358fde435] jit-backend-dump} +[b2358fde93f] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bda95 +0 9B0A0000 -[19b7522df01c] jit-backend-dump} -[19b7522dfa66] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f491416805f +0 910A0000 +[b2358fdf38b] jit-backend-dump} +[b2358fdf949] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bdac2 +0 B20A0000 -[19b7522e10a4] jit-backend-dump} -[19b7522e1b0c] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f491416808c +0 A80A0000 +[b2358fe033d] jit-backend-dump} +[b2358fe0795] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bdad8 +0 BE0A0000 -[19b7522e318c] jit-backend-dump} -[19b7522e3bee] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141680a2 +0 B40A0000 +[b2358fe1041] jit-backend-dump} +[b2358fe1453] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bdaed +0 CD0A0000 -[19b7522e50d0] jit-backend-dump} -[19b7522e5a0c] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141680b7 +0 C30A0000 +[b2358fe1d0b] jit-backend-dump} +[b2358fe2125] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bdafb +0 E40A0000 -[19b7522e6f36] jit-backend-dump} -[19b7522e78ae] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141680c5 +0 DA0A0000 +[b2358fe2b35] jit-backend-dump} +[b2358fe306b] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bdb12 +0 F10A0000 -[19b7522e8d8a] jit-backend-dump} -[19b7522e96d8] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141680dc +0 E70A0000 +[b2358fe3f39] jit-backend-dump} +[b2358fe436b] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bdb2c +0 FC0A0000 -[19b7522eada0] jit-backend-dump} -[19b7522eb7d8] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141680f6 +0 F20A0000 
+[b2358fe4c0f] jit-backend-dump} +[b2358fe501d] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bdb36 +0 180B0000 -[19b7522ecd6e] jit-backend-dump} -[19b7522ed722] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914168100 +0 0E0B0000 +[b2358fe58ed] jit-backend-dump} +[b2358fe5cf9] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bdb40 +0 350B0000 -[19b7522eec40] jit-backend-dump} -[19b7522ef594] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f491416810a +0 2B0B0000 +[b2358fe659b] jit-backend-dump} +[b2358fe6a9f] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bdb53 +0 480B0000 -[19b7522f0a70] jit-backend-dump} -[19b7522f13c4] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f491416811d +0 3E0B0000 +[b2358fe752d] jit-backend-dump} +[b2358fe7a41] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bdc59 +0 670A0000 -[19b7522f28e8] jit-backend-dump} -[19b7522f33aa] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914168226 +0 5A0A0000 +[b2358fe8499] jit-backend-dump} +[b2358fe88d1] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bdc68 +0 7D0A0000 -[19b7522f49e8] jit-backend-dump} -[19b7522f5420] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914168235 +0 700A0000 +[b2358fe919f] jit-backend-dump} +[b2358fe95ab] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bdc71 +0 990A0000 -[19b7522f69f8] jit-backend-dump} -[19b7522f73a0] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f491416823e +0 8C0A0000 +[b2358fe9e4d] jit-backend-dump} +[b2358fea25f] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bdc85 +0 A90A0000 -[19b7522f8882] jit-backend-dump} -[19b7522f91d0] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914168252 +0 9C0A0000 +[b2358feabad] jit-backend-dump} +[b2358feb0d5] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bdc93 
+0 BB0A0000 -[19b7522fa6e8] jit-backend-dump} -[19b7522fb0de] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914168260 +0 AE0A0000 +[b2358febb61] jit-backend-dump} +[b2358fec0c7] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bdcd6 +0 B20A0000 -[19b7522fc7e2] jit-backend-dump} -[19b7522fdece] jit-backend} -[19b75230206e] {jit-log-opt-loop -# Loop 4 ( #44 FOR_ITER) : loop with 351 ops +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141682a9 +0 9F0A0000 +[b2358fec987] jit-backend-dump} +[b2358fed65b] jit-backend} +[b2358fefab1] {jit-log-opt-loop +# Loop 6 ( #44 FOR_ITER) : loop with 351 ops [p0, p1] +84: p2 = getfield_gc(p0, descr=) +88: p3 = getfield_gc(p0, descr=) @@ -1534,36 +1818,36 @@ +157: p22 = getarrayitem_gc(p8, 6, descr=) +168: p24 = getarrayitem_gc(p8, 7, descr=) +172: p25 = getfield_gc(p0, descr=) -+172: label(p0, p1, p2, p3, i4, p5, i6, i7, p10, p12, p14, p16, p18, p20, p22, p24, descr=TargetToken(140669221670848)) ++172: label(p0, p1, p2, p3, i4, p5, i6, i7, p10, p12, p14, p16, p18, p20, p22, p24, descr=TargetToken(139951894600368)) debug_merge_point(0, ' #44 FOR_ITER') -+265: guard_value(i6, 4, descr=) [i6, p1, p0, p2, p3, i4, p5, i7, p10, p12, p14, p16, p18, p20, p22, p24] -+275: guard_class(p16, 38562496, descr=) [p1, p0, p16, p2, p3, i4, p5, p10, p12, p14, p18, p20, p22, p24] ++265: guard_value(i6, 4, descr=) [i6, p1, p0, p2, p3, i4, p5, i7, p10, p12, p14, p16, p18, p20, p22, p24] ++275: guard_class(p16, 38562496, descr=) [p1, p0, p16, p2, p3, i4, p5, p10, p12, p14, p18, p20, p22, p24] +287: p28 = getfield_gc(p16, descr=) -+291: guard_nonnull(p28, descr=) [p1, p0, p16, p28, p2, p3, i4, p5, p10, p12, p14, p18, p20, p22, p24] ++291: guard_nonnull(p28, descr=) [p1, p0, p16, p28, p2, p3, i4, p5, p10, p12, p14, p18, p20, p22, p24] +300: i29 = getfield_gc(p16, descr=) +304: p30 = getfield_gc(p28, descr=) -+308: guard_class(p30, 38655536, descr=) [p1, p0, p16, i29, p30, p28, p2, p3, i4, p5, p10, p12, p14, p18, p20, p22, p24] ++308: 
guard_class(p30, 38655536, descr=) [p1, p0, p16, i29, p30, p28, p2, p3, i4, p5, p10, p12, p14, p18, p20, p22, p24] +320: p32 = getfield_gc(p28, descr=) +324: i33 = getfield_gc(p32, descr=) +328: i34 = uint_ge(i29, i33) -guard_false(i34, descr=) [p1, p0, p16, i29, i33, p32, p2, p3, i4, p5, p10, p12, p14, p18, p20, p22, p24] +guard_false(i34, descr=) [p1, p0, p16, i29, i33, p32, p2, p3, i4, p5, p10, p12, p14, p18, p20, p22, p24] +337: p35 = getfield_gc(p32, descr=) +341: p36 = getarrayitem_gc(p35, i29, descr=) -+346: guard_nonnull(p36, descr=) [p1, p0, p16, i29, p36, p2, p3, i4, p5, p10, p12, p14, p18, p20, p22, p24] ++346: guard_nonnull(p36, descr=) [p1, p0, p16, i29, p36, p2, p3, i4, p5, p10, p12, p14, p18, p20, p22, p24] +355: i38 = int_add(i29, 1) +359: setfield_gc(p16, i38, descr=) -+363: guard_value(i4, 0, descr=) [i4, p1, p0, p2, p3, p5, p10, p12, p14, p16, p20, p22, p24, p36] ++363: guard_value(i4, 0, descr=) [i4, p1, p0, p2, p3, p5, p10, p12, p14, p16, p20, p22, p24, p36] debug_merge_point(0, ' #47 STORE_FAST') debug_merge_point(0, ' #50 LOAD_GLOBAL') -+373: guard_value(p3, ConstPtr(ptr40), descr=) [p1, p0, p3, p2, p5, p10, p12, p16, p20, p22, p24, p36] ++373: guard_value(p3, ConstPtr(ptr40), descr=) [p1, p0, p3, p2, p5, p10, p12, p16, p20, p22, p24, p36] +392: p41 = getfield_gc(p0, descr=) -+403: guard_value(p41, ConstPtr(ptr42), descr=) [p1, p0, p41, p2, p5, p10, p12, p16, p20, p22, p24, p36] ++403: guard_value(p41, ConstPtr(ptr42), descr=) [p1, p0, p41, p2, p5, p10, p12, p16, p20, p22, p24, p36] +422: p43 = getfield_gc(p41, descr=) -+426: guard_value(p43, ConstPtr(ptr44), descr=) [p1, p0, p43, p41, p2, p5, p10, p12, p16, p20, p22, p24, p36] -+445: guard_not_invalidated(, descr=) [p1, p0, p41, p2, p5, p10, p12, p16, p20, p22, p24, p36] ++426: guard_value(p43, ConstPtr(ptr44), descr=) [p1, p0, p43, p41, p2, p5, p10, p12, p16, p20, p22, p24, p36] ++445: guard_not_invalidated(, descr=) [p1, p0, p41, p2, p5, p10, p12, p16, p20, p22, p24, p36] 
debug_merge_point(0, ' #53 LOOKUP_METHOD') +445: p46 = getfield_gc(ConstPtr(ptr45), descr=) -+458: guard_value(p46, ConstPtr(ptr47), descr=) [p1, p0, p46, p2, p5, p10, p12, p16, p20, p22, p24, p36] ++458: guard_value(p46, ConstPtr(ptr47), descr=) [p1, p0, p46, p2, p5, p10, p12, p16, p20, p22, p24, p36] debug_merge_point(0, ' #56 LOAD_CONST') debug_merge_point(0, ' #59 LOAD_FAST') debug_merge_point(0, ' #62 CALL_METHOD') @@ -1571,22 +1855,22 @@ +500: p50 = getfield_gc(p49, descr=) +504: i51 = force_token() +504: p52 = getfield_gc(p49, descr=) -+508: guard_isnull(p52, descr=) [p1, p0, p49, p52, p2, p5, p10, p12, p16, i51, p50, p36] ++508: guard_isnull(p52, descr=) [p1, p0, p49, p52, p2, p5, p10, p12, p16, i51, p50, p36] +517: i53 = getfield_gc(p49, descr=) +521: i54 = int_is_zero(i53) -guard_true(i54, descr=) [p1, p0, p49, p2, p5, p10, p12, p16, i51, p50, p36] +guard_true(i54, descr=) [p1, p0, p49, p2, p5, p10, p12, p16, i51, p50, p36] debug_merge_point(1, ' #0 LOAD_GLOBAL') -+531: guard_not_invalidated(, descr=) [p1, p0, p49, p2, p5, p10, p12, p16, i51, p50, p36] ++531: guard_not_invalidated(, descr=) [p1, p0, p49, p2, p5, p10, p12, p16, i51, p50, p36] debug_merge_point(1, ' #3 LOAD_FAST') debug_merge_point(1, ' #6 LOAD_FAST') debug_merge_point(1, ' #9 CALL_FUNCTION') +531: i56 = getfield_gc(ConstPtr(ptr55), descr=) +544: i58 = int_ge(0, i56) -guard_true(i58, descr=) [p1, p0, p49, i56, p2, p5, p10, p12, p16, i51, p50, p36] +guard_true(i58, descr=) [p1, p0, p49, i56, p2, p5, p10, p12, p16, i51, p50, p36] +554: i59 = force_token() debug_merge_point(2, ' #0 LOAD_GLOBAL') +554: p61 = getfield_gc(ConstPtr(ptr60), descr=) -+562: guard_value(p61, ConstPtr(ptr62), descr=) [p1, p0, p49, p61, p2, p5, p10, p12, p16, i59, i51, p50, p36] ++562: guard_value(p61, ConstPtr(ptr62), descr=) [p1, p0, p49, p61, p2, p5, p10, p12, p16, i59, i51, p50, p36] debug_merge_point(2, ' #3 LOAD_FAST') debug_merge_point(2, ' #6 LOAD_CONST') debug_merge_point(2, ' #9 BINARY_SUBSCR') @@ -1600,7 
+1884,7 @@ debug_merge_point(2, ' #29 LOAD_FAST') debug_merge_point(2, ' #32 CALL_METHOD') +575: p64 = getfield_gc(ConstPtr(ptr63), descr=) -+588: guard_class(p64, ConstClass(ObjectDictStrategy), descr=) [p1, p0, p49, p64, p2, p5, p10, p12, p16, i59, i51, p50, p36] ++588: guard_class(p64, ConstClass(ObjectDictStrategy), descr=) [p1, p0, p49, p64, p2, p5, p10, p12, p16, i59, i51, p50, p36] +600: p66 = getfield_gc(ConstPtr(ptr63), descr=) +613: i67 = force_token() p69 = new_array(3, descr=) @@ -1612,69 +1896,69 @@ +764: setarrayitem_gc(p69, 1, ConstPtr(ptr75), descr=) +778: setarrayitem_gc(p69, 2, ConstPtr(ptr77), descr=) +792: i79 = call_may_force(ConstClass(hash_tuple), p69, descr=) -guard_not_forced(, descr=) [p1, p0, p49, p66, i79, p71, p2, p5, p10, p12, p16, i51, p36, p69, p50] -+857: guard_no_exception(, descr=) [p1, p0, p49, p66, i79, p71, p2, p5, p10, p12, p16, i51, p36, p69, p50] +guard_not_forced(, descr=) [p1, p0, p49, p66, i79, p71, p2, p5, p10, p12, p16, i51, p36, p50, p69] ++857: guard_no_exception(, descr=) [p1, p0, p49, p66, i79, p71, p2, p5, p10, p12, p16, i51, p36, p50, p69] +872: i80 = force_token() p82 = new_with_vtable(38549536) +942: setfield_gc(p0, i80, descr=) +953: setfield_gc(p82, p69, descr=) +964: i84 = call_may_force(ConstClass(ll_dict_lookup_trampoline__v693___simple_call__function_l), p66, p82, i79, descr=) -guard_not_forced(, descr=) [p1, p0, p49, p82, i84, p66, p71, p2, p5, p10, p12, p16, i51, p36, p50] -+1022: guard_no_exception(, descr=) [p1, p0, p49, p82, i84, p66, p71, p2, p5, p10, p12, p16, i51, p36, p50] +guard_not_forced(, descr=) [p1, p0, p49, p82, i84, p66, p71, p2, p5, p10, p12, p16, p36, i51, p50] ++1022: guard_no_exception(, descr=) [p1, p0, p49, p82, i84, p66, p71, p2, p5, p10, p12, p16, p36, i51, p50] +1037: i86 = int_and(i84, -9223372036854775808) +1053: i87 = int_is_true(i86) -guard_false(i87, descr=) [p1, p0, p49, p82, i84, p66, p71, p2, p5, p10, p12, p16, i51, p36, p50] +guard_false(i87, descr=) [p1, p0, p49, p82, 
i84, p66, p71, p2, p5, p10, p12, p16, p36, i51, p50] +1063: p88 = getfield_gc(p66, descr=) +1074: p89 = getinteriorfield_gc(p88, i84, descr=>) -+1083: guard_nonnull_class(p89, 38793968, descr=) [p1, p0, p49, p82, p89, p71, p2, p5, p10, p12, p16, i51, p36, p50] ++1083: guard_nonnull_class(p89, 38793968, descr=) [p1, p0, p49, p82, p89, p71, p2, p5, p10, p12, p16, p36, i51, p50] debug_merge_point(2, ' #35 STORE_FAST') debug_merge_point(2, ' #38 LOAD_FAST') debug_merge_point(2, ' #41 LOAD_CONST') debug_merge_point(2, ' #44 COMPARE_OP') +1101: i92 = instance_ptr_eq(ConstPtr(ptr91), p89) -guard_false(i92, descr=) [p1, p0, p49, p71, p2, p5, p10, p12, p16, p89, p82, i51, p36, p50] +guard_false(i92, descr=) [p1, p0, p49, p71, p2, p5, p10, p12, p16, p89, p82, p36, i51, p50] debug_merge_point(2, ' #47 POP_JUMP_IF_FALSE') debug_merge_point(2, ' #50 LOAD_FAST') debug_merge_point(2, ' #53 RETURN_VALUE') +1114: p93 = getfield_gc(p49, descr=) -+1125: guard_isnull(p93, descr=) [p1, p0, p49, p89, p93, p71, p2, p5, p10, p12, p16, None, p82, i51, p36, p50] ++1125: guard_isnull(p93, descr=) [p1, p0, p49, p89, p93, p71, p2, p5, p10, p12, p16, None, p82, p36, i51, p50] +1134: i95 = getfield_gc(p49, descr=) +1138: i96 = int_is_true(i95) -guard_false(i96, descr=) [p1, p0, p49, p89, p71, p2, p5, p10, p12, p16, None, p82, i51, p36, p50] +guard_false(i96, descr=) [p1, p0, p49, p89, p71, p2, p5, p10, p12, p16, None, p82, p36, i51, p50] +1148: p97 = getfield_gc(p49, descr=) debug_merge_point(1, ' #12 LOOKUP_METHOD') +1148: setfield_gc(p71, -3, descr=) debug_merge_point(1, ' #15 LOAD_FAST') debug_merge_point(1, ' #18 CALL_METHOD') -+1163: guard_not_invalidated(, descr=) [p1, p0, p49, p2, p5, p10, p12, p16, p89, None, i51, p36, p50] ++1163: guard_not_invalidated(, descr=) [p1, p0, p49, p2, p5, p10, p12, p16, p89, None, p36, i51, p50] +1163: i99 = strlen(p36) +1174: i101 = int_gt(9223372036854775807, i99) -guard_true(i101, descr=) [p1, p0, p49, p89, p36, p2, p5, p10, p12, p16, None, None, i51, 
None, p50] +guard_true(i101, descr=) [p1, p0, p49, p89, p36, p2, p5, p10, p12, p16, None, None, None, i51, p50] +1193: p102 = getfield_gc_pure(p89, descr=) +1197: i103 = getfield_gc_pure(p89, descr=) +1201: i105 = getarrayitem_gc_pure(p102, 0, descr=) +1205: i107 = int_eq(i105, 17) -guard_true(i107, descr=) [p1, p0, p49, p89, p2, p5, p10, p12, p16, i99, i103, p102, None, None, i51, p36, p50] +guard_true(i107, descr=) [p1, p0, p49, p89, p2, p5, p10, p12, p16, p102, i99, i103, None, None, p36, i51, p50] +1215: i109 = getarrayitem_gc_pure(p102, 2, descr=) +1219: i111 = int_and(i109, 1) +1226: i112 = int_is_true(i111) -guard_true(i112, descr=) [p1, p0, p49, p89, i109, p2, p5, p10, p12, p16, i99, i103, p102, None, None, i51, p36, p50] +guard_true(i112, descr=) [p1, p0, p49, p89, i109, p2, p5, p10, p12, p16, p102, i99, i103, None, None, p36, i51, p50] +1236: i114 = getarrayitem_gc_pure(p102, 5, descr=) +1240: i116 = int_gt(i114, 1) -guard_false(i116, descr=) [p1, p0, p49, p89, p2, p5, p10, p12, p16, i99, i103, p102, None, None, i51, p36, p50] +guard_false(i116, descr=) [p1, p0, p49, p89, p2, p5, p10, p12, p16, p102, i99, i103, None, None, p36, i51, p50] +1250: i118 = getarrayitem_gc_pure(p102, 1, descr=) +1254: i120 = int_add(i118, 1) +1258: i121 = getarrayitem_gc_pure(p102, i120, descr=) +1263: i123 = int_eq(i121, 19) -guard_true(i123, descr=) [p1, p0, p49, p89, i120, p2, p5, p10, p12, p16, i99, i103, p102, None, None, i51, p36, p50] +guard_true(i123, descr=) [p1, p0, p49, p89, i120, p2, p5, p10, p12, p16, p102, i99, i103, None, None, p36, i51, p50] +1273: i125 = int_add(i120, 1) +1280: i126 = getarrayitem_gc_pure(p102, i125, descr=) +1285: i128 = int_add(i120, 2) +1289: i130 = int_lt(0, i99) -guard_true(i130, descr=) [p1, p0, p49, p89, i126, i128, p2, p5, p10, p12, p16, i99, i103, p102, None, None, i51, p36, p50] -+1299: guard_value(i128, 11, descr=) [p1, p0, p49, p89, i126, i128, p102, p2, p5, p10, p12, p16, i99, i103, None, None, None, i51, p36, p50] -+1309: 
guard_value(i126, 51, descr=) [p1, p0, p49, p89, i126, p102, p2, p5, p10, p12, p16, i99, i103, None, None, None, i51, p36, p50] -+1319: guard_value(p102, ConstPtr(ptr133), descr=) [p1, p0, p49, p89, p102, p2, p5, p10, p12, p16, i99, i103, None, None, None, i51, p36, p50] +guard_true(i130, descr=) [p1, p0, p49, p89, i126, i128, p2, p5, p10, p12, p16, p102, i99, i103, None, None, p36, i51, p50] ++1299: guard_value(i128, 11, descr=) [p1, p0, p49, p89, i126, i128, p102, p2, p5, p10, p12, p16, None, i99, i103, None, None, p36, i51, p50] ++1309: guard_value(i126, 51, descr=) [p1, p0, p49, p89, i126, p102, p2, p5, p10, p12, p16, None, i99, i103, None, None, p36, i51, p50] ++1319: guard_value(p102, ConstPtr(ptr133), descr=) [p1, p0, p49, p89, p102, p2, p5, p10, p12, p16, None, i99, i103, None, None, p36, i51, p50] debug_merge_point(2, 're StrLiteralSearch at 11/51 [17. 8. 3. 1. 1. 1. 1. 51. 0. 19. 51. 1]') +1338: i134 = force_token() p136 = new_with_vtable(38602768) @@ -1686,69 +1970,69 @@ +1494: setfield_gc(p136, i103, descr=) +1498: setfield_gc(p136, i99, descr=) +1502: setfield_gc(p136, p36, descr=) -+1506: i138 = call_assembler(0, p136, descr=) -guard_not_forced(, descr=) [p1, p0, p49, p136, p89, i138, p137, p2, p5, p10, p12, p16, p36, p50] -+1599: guard_no_exception(, descr=) [p1, p0, p49, p136, p89, i138, p137, p2, p5, p10, p12, p16, p36, p50] -+1614: guard_false(i138, descr=) [p1, p0, p49, p136, p89, p137, p2, p5, p10, p12, p16, p36, p50] ++1506: i138 = call_assembler(0, p136, descr=) +guard_not_forced(, descr=) [p1, p0, p49, p136, p89, i138, p137, p2, p5, p10, p12, p16, p36, p50] ++1599: guard_no_exception(, descr=) [p1, p0, p49, p136, p89, i138, p137, p2, p5, p10, p12, p16, p36, p50] ++1614: guard_false(i138, descr=) [p1, p0, p49, p136, p89, p137, p2, p5, p10, p12, p16, p36, p50] debug_merge_point(1, ' #21 RETURN_VALUE') +1623: p139 = getfield_gc(p49, descr=) -+1634: guard_isnull(p139, descr=) [p1, p0, p49, p139, p137, p2, p5, p10, p12, p16, p36, p50] ++1634: 
guard_isnull(p139, descr=) [p1, p0, p49, p139, p137, p2, p5, p10, p12, p16, p36, p50] +1643: i140 = getfield_gc(p49, descr=) +1647: i141 = int_is_true(i140) -guard_false(i141, descr=) [p1, p0, p49, p137, p2, p5, p10, p12, p16, p36, p50] +guard_false(i141, descr=) [p1, p0, p49, p137, p2, p5, p10, p12, p16, p36, p50] +1657: p142 = getfield_gc(p49, descr=) debug_merge_point(0, ' #65 POP_TOP') debug_merge_point(0, ' #66 JUMP_ABSOLUTE') setfield_gc(p49, p50, descr=) -+1695: setfield_gc(p137, -3, descr=) -+1710: guard_not_invalidated(, descr=) [p1, p0, p2, p5, p10, p12, p16, p36, None] -+1710: i145 = getfield_raw(44057928, descr=) -+1718: i147 = int_lt(i145, 0) -guard_false(i147, descr=) [p1, p0, p2, p5, p10, p12, p16, p36, None] ++1693: setfield_gc(p137, -3, descr=) ++1708: guard_not_invalidated(, descr=) [p1, p0, p2, p5, p10, p12, p16, p36, None] ++1708: i145 = getfield_raw(44057928, descr=) ++1716: i147 = int_lt(i145, 0) +guard_false(i147, descr=) [p1, p0, p2, p5, p10, p12, p16, p36, None] debug_merge_point(0, ' #44 FOR_ITER') -+1728: label(p0, p1, p2, p5, p10, p12, p36, p16, i140, p49, p50, descr=TargetToken(140669221670928)) ++1726: label(p0, p1, p2, p5, p10, p12, p36, p16, i140, p49, p50, descr=TargetToken(139951894600448)) debug_merge_point(0, ' #44 FOR_ITER') -+1758: p148 = getfield_gc(p16, descr=) -+1769: guard_nonnull(p148, descr=) [p1, p0, p16, p148, p2, p5, p10, p12, p36] -+1778: i149 = getfield_gc(p16, descr=) -+1782: p150 = getfield_gc(p148, descr=) -+1786: guard_class(p150, 38655536, descr=) [p1, p0, p16, i149, p150, p148, p2, p5, p10, p12, p36] -+1798: p151 = getfield_gc(p148, descr=) -+1802: i152 = getfield_gc(p151, descr=) -+1806: i153 = uint_ge(i149, i152) -guard_false(i153, descr=) [p1, p0, p16, i149, i152, p151, p2, p5, p10, p12, p36] -+1815: p154 = getfield_gc(p151, descr=) -+1819: p155 = getarrayitem_gc(p154, i149, descr=) -+1824: guard_nonnull(p155, descr=) [p1, p0, p16, i149, p155, p2, p5, p10, p12, p36] -+1833: i156 = int_add(i149, 1) ++1756: 
p148 = getfield_gc(p16, descr=) ++1767: guard_nonnull(p148, descr=) [p1, p0, p16, p148, p2, p5, p10, p12, p36] ++1776: i149 = getfield_gc(p16, descr=) ++1780: p150 = getfield_gc(p148, descr=) ++1784: guard_class(p150, 38655536, descr=) [p1, p0, p16, i149, p150, p148, p2, p5, p10, p12, p36] ++1797: p151 = getfield_gc(p148, descr=) ++1801: i152 = getfield_gc(p151, descr=) ++1805: i153 = uint_ge(i149, i152) +guard_false(i153, descr=) [p1, p0, p16, i149, i152, p151, p2, p5, p10, p12, p36] ++1814: p154 = getfield_gc(p151, descr=) ++1818: p155 = getarrayitem_gc(p154, i149, descr=) ++1823: guard_nonnull(p155, descr=) [p1, p0, p16, i149, p155, p2, p5, p10, p12, p36] ++1832: i156 = int_add(i149, 1) debug_merge_point(0, ' #47 STORE_FAST') debug_merge_point(0, ' #50 LOAD_GLOBAL') -+1837: p157 = getfield_gc(p0, descr=) -+1848: setfield_gc(p16, i156, descr=) -+1852: guard_value(p157, ConstPtr(ptr42), descr=) [p1, p0, p157, p2, p5, p10, p12, p16, p155, None] -+1871: p158 = getfield_gc(p157, descr=) -+1875: guard_value(p158, ConstPtr(ptr44), descr=) [p1, p0, p158, p157, p2, p5, p10, p12, p16, p155, None] -+1894: guard_not_invalidated(, descr=) [p1, p0, p157, p2, p5, p10, p12, p16, p155, None] ++1836: p157 = getfield_gc(p0, descr=) ++1847: setfield_gc(p16, i156, descr=) ++1851: guard_value(p157, ConstPtr(ptr42), descr=) [p1, p0, p157, p2, p5, p10, p12, p16, p155, None] ++1870: p158 = getfield_gc(p157, descr=) ++1874: guard_value(p158, ConstPtr(ptr44), descr=) [p1, p0, p158, p157, p2, p5, p10, p12, p16, p155, None] ++1893: guard_not_invalidated(, descr=) [p1, p0, p157, p2, p5, p10, p12, p16, p155, None] debug_merge_point(0, ' #53 LOOKUP_METHOD') -+1894: p159 = getfield_gc(ConstPtr(ptr45), descr=) -+1907: guard_value(p159, ConstPtr(ptr47), descr=) [p1, p0, p159, p2, p5, p10, p12, p16, p155, None] ++1893: p159 = getfield_gc(ConstPtr(ptr45), descr=) ++1906: guard_value(p159, ConstPtr(ptr47), descr=) [p1, p0, p159, p2, p5, p10, p12, p16, p155, None] debug_merge_point(0, ' #56 
LOAD_CONST') debug_merge_point(0, ' #59 LOAD_FAST') debug_merge_point(0, ' #62 CALL_METHOD') -+1926: i160 = force_token() -+1926: i161 = int_is_zero(i140) -guard_true(i161, descr=) [p1, p0, p49, p2, p5, p10, p12, p16, p50, i160, p155, None] ++1925: i160 = force_token() ++1925: i161 = int_is_zero(i140) +guard_true(i161, descr=) [p1, p0, p49, p2, p5, p10, p12, p16, i160, p50, p155, None] debug_merge_point(1, ' #0 LOAD_GLOBAL') debug_merge_point(1, ' #3 LOAD_FAST') debug_merge_point(1, ' #6 LOAD_FAST') debug_merge_point(1, ' #9 CALL_FUNCTION') -+1936: i162 = getfield_gc(ConstPtr(ptr55), descr=) -+1949: i163 = int_ge(0, i162) -guard_true(i163, descr=) [p1, p0, p49, i162, p2, p5, p10, p12, p16, p50, i160, p155, None] -+1959: i164 = force_token() ++1935: i162 = getfield_gc(ConstPtr(ptr55), descr=) ++1948: i163 = int_ge(0, i162) +guard_true(i163, descr=) [p1, p0, p49, i162, p2, p5, p10, p12, p16, i160, p50, p155, None] ++1958: i164 = force_token() debug_merge_point(2, ' #0 LOAD_GLOBAL') -+1959: p165 = getfield_gc(ConstPtr(ptr60), descr=) -+1967: guard_value(p165, ConstPtr(ptr62), descr=) [p1, p0, p49, p165, p2, p5, p10, p12, p16, i164, p50, i160, p155, None] ++1958: p165 = getfield_gc(ConstPtr(ptr60), descr=) ++1966: guard_value(p165, ConstPtr(ptr62), descr=) [p1, p0, p49, p165, p2, p5, p10, p12, p16, i164, i160, p50, p155, None] debug_merge_point(2, ' #3 LOAD_FAST') debug_merge_point(2, ' #6 LOAD_CONST') debug_merge_point(2, ' #9 BINARY_SUBSCR') @@ -1761,242 +2045,242 @@ debug_merge_point(2, ' #26 LOOKUP_METHOD') debug_merge_point(2, ' #29 LOAD_FAST') debug_merge_point(2, ' #32 CALL_METHOD') -+1980: p166 = getfield_gc(ConstPtr(ptr63), descr=) -+1993: guard_class(p166, ConstClass(ObjectDictStrategy), descr=) [p1, p0, p49, p166, p2, p5, p10, p12, p16, i164, p50, i160, p155, None] ++1979: p166 = getfield_gc(ConstPtr(ptr63), descr=) ++1992: guard_class(p166, ConstClass(ObjectDictStrategy), descr=) [p1, p0, p49, p166, p2, p5, p10, p12, p16, i164, i160, p50, p155, None] +2005: 
p167 = getfield_gc(ConstPtr(ptr63), descr=) +2018: i168 = force_token() p169 = new_array(3, descr=) p170 = new_with_vtable(38637968) +2117: setfield_gc(p170, i164, descr=) setfield_gc(p49, p170, descr=) -+2170: setfield_gc(p0, i168, descr=) -+2174: setarrayitem_gc(p169, 0, ConstPtr(ptr73), descr=) -+2182: setarrayitem_gc(p169, 1, ConstPtr(ptr75), descr=) -+2196: setarrayitem_gc(p169, 2, ConstPtr(ptr174), descr=) -+2210: i175 = call_may_force(ConstClass(hash_tuple), p169, descr=) -guard_not_forced(, descr=) [p1, p0, p49, p167, i175, p170, p2, p5, p10, p12, p16, p169, p50, i160, p155] -+2275: guard_no_exception(, descr=) [p1, p0, p49, p167, i175, p170, p2, p5, p10, p12, p16, p169, p50, i160, p155] -+2290: i176 = force_token() ++2168: setfield_gc(p0, i168, descr=) ++2172: setarrayitem_gc(p169, 0, ConstPtr(ptr73), descr=) ++2180: setarrayitem_gc(p169, 1, ConstPtr(ptr75), descr=) ++2194: setarrayitem_gc(p169, 2, ConstPtr(ptr174), descr=) ++2208: i175 = call_may_force(ConstClass(hash_tuple), p169, descr=) +guard_not_forced(, descr=) [p1, p0, p49, p167, i175, p170, p2, p5, p10, p12, p16, i160, p169, p155, p50] ++2273: guard_no_exception(, descr=) [p1, p0, p49, p167, i175, p170, p2, p5, p10, p12, p16, i160, p169, p155, p50] ++2288: i176 = force_token() p177 = new_with_vtable(38549536) -+2360: setfield_gc(p0, i176, descr=) -+2371: setfield_gc(p177, p169, descr=) -+2382: i178 = call_may_force(ConstClass(ll_dict_lookup_trampoline__v693___simple_call__function_l), p167, p177, i175, descr=) -guard_not_forced(, descr=) [p1, p0, p49, p177, i178, p167, p170, p2, p5, p10, p12, p16, p50, i160, p155] -+2440: guard_no_exception(, descr=) [p1, p0, p49, p177, i178, p167, p170, p2, p5, p10, p12, p16, p50, i160, p155] -+2455: i179 = int_and(i178, -9223372036854775808) -+2471: i180 = int_is_true(i179) -guard_false(i180, descr=) [p1, p0, p49, p177, i178, p167, p170, p2, p5, p10, p12, p16, p50, i160, p155] -+2481: p181 = getfield_gc(p167, descr=) -+2492: p182 = getinteriorfield_gc(p181, 
i178, descr=>) -+2501: guard_nonnull_class(p182, 38793968, descr=) [p1, p0, p49, p177, p182, p170, p2, p5, p10, p12, p16, p50, i160, p155] ++2358: setfield_gc(p0, i176, descr=) ++2369: setfield_gc(p177, p169, descr=) ++2380: i178 = call_may_force(ConstClass(ll_dict_lookup_trampoline__v693___simple_call__function_l), p167, p177, i175, descr=) +guard_not_forced(, descr=) [p1, p0, p49, p177, i178, p167, p170, p2, p5, p10, p12, p16, i160, p155, p50] ++2438: guard_no_exception(, descr=) [p1, p0, p49, p177, i178, p167, p170, p2, p5, p10, p12, p16, i160, p155, p50] ++2453: i179 = int_and(i178, -9223372036854775808) ++2469: i180 = int_is_true(i179) +guard_false(i180, descr=) [p1, p0, p49, p177, i178, p167, p170, p2, p5, p10, p12, p16, i160, p155, p50] ++2479: p181 = getfield_gc(p167, descr=) ++2490: p182 = getinteriorfield_gc(p181, i178, descr=>) ++2499: guard_nonnull_class(p182, 38793968, descr=) [p1, p0, p49, p177, p182, p170, p2, p5, p10, p12, p16, i160, p155, p50] debug_merge_point(2, ' #35 STORE_FAST') debug_merge_point(2, ' #38 LOAD_FAST') debug_merge_point(2, ' #41 LOAD_CONST') debug_merge_point(2, ' #44 COMPARE_OP') -+2519: i183 = instance_ptr_eq(ConstPtr(ptr91), p182) -guard_false(i183, descr=) [p1, p0, p49, p170, p2, p5, p10, p12, p16, p182, p177, p50, i160, p155] ++2517: i183 = instance_ptr_eq(ConstPtr(ptr91), p182) +guard_false(i183, descr=) [p1, p0, p49, p170, p2, p5, p10, p12, p16, p182, p177, i160, p155, p50] debug_merge_point(2, ' #47 POP_JUMP_IF_FALSE') debug_merge_point(2, ' #50 LOAD_FAST') debug_merge_point(2, ' #53 RETURN_VALUE') -+2532: p184 = getfield_gc(p49, descr=) -+2543: guard_isnull(p184, descr=) [p1, p0, p49, p182, p184, p170, p2, p5, p10, p12, p16, None, p177, p50, i160, p155] -+2552: i185 = getfield_gc(p49, descr=) -+2556: i186 = int_is_true(i185) -guard_false(i186, descr=) [p1, p0, p49, p182, p170, p2, p5, p10, p12, p16, None, p177, p50, i160, p155] -+2566: p187 = getfield_gc(p49, descr=) ++2530: p184 = getfield_gc(p49, descr=) ++2541: 
guard_isnull(p184, descr=) [p1, p0, p49, p182, p184, p170, p2, p5, p10, p12, p16, None, p177, i160, p155, p50] ++2550: i185 = getfield_gc(p49, descr=) ++2554: i186 = int_is_true(i185) +guard_false(i186, descr=) [p1, p0, p49, p182, p170, p2, p5, p10, p12, p16, None, p177, i160, p155, p50] ++2564: p187 = getfield_gc(p49, descr=) debug_merge_point(1, ' #12 LOOKUP_METHOD') -+2566: setfield_gc(p170, -3, descr=) ++2564: setfield_gc(p170, -3, descr=) debug_merge_point(1, ' #15 LOAD_FAST') debug_merge_point(1, ' #18 CALL_METHOD') -+2581: guard_not_invalidated(, descr=) [p1, p0, p49, p2, p5, p10, p12, p16, p182, None, p50, i160, p155] -+2581: i189 = strlen(p155) -+2592: i191 = int_gt(9223372036854775807, i189) -guard_true(i191, descr=) [p1, p0, p49, p182, p155, p2, p5, p10, p12, p16, None, None, p50, i160, None] -+2611: p192 = getfield_gc_pure(p182, descr=) -+2615: i193 = getfield_gc_pure(p182, descr=) -+2619: i194 = getarrayitem_gc_pure(p192, 0, descr=) -+2623: i195 = int_eq(i194, 17) -guard_true(i195, descr=) [p1, p0, p49, p182, p2, p5, p10, p12, p16, i189, i193, p192, None, None, p50, i160, p155] -+2633: i196 = getarrayitem_gc_pure(p192, 2, descr=) -+2637: i197 = int_and(i196, 1) -+2644: i198 = int_is_true(i197) -guard_true(i198, descr=) [p1, p0, p49, p182, i196, p2, p5, p10, p12, p16, i189, i193, p192, None, None, p50, i160, p155] -+2654: i199 = getarrayitem_gc_pure(p192, 5, descr=) -+2658: i200 = int_gt(i199, 1) -guard_false(i200, descr=) [p1, p0, p49, p182, p2, p5, p10, p12, p16, i189, i193, p192, None, None, p50, i160, p155] -+2668: i201 = getarrayitem_gc_pure(p192, 1, descr=) -+2672: i202 = int_add(i201, 1) -+2676: i203 = getarrayitem_gc_pure(p192, i202, descr=) -+2681: i204 = int_eq(i203, 19) -guard_true(i204, descr=) [p1, p0, p49, p182, i202, p2, p5, p10, p12, p16, i189, i193, p192, None, None, p50, i160, p155] -+2691: i205 = int_add(i202, 1) -+2698: i206 = getarrayitem_gc_pure(p192, i205, descr=) -+2703: i207 = int_add(i202, 2) -+2707: i209 = int_lt(0, i189) 
-guard_true(i209, descr=) [p1, p0, p49, p182, i206, i207, p2, p5, p10, p12, p16, i189, i193, p192, None, None, p50, i160, p155] -+2717: guard_value(i207, 11, descr=) [p1, p0, p49, p182, i206, i207, p192, p2, p5, p10, p12, p16, i189, i193, None, None, None, p50, i160, p155] -+2727: guard_value(i206, 51, descr=) [p1, p0, p49, p182, i206, p192, p2, p5, p10, p12, p16, i189, i193, None, None, None, p50, i160, p155] -+2737: guard_value(p192, ConstPtr(ptr133), descr=) [p1, p0, p49, p182, p192, p2, p5, p10, p12, p16, i189, i193, None, None, None, p50, i160, p155] ++2579: guard_not_invalidated(, descr=) [p1, p0, p49, p2, p5, p10, p12, p16, p182, None, i160, p155, p50] ++2579: i189 = strlen(p155) ++2590: i191 = int_gt(9223372036854775807, i189) +guard_true(i191, descr=) [p1, p0, p49, p182, p155, p2, p5, p10, p12, p16, None, None, i160, None, p50] ++2609: p192 = getfield_gc_pure(p182, descr=) ++2613: i193 = getfield_gc_pure(p182, descr=) ++2617: i194 = getarrayitem_gc_pure(p192, 0, descr=) ++2621: i195 = int_eq(i194, 17) +guard_true(i195, descr=) [p1, p0, p49, p182, p2, p5, p10, p12, p16, p192, i189, i193, None, None, i160, p155, p50] ++2631: i196 = getarrayitem_gc_pure(p192, 2, descr=) ++2635: i197 = int_and(i196, 1) ++2642: i198 = int_is_true(i197) +guard_true(i198, descr=) [p1, p0, p49, p182, i196, p2, p5, p10, p12, p16, p192, i189, i193, None, None, i160, p155, p50] ++2652: i199 = getarrayitem_gc_pure(p192, 5, descr=) ++2656: i200 = int_gt(i199, 1) +guard_false(i200, descr=) [p1, p0, p49, p182, p2, p5, p10, p12, p16, p192, i189, i193, None, None, i160, p155, p50] ++2666: i201 = getarrayitem_gc_pure(p192, 1, descr=) ++2670: i202 = int_add(i201, 1) ++2674: i203 = getarrayitem_gc_pure(p192, i202, descr=) ++2679: i204 = int_eq(i203, 19) +guard_true(i204, descr=) [p1, p0, p49, p182, i202, p2, p5, p10, p12, p16, p192, i189, i193, None, None, i160, p155, p50] ++2689: i205 = int_add(i202, 1) ++2696: i206 = getarrayitem_gc_pure(p192, i205, descr=) ++2701: i207 = int_add(i202, 2) 
++2705: i209 = int_lt(0, i189) +guard_true(i209, descr=) [p1, p0, p49, p182, i206, i207, p2, p5, p10, p12, p16, p192, i189, i193, None, None, i160, p155, p50] ++2715: guard_value(i207, 11, descr=) [p1, p0, p49, p182, i206, i207, p192, p2, p5, p10, p12, p16, None, i189, i193, None, None, i160, p155, p50] ++2725: guard_value(i206, 51, descr=) [p1, p0, p49, p182, i206, p192, p2, p5, p10, p12, p16, None, i189, i193, None, None, i160, p155, p50] ++2735: guard_value(p192, ConstPtr(ptr133), descr=) [p1, p0, p49, p182, p192, p2, p5, p10, p12, p16, None, i189, i193, None, None, i160, p155, p50] debug_merge_point(2, 're StrLiteralSearch at 11/51 [17. 8. 3. 1. 1. 1. 1. 51. 0. 19. 51. 1]') -+2756: i210 = force_token() ++2754: i210 = force_token() p211 = new_with_vtable(38602768) p212 = new_with_vtable(38637968) -+2840: setfield_gc(p212, i160, descr=) ++2838: setfield_gc(p212, i160, descr=) setfield_gc(p49, p212, descr=) -+2888: setfield_gc(p0, i210, descr=) -+2899: setfield_gc(p211, ConstPtr(ptr133), descr=) -+2913: setfield_gc(p211, i193, descr=) -+2917: setfield_gc(p211, i189, descr=) -+2921: setfield_gc(p211, p155, descr=) -+2925: i213 = call_assembler(0, p211, descr=) -guard_not_forced(, descr=) [p1, p0, p49, p211, p182, i213, p212, p2, p5, p10, p12, p16, p155, p50] -+3018: guard_no_exception(, descr=) [p1, p0, p49, p211, p182, i213, p212, p2, p5, p10, p12, p16, p155, p50] -+3033: guard_false(i213, descr=) [p1, p0, p49, p211, p182, p212, p2, p5, p10, p12, p16, p155, p50] ++2889: setfield_gc(p0, i210, descr=) ++2900: setfield_gc(p211, ConstPtr(ptr133), descr=) ++2914: setfield_gc(p211, i193, descr=) ++2918: setfield_gc(p211, i189, descr=) ++2922: setfield_gc(p211, p155, descr=) ++2926: i213 = call_assembler(0, p211, descr=) +guard_not_forced(, descr=) [p1, p0, p49, p211, p182, i213, p212, p2, p5, p10, p12, p16, p155, p50] ++3019: guard_no_exception(, descr=) [p1, p0, p49, p211, p182, i213, p212, p2, p5, p10, p12, p16, p155, p50] ++3034: guard_false(i213, descr=) [p1, p0, 
p49, p211, p182, p212, p2, p5, p10, p12, p16, p155, p50] debug_merge_point(1, ' #21 RETURN_VALUE') -+3042: p214 = getfield_gc(p49, descr=) -+3053: guard_isnull(p214, descr=) [p1, p0, p49, p214, p212, p2, p5, p10, p12, p16, p155, p50] -+3062: i215 = getfield_gc(p49, descr=) -+3066: i216 = int_is_true(i215) -guard_false(i216, descr=) [p1, p0, p49, p212, p2, p5, p10, p12, p16, p155, p50] -+3076: p217 = getfield_gc(p49, descr=) ++3043: p214 = getfield_gc(p49, descr=) ++3054: guard_isnull(p214, descr=) [p1, p0, p49, p214, p212, p2, p5, p10, p12, p16, p155, p50] ++3063: i215 = getfield_gc(p49, descr=) ++3067: i216 = int_is_true(i215) +guard_false(i216, descr=) [p1, p0, p49, p212, p2, p5, p10, p12, p16, p155, p50] ++3077: p217 = getfield_gc(p49, descr=) debug_merge_point(0, ' #65 POP_TOP') debug_merge_point(0, ' #66 JUMP_ABSOLUTE') setfield_gc(p49, p50, descr=) -+3110: setfield_gc(p212, -3, descr=) -+3125: guard_not_invalidated(, descr=) [p1, p0, p2, p5, p10, p12, p16, p155, None] -+3125: i219 = getfield_raw(44057928, descr=) -+3133: i220 = int_lt(i219, 0) -guard_false(i220, descr=) [p1, p0, p2, p5, p10, p12, p16, p155, None] ++3117: setfield_gc(p212, -3, descr=) ++3132: guard_not_invalidated(, descr=) [p1, p0, p2, p5, p10, p12, p16, p155, None] ++3132: i219 = getfield_raw(44057928, descr=) ++3140: i220 = int_lt(i219, 0) +guard_false(i220, descr=) [p1, p0, p2, p5, p10, p12, p16, p155, None] debug_merge_point(0, ' #44 FOR_ITER') -+3143: jump(p0, p1, p2, p5, p10, p12, p155, p16, i215, p49, p50, descr=TargetToken(140669221670928)) -+3154: --end of the loop-- -[19b7525b1e48] jit-log-opt-loop} -[19b7527a80de] {jit-backend -[19b7527cf384] {jit-backend-dump ++3150: jump(p0, p1, p2, p5, p10, p12, p155, p16, i215, p49, p50, descr=TargetToken(139951894600448)) ++3161: --end of the loop-- +[b235913901b] jit-log-opt-loop} +[b235923f653] {jit-backend +[b235925599b] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181be7a7 +0 
488DA50000000049BBA022011BF07F00004D8B3B4983C70149BBA022011BF07F00004D893B4C8B7E404D0FB67C3F184983FF330F84000000004883C7014C8B7E084C39FF0F8C00000000B80000000048890425D0D1550141BBD01BF30041FFD3B802000000488D65D8415F415E415D415C5B5DC349BB00B01B18F07F000041FFD31D1803AD00000049BB00B01B18F07F000041FFD31D1803AE000000 -[19b7527d6140] jit-backend-dump} -[19b7527d6c7a] {jit-backend-addr -bridge out of Guard 90 has address 7ff0181be7a7 to 7ff0181be81b -[19b7527d818c] jit-backend-addr} -[19b7527d8e0a] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914168d67 +0 488DA50000000049BB30C3FB16497F00004D8B3B4983C70149BB30C3FB16497F00004D893B4C8B7E404D0FB67C3F184983FF330F84000000004883C7014C8B7E084C39FF0F8C00000000B80000000048890425D0D1550141BBD01BF30041FFD3B802000000488D65D8415F415E415D415C5B5DC349BB00501614497F000041FFD31D1803C600000049BB00501614497F000041FFD31D1803C7000000 +[b235925dd75] jit-backend-dump} +[b235925e447] {jit-backend-addr +bridge out of Guard 115 has address 7f4914168d67 to 7f4914168ddb +[b235925f0bd] jit-backend-addr} +[b235925f6af] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181be7aa +0 70FFFFFF -[19b7527da6d0] jit-backend-dump} -[19b7527db294] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914168d6a +0 70FFFFFF +[b2359260251] jit-backend-dump} +[b2359260823] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181be7dc +0 3B000000 -[19b7527e5f5c] jit-backend-dump} -[19b7527e6b08] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914168d9c +0 3B000000 +[b2359261641] jit-backend-dump} +[b2359261c11] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181be7ed +0 3E000000 -[19b7527e8170] jit-backend-dump} -[19b7527e8ed8] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914168dad +0 3E000000 +[b23592629c1] jit-backend-dump} +[b235926331b] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bce06 +0 9D190000 -[19b7527ea4f2] jit-backend-dump} -[19b7527eb4f4] 
jit-backend} -[19b7527ecb80] {jit-log-opt-bridge -# bridge out of Guard 90 with 10 ops +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141673d3 +0 90190000 +[b235926459b] jit-backend-dump} +[b2359264f3d] jit-backend} +[b2359265ccd] {jit-log-opt-bridge +# bridge out of Guard 115 with 10 ops [i0, p1] debug_merge_point(0, 're StrLiteralSearch at 11/51 [17. 8. 3. 1. 1. 1. 1. 51. 0. 19. 51. 1]') +37: p2 = getfield_gc(p1, descr=) +41: i3 = strgetitem(p2, i0) +47: i5 = int_eq(i3, 51) -guard_false(i5, descr=) [i0, p1] +guard_false(i5, descr=) [i0, p1] +57: i7 = int_add(i0, 1) +61: i8 = getfield_gc_pure(p1, descr=) +65: i9 = int_lt(i7, i8) -guard_false(i9, descr=) [i7, p1] +guard_false(i9, descr=) [i7, p1] +74: finish(0, descr=) +116: --end of the loop-- -[19b752801b14] jit-log-opt-bridge} -[19b7533c96f0] {jit-backend -[19b75343127a] {jit-backend-dump +[b235927415b] jit-log-opt-bridge} +[b23597ef945] {jit-backend +[b2359822437] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181be85b +0 488DA50000000049BBB822011BF07F00004D8B3B4983C70149BBB822011BF07F00004D893B4C8BBD00FFFFFF4D8B77504D85F60F85000000004D8B77284983FE000F85000000004C8BB5F0FEFFFF41F6470401740F4C89FF4C89F641BBF0C4C50041FFD34D8977404C8BB5C0FEFFFF49C74608FDFFFFFF4C8B34254845A0024983FE000F8C00000000488B042530255601488D5010483B142548255601761A49BB2DB21B18F07F000041FFD349BBC2B21B18F07F000041FFD3488914253025560148C70088250000488B9508FFFFFF4889500849BBB81A2E18F07F00004D89DE41BD0000000041BA0400000048C78548FFFFFF2C00000048898538FFFFFF488B8D10FFFFFF48C78530FFFFFF0000000048C78528FFFFFF0000000048C78520FFFFFF0000000048C78518FFFFFF0000000049BB7ED11B18F07F000041FFE349BB00B01B18F07F000041FFD34C483C389801405044587094018001749C0103AF00000049BB00B01B18F07F000041FFD34C483C9801405044587094018001749C0103B000000049BB00B01B18F07F000041FFD34C4840504458700707740703B100000049BB00B01B18F07F000041FFD34C4840504458700707740703B2000000 -[19b75343cf02] jit-backend-dump} -[19b75343db92] {jit-backend-addr -bridge out of Guard 133 has 
address 7ff0181be85b to 7ff0181be999 -[19b75343f1ca] jit-backend-addr} -[19b75343ff6e] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914168e1b +0 488DA50000000049BB48C3FB16497F00004D8B3B4983C70149BB48C3FB16497F00004D893B4C8BBD00FFFFFF4D8B77504D85F60F85000000004D8B77284983FE000F85000000004C8BB5F8FEFFFF41F6470401740F4C89FF4C89F641BBF0C4C50041FFD34D8977404C8BB5C0FEFFFF49C74608FDFFFFFF4C8B34254845A0024983FE000F8C00000000488B042530255601488D5010483B142548255601761A49BB2D521614497F000041FFD349BBC2521614497F000041FFD3488914253025560148C70088250000488B9508FFFFFF4889500849BB98BD2814497F00004D89DE41BD0000000041BA0400000048C78548FFFFFF2C00000048898538FFFFFF488B8D10FFFFFF48C78530FFFFFF0000000048C78528FFFFFF0000000048C78520FFFFFF0000000048C78518FFFFFF0000000049BB4A771614497F000041FFE349BB00501614497F000041FFD34C483C389801405044587094017C749C0103C800000049BB00501614497F000041FFD34C483C9801405044587094017C749C0103C900000049BB00501614497F000041FFD34C4840504458700707740703CA00000049BB00501614497F000041FFD34C4840504458700707740703CB000000 +[b2359827fa9] jit-backend-dump} +[b2359828573] {jit-backend-addr +bridge out of Guard 158 has address 7f4914168e1b to 7f4914168f59 +[b235982902d] jit-backend-addr} +[b23598298f5] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181be85e +0 E0FDFFFF -[19b753441846] jit-backend-dump} -[19b753442902] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914168e1e +0 E0FDFFFF +[b235982a3e1] jit-backend-dump} +[b235982ac2d] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181be890 +0 05010000 -[19b753443ee6] jit-backend-dump} -[19b7534449d2] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914168e50 +0 05010000 +[b235982b5a9] jit-backend-dump} +[b235982ba03] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181be89e +0 1B010000 -[19b753445fd4] jit-backend-dump} -[19b753446c0a] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914168e5e +0 1A010000 +[b235982c329] 
jit-backend-dump} +[b235982c79d] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181be8e0 +0 19010000 -[19b753448134] jit-backend-dump} -[19b753448e60] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914168ea0 +0 17010000 +[b235982d073] jit-backend-dump} +[b235982d5e5] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd6e6 +0 71110000 -[19b75344a342] jit-backend-dump} -[19b75344b290] jit-backend} -[19b75344cba4] {jit-log-opt-bridge -# bridge out of Guard 133 with 19 ops +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167cb2 +0 65110000 +[b235982e017] jit-backend-dump} +[b235982e7ad] jit-backend} +[b235982f3a9] {jit-log-opt-bridge +# bridge out of Guard 158 with 19 ops [p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12] debug_merge_point(1, ' #21 RETURN_VALUE') +37: p13 = getfield_gc(p2, descr=) -+48: guard_isnull(p13, descr=) [p0, p1, p2, p13, p5, p6, p7, p8, p9, p10, p4, p12, p11, p3] ++48: guard_isnull(p13, descr=) [p0, p1, p2, p13, p5, p6, p7, p8, p9, p10, p4, p12, p11, p3] +57: i14 = getfield_gc(p2, descr=) +61: i15 = int_is_true(i14) -guard_false(i15, descr=) [p0, p1, p2, p5, p6, p7, p8, p9, p10, p4, p12, p11, p3] +guard_false(i15, descr=) [p0, p1, p2, p5, p6, p7, p8, p9, p10, p4, p12, p11, p3] +71: p16 = getfield_gc(p2, descr=) debug_merge_point(0, ' #65 POP_TOP') debug_merge_point(0, ' #66 JUMP_ABSOLUTE') setfield_gc(p2, p12, descr=) +104: setfield_gc(p5, -3, descr=) -+119: guard_not_invalidated(, descr=) [p0, p1, p6, p7, p8, p9, p10, None, None, p11, None] ++119: guard_not_invalidated(, descr=) [p0, p1, p6, p7, p8, p9, p10, None, None, p11, None] +119: i20 = getfield_raw(44057928, descr=) +127: i22 = int_lt(i20, 0) -guard_false(i22, descr=) [p0, p1, p6, p7, p8, p9, p10, None, None, p11, None] +guard_false(i22, descr=) [p0, p1, p6, p7, p8, p9, p10, None, None, p11, None] debug_merge_point(0, ' #44 FOR_ITER') p24 = new_with_vtable(ConstClass(W_StringObject)) +200: setfield_gc(p24, p11, descr=) -+211: jump(p1, 
p0, p6, ConstPtr(ptr25), 0, p7, 4, 44, p8, p9, p24, p10, ConstPtr(ptr29), ConstPtr(ptr30), ConstPtr(ptr30), ConstPtr(ptr30), descr=TargetToken(140669221670848)) ++211: jump(p1, p0, p6, ConstPtr(ptr25), 0, p7, 4, 44, p8, p9, p24, p10, ConstPtr(ptr29), ConstPtr(ptr30), ConstPtr(ptr30), ConstPtr(ptr30), descr=TargetToken(139951894600368)) +318: --end of the loop-- -[19b75348e4ae] jit-log-opt-bridge} -[19b7534f934a] {jit-backend -[19b7535138dc] {jit-backend-dump +[b235984e7c1] jit-log-opt-bridge} +[b23598831dd] {jit-backend +[b2359893023] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bea1a +0 488DA50000000049BBD022011BF07F00004D8B3B4983C70149BBD022011BF07F00004D893B4989FF4883C70148897E1848C74620000000004C897E28B80100000048890425D0D1550141BBD01BF30041FFD3B802000000488D65D8415F415E415D415C5B5DC3 -[19b7535193b4] jit-backend-dump} -[19b753519e04] {jit-backend-addr -bridge out of Guard 87 has address 7ff0181bea1a to 7ff0181bea80 -[19b75351b1e4] jit-backend-addr} -[19b75351bdde] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914168fd8 +0 488DA50000000049BB60C3FB16497F00004D8B3B4983C70149BB60C3FB16497F00004D893B4989FF4883C70148897E1848C74620000000004C897E28B80100000048890425D0D1550141BBD01BF30041FFD3B802000000488D65D8415F415E415D415C5B5DC3 +[b2359895b7f] jit-backend-dump} +[b2359896007] {jit-backend-addr +bridge out of Guard 112 has address 7f4914168fd8 to 7f491416903e +[b2359896843] jit-backend-addr} +[b2359896dfd] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bea1d +0 70FFFFFF -[19b75351d656] jit-backend-dump} -[19b75351e352] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914168fdb +0 70FFFFFF +[b2359897815] jit-backend-dump} +[b2359897f39] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bcd49 +0 CD1C0000 -[19b75351f8c4] jit-backend-dump} -[19b753520680] jit-backend} -[19b753521916] {jit-log-opt-bridge -# bridge out of Guard 87 with 5 ops +SYS_EXECUTABLE pypy +CODE_DUMP 
@7f4914167316 +0 BE1C0000 +[b23598989e5] jit-backend-dump} +[b2359899033] jit-backend} +[b23598998a9] {jit-log-opt-bridge +# bridge out of Guard 112 with 5 ops [i0, p1] +37: i3 = int_add(i0, 1) +44: setfield_gc(p1, i3, descr=) @@ -2004,29 +2288,29 @@ +56: setfield_gc(p1, i0, descr=) +60: finish(1, descr=) +102: --end of the loop-- -[19b75352d994] jit-log-opt-bridge} -[19b7537d0b26] {jit-backend -[19b7537ea8cc] {jit-backend-dump +[b235989f283] jit-log-opt-bridge} +[b23599d1a4b] {jit-backend +[b23599de1bf] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bea80 +0 488DA50000000049BBE822011BF07F00004D8B3B4983C70149BBE822011BF07F00004D893B4989FF4883C70148897E1848C74620000000004C897E28B80100000048890425D0D1550141BBD01BF30041FFD3B802000000488D65D8415F415E415D415C5B5DC3 -[19b7537efeca] jit-backend-dump} -[19b7537f0908] {jit-backend-addr -bridge out of Guard 89 has address 7ff0181bea80 to 7ff0181beae6 -[19b7537f1db4] jit-backend-addr} -[19b7537f2966] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f491416903e +0 488DA50000000049BB78C3FB16497F00004D8B3B4983C70149BB78C3FB16497F00004D893B4989FF4883C70148897E1848C74620000000004C897E28B80100000048890425D0D1550141BBD01BF30041FFD3B802000000488D65D8415F415E415D415C5B5DC3 +[b23599e0c2d] jit-backend-dump} +[b23599e10a3] {jit-backend-addr +bridge out of Guard 114 has address 7f491416903e to 7f49141690a4 +[b23599e195d] jit-backend-addr} +[b23599e1eb3] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bea83 +0 70FFFFFF -[19b7537f42c2] jit-backend-dump} -[19b7537f4ffa] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914169041 +0 70FFFFFF +[b23599e29ad] jit-backend-dump} +[b23599e2fdd] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bcdf5 +0 871C0000 -[19b7537f6704] jit-backend-dump} -[19b7537f7448] jit-backend} -[19b7537f8582] {jit-log-opt-bridge -# bridge out of Guard 89 with 5 ops +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141673c2 +0 781C0000 
+[b23599e3a45] jit-backend-dump} +[b23599e403f] jit-backend} +[b23599e4823] {jit-log-opt-bridge +# bridge out of Guard 114 with 5 ops [i0, p1] +37: i3 = int_add(i0, 1) +44: setfield_gc(p1, i3, descr=) @@ -2034,159 +2318,159 @@ +56: setfield_gc(p1, i0, descr=) +60: finish(1, descr=) +102: --end of the loop-- -[19b7538043ae] jit-log-opt-bridge} -[19b753864132] {jit-backend-dump +[b23599ea139] jit-log-opt-bridge} +[b2359a1b785] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc76a +0 E986030000 -[19b75386734e] jit-backend-dump} -[19b753867d62] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914166d1d +0 E9A0030000 +[b2359a1d169] jit-backend-dump} +[b2359a1d6cb] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bc8d0 +0 E9F6020000 -[19b753869808] jit-backend-dump} -[19b75386a4b0] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914166e80 +0 E913030000 +[b2359a23d25] jit-backend-dump} +[b2359a24497] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd250 +0 E9D60B0000 -[19b75386bb42] jit-backend-dump} -[19b753877044] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f491416781c +0 E9DD0B0000 +[b2359a24f9f] jit-backend-dump} +[b2359a253b1] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd2a6 +0 E9F70B0000 -[19b753878cbe] jit-backend-dump} -[19b753879672] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167872 +0 E9FE0B0000 +[b2359a25ead] jit-backend-dump} +[b2359a2650b] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd51e +0 E93B0B0000 -[19b75387ad2e] jit-backend-dump} -[19b75387b61c] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167aea +0 E93E0B0000 +[b2359a26f5d] jit-backend-dump} +[b2359a27471] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd741 +0 E92E0B0000 -[19b75387cc18] jit-backend-dump} -[19b75387d5c0] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167d0b +0 
E9240B0000 +[b2359a27d99] jit-backend-dump} +[b2359a282bf] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bd7f9 +0 E9560B0000 -[19b75387ed24] jit-backend-dump} -[19b75387f702] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914167dc4 +0 E94B0B0000 +[b2359a28bd3] jit-backend-dump} +[b2359a29195] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bdaa8 +0 E9AB0A0000 -[19b753880cc8] jit-backend-dump} -[19b7538817a8] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914168072 +0 E9A10A0000 +[b2359a29b6b] jit-backend-dump} +[b2359a2a075] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bdcc8 +0 E9A40A0000 -[19b753882d9e] jit-backend-dump} -[19b7538836c8] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f491416829b +0 E9910A0000 +[b2359a2ab3f] jit-backend-dump} +[b2359a2b029] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181be8d2 +0 E909010000 -[19b753884cca] jit-backend-dump} -[19b7540dd168] {jit-backend -[19b7541b0878] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914168e92 +0 E907010000 +[b2359a2b931] jit-backend-dump} +[b2359f69dd7] {jit-backend +[b2359fce6a9] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181beae6 +0 
488B04254045A0024829E0483B0425E03C5101760D49BB63B31B18F07F000041FFD3554889E5534154415541564157488DA50000000049BB0023011BF07F00004D8B3B4983C70149BB0023011BF07F00004D893B4C8B7F504C8B77784C0FB6AF960000004C8B67604C8B97800000004C8B4F584C8B4768498B5810498B5018498B4020498B48284889B570FFFFFF498B70304C89A568FFFFFF4D8B60384889BD60FFFFFF498B78404D8B40484C89BD58FFFFFF4C898D50FFFFFF48898548FFFFFF48898D40FFFFFF4C89A538FFFFFF4889BD30FFFFFF4C898528FFFFFF49BB1823011BF07F00004D8B034983C00149BB1823011BF07F00004D89034983FA050F8500000000813E806300000F85000000004C8B56104D85D20F84000000004C8B4608498B7A10813F582D03000F85000000004D8B5208498B7A084D8B62104D8B52184983F8000F8C000000004D39D00F8D000000004C89C14D0FAFC44889F84C01C74883C10148894E084983FD000F85000000004883FB017206813BF82200000F85000000004883FA017206813AF82200000F85000000004C8B6A084D89E84901FD0F80000000004C8B4B084D01E90F80000000004C8B2C254845A0024983FD000F8C0000000049BB701B2E18F07F00004D39DE0F850000000048899520FFFFFF4889BD18FFFFFF49BB3023011BF07F0000498B3B4883C70149BB3023011BF07F000049893B4C39D10F8D000000004889CF490FAFCC4889C24801C84883C70148897E084C89C14901C00F80000000004D89CE4D01C10F80000000004C8B34254845A0024983FE000F8C0000000048898518FFFFFF4889D04989C84889F9E985FFFFFF49BB00B01B18F07F000041FFD32940484C383544510C085458185C606403B300000049BB00B01B18F07F000041FFD34048184C3835440C0854585C606403B400000049BB00B01B18F07F000041FFD3404818284C3835440C0854585C606403B500000049BB00B01B18F07F000041FFD3404818211C284C3835440C0854585C606403B600000049BB00B01B18F07F000041FFD34048182129311D4C3835440C0854585C606403B700000049BB00B01B18F07F000041FFD340481821311D4C3835440C0854585C606403B800000049BB00B01B18F07F000041FFD33540484C38440C0854581860641D03B900000049BB00B01B18F07F000041FFD340480C4C384408581860641D03BA00000049BB00B01B18F07F000041FFD34048084C38440C581860641D03BB00000049BB00B01B18F07F000041FFD3404808354C38440C58181D03BC00000049BB00B01B18F07F000041FFD340480C254C3844085818351D03BD00000049BB00B01B18F07F000041FFD340484C384408581825071D03BE00000049BB00B0
1B18F07F000041FFD340484C384408581825071D03BF00000049BB00B01B18F07F000041FFD34048384C4408581825071D03C000000049BB00B01B18F07F000041FFD34048180531014C446858256D03C100000049BB00B01B18F07F000041FFD3404868214C44581801250703C200000049BB00B01B18F07F000041FFD34048254C446858182101390703C300000049BB00B01B18F07F000041FFD340484C44685818250701070703C400000049BB00B01B18F07F000041FFD340484C44685818250701070703C5000000 -[19b7541c5aca] jit-backend-dump} -[19b7541c66ee] {jit-backend-addr -Loop 5 ( #38 FOR_ITER) has address 7ff0181beb1c to 7ff0181bed49 (bootstrap 7ff0181beae6) -[19b7541c85f0] jit-backend-addr} -[19b7541c959e] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141690a4 +0 488B04254045A0024829E0483B0425E03C5101760D49BB63531614497F000041FFD3554889E5534154415541564157488DA50000000049BB90C3FB16497F00004D8B3B4983C70149BB90C3FB16497F00004D893B4C8B7F504C8B77784C0FB6AF960000004C8B67604C8B97800000004C8B4F584C8B4768498B5810498B5018498B4020498B482848898D70FFFFFF498B483048898D68FFFFFF498B483848899560FFFFFF498B50404D8B40484889BD58FFFFFF4889B550FFFFFF4C89BD48FFFFFF4C89A540FFFFFF4C898D38FFFFFF48898530FFFFFF48898D28FFFFFF48899520FFFFFF4C898518FFFFFF49BBA8C3FB16497F00004D8B034983C00149BBA8C3FB16497F00004D89034983FA050F85000000004C8B9568FFFFFF41813A806300000F85000000004D8B42104D85C00F8400000000498B5208498B48108139582D03000F85000000004D8B4008498B4808498B40104D8B40184883FA000F8C000000004C39C20F8D000000004989D1480FAFD04989CC4801D14983C1014D894A084983FD000F85000000004883FB017206813BF82200000F85000000004C8BAD60FFFFFF4983FD01720841817D00F82200000F8500000000498B55084989D74801CA0F8000000000488B73084801D60F8000000000488B14254845A0024883FA000F8C0000000049BB50BE2814497F00004D39DE0F850000000048898D10FFFFFF49BBC0C3FB16497F0000498B0B4883C10149BBC0C3FB16497F000049890B4D39C10F8D000000004C89C94C0FAFC84D89E54D01CC4883C10149894A084D89F94D01E70F80000000004989F64C01FE0F80000000004C8B34254845A0024983FE000F8C000000004C89A510FFFFFF4D89EC4D89CF4989C9E985FFFFFF49BB00501614497F000041FFD329504C543835585D0C4860
404464686C03CC00000049BB00501614497F000041FFD3504C28543835580C48604064686C03CD00000049BB00501614497F000041FFD3504C2820543835580C48604064686C03CE00000049BB00501614497F000041FFD3504C28090420543835580C48604064686C03CF00000049BB00501614497F000041FFD3504C2809210105543835580C48604064686C03D000000049BB00501614497F000041FFD3504C28090105543835580C48604064686C03D100000049BB00501614497F000041FFD335504C5438580C48604028686C0503D200000049BB00501614497F000041FFD3504C0C543858484028686C0503D300000049BB00501614497F000041FFD3504C345438580C4028686C0503D400000049BB00501614497F000041FFD3504C34095438580C40280503D500000049BB00501614497F000041FFD3504C0C19543858344028090503D600000049BB00501614497F000041FFD3504C54385834402819070503D700000049BB00501614497F000041FFD3504C54385834402819070503D800000049BB00501614497F000041FFD3504C38545834402819070503D900000049BB00501614497F000041FFD3504C2825013154584840197103DA00000049BB00501614497F000041FFD3504C483D5458402831190703DB00000049BB00501614497F000041FFD3504C1954584840283D31390703DC00000049BB00501614497F000041FFD3504C5458484028190731070703DD00000049BB00501614497F000041FFD3504C5458484028190731070703DE000000 +[b2359fd8aed] jit-backend-dump} +[b2359fd90c3] {jit-backend-addr +Loop 7 ( #38 FOR_ITER) has address 7f49141690da to 7f491416931f (bootstrap 7f49141690a4) +[b2359fd9f95] jit-backend-addr} +[b2359fda533] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181beb18 +0 10FFFFFF -[19b7541caea6] jit-backend-dump} -[19b7541cbdee] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141690d6 +0 10FFFFFF +[b2359fe22ef] jit-backend-dump} +[b2359fe2dc1] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bebe7 +0 5E010000 -[19b7541cd570] jit-backend-dump} -[19b7541ce038] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141691b3 +0 68010000 +[b2359fe3943] jit-backend-dump} +[b2359fe3e5d] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bebf3 +0 74010000 -[19b7541de34c] 
jit-backend-dump} -[19b7541df048] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141691c7 +0 76010000 +[b2359fe48e9] jit-backend-dump} +[b2359fe4db1] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bec00 +0 87010000 -[19b7541e0722] jit-backend-dump} -[19b7541e1166] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141691d4 +0 89010000 +[b2359fe565f] jit-backend-dump} +[b2359fe5a45] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bec14 +0 94010000 -[19b7541e25ca] jit-backend-dump} -[19b7541e2ee8] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141691e8 +0 96010000 +[b2359fe62cb] jit-backend-dump} +[b2359fe66ad] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bec2e +0 9D010000 -[19b7541e44d8] jit-backend-dump} -[19b7541e4f16] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914169202 +0 9F010000 +[b2359fe6f21] jit-backend-dump} +[b2359fe7413] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bec37 +0 B8010000 -[19b7541e6464] jit-backend-dump} -[19b7541e6e00] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f491416920b +0 BA010000 +[b2359fe7df5] jit-backend-dump} +[b2359fe82e7] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bec56 +0 BC010000 -[19b7541e82ac] jit-backend-dump} -[19b7541e8be2] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f491416922a +0 BE010000 +[b2359fe8b5f] jit-backend-dump} +[b2359fe8f71] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bec68 +0 CA010000 -[19b7541ea02e] jit-backend-dump} -[19b7541ea95e] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f491416923c +0 CC010000 +[b2359fe97e9] jit-backend-dump} +[b2359fe9bdd] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bec7a +0 D6010000 -[19b7541ebe04] jit-backend-dump} -[19b7541ec740] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914169257 +0 CF010000 +[b2359fea453] 
jit-backend-dump} +[b2359fea821] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bec8a +0 E4010000 -[19b7541edcf4] jit-backend-dump} -[19b7541ee714] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914169267 +0 DD010000 +[b2359feb237] jit-backend-dump} +[b2359feb729] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bec97 +0 F4010000 -[19b7541efc74] jit-backend-dump} -[19b7541f08fe] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914169274 +0 ED010000 +[b2359fec123] jit-backend-dump} +[b2359fec739] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181beca9 +0 1D020000 -[19b7541f1d62] jit-backend-dump} -[19b7541f26aa] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914169286 +0 16020000 +[b2359fecfe5] jit-backend-dump} +[b2359fed3c3] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181becbc +0 27020000 -[19b7541f3b9e] jit-backend-dump} -[19b7541f450a] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914169299 +0 20020000 +[b2359fedc8d] jit-backend-dump} +[b2359fee073] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181becf1 +0 0F020000 -[19b7541f59da] jit-backend-dump} -[19b7541f63dc] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141692c7 +0 0F020000 +[b2359fee915] jit-backend-dump} +[b2359feedf1] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bed12 +0 0C020000 -[19b7541f79b4] jit-backend-dump} -[19b7541f8410] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141692e8 +0 0C020000 +[b2359fef873] jit-backend-dump} +[b2359fefd5f] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bed1e +0 1D020000 -[19b7541f98f2] jit-backend-dump} -[19b7541fa2f4] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141692f4 +0 1D020000 +[b2359ff0697] jit-backend-dump} +[b2359ff0adb] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bed30 +0 47020000 
-[19b7541fb788] jit-backend-dump} -[19b7541fcbf8] jit-backend} -[19b7542001f8] {jit-log-opt-loop -# Loop 5 ( #38 FOR_ITER) : loop with 86 ops +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914169306 +0 47020000 +[b2359ff136b] jit-backend-dump} +[b2359ff1b69] jit-backend} +[b2359ff38cf] {jit-log-opt-loop +# Loop 7 ( #38 FOR_ITER) : loop with 86 ops [p0, p1] +84: p2 = getfield_gc(p0, descr=) +88: p3 = getfield_gc(p0, descr=) @@ -2204,246 +2488,252 @@ +157: p22 = getarrayitem_gc(p8, 6, descr=) +168: p24 = getarrayitem_gc(p8, 7, descr=) +172: p25 = getfield_gc(p0, descr=) -+172: label(p0, p1, p2, p3, i4, p5, i6, i7, p10, p12, p14, p16, p18, p20, p22, p24, descr=TargetToken(140669221673808)) ++172: label(p0, p1, p2, p3, i4, p5, i6, i7, p10, p12, p14, p16, p18, p20, p22, p24, descr=TargetToken(139951894070880)) debug_merge_point(0, ' #38 FOR_ITER') -+251: guard_value(i6, 5, descr=) [i6, p1, p0, p2, p3, i4, p5, i7, p10, p12, p14, p16, p18, p20, p22, p24] -+261: guard_class(p18, 38562496, descr=) [p1, p0, p18, p2, p3, i4, p5, p10, p12, p14, p16, p20, p22, p24] -+273: p28 = getfield_gc(p18, descr=) -+277: guard_nonnull(p28, descr=) [p1, p0, p18, p28, p2, p3, i4, p5, p10, p12, p14, p16, p20, p22, p24] -+286: i29 = getfield_gc(p18, descr=) -+290: p30 = getfield_gc(p28, descr=) -+294: guard_class(p30, 38745240, descr=) [p1, p0, p18, i29, p30, p28, p2, p3, i4, p5, p10, p12, p14, p16, p20, p22, p24] -+306: p32 = getfield_gc(p28, descr=) -+310: i33 = getfield_gc_pure(p32, descr=) -+314: i34 = getfield_gc_pure(p32, descr=) -+318: i35 = getfield_gc_pure(p32, descr=) -+322: i37 = int_lt(i29, 0) -guard_false(i37, descr=) [p1, p0, p18, i29, i35, i34, i33, p2, p3, i4, p5, p10, p12, p14, p16, p20, p22, p24] -+332: i38 = int_ge(i29, i35) -guard_false(i38, descr=) [p1, p0, p18, i29, i34, i33, p2, p3, i4, p5, p10, p12, p14, p16, p20, p22, p24] -+341: i39 = int_mul(i29, i34) -+348: i40 = int_add(i33, i39) -+354: i42 = int_add(i29, 1) -+358: setfield_gc(p18, i42, descr=) -+362: guard_value(i4, 0, 
descr=) [i4, p1, p0, p2, p3, p5, p10, p12, p14, p16, p18, p22, p24, i40] ++265: guard_value(i6, 5, descr=) [i6, p1, p0, p2, p3, i4, p5, i7, p10, p12, p14, p16, p18, p20, p22, p24] ++275: guard_class(p18, 38562496, descr=) [p1, p0, p18, p2, p3, i4, p5, p10, p12, p14, p16, p20, p22, p24] ++295: p28 = getfield_gc(p18, descr=) ++299: guard_nonnull(p28, descr=) [p1, p0, p18, p28, p2, p3, i4, p5, p10, p12, p14, p16, p20, p22, p24] ++308: i29 = getfield_gc(p18, descr=) ++312: p30 = getfield_gc(p28, descr=) ++316: guard_class(p30, 38745240, descr=) [p1, p0, p18, i29, p30, p28, p2, p3, i4, p5, p10, p12, p14, p16, p20, p22, p24] ++328: p32 = getfield_gc(p28, descr=) ++332: i33 = getfield_gc_pure(p32, descr=) ++336: i34 = getfield_gc_pure(p32, descr=) ++340: i35 = getfield_gc_pure(p32, descr=) ++344: i37 = int_lt(i29, 0) +guard_false(i37, descr=) [p1, p0, p18, i29, i35, i34, i33, p2, p3, i4, p5, p10, p12, p14, p16, p20, p22, p24] ++354: i38 = int_ge(i29, i35) +guard_false(i38, descr=) [p1, p0, p18, i29, i34, i33, p2, p3, i4, p5, p10, p12, p14, p16, p20, p22, p24] ++363: i39 = int_mul(i29, i34) ++370: i40 = int_add(i33, i39) ++376: i42 = int_add(i29, 1) ++380: setfield_gc(p18, i42, descr=) ++384: guard_value(i4, 0, descr=) [i4, p1, p0, p2, p3, p5, p10, p12, p14, p16, p18, p22, p24, i40] debug_merge_point(0, ' #41 STORE_FAST') debug_merge_point(0, ' #44 LOAD_FAST') -+372: guard_nonnull_class(p10, ConstClass(W_IntObject), descr=) [p1, p0, p10, p2, p3, p5, p12, p16, p18, p22, p24, i40] ++394: guard_nonnull_class(p10, ConstClass(W_IntObject), descr=) [p1, p0, p10, p2, p3, p5, p12, p16, p18, p22, p24, i40] debug_merge_point(0, ' #47 LOAD_FAST') -+390: guard_nonnull_class(p12, ConstClass(W_IntObject), descr=) [p1, p0, p12, p2, p3, p5, p10, p16, p18, p22, p24, i40] ++412: guard_nonnull_class(p12, ConstClass(W_IntObject), descr=) [p1, p0, p12, p2, p3, p5, p10, p16, p18, p22, p24, i40] debug_merge_point(0, ' #50 LOAD_FAST') debug_merge_point(0, ' #53 BINARY_ADD') -+408: i46 = 
getfield_gc_pure(p12, descr=) -+412: i47 = int_add_ovf(i46, i40) -guard_no_overflow(, descr=) [p1, p0, p12, i47, p2, p3, p5, p10, p16, p18, i40] ++439: i46 = getfield_gc_pure(p12, descr=) ++443: i47 = int_add_ovf(i46, i40) +guard_no_overflow(, descr=) [p1, p0, p12, i47, p2, p3, p5, p10, p16, p18, i40] debug_merge_point(0, ' #54 INPLACE_ADD') -+424: i48 = getfield_gc_pure(p10, descr=) -+428: i49 = int_add_ovf(i48, i47) -guard_no_overflow(, descr=) [p1, p0, p10, i49, p2, p3, p5, p12, p16, p18, i47, i40] ++455: i48 = getfield_gc_pure(p10, descr=) ++459: i49 = int_add_ovf(i48, i47) +guard_no_overflow(, descr=) [p1, p0, p10, i49, p2, p3, p5, p12, p16, p18, i47, i40] debug_merge_point(0, ' #55 STORE_FAST') debug_merge_point(0, ' #58 JUMP_ABSOLUTE') -+437: guard_not_invalidated(, descr=) [p1, p0, p2, p3, p5, p12, p16, p18, i49, None, i40] -+437: i52 = getfield_raw(44057928, descr=) -+445: i54 = int_lt(i52, 0) -guard_false(i54, descr=) [p1, p0, p2, p3, p5, p12, p16, p18, i49, None, i40] -+455: guard_value(p3, ConstPtr(ptr55), descr=) [p1, p0, p3, p2, p5, p12, p16, p18, i49, None, i40] ++468: guard_not_invalidated(, descr=) [p1, p0, p2, p3, p5, p12, p16, p18, i49, None, i40] ++468: i52 = getfield_raw(44057928, descr=) ++476: i54 = int_lt(i52, 0) +guard_false(i54, descr=) [p1, p0, p2, p3, p5, p12, p16, p18, i49, None, i40] ++486: guard_value(p3, ConstPtr(ptr55), descr=) [p1, p0, p3, p2, p5, p12, p16, p18, i49, None, i40] debug_merge_point(0, ' #38 FOR_ITER') -+474: label(p0, p1, p2, p5, i49, p12, i40, p16, p18, i42, i35, i34, i33, i46, descr=TargetToken(140669221673888)) ++505: label(p0, p1, p2, p5, i49, p12, i40, p16, p18, i42, i35, i34, i33, i46, descr=TargetToken(139951894070960)) debug_merge_point(0, ' #38 FOR_ITER') -+518: i56 = int_ge(i42, i35) -guard_false(i56, descr=) [p1, p0, p18, i42, i34, i33, p2, p5, p12, p16, i49, i40] -+527: i57 = int_mul(i42, i34) -+534: i58 = int_add(i33, i57) -+540: i59 = int_add(i42, 1) ++542: i56 = int_ge(i42, i35) +guard_false(i56, 
descr=) [p1, p0, p18, i42, i34, i33, p2, p5, p12, p16, i49, i40] ++551: i57 = int_mul(i42, i34) ++558: i58 = int_add(i33, i57) ++564: i59 = int_add(i42, 1) debug_merge_point(0, ' #41 STORE_FAST') debug_merge_point(0, ' #44 LOAD_FAST') debug_merge_point(0, ' #47 LOAD_FAST') debug_merge_point(0, ' #50 LOAD_FAST') debug_merge_point(0, ' #53 BINARY_ADD') -+544: setfield_gc(p18, i59, descr=) -+548: i60 = int_add_ovf(i46, i58) -guard_no_overflow(, descr=) [p1, p0, p12, i60, p2, p5, p16, p18, i58, i49, None] ++568: setfield_gc(p18, i59, descr=) ++572: i60 = int_add_ovf(i46, i58) +guard_no_overflow(, descr=) [p1, p0, p12, i60, p2, p5, p16, p18, i58, i49, None] debug_merge_point(0, ' #54 INPLACE_ADD') -+560: i61 = int_add_ovf(i49, i60) -guard_no_overflow(, descr=) [p1, p0, i61, p2, p5, p12, p16, p18, i60, i58, i49, None] ++584: i61 = int_add_ovf(i49, i60) +guard_no_overflow(, descr=) [p1, p0, i61, p2, p5, p12, p16, p18, i60, i58, i49, None] debug_merge_point(0, ' #55 STORE_FAST') debug_merge_point(0, ' #58 JUMP_ABSOLUTE') -+572: guard_not_invalidated(, descr=) [p1, p0, p2, p5, p12, p16, p18, i61, None, i58, None, None] -+572: i62 = getfield_raw(44057928, descr=) -+580: i63 = int_lt(i62, 0) -guard_false(i63, descr=) [p1, p0, p2, p5, p12, p16, p18, i61, None, i58, None, None] ++596: guard_not_invalidated(, descr=) [p1, p0, p2, p5, p12, p16, p18, i61, None, i58, None, None] ++596: i62 = getfield_raw(44057928, descr=) ++604: i63 = int_lt(i62, 0) +guard_false(i63, descr=) [p1, p0, p2, p5, p12, p16, p18, i61, None, i58, None, None] debug_merge_point(0, ' #38 FOR_ITER') -+590: jump(p0, p1, p2, p5, i61, p12, i58, p16, p18, i59, i35, i34, i33, i46, descr=TargetToken(140669221673888)) -+611: --end of the loop-- -[19b7542ad310] jit-log-opt-loop} -[19b754b7b402] {jit-backend -[19b754f2e616] {jit-backend-dump ++614: jump(p0, p1, p2, p5, i61, p12, i58, p16, p18, i59, i35, i34, i33, i46, descr=TargetToken(139951894070960)) ++635: --end of the loop-- +[b235a03f1e7] jit-log-opt-loop} 
+[b235a456299] {jit-backend +[b235a6f3e61] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181befaa +0 488DA50000000049BB4823011BF07F0000498B034883C00149BB4823011BF07F0000498903488B8568FFFFFF4C8B601048C74610000000008138388F01000F8500000000488B40184883F8040F8500000000488B04254845A0024883F8000F8C00000000488B8540FFFFFF8138806300000F8500000000488B70104885F60F8400000000488B48084C8B7E1041813F582D03000F8500000000488B76084C8B7E084C8B7610488B76184883F9000F8C000000004839F10F8D000000004889CE490FAFCE4901CF4883C601488B8D60FFFFFF4C8B71084889700849BBA8CB2D18F07F00004D39DE0F8500000000498B761049BBC0CB2D18F07F00004C39DE0F85000000004C8B342500D785014981FE201288010F850000000049BB6023011BF07F0000498B034883C00149BB6023011BF07F0000498903488B042530255601488D9080000000483B142548255601761A49BB2DB21B18F07F000041FFD349BBC2B21B18F07F000041FFD3488914253025560148C700388F01004889C24883C02048C700F82200004889C14883C01048C700F82200004C8949084989C14883C01048C700F82200004D8979084989C74883C01048C700806300004989C64883C01848C7007836000048C742180400000048C742083E0000004C8962104C8BA518FFFFFF4D89670848C74010400FA10149BB4004F81AF07F00004C8958084989461041BD0000000048899568FFFFFF41BA0500000048C78550FFFFFF250000004889CB4C89CA4C89BD48FFFFFF4C89F648C78538FFFFFF0000000048C78530FFFFFF0000000048C78528FFFFFF0000000049BB701B2E18F07F00004D89DE49BBC3EB1B18F07F000041FFE349BB00B01B18F07F000041FFD34048004C3068586D2503C600000049BB00B01B18F07F000041FFD34048014C3068586D2503C700000049BB00B01B18F07F000041FFD340484C3068586D2503C800000049BB00B01B18F07F000041FFD340484C3068586D2503C900000049BB00B01B18F07F000041FFD34048004C30686D2503CA00000049BB00B01B18F07F000041FFD3404800184C30686D2503CB00000049BB00B01B18F07F000041FFD3404800053C184C30686D2503CC00000049BB00B01B18F07F000041FFD34048000519393D4C30686D2503CD00000049BB00B01B18F07F000041FFD340480005393D4C30686D2503CE00000049BB00B01B18F07F000041FFD34004384C00303D6D2503CF00000049BB00B01B18F07F000041FFD3400418384C00303D6D2503D000000049BB00B01B18F07F000041FFD34004384C003
03D6D2503D1000000 -[19b754f406be] jit-backend-dump} -[19b754f41534] {jit-backend-addr -bridge out of Guard 193 has address 7ff0181befaa to 7ff0181bf212 -[19b754f42f0e] jit-backend-addr} -[19b754f43d3c] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914169581 +0 488DA50000000049BBD8C3FB16497F00004D8B234983C40149BBD8C3FB16497F00004D89234C8BA540FFFFFF498B44241049C742100000000041813C24388F01000F85000000004D8B6424184983FC040F85000000004C8B24254845A0024983FC000F8C000000004C8BA570FFFFFF41813C24806300000F85000000004D8B5424104D85D20F84000000004D8B4C24084D8B7A1041813F582D03000F85000000004D8B52084D8B7A084D8B72104D8B52184983F9000F8C000000004D39D10F8D000000004D89CA4D0FAFCE4D01CF4983C2014C8B8D58FFFFFF4D8B71084D8954240849BBA86B2814497F00004D39DE0F85000000004D8B561049BBC06B2814497F00004D39DA0F85000000004C8B342500D785014981FE201288010F850000000049BBF0C3FB16497F00004D8B234983C40149BBF0C3FB16497F00004D892348898508FFFFFF488B042530255601488D9080000000483B142548255601761A49BB2D521614497F000041FFD349BBC2521614497F000041FFD3488914253025560148C700388F01004889C24883C02048C700F82200004989C44883C01048C700F822000049897424084889C64883C01048C700F82200004C897E084989C74883C01048C700806300004989C14883C01848C7007836000048C742180400000048C742083E0000004C8BB508FFFFFF4C8972104C8BB510FFFFFF4D89770848C74010400FA10149BB2051F316497F00004C8958084989411049BB50BE2814497F00004D89DE41BD0000000048899540FFFFFF41BA0500000048C78538FFFFFF250000004C89E34889B560FFFFFF4C89BD30FFFFFF4C898D68FFFFFF48C78528FFFFFF0000000048C78520FFFFFF0000000048C78518FFFFFF0000000049BB8F911614497F000041FFE349BB00501614497F000041FFD3504C3054004840197103DF00000049BB00501614497F000041FFD3504C3154004840197103E000000049BB00501614497F000041FFD3504C54004840197103E100000049BB00501614497F000041FFD3504C54004840197103E200000049BB00501614497F000041FFD3504C30540048197103E300000049BB00501614497F000041FFD3504C3028540048197103E400000049BB00501614497F000041FFD3504C30253C28540048197103E500000049BB00501614497F000041FFD3504C302529393D540048197103E600000
049BB00501614497F000041FFD3504C3025393D540048197103E700000049BB00501614497F000041FFD35024385430003D197103E800000049BB00501614497F000041FFD3502428385430003D197103E900000049BB00501614497F000041FFD35024385430003D197103EA000000 +[b235a6fc6cf] jit-backend-dump} +[b235a6fce13] {jit-backend-addr +bridge out of Guard 218 has address 7f4914169581 to 7f4914169809 +[b235a6fda0d] jit-backend-addr} +[b235a6fdfb1] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181befad +0 90FEFFFF -[19b754f45740] jit-backend-dump} -[19b754f464de] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914169584 +0 80FEFFFF +[b235a6feb1d] jit-backend-dump} +[b235a6ff177] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181befea +0 24020000 -[19b754f47c0c] jit-backend-dump} -[19b754f4868c] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141695c4 +0 41020000 +[b235a6ffcb7] jit-backend-dump} +[b235a7001ed] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181beff8 +0 31020000 -[19b754f49d60] jit-backend-dump} -[19b754f4a9e4] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141695d3 +0 4D020000 +[b235a700d4b] jit-backend-dump} +[b235a708f67] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bf00a +0 54020000 -[19b754f4c088] jit-backend-dump} -[19b754f4cafc] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141695e5 +0 70020000 +[b235a709c8d] jit-backend-dump} +[b235a70a183] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bf01d +0 5B020000 -[19b754f5c7f2] jit-backend-dump} -[19b754f5d524] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141695fa +0 75020000 +[b235a70ab7b] jit-backend-dump} +[b235a70b035] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bf02a +0 68020000 -[19b754f5eb2c] jit-backend-dump} -[19b754f5f4fe] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914169608 +0 81020000 +[b235a70ba99] jit-backend-dump} 
+[b235a70bf8b] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bf03f +0 6E020000 -[19b754f60986] jit-backend-dump} -[19b754f613a0] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f491416961e +0 86020000 +[b235a70c957] jit-backend-dump} +[b235a70cd49] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bf059 +0 71020000 -[19b754f62954] jit-backend-dump} -[19b754f63362] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914169638 +0 89020000 +[b235a70d5cf] jit-backend-dump} +[b235a70d991] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bf062 +0 86020000 -[19b754f647de] jit-backend-dump} -[19b754f65246] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914169641 +0 9E020000 +[b235a70e201] jit-backend-dump} +[b235a70e5f7] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bf092 +0 73020000 -[19b754f666ce] jit-backend-dump} -[19b754f67016] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914169672 +0 8A020000 +[b235a70ee8b] jit-backend-dump} +[b235a70f28f] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bf0a9 +0 77020000 -[19b754f68492] jit-backend-dump} -[19b754f68d9e] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f4914169689 +0 8E020000 +[b235a70fcd7] jit-backend-dump} +[b235a7101c3] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181bf0be +0 7E020000 -[19b754f6a232] jit-backend-dump} -[19b754f6af9a] {jit-backend-dump +SYS_EXECUTABLE pypy +CODE_DUMP @7f491416969e +0 95020000 +[b235a710b9f] jit-backend-dump} +[b235a71113d] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE python -CODE_DUMP @7ff0181becf1 +0 B5020000 -[19b754f6c542] jit-backend-dump} -[19b754f6d6ac] jit-backend} -[19b754f6f56c] {jit-log-opt-bridge -# bridge out of Guard 193 with 61 ops +SYS_EXECUTABLE pypy +CODE_DUMP @7f49141692c7 +0 B6020000 +[b235a7119b9] jit-backend-dump} +[b235a712125] jit-backend} +[b235a7131a7] 
{jit-log-opt-bridge +# bridge out of Guard 218 with 61 ops [p0, p1, p2, i3, i4, i5, p6, p7, p8, p9, i10, i11] debug_merge_point(0, ' #61 POP_BLOCK') +37: p12 = getfield_gc_pure(p7, descr=) -+48: setfield_gc(p2, ConstPtr(ptr13), descr=) -+56: guard_class(p7, 38639224, descr=) [p0, p1, p7, p6, p12, p8, p9, i11, i10] -+68: i15 = getfield_gc_pure(p7, descr=) -+72: guard_value(i15, 4, descr=) [p0, p1, i15, p6, p12, p8, p9, i11, i10] ++49: setfield_gc(p2, ConstPtr(ptr13), descr=) ++57: guard_class(p7, 38639224, descr=) [p0, p1, p7, p6, p12, p8, p9, i10, i11] ++71: i15 = getfield_gc_pure(p7, descr=) ++76: guard_value(i15, 4, descr=) [p0, p1, i15, p6, p12, p8, p9, i10, i11] debug_merge_point(0, ' #62 JUMP_ABSOLUTE') -+82: guard_not_invalidated(, descr=) [p0, p1, p6, p12, p8, p9, i11, i10] -+82: i18 = getfield_raw(44057928, descr=) -+90: i20 = int_lt(i18, 0) -guard_false(i20, descr=) [p0, p1, p6, p12, p8, p9, i11, i10] ++86: guard_not_invalidated(, descr=) [p0, p1, p6, p12, p8, p9, i10, i11] ++86: i18 = getfield_raw(44057928, descr=) ++94: i20 = int_lt(i18, 0) +guard_false(i20, descr=) [p0, p1, p6, p12, p8, p9, i10, i11] debug_merge_point(0, ' #19 FOR_ITER') -+100: guard_class(p9, 38562496, descr=) [p0, p1, p9, p6, p12, p8, i11, i10] -+119: p22 = getfield_gc(p9, descr=) -+123: guard_nonnull(p22, descr=) [p0, p1, p9, p22, p6, p12, p8, i11, i10] -+132: i23 = getfield_gc(p9, descr=) -+136: p24 = getfield_gc(p22, descr=) -+140: guard_class(p24, 38745240, descr=) [p0, p1, p9, i23, p24, p22, p6, p12, p8, i11, i10] -+153: p26 = getfield_gc(p22, descr=) -+157: i27 = getfield_gc_pure(p26, descr=) -+161: i28 = getfield_gc_pure(p26, descr=) -+165: i29 = getfield_gc_pure(p26, descr=) -+169: i31 = int_lt(i23, 0) -guard_false(i31, descr=) [p0, p1, p9, i23, i29, i28, i27, p6, p12, p8, i11, i10] -+179: i32 = int_ge(i23, i29) -guard_false(i32, descr=) [p0, p1, p9, i23, i28, i27, p6, p12, p8, i11, i10] -+188: i33 = int_mul(i23, i28) -+195: i34 = int_add(i27, i33) -+198: i36 = int_add(i23, 1) 
++104: guard_class(p9, 38562496, descr=) [p0, p1, p9, p6, p12, p8, i10, i11] ++125: p22 = getfield_gc(p9, descr=) ++130: guard_nonnull(p22, descr=) [p0, p1, p9, p22, p6, p12, p8, i10, i11] ++139: i23 = getfield_gc(p9, descr=) ++144: p24 = getfield_gc(p22, descr=) ++148: guard_class(p24, 38745240, descr=) [p0, p1, p9, i23, p24, p22, p6, p12, p8, i10, i11] ++161: p26 = getfield_gc(p22, descr=) ++165: i27 = getfield_gc_pure(p26, descr=) ++169: i28 = getfield_gc_pure(p26, descr=) ++173: i29 = getfield_gc_pure(p26, descr=) ++177: i31 = int_lt(i23, 0) +guard_false(i31, descr=) [p0, p1, p9, i23, i29, i28, i27, p6, p12, p8, i10, i11] ++187: i32 = int_ge(i23, i29) +guard_false(i32, descr=) [p0, p1, p9, i23, i28, i27, p6, p12, p8, i10, i11] ++196: i33 = int_mul(i23, i28) ++203: i34 = int_add(i27, i33) ++206: i36 = int_add(i23, 1) debug_merge_point(0, ' #22 STORE_FAST') debug_merge_point(0, ' #25 SETUP_LOOP') debug_merge_point(0, ' #28 LOAD_GLOBAL') -+202: p37 = getfield_gc(p1, descr=) -+213: setfield_gc(p9, i36, descr=) -+217: guard_value(p37, ConstPtr(ptr38), descr=) [p0, p1, p37, p6, p9, p12, i34, i11, i10] -+236: p39 = getfield_gc(p37, descr=) -+240: guard_value(p39, ConstPtr(ptr40), descr=) [p0, p1, p39, p37, p6, p9, p12, i34, i11, i10] -+259: p42 = getfield_gc(ConstPtr(ptr41), descr=) -+267: guard_value(p42, ConstPtr(ptr43), descr=) [p0, p1, p42, p6, p9, p12, i34, i11, i10] ++210: p37 = getfield_gc(p1, descr=) ++221: setfield_gc(p9, i36, descr=) ++226: guard_value(p37, ConstPtr(ptr38), descr=) [p0, p1, p37, p6, p9, p12, i34, i10, i11] ++245: p39 = getfield_gc(p37, descr=) ++249: guard_value(p39, ConstPtr(ptr40), descr=) [p0, p1, p39, p37, p6, p9, p12, i34, i10, i11] ++268: p42 = getfield_gc(ConstPtr(ptr41), descr=) ++276: guard_value(p42, ConstPtr(ptr43), descr=) [p0, p1, p42, p6, p9, p12, i34, i10, i11] debug_merge_point(0, ' #31 LOAD_CONST') debug_merge_point(0, ' #34 CALL_FUNCTION') debug_merge_point(0, ' #37 GET_ITER') debug_merge_point(0, ' #38 FOR_ITER') -+280: 
p44 = same_as(ConstPtr(ptr40)) -+280: label(p1, p0, p6, p12, i10, i34, i11, p9, descr=TargetToken(140669221269360)) ++289: p44 = same_as(ConstPtr(ptr40)) ++289: label(p1, p0, p6, p12, i10, i34, i11, p9, descr=TargetToken(139951894075920)) p46 = new_with_vtable(38639224) p48 = new_with_vtable(ConstClass(W_IntObject)) p50 = new_with_vtable(ConstClass(W_IntObject)) -+404: setfield_gc(p48, i10, descr=) ++420: setfield_gc(p48, i10, descr=) p52 = new_with_vtable(ConstClass(W_IntObject)) -+422: setfield_gc(p50, i34, descr=) ++439: setfield_gc(p50, i34, descr=) p54 = new_with_vtable(38562496) p56 = new_with_vtable(ConstClass(W_ListObject)) -+454: setfield_gc(p46, 4, descr=) -+462: setfield_gc(p46, 62, descr=) -+470: setfield_gc(p46, p12, descr=) -+474: setfield_gc(p52, i11, descr=) -+485: setfield_gc(p56, ConstPtr(ptr59), descr=) -+493: setfield_gc(p56, ConstPtr(ptr60), descr=) -+507: setfield_gc(p54, p56, descr=) -+511: jump(p1, p0, p6, ConstPtr(ptr61), 0, p46, 5, 37, p48, p50, p52, p9, p54, ConstPtr(ptr65), ConstPtr(ptr66), ConstPtr(ptr66), descr=TargetToken(140669221673808)) -+616: --end of the loop-- -[19b754fe1a0a] jit-log-opt-bridge} -[19b755f1632a] {jit-backend-counts -entry 0:4647 -TargetToken(140669174710784):4647 -TargetToken(140669174710864):9292 -entry 1:201 -TargetToken(140669174715984):201 -TargetToken(140669174716064):4468 -bridge 16:4446 -bridge 33:4268 -TargetToken(140669174718064):4268 -entry 2:1 -TargetToken(140669221669808):1 -TargetToken(140669221669888):1938 -entry 3:3173 -bridge 85:2882 -bridge 88:2074 -bridge 86:158 -entry 4:377 -TargetToken(140669221670848):527 -TargetToken(140669221670928):1411 -bridge 90:1420 -bridge 133:150 -bridge 87:50 -bridge 89:7 -entry 5:201 -TargetToken(140669221673808):9990 -TargetToken(140669221673888):998737 -bridge 193:9790 -TargetToken(140669221269360):9789 -[19b755f26b42] jit-backend-counts} ++471: setfield_gc(p46, 4, descr=) ++479: setfield_gc(p46, 62, descr=) ++487: setfield_gc(p46, p12, descr=) ++498: 
setfield_gc(p52, i11, descr=) ++509: setfield_gc(p56, ConstPtr(ptr59), descr=) ++517: setfield_gc(p56, ConstPtr(ptr60), descr=) ++531: setfield_gc(p54, p56, descr=) ++535: jump(p1, p0, p6, ConstPtr(ptr61), 0, p46, 5, 37, p48, p50, p52, p9, p54, ConstPtr(ptr65), ConstPtr(ptr66), ConstPtr(ptr66), descr=TargetToken(139951894070880)) ++648: --end of the loop-- +[b235a749cb7] jit-log-opt-bridge} +[b235ad8336b] {jit-backend-counts +entry 0:1 +TargetToken(139951847702960):1 +TargetToken(139951847703040):41 +entry 1:1 +TargetToken(139951847708240):1 +TargetToken(139951847708320):41 +entry 2:4647 +TargetToken(139951847709440):4647 +TargetToken(139951847709520):9292 +entry 3:201 +TargetToken(139951847710560):201 +TargetToken(139951847710640):4468 +bridge 41:4446 +bridge 58:4268 +TargetToken(139951894596208):4268 +entry 4:1 +TargetToken(139951894599248):1 +TargetToken(139951894599328):1938 +entry 5:3173 +bridge 110:2882 +bridge 113:2074 +bridge 111:158 +entry 6:377 +TargetToken(139951894600368):527 +TargetToken(139951894600448):1411 +bridge 115:1420 +bridge 158:150 +bridge 112:50 +bridge 114:7 +entry 7:201 +TargetToken(139951894070880):9990 +TargetToken(139951894070960):998737 +bridge 218:9790 +TargetToken(139951894075920):9789 +[b235ad8be63] jit-backend-counts} diff --git a/source.py b/source.py --- a/source.py +++ b/source.py @@ -3,7 +3,7 @@ def f(): i = 0 - while i < 1003: + while i < 1103: i += 1 f() @@ -13,7 +13,7 @@ def inlined_call(): i = 0 - while i < 1003: + while i < 1103: i = inner(i) inlined_call() From noreply at buildbot.pypy.org Wed Mar 28 19:58:41 2012 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 28 Mar 2012 19:58:41 +0200 (CEST) Subject: [pypy-commit] jitviewer default: merge Message-ID: <20120328175841.9903B822B2@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r199:5a31ded0d4e8 Date: 2012-03-28 19:58 +0200 http://bitbucket.org/pypy/jitviewer/changeset/5a31ded0d4e8/ Log: merge diff --git a/_jitviewer/app.py 
b/_jitviewer/app.py --- a/_jitviewer/app.py +++ b/_jitviewer/app.py @@ -14,7 +14,7 @@ To produce the logfile for your program, run: - PYPYLOG=jit-log-opt,jit-backend-counts:mylogfile.log pypy myapp.py + PYPYLOG=jit-log-opt,jit-backend:mylogfile.pypylog pypy myapp.py """ import sys diff --git a/_jitviewer/templates/index.html b/_jitviewer/templates/index.html --- a/_jitviewer/templates/index.html +++ b/_jitviewer/templates/index.html @@ -22,7 +22,7 @@

    - Main title + JIT viewer
    Filter: From noreply at buildbot.pypy.org Wed Mar 28 20:24:44 2012 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 28 Mar 2012 20:24:44 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: (agaynor, fijal) start working on slides Message-ID: <20120328182444.75644822B2@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r4164:f9c54452a43f Date: 2012-03-28 20:22 +0200 http://bitbucket.org/pypy/extradoc/changeset/f9c54452a43f/ Log: (agaynor, fijal) start working on slides diff --git a/talk/uct2012/talk.rst b/talk/uct2012/talk.rst new file mode 100644 --- /dev/null +++ b/talk/uct2012/talk.rst @@ -0,0 +1,85 @@ +Fast enough VMs in fast enough time +=================================== + +Who am I? +--------- + +* PyPy developer since 2006 + +XXX + +What is PyPy? +------------- + +* an open source project + +* a Python interpreter + +* **a framework for writing dynamic language VMs** + +* an agile project sponsored by EU and others + +What is a VM? +------------- + +* a program + +* input: a program + +* output: the result of executing that program + +What does a VM look like? +------------------------- + +* Lexical/analysis parsing (what are the symbols in the program) + +* AST construction (what is the structure of the program) + +* Bytecode compilation (optional) + +* Execution + +Where does PyPy come in? +------------------------ + +* Tools for writing these program quickly, and efficiently. + + * Helpers for things like parsing + + * Free JIT, and garbage collector + +* Mostly you write a totally normal VM in python, and it becomes magically fast + +PyPy architecture +----------------- + +* snakes all the way down + +* everything is written in Python - including JIT, GC, etc. 
+ +* to be precise, a **subset** of Python, called RPython + +* your VM has to be implemented in RPython + +RPython - the good +------------------ + +* The good - it's mostly Python + +* Just write python and fix it later + +RPython - the bad +----------------- + +* It's restricted + +* Most dynamic features don't work, but you can employ all kinds of tricks during import + +RPython - the ugly +------------------- + +* Documentation + +* Error messages + +* Global type inference From noreply at buildbot.pypy.org Wed Mar 28 23:26:08 2012 From: noreply at buildbot.pypy.org (amauryfa) Date: Wed, 28 Mar 2012 23:26:08 +0200 (CEST) Subject: [pypy-commit] pypy default: There is no time2 module. Message-ID: <20120328212608.58BC6822B2@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: r54044:d154cd83511d Date: 2012-03-27 22:00 +0200 http://bitbucket.org/pypy/pypy/changeset/d154cd83511d/ Log: There is no time2 module. diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -467,9 +467,9 @@ if name not in modules: modules.append(name) - # a bit of custom logic: time2 or rctime take precedence over time + # a bit of custom logic: rctime take precedence over time # XXX this could probably be done as a "requires" in the config - if ('time2' in modules or 'rctime' in modules) and 'time' in modules: + if 'rctime' in modules and 'time' in modules: modules.remove('time') if not self.config.objspace.nofaking: From noreply at buildbot.pypy.org Wed Mar 28 23:26:09 2012 From: noreply at buildbot.pypy.org (amauryfa) Date: Wed, 28 Mar 2012 23:26:09 +0200 (CEST) Subject: [pypy-commit] pypy default: Improve documentation of parsestr() Message-ID: <20120328212609.93142822B2@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: r54045:f3a0dbfc3c3a Date: 2012-03-28 22:48 +0200 http://bitbucket.org/pypy/pypy/changeset/f3a0dbfc3c3a/ Log: 
Improve documentation of parsestr() diff --git a/pypy/interpreter/pyparser/parsestring.py b/pypy/interpreter/pyparser/parsestring.py --- a/pypy/interpreter/pyparser/parsestring.py +++ b/pypy/interpreter/pyparser/parsestring.py @@ -2,17 +2,25 @@ from pypy.interpreter import unicodehelper from pypy.rlib.rstring import StringBuilder -def parsestr(space, encoding, s, unicode_literals=False): - # compiler.transformer.Transformer.decode_literal depends on what - # might seem like minor details of this function -- changes here - # must be reflected there. +def parsestr(space, encoding, s, unicode_literal=False): + """Parses a string or unicode literal, and return a wrapped value. + + If encoding=iso8859-1, the source string is also in this encoding. + If encoding=None, the source string is ascii only. + In other cases, the source string is in utf-8 encoding. + + When a bytes string is returned, it will be encoded with the + original encoding. + + Yes, it's very inefficient. + Yes, CPython has very similar code. + """ # we use ps as "pointer to s" # q is the virtual last char index of the string ps = 0 quote = s[ps] rawmode = False - unicode = unicode_literals # string decoration handling o = ord(quote) @@ -21,11 +29,11 @@ if quote == 'b' or quote == 'B': ps += 1 quote = s[ps] - unicode = False + unicode_literal = False elif quote == 'u' or quote == 'U': ps += 1 quote = s[ps] - unicode = True + unicode_literal = True if quote == 'r' or quote == 'R': ps += 1 quote = s[ps] @@ -46,21 +54,28 @@ 'unmatched triple quotes in literal') q -= 2 - if unicode: # XXX Py_UnicodeFlag is ignored for now + if unicode_literal: # XXX Py_UnicodeFlag is ignored for now if encoding is None or encoding == "iso-8859-1": + # 'unicode_escape' expects latin-1 bytes, string is ready. buf = s bufp = ps bufq = q u = None else: - # "\XX" may become "\u005c\uHHLL" (12 bytes) + # String is utf8-encoded, but 'unicode_escape' expects + # latin-1; So multibyte sequences must be escaped. 
lis = [] # using a list to assemble the value end = q + # Worst case: "\XX" may become "\u005c\uHHLL" (12 bytes) while ps < end: if s[ps] == '\\': lis.append(s[ps]) ps += 1 if ord(s[ps]) & 0x80: + # A multibyte sequence will follow, it will be + # escaped like \u1234. To avoid confusion with + # the backslash we just wrote, we emit "\u005c" + # instead. lis.append("u005c") if ord(s[ps]) & 0x80: # XXX inefficient w, ps = decode_utf8(space, s, ps, end, "utf-16-be") @@ -86,13 +101,11 @@ need_encoding = (encoding is not None and encoding != "utf-8" and encoding != "iso-8859-1") - # XXX add strchr like interface to rtyper assert 0 <= ps <= q substr = s[ps : q] if rawmode or '\\' not in s[ps:]: if need_encoding: w_u = space.wrap(unicodehelper.PyUnicode_DecodeUTF8(space, substr)) - #w_v = space.wrap(space.unwrap(w_u).encode(encoding)) this works w_v = unicodehelper.PyUnicode_AsEncodedString(space, w_u, space.wrap(encoding)) return w_v else: From noreply at buildbot.pypy.org Wed Mar 28 23:26:10 2012 From: noreply at buildbot.pypy.org (amauryfa) Date: Wed, 28 Mar 2012 23:26:10 +0200 (CEST) Subject: [pypy-commit] pypy default: Remove dead code. Message-ID: <20120328212610.D0B47822B2@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: r54046:3b48363cff78 Date: 2012-03-28 23:24 +0200 http://bitbucket.org/pypy/pypy/changeset/3b48363cff78/ Log: Remove dead code. Q: Where does it come from? A: from CPython: Python/ast.c Q: Why this code in CPython? A: In file Grammar/Grammar, keywords and operators are enclosed in quotes ('def', '+='); grammar.c contains logic to create the various tokens, and separate keywords from operators; probably in old ages the same code was used to parse the Python language as well. 
diff --git a/pypy/interpreter/pyparser/parsestring.py b/pypy/interpreter/pyparser/parsestring.py --- a/pypy/interpreter/pyparser/parsestring.py +++ b/pypy/interpreter/pyparser/parsestring.py @@ -23,21 +23,18 @@ rawmode = False # string decoration handling - o = ord(quote) - isalpha = (o>=97 and o<=122) or (o>=65 and o<=90) - if isalpha or quote == '_': - if quote == 'b' or quote == 'B': - ps += 1 - quote = s[ps] - unicode_literal = False - elif quote == 'u' or quote == 'U': - ps += 1 - quote = s[ps] - unicode_literal = True - if quote == 'r' or quote == 'R': - ps += 1 - quote = s[ps] - rawmode = True + if quote == 'b' or quote == 'B': + ps += 1 + quote = s[ps] + unicode_literal = False + elif quote == 'u' or quote == 'U': + ps += 1 + quote = s[ps] + unicode_literal = True + if quote == 'r' or quote == 'R': + ps += 1 + quote = s[ps] + rawmode = True if quote != "'" and quote != '"': raise_app_valueerror(space, 'Internal error: parser passed unquoted literal') From noreply at buildbot.pypy.org Wed Mar 28 23:35:39 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 28 Mar 2012 23:35:39 +0200 (CEST) Subject: [pypy-commit] pypy dynamic-specialized-tuple: fix, result of a bad merge Message-ID: <20120328213539.F4133822B2@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: dynamic-specialized-tuple Changeset: r54047:244768707f09 Date: 2012-03-28 17:35 -0400 http://bitbucket.org/pypy/pypy/changeset/244768707f09/ Log: fix, result of a bad merge diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -399,7 +399,7 @@ def unpackiterable(self, w_obj, expected_length=-1): if isinstance(w_obj, W_AbstractTupleObject): - t = w_obj.getitems_copy(space) + t = w_obj.getitems_copy(self) elif type(w_obj) is W_ListObject: t = w_obj.getitems_copy() else: @@ -413,7 +413,7 @@ """ Fast paths """ if isinstance(w_obj, W_AbstractTupleObject): - t = w_obj.tolist(space) + t = 
w_obj.tolist(self) elif type(w_obj) is W_ListObject: if unroll: t = w_obj.getitems_unroll() From noreply at buildbot.pypy.org Thu Mar 29 00:01:30 2012 From: noreply at buildbot.pypy.org (lac) Date: Thu, 29 Mar 2012 00:01:30 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Fix 3 extremely minor typos/grammar Message-ID: <20120328220130.40335822B2@wyvern.cs.uni-duesseldorf.de> Author: Laura Creighton Branch: extradoc Changeset: r4165:6ef76531d696 Date: 2012-03-29 00:00 +0200 http://bitbucket.org/pypy/extradoc/changeset/6ef76531d696/ Log: Fix 3 extremely minor typos/grammar diff --git a/talk/uct2012/talk.rst b/talk/uct2012/talk.rst --- a/talk/uct2012/talk.rst +++ b/talk/uct2012/talk.rst @@ -17,7 +17,7 @@ * **a framework for writing dynamic language VMs** -* an agile project sponsored by EU and others +* an agile project sponsored by the EU and others What is a VM? ------------- @@ -42,11 +42,11 @@ Where does PyPy come in? ------------------------ -* Tools for writing these program quickly, and efficiently. +* Tools for writing these programs quickly, and efficiently. 
* Helpers for things like parsing - * Free JIT, and garbage collector + * Free JIT, and garbage collectors * Mostly you write a totally normal VM in python, and it becomes magically fast From noreply at buildbot.pypy.org Thu Mar 29 00:47:08 2012 From: noreply at buildbot.pypy.org (wlav) Date: Thu, 29 Mar 2012 00:47:08 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: o) simplication of exception handling Message-ID: <20120328224708.CCBC5822B2@wyvern.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r54048:dbf598dad646 Date: 2012-03-28 15:47 -0700 http://bitbucket.org/pypy/pypy/changeset/dbf598dad646/ Log: o) simplication of exception handling o) human-readable error messages if all overloads fail o) back to 'auto' for thread safety for CINT back-end (still not working as desired) diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py --- a/pypy/module/cppyy/capi/cint_capi.py +++ b/pypy/module/cppyy/capi/cint_capi.py @@ -20,7 +20,7 @@ def identify(): return 'CINT' -threadsafe = False +threadsafe = 'auto' # force loading in global mode of core libraries, rather than linking with # them as PyPy uses various version of dlopen in various places; note that diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -51,7 +51,7 @@ return fieldptr def _is_abstract(self, space): - raise TypeError("no converter available") + raise OperationError(space.w_TypeError, space.wrap("no converter available")) def convert_argument(self, space, w_obj, address): self._is_abstract(space) @@ -140,7 +140,8 @@ try: byteptr[0] = buf.get_raw_address() except ValueError: - raise TypeError("raw buffer interface not supported") + raise OperationError(space.w_TypeError, + space.wrap("raw buffer interface not supported")) class NumericTypeConverterMixin(object): @@ -190,7 +191,8 @@ self.name = name def convert_argument(self, space, 
w_obj, address): - raise TypeError('no converter available for type "%s"' % self.name) + raise OperationError(space.w_TypeError, + space.wrap('no converter available for type "%s"' % self.name)) class BoolConverter(TypeConverter): @@ -200,7 +202,8 @@ def _unwrap_object(self, space, w_obj): arg = space.c_int_w(w_obj) if arg != False and arg != True: - raise ValueError("boolean value should be bool, or integer 1 or 0") + raise OperationError(space.w_ValueError, + space.wrap("boolean value should be bool, or integer 1 or 0")) return arg def convert_argument(self, space, w_obj, address): @@ -233,14 +236,16 @@ if space.isinstance_w(w_value, space.w_int): ival = space.c_int_w(w_value) if ival < 0 or 256 <= ival: - raise ValueError("char arg not in range(256)") + raise OperationError(space.w_ValueError, + space.wrap("char arg not in range(256)")) value = rffi.cast(rffi.CHAR, space.c_int_w(w_value)) else: value = space.str_w(w_value) if len(value) != 1: - raise ValueError("char expected, got string of size %d" % len(value)) + raise OperationError(space.w_ValueError, + space.wrap("char expected, got string of size %d" % len(value))) return value[0] # turn it into a "char" to the annotator def convert_argument(self, space, w_obj, address): @@ -516,8 +521,9 @@ obj.cppclass.handle, self.cpptype.handle, rawobject) obj_address = capi.direct_ptradd(rawobject, offset) return rffi.cast(capi.C_OBJECT, obj_address) - raise TypeError("cannot pass %s as %s" % - (space.type(w_obj).getname(space, "?"), self.cpptype.name)) + raise OperationError(space.w_TypeError, + space.wrap("cannot pass %s as %s" % + (space.type(w_obj).getname(space, "?"), self.cpptype.name))) def convert_argument(self, space, w_obj, address): x = rffi.cast(rffi.VOIDPP, address) diff --git a/pypy/module/cppyy/executor.py b/pypy/module/cppyy/executor.py --- a/pypy/module/cppyy/executor.py +++ b/pypy/module/cppyy/executor.py @@ -1,5 +1,7 @@ import sys +from pypy.interpreter.error import OperationError + from 
pypy.rpython.lltypesystem import rffi, lltype from pypy.rlib import libffi, clibffi @@ -19,7 +21,8 @@ pass def execute(self, space, cppmethod, cppthis, num_args, args): - raise TypeError('return type not available or supported') + raise OperationError(space.w_TypeError, + space.wrap('return type not available or supported')) def execute_libffi(self, space, libffifunc, argchain): from pypy.module.cppyy.interp_cppyy import FastCallNotPossible @@ -360,7 +363,6 @@ # currently used until proper lazy instantiation available in interp_cppyy return FunctionExecutor(space, None) - # raise TypeError("no clue what %s is" % name) _executors["void"] = VoidExecutor _executors["void*"] = PtrTypeExecutor diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -133,7 +133,8 @@ args_expected = len(self.arg_defs) args_given = len(args_w) if args_expected < args_given or args_given < self.args_required: - raise TypeError("wrong number of arguments") + raise OperationError(self.space.w_TypeError, + self.space.wrap("wrong number of arguments")) if self.arg_converters is None: self._setup(cppthis) @@ -235,7 +236,7 @@ assert lltype.typeOf(newthis) == capi.C_OBJECT try: CPPMethod.call(self, newthis, args_w) - except Exception: + except: capi.c_deallocate(self.cpptype.handle, newthis) raise return wrap_new_cppobject_nocast(self.space, None, self.cpptype, newthis, False, True) @@ -285,12 +286,16 @@ # only get here if all overloads failed ... 
errmsg = 'None of the overloads matched:' + if hasattr(self.space, "fake"): # FakeSpace fails errorstr (see below) + raise OperationError(self.space.w_TypeError, self.space.wrap(errmsg)) for i in range(len(self.functions)): cppyyfunc = self.functions[i] try: return cppyyfunc.call(cppthis, args_w) + except OperationError, e: + errmsg += '\n\t'+e.errorstr(self.space) except Exception, e: - errmsg += '\n\t'+str(e) + errmsg += '\n\tException:'+str(e) raise OperationError(self.space.w_TypeError, self.space.wrap(errmsg)) @@ -334,23 +339,13 @@ def get(self, w_cppinstance, w_type): cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) offset = self._get_offset(cppinstance) - try: - return self.converter.from_memory(self.space, w_cppinstance, w_type, offset) - except TypeError, e: - raise OperationError(self.space.w_TypeError, self.space.wrap(str(e))) - except ValueError, e: - raise OperationError(self.space.w_ValueError, self.space.wrap(str(e))) + return self.converter.from_memory(self.space, w_cppinstance, w_type, offset) def set(self, w_cppinstance, w_value): cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) offset = self._get_offset(cppinstance) - try: - self.converter.to_memory(self.space, w_cppinstance, w_value, offset) - return self.space.w_None - except TypeError, e: - raise OperationError(self.space.w_TypeError, self.space.wrap(str(e))) - except ValueError, e: - raise OperationError(self.space.w_ValueError, self.space.wrap(str(e))) + self.converter.to_memory(self.space, w_cppinstance, w_value, offset) + return self.space.w_None W_CPPDataMember.typedef = TypeDef( 'CPPDataMember', diff --git a/pypy/module/cppyy/test/test_zjit.py b/pypy/module/cppyy/test/test_zjit.py --- a/pypy/module/cppyy/test/test_zjit.py +++ b/pypy/module/cppyy/test/test_zjit.py @@ -25,8 +25,13 @@ typename = "type" def __init__(self, name): self.name = name + self.__name__ = name def getname(self, space, name): return self.name +class 
FakeException(FakeType): + def __init__(self, name): + FakeType.__init__(self, name) + self.message = name @jit.dont_look_inside def _opaque_direct_ptradd(ptr, offset): @@ -47,12 +52,12 @@ class FakeSpace(object): fake = True - w_ValueError = FakeType("ValueError") - w_TypeError = FakeType("TypeError") - w_AttributeError = FakeType("AttributeError") - w_ReferenceError = FakeType("ReferenceError") - w_NotImplementedError = FakeType("NotImplementedError") - w_RuntimeError = FakeType("RuntimeError") + w_ValueError = FakeException("ValueError") + w_TypeError = FakeException("TypeError") + w_AttributeError = FakeException("AttributeError") + w_ReferenceError = FakeException("ReferenceError") + w_NotImplementedError = FakeException("NotImplementedError") + w_RuntimeError = FakeException("RuntimeError") w_None = None w_str = FakeType("str") @@ -99,6 +104,9 @@ def exception_match(self, typ, sub): return typ is sub + def is_w(self, w_one, w_two): + return w_one is w_two + def int_w(self, w_obj): assert isinstance(w_obj, FakeInt) return w_obj.val @@ -107,11 +115,14 @@ assert isinstance(w_obj, FakeInt) return rarithmetic.r_uint(w_obj.val) - def str_w(self, w_obj): assert isinstance(w_obj, FakeString) return w_obj.val + def str(self, obj): + assert isinstance(obj, str) + return obj + c_int_w = int_w def isinstance_w(self, w_obj, w_type): @@ -121,6 +132,11 @@ def type(self, w_obj): return FakeType("fake") + def getattr(self, w_obj, w_name): + assert isinstance(w_obj, FakeException) + assert self.str_w(w_name) == "__name__" + return FakeString(w_obj.name) + def findattr(self, w_obj, w_name): return None From noreply at buildbot.pypy.org Thu Mar 29 02:47:50 2012 From: noreply at buildbot.pypy.org (wlav) Date: Thu, 29 Mar 2012 02:47:50 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: put C++ signature in method doc-string Message-ID: <20120329004750.E856C822B2@wyvern.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r54049:f343cf6ff013 Date: 
2012-03-28 17:12 -0700 http://bitbucket.org/pypy/pypy/changeset/f343cf6ff013/ Log: put C++ signature in method doc-string diff --git a/pypy/module/cppyy/capi/__init__.py b/pypy/module/cppyy/capi/__init__.py --- a/pypy/module/cppyy/capi/__init__.py +++ b/pypy/module/cppyy/capi/__init__.py @@ -285,6 +285,13 @@ compilation_info=backend.eci) def c_method_arg_default(cppscope, method_index, arg_index): return charp2str_free(_c_method_arg_default(cppscope, method_index, arg_index)) +_c_method_signature = rffi.llexternal( + "cppyy_method_signature", + [C_SCOPE, rffi.INT], rffi.CCHARP, + threadsafe=threadsafe, + compilation_info=backend.eci) +def c_method_signature(cppscope, method_index): + return charp2str_free(_c_method_signature(cppscope, method_index)) c_get_method = rffi.llexternal( "cppyy_get_method", diff --git a/pypy/module/cppyy/include/capi.h b/pypy/module/cppyy/include/capi.h --- a/pypy/module/cppyy/include/capi.h +++ b/pypy/module/cppyy/include/capi.h @@ -69,6 +69,7 @@ int cppyy_method_req_args(cppyy_scope_t scope, int method_index); char* cppyy_method_arg_type(cppyy_scope_t scope, int method_index, int arg_index); char* cppyy_method_arg_default(cppyy_scope_t scope, int method_index, int arg_index); + char* cppyy_method_signature(cppyy_scope_t scope, int method_index); cppyy_method_t cppyy_get_method(cppyy_scope_t scope, int method_index); diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -216,6 +216,9 @@ conv.free_argument(rffi.cast(capi.C_OBJECT, arg_i)) capi.c_deallocate_function_args(args) + def signature(self): + return capi.c_method_signature(self.cpptype.handle, self.method_index) + def __repr__(self): return "CPPFunction(%s, %s, %r, %s)" % ( self.cpptype, self.method_index, self.executor, self.arg_defs) @@ -299,6 +302,12 @@ raise OperationError(self.space.w_TypeError, self.space.wrap(errmsg)) + def signature(self): + sig = 
self.functions[0].signature() + for i in range(1, len(self.functions)): + sig += '\n'+self.functions[i].signature() + return self.space.wrap(sig) + def __repr__(self): return "W_CPPOverload(%s, %s)" % (self.func_name, self.functions) @@ -306,6 +315,7 @@ 'CPPOverload', is_static = interp2app(W_CPPOverload.is_static, unwrap_spec=['self']), call = interp2app(W_CPPOverload.call, unwrap_spec=['self', W_Root, 'args_w']), + signature = interp2app(W_CPPOverload.signature, unwrap_spec=['self']), ) diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py --- a/pypy/module/cppyy/pythonify.py +++ b/pypy/module/cppyy/pythonify.py @@ -57,6 +57,7 @@ def method(self, *args): return cppol.call(self, *args) method.__name__ = meth_name + method.__doc__ = cppol.signature() return method diff --git a/pypy/module/cppyy/src/reflexcwrapper.cxx b/pypy/module/cppyy/src/reflexcwrapper.cxx --- a/pypy/module/cppyy/src/reflexcwrapper.cxx +++ b/pypy/module/cppyy/src/reflexcwrapper.cxx @@ -11,6 +11,7 @@ #include "Reflex/TypeTemplate.h" #include +#include #include #include @@ -355,6 +356,22 @@ return cppstring_to_cstring(dflt); } +char* cppyy_method_signature(cppyy_scope_t handle, int method_index) { + Reflex::Scope s = scope_from_handle(handle); + Reflex::Member m = s.FunctionMemberAt(method_index); + Reflex::Type mt = m.TypeOf(); + std::ostringstream sig; + sig << s.Name(Reflex::SCOPED) << "::" << m.Name() << "("; + int nArgs = m.FunctionParameterSize(); + for (int iarg = 0; iarg < nArgs; ++iarg) { + sig << mt.FunctionParameterAt(iarg).Name(Reflex::SCOPED|Reflex::QUALIFIED); + if (iarg != nArgs-1) + sig << ", "; + } + sig << ")" << std::ends; + return cppstring_to_cstring(sig.str()); +} + cppyy_method_t cppyy_get_method(cppyy_scope_t handle, int method_index) { Reflex::Scope s = scope_from_handle(handle); Reflex::Member m = s.FunctionMemberAt(method_index); diff --git a/pypy/module/cppyy/test/fragile.h b/pypy/module/cppyy/test/fragile.h --- a/pypy/module/cppyy/test/fragile.h 
+++ b/pypy/module/cppyy/test/fragile.h @@ -70,4 +70,9 @@ extern I gI; +class J { +public: + int method1(int, double) { return 0; } +}; + } // namespace fragile diff --git a/pypy/module/cppyy/test/test_fragile.py b/pypy/module/cppyy/test/test_fragile.py --- a/pypy/module/cppyy/test/test_fragile.py +++ b/pypy/module/cppyy/test/test_fragile.py @@ -158,3 +158,22 @@ g = cppyy.gbl.fragile.gI assert not g + + def test10_documentation(self): + """Check contents of documentation""" + + import cppyy + + assert cppyy.gbl.fragile == cppyy.gbl.fragile + fragile = cppyy.gbl.fragile + + d = fragile.D() + try: + d.check(None) # raises TypeError + assert 0 + except TypeError, e: + assert "TypeError: wrong number of arguments" in str(e) + + j = fragile.J() + assert fragile.J.method1.__doc__ == j.method1.__doc__ + assert j.method1.__doc__ == "fragile::J::method1(int, double)" From noreply at buildbot.pypy.org Thu Mar 29 02:47:52 2012 From: noreply at buildbot.pypy.org (wlav) Date: Thu, 29 Mar 2012 02:47:52 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: doc strings with C++ signatures for CINT backend Message-ID: <20120329004752.530C3822B2@wyvern.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r54050:97fa84af9049 Date: 2012-03-28 17:29 -0700 http://bitbucket.org/pypy/pypy/changeset/97fa84af9049/ Log: doc strings with C++ signatures for CINT backend diff --git a/pypy/module/cppyy/src/cintcwrapper.cxx b/pypy/module/cppyy/src/cintcwrapper.cxx --- a/pypy/module/cppyy/src/cintcwrapper.cxx +++ b/pypy/module/cppyy/src/cintcwrapper.cxx @@ -525,6 +525,21 @@ return cppstring_to_cstring(""); } +char* cppyy_method_signature(cppyy_scope_t handle, int method_index) { + TFunction* f = type_get_method(handle, method_index); + TClassRef cr = type_from_handle(handle); + std::ostringstream sig; + sig << cr.GetClassName() << "::" << f->GetName() << "("; + int nArgs = f->GetNargs(); + for (int iarg = 0; iarg < nArgs; ++iarg) { + sig << 
((TMethodArg*)f->GetListOfMethodArgs()->At(iarg))->GetFullTypeName(); + if (iarg != nArgs-1) + sig << ", "; + } + sig << ")" << std::ends; + return cppstring_to_cstring(sig.str()); +} + cppyy_method_t cppyy_get_method(cppyy_scope_t handle, int method_index) { TFunction* f = type_get_method(handle, method_index); return (cppyy_method_t)f->InterfaceMethod(); diff --git a/pypy/module/cppyy/test/fragile_LinkDef.h b/pypy/module/cppyy/test/fragile_LinkDef.h --- a/pypy/module/cppyy/test/fragile_LinkDef.h +++ b/pypy/module/cppyy/test/fragile_LinkDef.h @@ -15,6 +15,7 @@ #pragma link C++ class fragile::G; #pragma link C++ class fragile::H; #pragma link C++ class fragile::I; +#pragma link C++ class fragile::J; #pragma link C++ variable fragile::gI; From noreply at buildbot.pypy.org Thu Mar 29 02:47:53 2012 From: noreply at buildbot.pypy.org (wlav) Date: Thu, 29 Mar 2012 02:47:53 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: really pretty error messages Message-ID: <20120329004753.8B219822B2@wyvern.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r54051:6c98c84b301b Date: 2012-03-28 17:47 -0700 http://bitbucket.org/pypy/pypy/changeset/6c98c84b301b/ Log: really pretty error messages diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -288,7 +288,7 @@ pass # only get here if all overloads failed ... - errmsg = 'None of the overloads matched:' + errmsg = 'none of the %d overloaded methods succeeded. 
Full details:' % len(self.functions) if hasattr(self.space, "fake"): # FakeSpace fails errorstr (see below) raise OperationError(self.space.w_TypeError, self.space.wrap(errmsg)) for i in range(len(self.functions)): @@ -296,9 +296,11 @@ try: return cppyyfunc.call(cppthis, args_w) except OperationError, e: - errmsg += '\n\t'+e.errorstr(self.space) + errmsg += '\n '+cppyyfunc.signature()+' =>\n' + errmsg += ' '+e.errorstr(self.space) except Exception, e: - errmsg += '\n\tException:'+str(e) + errmsg += '\n '+cppyyfunc.signature()+' =>\n' + errmsg += ' Exception:'+str(e) raise OperationError(self.space.w_TypeError, self.space.wrap(errmsg)) diff --git a/pypy/module/cppyy/test/test_fragile.py b/pypy/module/cppyy/test/test_fragile.py --- a/pypy/module/cppyy/test/test_fragile.py +++ b/pypy/module/cppyy/test/test_fragile.py @@ -172,8 +172,22 @@ d.check(None) # raises TypeError assert 0 except TypeError, e: + assert "fragile::D::check()" in str(e) assert "TypeError: wrong number of arguments" in str(e) + try: + d.overload(None) # raises TypeError + assert 0 + except TypeError, e: + assert "fragile::D::overload()" in str(e) + assert "TypeError: wrong number of arguments" in str(e) + assert "fragile::D::overload(fragile::no_such_class*)" in str(e) + assert "TypeError: no converter available for type \"fragile::no_such_class*\"" in str(e) + assert "fragile::D::overload(char, int)" in str(e) + assert "TypeError: expected string, got NoneType object" in str(e) + assert "fragile::D::overload(int, fragile::no_such_class*)" in str(e) + assert "TypeError: unsupported operand type for int(): 'NoneType'" in str(e) + j = fragile.J() assert fragile.J.method1.__doc__ == j.method1.__doc__ assert j.method1.__doc__ == "fragile::J::method1(int, double)" From noreply at buildbot.pypy.org Thu Mar 29 04:49:12 2012 From: noreply at buildbot.pypy.org (gutworth) Date: Thu, 29 Mar 2012 04:49:12 +0200 (CEST) Subject: [pypy-commit] pypy default: if start - stop <= 0, return a empty string Message-ID: 
<20120329024912.E5EFB822B2@wyvern.cs.uni-duesseldorf.de> Author: Benjamin Peterson Branch: Changeset: r54052:3b13b7c4c388 Date: 2012-03-28 22:48 -0400 http://bitbucket.org/pypy/pypy/changeset/3b13b7c4c388/ Log: if start - stop <= 0, return a empty string diff --git a/pypy/rpython/lltypesystem/rstr.py b/pypy/rpython/lltypesystem/rstr.py --- a/pypy/rpython/lltypesystem/rstr.py +++ b/pypy/rpython/lltypesystem/rstr.py @@ -765,7 +765,8 @@ def _ll_stringslice(s1, start, stop): lgt = stop - start assert start >= 0 - assert lgt >= 0 + if lgt <= 0: + return s1.empty() newstr = s1.malloc(lgt) s1.copy_contents(s1, newstr, start, 0, lgt) return newstr diff --git a/pypy/rpython/test/test_rstr.py b/pypy/rpython/test/test_rstr.py --- a/pypy/rpython/test/test_rstr.py +++ b/pypy/rpython/test/test_rstr.py @@ -477,7 +477,8 @@ s1 = s[:3] s2 = s[3:] s3 = s[3:10] - return s1+s2 == s and s2+s1 == const('lohel') and s1+s3 == s + s4 = s[42:44] + return s1+s2 == s and s2+s1 == const('lohel') and s1+s3 == s and s4 == "" res = self.interpret(fn, [0]) assert res From noreply at buildbot.pypy.org Thu Mar 29 05:00:00 2012 From: noreply at buildbot.pypy.org (gutworth) Date: Thu, 29 Mar 2012 05:00:00 +0200 (CEST) Subject: [pypy-commit] pypy default: get correct behavior on JVM Message-ID: <20120329030000.7ECD2822B2@wyvern.cs.uni-duesseldorf.de> Author: Benjamin Peterson Branch: Changeset: r54053:ace5f809040a Date: 2012-03-28 22:59 -0400 http://bitbucket.org/pypy/pypy/changeset/ace5f809040a/ Log: get correct behavior on JVM diff --git a/pypy/rpython/ootypesystem/rstr.py b/pypy/rpython/ootypesystem/rstr.py --- a/pypy/rpython/ootypesystem/rstr.py +++ b/pypy/rpython/ootypesystem/rstr.py @@ -222,6 +222,8 @@ length = s.ll_strlen() if stop > length: stop = length + if start > stop: + start = stop return s.ll_substring(start, stop-start) def ll_stringslice_minusone(s): From noreply at buildbot.pypy.org Thu Mar 29 05:46:28 2012 From: noreply at buildbot.pypy.org (taavi_burns) Date: Thu, 29 Mar 2012 
05:46:28 +0200 (CEST) Subject: [pypy-commit] pypy numpy-ufuncs3: Add square Message-ID: <20120329034628.11367822B2@wyvern.cs.uni-duesseldorf.de> Author: Taavi Burns Branch: numpy-ufuncs3 Changeset: r54054:128dd4bc1819 Date: 2012-03-28 23:23 -0400 http://bitbucket.org/pypy/pypy/changeset/128dd4bc1819/ Log: Add square diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -122,6 +122,7 @@ ("sinh", "sinh"), ("subtract", "subtract"), ('sqrt', 'sqrt'), + ('square', 'square'), ("tan", "tan"), ("tanh", "tanh"), ('bitwise_and', 'bitwise_and'), diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -549,6 +549,7 @@ ("expm1", "expm1", 1, {"promote_to_float": True}), ('sqrt', 'sqrt', 1, {'promote_to_float': True}), + ('square', 'square', 1, {'promote_to_float': True}), ("sin", "sin", 1, {"promote_to_float": True}), ("cos", "cos", 1, {"promote_to_float": True}), diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -455,6 +455,19 @@ assert math.isnan(sqrt(-1)) assert math.isnan(sqrt(nan)) + def test_square(self): + import math + from _numpypy import square + + nan, inf, ninf = float("nan"), float("inf"), float("-inf") + + assert math.isnan(square(nan)) + assert math.isinf(square(inf)) + assert math.isinf(square(ninf)) + assert square(ninf) > 0 + assert [square(x) for x in range(-5, 5)] == [x*x for x in range(-5, 5)] + assert math.isinf(square(1e300)) + def test_radians(self): import math from _numpypy import radians, array diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -741,6 
+741,10 @@ except ValueError: return rfloat.NAN + @simple_unary_op + def square(self, v): + return v*v + @raw_unary_op def isnan(self, v): return rfloat.isnan(v) From noreply at buildbot.pypy.org Thu Mar 29 05:46:29 2012 From: noreply at buildbot.pypy.org (taavi_burns) Date: Thu, 29 Mar 2012 05:46:29 +0200 (CEST) Subject: [pypy-commit] pypy numpy-ufuncs3: Add fmax and fmin Message-ID: <20120329034629.56DD1822B2@wyvern.cs.uni-duesseldorf.de> Author: Taavi Burns Branch: numpy-ufuncs3 Changeset: r54055:3fff4b2e407c Date: 2012-03-28 23:24 -0400 http://bitbucket.org/pypy/pypy/changeset/3fff4b2e407c/ Log: Add fmax and fmin diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -99,6 +99,8 @@ ("exp2", "exp2"), ("expm1", "expm1"), ("fabs", "fabs"), + ("fmax", "fmax"), + ("fmin", "fmin"), ("fmod", "fmod"), ("floor", "floor"), ("ceil", "ceil"), diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -541,6 +541,8 @@ ("reciprocal", "reciprocal", 1), ("fabs", "fabs", 1, {"promote_to_float": True}), + ("fmax", "fmax", 2, {"promote_to_float": True}), + ("fmin", "fmin", 2, {"promote_to_float": True}), ("fmod", "fmod", 2, {"promote_to_float": True}), ("floor", "floor", 1, {"promote_to_float": True}), ("ceil", "ceil", 1, {"promote_to_float": True}), diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -135,6 +135,38 @@ assert fabs(float('-inf')) == float('inf') assert isnan(fabs(float('nan'))) + def test_fmax(self): + from _numpypy import fmax + import math + + nnan, nan, inf, ninf = float('-nan'), float('nan'), float('inf'), float('-inf') + + a = [ninf, -5, 0, 5, inf] + assert (fmax(a, 
[ninf]*5) == a).all() + assert (fmax(a, [inf]*5) == [inf]*5).all() + assert (fmax(a, [1]*5) == [1, 1, 1, 5, inf]).all() + assert math.isnan(fmax(nan, 0)) + assert math.isnan(fmax(0, nan)) + assert math.isnan(fmax(nan, nan)) + # The numpy docs specify that the FIRST NaN should be used if both are NaN + assert math.copysign(1.0, fmax(nnan, nan)) == -1.0 + + def test_fmin(self): + from _numpypy import fmin + import math + + nnan, nan, inf, ninf = float('-nan'), float('nan'), float('inf'), float('-inf') + + a = [ninf, -5, 0, 5, inf] + assert (fmin(a, [ninf]*5) == [ninf]*5).all() + assert (fmin(a, [inf]*5) == a).all() + assert (fmin(a, [1]*5) == [ninf, -5, 0, 1, 1]).all() + assert math.isnan(fmin(nan, 0)) + assert math.isnan(fmin(0, nan)) + assert math.isnan(fmin(nan, nan)) + # The numpy docs specify that the FIRST NaN should be used if both are NaN + assert math.copysign(1.0, fmin(nnan, nan)) == -1.0 + def test_fmod(self): from _numpypy import fmod import math diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -631,6 +631,22 @@ return math.fabs(v) @simple_binary_op + def fmax(self, v1, v2): + if math.isnan(v1): + return v1 + elif math.isnan(v2): + return v2 + return max(v1, v2) + + @simple_binary_op + def fmin(self, v1, v2): + if math.isnan(v1): + return v1 + elif math.isnan(v2): + return v2 + return min(v1, v2) + + @simple_binary_op def fmod(self, v1, v2): try: return math.fmod(v1, v2) From noreply at buildbot.pypy.org Thu Mar 29 05:46:30 2012 From: noreply at buildbot.pypy.org (taavi_burns) Date: Thu, 29 Mar 2012 05:46:30 +0200 (CEST) Subject: [pypy-commit] pypy numpy-ufuncs3: Exposing invert Message-ID: <20120329034630.91846822B2@wyvern.cs.uni-duesseldorf.de> Author: Taavi Burns Branch: numpy-ufuncs3 Changeset: r54056:348c30d55c51 Date: 2012-03-28 23:45 -0400 http://bitbucket.org/pypy/pypy/changeset/348c30d55c51/ Log: Exposing invert diff --git 
a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -131,6 +131,7 @@ ('bitwise_or', 'bitwise_or'), ('bitwise_xor', 'bitwise_xor'), ('bitwise_not', 'invert'), + ('invert', 'invert'), ('isnan', 'isnan'), ('isinf', 'isinf'), ('isneginf', 'isneginf'), diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -591,10 +591,11 @@ raises(TypeError, 'array([1.0]) & 1') def test_unary_bitops(self): - from _numpypy import bitwise_not, array + from _numpypy import bitwise_not, invert, array a = array([1, 2, 3, 4]) assert (~a == [-2, -3, -4, -5]).all() assert (bitwise_not(a) == ~a).all() + assert (invert(a) == ~a).all() def test_comparisons(self): import operator From noreply at buildbot.pypy.org Thu Mar 29 05:50:58 2012 From: noreply at buildbot.pypy.org (gutworth) Date: Thu, 29 Mar 2012 05:50:58 +0200 (CEST) Subject: [pypy-commit] pypy default: fix for runicode test Message-ID: <20120329035058.BFFD3822B2@wyvern.cs.uni-duesseldorf.de> Author: Benjamin Peterson Branch: Changeset: r54057:4538eb3f3810 Date: 2012-03-28 23:50 -0400 http://bitbucket.org/pypy/pypy/changeset/4538eb3f3810/ Log: fix for runicode test diff --git a/pypy/rpython/test/test_rstr.py b/pypy/rpython/test/test_rstr.py --- a/pypy/rpython/test/test_rstr.py +++ b/pypy/rpython/test/test_rstr.py @@ -478,7 +478,7 @@ s2 = s[3:] s3 = s[3:10] s4 = s[42:44] - return s1+s2 == s and s2+s1 == const('lohel') and s1+s3 == s and s4 == "" + return s1+s2 == s and s2+s1 == const('lohel') and s1+s3 == s and s4 == const('') res = self.interpret(fn, [0]) assert res From noreply at buildbot.pypy.org Thu Mar 29 05:51:00 2012 From: noreply at buildbot.pypy.org (gutworth) Date: Thu, 29 Mar 2012 05:51:00 +0200 (CEST) Subject: [pypy-commit] pypy default: wrap nicely Message-ID: 
<20120329035100.035EA822B2@wyvern.cs.uni-duesseldorf.de> Author: Benjamin Peterson Branch: Changeset: r54058:029ef1dd4a1a Date: 2012-03-28 23:50 -0400 http://bitbucket.org/pypy/pypy/changeset/029ef1dd4a1a/ Log: wrap nicely diff --git a/pypy/rpython/test/test_rstr.py b/pypy/rpython/test/test_rstr.py --- a/pypy/rpython/test/test_rstr.py +++ b/pypy/rpython/test/test_rstr.py @@ -478,7 +478,10 @@ s2 = s[3:] s3 = s[3:10] s4 = s[42:44] - return s1+s2 == s and s2+s1 == const('lohel') and s1+s3 == s and s4 == const('') + return (s1+s2 == s and + s2+s1 == const('lohel') and + s1+s3 == s and + s4 == const('')) res = self.interpret(fn, [0]) assert res From noreply at buildbot.pypy.org Thu Mar 29 13:15:56 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 29 Mar 2012 13:15:56 +0200 (CEST) Subject: [pypy-commit] pypy default: Revert 3b13b7c4c388 through 029ef1dd4a1a: this may look like a good Message-ID: <20120329111556.69C208236A@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r54059:516cbe7d60c0 Date: 2012-03-29 13:15 +0200 http://bitbucket.org/pypy/pypy/changeset/516cbe7d60c0/ Log: Revert 3b13b7c4c388 through 029ef1dd4a1a: this may look like a good idea, but it creates a new path for the JIT, which is problematic in some cases. Reverting unless further discussion justifies it. 
diff --git a/pypy/rpython/lltypesystem/rstr.py b/pypy/rpython/lltypesystem/rstr.py --- a/pypy/rpython/lltypesystem/rstr.py +++ b/pypy/rpython/lltypesystem/rstr.py @@ -765,8 +765,7 @@ def _ll_stringslice(s1, start, stop): lgt = stop - start assert start >= 0 - if lgt <= 0: - return s1.empty() + assert lgt >= 0 newstr = s1.malloc(lgt) s1.copy_contents(s1, newstr, start, 0, lgt) return newstr diff --git a/pypy/rpython/ootypesystem/rstr.py b/pypy/rpython/ootypesystem/rstr.py --- a/pypy/rpython/ootypesystem/rstr.py +++ b/pypy/rpython/ootypesystem/rstr.py @@ -222,8 +222,6 @@ length = s.ll_strlen() if stop > length: stop = length - if start > stop: - start = stop return s.ll_substring(start, stop-start) def ll_stringslice_minusone(s): diff --git a/pypy/rpython/test/test_rstr.py b/pypy/rpython/test/test_rstr.py --- a/pypy/rpython/test/test_rstr.py +++ b/pypy/rpython/test/test_rstr.py @@ -477,11 +477,7 @@ s1 = s[:3] s2 = s[3:] s3 = s[3:10] - s4 = s[42:44] - return (s1+s2 == s and - s2+s1 == const('lohel') and - s1+s3 == s and - s4 == const('')) + return s1+s2 == s and s2+s1 == const('lohel') and s1+s3 == s res = self.interpret(fn, [0]) assert res From noreply at buildbot.pypy.org Thu Mar 29 14:47:47 2012 From: noreply at buildbot.pypy.org (gutworth) Date: Thu, 29 Mar 2012 14:47:47 +0200 (CEST) Subject: [pypy-commit] pypy default: Bring back 3b13b7c4c388 through 029ef1dd4a1a; fix slicing when stop > start Message-ID: <20120329124747.F405E8236A@wyvern.cs.uni-duesseldorf.de> Author: Benjamin Peterson Branch: Changeset: r54060:2f888f931838 Date: 2012-03-29 08:40 -0400 http://bitbucket.org/pypy/pypy/changeset/2f888f931838/ Log: Bring back 3b13b7c4c388 through 029ef1dd4a1a; fix slicing when stop > start diff --git a/pypy/rpython/lltypesystem/rstr.py b/pypy/rpython/lltypesystem/rstr.py --- a/pypy/rpython/lltypesystem/rstr.py +++ b/pypy/rpython/lltypesystem/rstr.py @@ -765,7 +765,8 @@ def _ll_stringslice(s1, start, stop): lgt = stop - start assert start >= 0 - assert lgt >= 0 + if 
lgt <= 0: + return s1.empty() newstr = s1.malloc(lgt) s1.copy_contents(s1, newstr, start, 0, lgt) return newstr diff --git a/pypy/rpython/ootypesystem/rstr.py b/pypy/rpython/ootypesystem/rstr.py --- a/pypy/rpython/ootypesystem/rstr.py +++ b/pypy/rpython/ootypesystem/rstr.py @@ -222,6 +222,8 @@ length = s.ll_strlen() if stop > length: stop = length + if start > stop: + start = stop return s.ll_substring(start, stop-start) def ll_stringslice_minusone(s): diff --git a/pypy/rpython/test/test_rstr.py b/pypy/rpython/test/test_rstr.py --- a/pypy/rpython/test/test_rstr.py +++ b/pypy/rpython/test/test_rstr.py @@ -477,7 +477,11 @@ s1 = s[:3] s2 = s[3:] s3 = s[3:10] - return s1+s2 == s and s2+s1 == const('lohel') and s1+s3 == s + s4 = s[42:44] + return (s1+s2 == s and + s2+s1 == const('lohel') and + s1+s3 == s and + s4 == const('')) res = self.interpret(fn, [0]) assert res From noreply at buildbot.pypy.org Thu Mar 29 14:47:49 2012 From: noreply at buildbot.pypy.org (gutworth) Date: Thu, 29 Mar 2012 14:47:49 +0200 (CEST) Subject: [pypy-commit] pypy default: add comment Message-ID: <20120329124749.8D3D98236A@wyvern.cs.uni-duesseldorf.de> Author: Benjamin Peterson Branch: Changeset: r54061:49edece70bd7 Date: 2012-03-29 08:43 -0400 http://bitbucket.org/pypy/pypy/changeset/49edece70bd7/ Log: add comment diff --git a/pypy/rpython/lltypesystem/rstr.py b/pypy/rpython/lltypesystem/rstr.py --- a/pypy/rpython/lltypesystem/rstr.py +++ b/pypy/rpython/lltypesystem/rstr.py @@ -765,6 +765,8 @@ def _ll_stringslice(s1, start, stop): lgt = stop - start assert start >= 0 + # If start >= stop, return a empty string. This can happen if the start + # is greater than the length of the string. 
if lgt <= 0: return s1.empty() newstr = s1.malloc(lgt) diff --git a/pypy/rpython/ootypesystem/rstr.py b/pypy/rpython/ootypesystem/rstr.py --- a/pypy/rpython/ootypesystem/rstr.py +++ b/pypy/rpython/ootypesystem/rstr.py @@ -222,6 +222,8 @@ length = s.ll_strlen() if stop > length: stop = length + # If start > stop, return a empty string. This can happen if the start + # is greater than the length of the string. if start > stop: start = stop return s.ll_substring(start, stop-start) From noreply at buildbot.pypy.org Thu Mar 29 14:51:47 2012 From: noreply at buildbot.pypy.org (gutworth) Date: Thu, 29 Mar 2012 14:51:47 +0200 (CEST) Subject: [pypy-commit] pypy default: avoid creating another JIT path Message-ID: <20120329125147.6D5E48236A@wyvern.cs.uni-duesseldorf.de> Author: Benjamin Peterson Branch: Changeset: r54062:d6bef3538524 Date: 2012-03-29 08:51 -0400 http://bitbucket.org/pypy/pypy/changeset/d6bef3538524/ Log: avoid creating another JIT path diff --git a/pypy/rpython/lltypesystem/rstr.py b/pypy/rpython/lltypesystem/rstr.py --- a/pypy/rpython/lltypesystem/rstr.py +++ b/pypy/rpython/lltypesystem/rstr.py @@ -765,9 +765,10 @@ def _ll_stringslice(s1, start, stop): lgt = stop - start assert start >= 0 - # If start >= stop, return a empty string. This can happen if the start - # is greater than the length of the string. - if lgt <= 0: + # If start > stop, return a empty string. This can happen if the start + # is greater than the length of the string. Use < instead <= to avoid + # creating another path for the JIT. 
+ if lgt < 0: return s1.empty() newstr = s1.malloc(lgt) s1.copy_contents(s1, newstr, start, 0, lgt) From noreply at buildbot.pypy.org Thu Mar 29 14:53:09 2012 From: noreply at buildbot.pypy.org (gutworth) Date: Thu, 29 Mar 2012 14:53:09 +0200 (CEST) Subject: [pypy-commit] pypy default: grammar Message-ID: <20120329125309.EADF18236A@wyvern.cs.uni-duesseldorf.de> Author: Benjamin Peterson Branch: Changeset: r54063:b5cf20353d19 Date: 2012-03-29 08:52 -0400 http://bitbucket.org/pypy/pypy/changeset/b5cf20353d19/ Log: grammar diff --git a/pypy/rpython/lltypesystem/rstr.py b/pypy/rpython/lltypesystem/rstr.py --- a/pypy/rpython/lltypesystem/rstr.py +++ b/pypy/rpython/lltypesystem/rstr.py @@ -766,7 +766,7 @@ lgt = stop - start assert start >= 0 # If start > stop, return a empty string. This can happen if the start - # is greater than the length of the string. Use < instead <= to avoid + # is greater than the length of the string. Use < instead of <= to avoid # creating another path for the JIT. if lgt < 0: return s1.empty() From noreply at buildbot.pypy.org Thu Mar 29 14:54:19 2012 From: noreply at buildbot.pypy.org (gutworth) Date: Thu, 29 Mar 2012 14:54:19 +0200 (CEST) Subject: [pypy-commit] pypy default: be more specific Message-ID: <20120329125419.AECB48236A@wyvern.cs.uni-duesseldorf.de> Author: Benjamin Peterson Branch: Changeset: r54064:9ae198609e0b Date: 2012-03-29 08:54 -0400 http://bitbucket.org/pypy/pypy/changeset/9ae198609e0b/ Log: be more specific diff --git a/pypy/rpython/lltypesystem/rstr.py b/pypy/rpython/lltypesystem/rstr.py --- a/pypy/rpython/lltypesystem/rstr.py +++ b/pypy/rpython/lltypesystem/rstr.py @@ -767,7 +767,7 @@ assert start >= 0 # If start > stop, return a empty string. This can happen if the start # is greater than the length of the string. Use < instead of <= to avoid - # creating another path for the JIT. + # creating another path for the JIT when start == stop. 
if lgt < 0: return s1.empty() newstr = s1.malloc(lgt) From noreply at buildbot.pypy.org Thu Mar 29 16:56:51 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 29 Mar 2012 16:56:51 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 1c041f97db71 on branch numpypy-reshape Message-ID: <20120329145651.4863D8236A@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r54065:3ecbf4950085 Date: 2012-03-29 16:49 +0200 http://bitbucket.org/pypy/pypy/changeset/3ecbf4950085/ Log: Merge closed head 1c041f97db71 on branch numpypy-reshape From noreply at buildbot.pypy.org Thu Mar 29 16:56:52 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 29 Mar 2012 16:56:52 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head cc88cec43893 on branch matrixmath-dot Message-ID: <20120329145652.6A4F38236A@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r54066:02314fae286b Date: 2012-03-29 16:49 +0200 http://bitbucket.org/pypy/pypy/changeset/02314fae286b/ Log: Merge closed head cc88cec43893 on branch matrixmath-dot From noreply at buildbot.pypy.org Thu Mar 29 16:56:54 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 29 Mar 2012 16:56:54 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head fdfd5bbea0c4 on branch merge-2.7.2 Message-ID: <20120329145654.9A0C08236A@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r54067:ca91c114316f Date: 2012-03-29 16:49 +0200 http://bitbucket.org/pypy/pypy/changeset/ca91c114316f/ Log: Merge closed head fdfd5bbea0c4 on branch merge-2.7.2 From noreply at buildbot.pypy.org Thu Mar 29 16:56:56 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 29 Mar 2012 16:56:56 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head b21805c30c04 on branch builtin-module Message-ID: <20120329145656.C81328236A@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo 
Branch: closed-branches Changeset: r54068:ae6dc9cbb6a8 Date: 2012-03-29 16:49 +0200 http://bitbucket.org/pypy/pypy/changeset/ae6dc9cbb6a8/ Log: Merge closed head b21805c30c04 on branch builtin-module From noreply at buildbot.pypy.org Thu Mar 29 16:56:59 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 29 Mar 2012 16:56:59 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head df5b775bc528 on branch numpy-single-jitdriver Message-ID: <20120329145659.01D978236A@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r54069:98d42db17d7f Date: 2012-03-29 16:49 +0200 http://bitbucket.org/pypy/pypy/changeset/98d42db17d7f/ Log: Merge closed head df5b775bc528 on branch numpy-single-jitdriver From noreply at buildbot.pypy.org Thu Mar 29 16:57:01 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 29 Mar 2012 16:57:01 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 43148440cc3f on branch string-NUL Message-ID: <20120329145701.37A7D8236A@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r54070:d5992054cf65 Date: 2012-03-29 16:50 +0200 http://bitbucket.org/pypy/pypy/changeset/d5992054cf65/ Log: Merge closed head 43148440cc3f on branch string-NUL From noreply at buildbot.pypy.org Thu Mar 29 16:57:03 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 29 Mar 2012 16:57:03 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 3720ee526894 on branch stm-gc Message-ID: <20120329145703.87D878236A@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r54071:f3a6efab3f6b Date: 2012-03-29 16:50 +0200 http://bitbucket.org/pypy/pypy/changeset/f3a6efab3f6b/ Log: Merge closed head 3720ee526894 on branch stm-gc From noreply at buildbot.pypy.org Thu Mar 29 16:57:04 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 29 Mar 2012 16:57:04 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge 
closed head bb928c63c548 on branch ppc-jit-backend-rpythonization Message-ID: <20120329145704.A694C8236A@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r54072:5bd05e79d4e0 Date: 2012-03-29 16:50 +0200 http://bitbucket.org/pypy/pypy/changeset/5bd05e79d4e0/ Log: Merge closed head bb928c63c548 on branch ppc-jit-backend- rpythonization From noreply at buildbot.pypy.org Thu Mar 29 16:57:05 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 29 Mar 2012 16:57:05 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 028a592738b8 on branch sse-vectorization Message-ID: <20120329145705.C54568236A@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r54073:91c398451efe Date: 2012-03-29 16:50 +0200 http://bitbucket.org/pypy/pypy/changeset/91c398451efe/ Log: Merge closed head 028a592738b8 on branch sse-vectorization From noreply at buildbot.pypy.org Thu Mar 29 16:57:06 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 29 Mar 2012 16:57:06 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 698dc6400468 on branch miniscan Message-ID: <20120329145706.E51688236A@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r54074:3da17fc17a1d Date: 2012-03-29 16:50 +0200 http://bitbucket.org/pypy/pypy/changeset/3da17fc17a1d/ Log: Merge closed head 698dc6400468 on branch miniscan From noreply at buildbot.pypy.org Thu Mar 29 16:57:08 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 29 Mar 2012 16:57:08 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 6cd6773cb83a on branch faster-str-decode-escape Message-ID: <20120329145708.1BA328236A@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r54075:c8a70df82d4f Date: 2012-03-29 16:50 +0200 http://bitbucket.org/pypy/pypy/changeset/c8a70df82d4f/ Log: Merge closed head 6cd6773cb83a on branch faster-str-decode-escape From 
noreply at buildbot.pypy.org Thu Mar 29 16:57:09 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 29 Mar 2012 16:57:09 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 2384ed17e36f on branch translation-time-measurments Message-ID: <20120329145709.3D7568236A@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r54076:cbe4cba8c0a3 Date: 2012-03-29 16:50 +0200 http://bitbucket.org/pypy/pypy/changeset/cbe4cba8c0a3/ Log: Merge closed head 2384ed17e36f on branch translation-time- measurments From noreply at buildbot.pypy.org Thu Mar 29 16:57:10 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 29 Mar 2012 16:57:10 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 77b9215879ee on branch speedup-list-comprehension Message-ID: <20120329145710.59DF48236A@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r54077:9376b7dfd9ce Date: 2012-03-29 16:50 +0200 http://bitbucket.org/pypy/pypy/changeset/9376b7dfd9ce/ Log: Merge closed head 77b9215879ee on branch speedup-list-comprehension From noreply at buildbot.pypy.org Thu Mar 29 16:57:11 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 29 Mar 2012 16:57:11 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 7773199776cc on branch pyarg-parsebuffer-s-star-buffer Message-ID: <20120329145711.778588236A@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r54078:040ceee5a5ea Date: 2012-03-29 16:50 +0200 http://bitbucket.org/pypy/pypy/changeset/040ceee5a5ea/ Log: Merge closed head 7773199776cc on branch pyarg-parsebuffer-s-star- buffer From noreply at buildbot.pypy.org Thu Mar 29 16:57:12 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 29 Mar 2012 16:57:12 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 0cb14871b9aa on branch xapian Message-ID: <20120329145712.B70BC8236A@wyvern.cs.uni-duesseldorf.de> 
Author: Armin Rigo Branch: closed-branches Changeset: r54079:983c7d1d37c9 Date: 2012-03-29 16:50 +0200 http://bitbucket.org/pypy/pypy/changeset/983c7d1d37c9/ Log: Merge closed head 0cb14871b9aa on branch xapian From noreply at buildbot.pypy.org Thu Mar 29 16:57:13 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 29 Mar 2012 16:57:13 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 0fa3060deaee on branch jit-resizable-list Message-ID: <20120329145713.DC3948236A@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r54080:df845eb6f25b Date: 2012-03-29 16:50 +0200 http://bitbucket.org/pypy/pypy/changeset/df845eb6f25b/ Log: Merge closed head 0fa3060deaee on branch jit-resizable-list From noreply at buildbot.pypy.org Thu Mar 29 16:57:15 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 29 Mar 2012 16:57:15 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head b91746e9cbb9 on branch newindex Message-ID: <20120329145715.057078236A@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r54081:4dd1bca269de Date: 2012-03-29 16:50 +0200 http://bitbucket.org/pypy/pypy/changeset/4dd1bca269de/ Log: Merge closed head b91746e9cbb9 on branch newindex From noreply at buildbot.pypy.org Thu Mar 29 16:57:16 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 29 Mar 2012 16:57:16 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 047981f9d0d2 on branch numpy-record-dtypes Message-ID: <20120329145716.343718236A@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r54082:f7646bc94f85 Date: 2012-03-29 16:50 +0200 http://bitbucket.org/pypy/pypy/changeset/f7646bc94f85/ Log: Merge closed head 047981f9d0d2 on branch numpy-record-dtypes From noreply at buildbot.pypy.org Thu Mar 29 16:57:17 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 29 Mar 2012 16:57:17 +0200 (CEST) Subject: [pypy-commit] pypy 
closed-branches: Merge closed head 0d19de864ceb on branch float-bytes Message-ID: <20120329145717.588A98236A@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r54083:241cc4ffd609 Date: 2012-03-29 16:50 +0200 http://bitbucket.org/pypy/pypy/changeset/241cc4ffd609/ Log: Merge closed head 0d19de864ceb on branch float-bytes From noreply at buildbot.pypy.org Thu Mar 29 16:57:18 2012 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 29 Mar 2012 16:57:18 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: re-close this branch Message-ID: <20120329145718.7856A8236A@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r54084:4e25588f0ff0 Date: 2012-03-29 16:50 +0200 http://bitbucket.org/pypy/pypy/changeset/4e25588f0ff0/ Log: re-close this branch From noreply at buildbot.pypy.org Thu Mar 29 19:40:12 2012 From: noreply at buildbot.pypy.org (taavi_burns) Date: Thu, 29 Mar 2012 19:40:12 +0200 (CEST) Subject: [pypy-commit] pypy default: Merge in more ufuncs Message-ID: <20120329174012.40ABA8236A@wyvern.cs.uni-duesseldorf.de> Author: Taavi Burns Branch: Changeset: r54085:285ff15e1498 Date: 2012-03-29 11:31 -0400 http://bitbucket.org/pypy/pypy/changeset/285ff15e1498/ Log: Merge in more ufuncs diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -99,6 +99,8 @@ ("exp2", "exp2"), ("expm1", "expm1"), ("fabs", "fabs"), + ("fmax", "fmax"), + ("fmin", "fmin"), ("fmod", "fmod"), ("floor", "floor"), ("ceil", "ceil"), @@ -122,12 +124,14 @@ ("sinh", "sinh"), ("subtract", "subtract"), ('sqrt', 'sqrt'), + ('square', 'square'), ("tan", "tan"), ("tanh", "tanh"), ('bitwise_and', 'bitwise_and'), ('bitwise_or', 'bitwise_or'), ('bitwise_xor', 'bitwise_xor'), ('bitwise_not', 'invert'), + ('invert', 'invert'), ('isnan', 'isnan'), ('isinf', 'isinf'), ('isneginf', 'isneginf'), diff --git 
a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -541,6 +541,8 @@ ("reciprocal", "reciprocal", 1), ("fabs", "fabs", 1, {"promote_to_float": True}), + ("fmax", "fmax", 2, {"promote_to_float": True}), + ("fmin", "fmin", 2, {"promote_to_float": True}), ("fmod", "fmod", 2, {"promote_to_float": True}), ("floor", "floor", 1, {"promote_to_float": True}), ("ceil", "ceil", 1, {"promote_to_float": True}), @@ -549,6 +551,7 @@ ("expm1", "expm1", 1, {"promote_to_float": True}), ('sqrt', 'sqrt', 1, {'promote_to_float': True}), + ('square', 'square', 1, {'promote_to_float': True}), ("sin", "sin", 1, {"promote_to_float": True}), ("cos", "cos", 1, {"promote_to_float": True}), diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -135,6 +135,38 @@ assert fabs(float('-inf')) == float('inf') assert isnan(fabs(float('nan'))) + def test_fmax(self): + from _numpypy import fmax + import math + + nnan, nan, inf, ninf = float('-nan'), float('nan'), float('inf'), float('-inf') + + a = [ninf, -5, 0, 5, inf] + assert (fmax(a, [ninf]*5) == a).all() + assert (fmax(a, [inf]*5) == [inf]*5).all() + assert (fmax(a, [1]*5) == [1, 1, 1, 5, inf]).all() + assert math.isnan(fmax(nan, 0)) + assert math.isnan(fmax(0, nan)) + assert math.isnan(fmax(nan, nan)) + # The numpy docs specify that the FIRST NaN should be used if both are NaN + assert math.copysign(1.0, fmax(nnan, nan)) == -1.0 + + def test_fmin(self): + from _numpypy import fmin + import math + + nnan, nan, inf, ninf = float('-nan'), float('nan'), float('inf'), float('-inf') + + a = [ninf, -5, 0, 5, inf] + assert (fmin(a, [ninf]*5) == [ninf]*5).all() + assert (fmin(a, [inf]*5) == a).all() + assert (fmin(a, [1]*5) == [ninf, -5, 0, 1, 1]).all() + assert math.isnan(fmin(nan, 
0)) + assert math.isnan(fmin(0, nan)) + assert math.isnan(fmin(nan, nan)) + # The numpy docs specify that the FIRST NaN should be used if both are NaN + assert math.copysign(1.0, fmin(nnan, nan)) == -1.0 + def test_fmod(self): from _numpypy import fmod import math @@ -455,6 +487,19 @@ assert math.isnan(sqrt(-1)) assert math.isnan(sqrt(nan)) + def test_square(self): + import math + from _numpypy import square + + nan, inf, ninf = float("nan"), float("inf"), float("-inf") + + assert math.isnan(square(nan)) + assert math.isinf(square(inf)) + assert math.isinf(square(ninf)) + assert square(ninf) > 0 + assert [square(x) for x in range(-5, 5)] == [x*x for x in range(-5, 5)] + assert math.isinf(square(1e300)) + def test_radians(self): import math from _numpypy import radians, array @@ -546,10 +591,11 @@ raises(TypeError, 'array([1.0]) & 1') def test_unary_bitops(self): - from _numpypy import bitwise_not, array + from _numpypy import bitwise_not, invert, array a = array([1, 2, 3, 4]) assert (~a == [-2, -3, -4, -5]).all() assert (bitwise_not(a) == ~a).all() + assert (invert(a) == ~a).all() def test_comparisons(self): import operator diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -631,6 +631,22 @@ return math.fabs(v) @simple_binary_op + def fmax(self, v1, v2): + if math.isnan(v1): + return v1 + elif math.isnan(v2): + return v2 + return max(v1, v2) + + @simple_binary_op + def fmin(self, v1, v2): + if math.isnan(v1): + return v1 + elif math.isnan(v2): + return v2 + return min(v1, v2) + + @simple_binary_op def fmod(self, v1, v2): try: return math.fmod(v1, v2) @@ -741,6 +757,10 @@ except ValueError: return rfloat.NAN + @simple_unary_op + def square(self, v): + return v*v + @raw_unary_op def isnan(self, v): return rfloat.isnan(v) From noreply at buildbot.pypy.org Fri Mar 30 02:45:40 2012 From: noreply at buildbot.pypy.org (amauryfa) Date: Fri, 30 Mar 2012 02:45:40 +0200 
(CEST) Subject: [pypy-commit] pypy default: cpyext: add support for buffer(array.array) Message-ID: <20120330004540.AC2AB8236A@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: r54086:4f1f458bedae Date: 2012-03-30 02:12 +0200 http://bitbucket.org/pypy/pypy/changeset/4f1f458bedae/ Log: cpyext: add support for buffer(array.array) diff --git a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py --- a/pypy/module/cpyext/bufferobject.py +++ b/pypy/module/cpyext/bufferobject.py @@ -4,6 +4,8 @@ PyObjectFields, PyObject) from pypy.module.cpyext.pyobject import make_typedescr, Py_DecRef from pypy.interpreter.buffer import Buffer, StringBuffer, SubBuffer +from pypy.interpreter.error import OperationError +from pypy.module.array.interp_array import ArrayBuffer PyBufferObjectStruct = lltype.ForwardReference() @@ -43,10 +45,15 @@ if isinstance(w_obj, StringBuffer): py_buf.c_b_base = rffi.cast(PyObject, 0) # space.wrap(w_obj.value) - py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, rffi.str2charp(w_obj.as_str())) + py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, rffi.str2charp(w_obj.value)) + py_buf.c_b_size = w_obj.getlength() + elif isinstance(w_obj, ArrayBuffer): + py_buf.c_b_base = rffi.cast(PyObject, 0) # space.wrap(w_obj.value) + py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, w_obj.data) py_buf.c_b_size = w_obj.getlength() else: - raise Exception("Fail fail fail fail fail") + raise OperationError(space.w_NotImplementedError, space.wrap( + "buffer flavor not supported")) def buffer_realize(space, py_obj): diff --git a/pypy/module/cpyext/test/test_bufferobject.py b/pypy/module/cpyext/test/test_bufferobject.py --- a/pypy/module/cpyext/test/test_bufferobject.py +++ b/pypy/module/cpyext/test/test_bufferobject.py @@ -48,3 +48,17 @@ ]) b = module.buffer_new() raises(AttributeError, getattr, b, 'x') + + def test_array_buffer(self): + module = self.import_extension('foo', [ + ("roundtrip", "METH_O", + """ + PyBufferObject *buf = (PyBufferObject *)args; + return 
PyString_FromStringAndSize(buf->b_ptr, buf->b_size); + """), + ]) + import array + a = array.array('c', 'text') + b = buffer(a) + assert module.roundtrip(b) == 'text' + From noreply at buildbot.pypy.org Fri Mar 30 03:41:16 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 30 Mar 2012 03:41:16 +0200 (CEST) Subject: [pypy-commit] pypy dynamic-specialized-tuple: Kill this optimization, it's bogus. It could, in theory work if neither shape has an object in it. Message-ID: <20120330014116.5B7298236A@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: dynamic-specialized-tuple Changeset: r54087:eaefbf566c6f Date: 2012-03-29 21:41 -0400 http://bitbucket.org/pypy/pypy/changeset/eaefbf566c6f/ Log: Kill this optimization, it's bogus. It could, in theory work if neither shape has an object in it. diff --git a/pypy/objspace/std/tupleobject.py b/pypy/objspace/std/tupleobject.py --- a/pypy/objspace/std/tupleobject.py +++ b/pypy/objspace/std/tupleobject.py @@ -114,8 +114,6 @@ @jit.look_inside_iff(tuple_unroll_condition) def eq__Tuple_Tuple(space, w_tuple1, w_tuple2): - if w_tuple1.tuplestorage.getshape() is not w_tuple2.tuplestorage.getshape(): - return space.w_False if w_tuple1.length() != w_tuple2.length(): return space.w_False for i in xrange(w_tuple1.length()): From noreply at buildbot.pypy.org Fri Mar 30 04:00:07 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 30 Mar 2012 04:00:07 +0200 (CEST) Subject: [pypy-commit] pypy dynamic-specialized-tuple: kill an assert that doesn't hold now. Message-ID: <20120330020007.DFA468236A@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: dynamic-specialized-tuple Changeset: r54088:5b813268023d Date: 2012-03-29 21:59 -0400 http://bitbucket.org/pypy/pypy/changeset/5b813268023d/ Log: kill an assert that doesn't hold now. 
diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -59,7 +59,6 @@ def make_len_gt(self, mode, descr, val): if self.lenbound: assert self.lenbound.mode == mode - assert self.lenbound.descr == descr self.lenbound.bound.make_gt(IntBound(val, val)) else: self.lenbound = LenBound(mode, descr, IntLowerBound(val + 1)) From noreply at buildbot.pypy.org Fri Mar 30 04:46:06 2012 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 30 Mar 2012 04:46:06 +0200 (CEST) Subject: [pypy-commit] pypy dynamic-specialized-tuple: Start trying to get this test to pass under x86, now it segfaults. Message-ID: <20120330024606.5A6CD8236A@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: dynamic-specialized-tuple Changeset: r54089:1bbdd65e9f7e Date: 2012-03-29 22:45 -0400 http://bitbucket.org/pypy/pypy/changeset/1bbdd65e9f7e/ Log: Start trying to get this test to pass under x86, now it segfaults. 
diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py --- a/pypy/jit/backend/llsupport/llmodel.py +++ b/pypy/jit/backend/llsupport/llmodel.py @@ -8,7 +8,7 @@ from pypy.jit.backend.model import AbstractCPU from pypy.jit.backend.llsupport import symbolic from pypy.jit.backend.llsupport.symbolic import WORD, unroll_basic_sizes -from pypy.jit.backend.llsupport.descr import ( +from pypy.jit.backend.llsupport.descr import (FLAG_POINTER, get_size_descr, get_field_descr, get_array_descr, get_call_descr, get_interiorfield_descr, get_dynamic_interiorfield_descr, FieldDescr, ArrayDescr, CallDescr, InteriorFieldDescr) @@ -291,6 +291,9 @@ return ffisupport.get_call_descr_dynamic(self, ffi_args, ffi_result, extrainfo, ffi_flags) + def copy_and_change_descr_typeinfo_to_ptr(self, descr): + return ArrayDescr(descr.basesize, descr.itemsize, descr.lendescr, FLAG_POINTER) + def get_overflow_error(self): ovf_vtable = self.cast_adr_to_int(self._ovf_error_vtable) ovf_inst = lltype.cast_opaque_ptr(llmemory.GCREF, diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -3806,6 +3806,17 @@ res = self.interp_operations(f, [x]) assert res == x or math.isnan(x) and math.isnan(res) + def test_untyped_storage(self): + class A(object): + def __init__(self, v): + self.v = v + def f(x): + storage = rerased_raw.UntypedStorage("io") + storage.setint(0, x) + storage.setinstance(1, A(x * 10)) + return storage.getint(0) + storage.getinstance(1, A).v + res = self.interp_operations(f, [5]) + assert res == 55 class TestLLtype(BaseLLtypeTests, LLJitMixin): def test_tagged(self): @@ -3933,14 +3944,3 @@ self.interp_operations(f, [1, 2, 3]) self.check_operations_history(call=1, guard_no_exception=0) - def test_untyped_storage(self): - class A(object): - def __init__(self, v): - self.v = v - def f(x): - storage = rerased_raw.UntypedStorage("io") - 
storage.setint(0, x) - storage.setinstance(1, A(x * 10)) - return storage.getint(0) + storage.getinstance(1, A).v - res = self.interp_operations(f, [5]) - assert res == 55 \ No newline at end of file From noreply at buildbot.pypy.org Fri Mar 30 09:42:02 2012 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 30 Mar 2012 09:42:02 +0200 (CEST) Subject: [pypy-commit] pypy default: Print the revision in addition to the branch name. Message-ID: <20120330074202.8CB4A82252@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r54090:45e80fe76457 Date: 2012-03-29 16:49 +0200 http://bitbucket.org/pypy/pypy/changeset/45e80fe76457/ Log: Print the revision in addition to the branch name. diff --git a/pypy/tool/clean_old_branches.py b/pypy/tool/clean_old_branches.py --- a/pypy/tool/clean_old_branches.py +++ b/pypy/tool/clean_old_branches.py @@ -38,7 +38,7 @@ closed_heads.reverse() for head, branch in closed_heads: - print '\t', branch + print '\t', head, '\t', branch print print 'The branches listed above will be merged to "closed-branches".' print 'You need to run this script in a clean working copy where you' From noreply at buildbot.pypy.org Fri Mar 30 09:42:03 2012 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 30 Mar 2012 09:42:03 +0200 (CEST) Subject: [pypy-commit] pypy default: Optimization for the JIT: do not escape the frame when seeing code that Message-ID: <20120330074203.D960F82252@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r54091:42c400cba53a Date: 2012-03-30 01:17 +0200 http://bitbucket.org/pypy/pypy/changeset/42c400cba53a/ Log: Optimization for the JIT: do not escape the frame when seeing code that reads e.g. 'sys.exc_info()[1]' or 'sys.exc_info()[:2]'. The frame would escape only if we read the last item of the returned tuple. Lots of tweaks needed, but at least in the simple cases it should work. 
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -296,6 +296,7 @@ self.check_signal_action = None # changed by the signal module self.user_del_action = UserDelAction(self) self.frame_trace_action = FrameTraceAction(self) + self._code_of_sys_exc_info = None from pypy.interpreter.pycode import cpython_magic, default_magic self.our_magic = default_magic diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -154,6 +154,7 @@ #operationerr.print_detailed_traceback(self.space) def _convert_exc(self, operr): + # Only for the flow object space return operr def sys_exc_info(self): # attn: the result is not the wrapped sys.exc_info() !!! diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -113,6 +113,12 @@ from pypy.interpreter.pycode import PyCode code = self.getcode() # hook for the jit + # + if (jit.we_are_jitted() and code is self.space._code_of_sys_exc_info + and nargs == 0): + from pypy.module.sys.vm import exc_info_direct + return exc_info_direct(self.space, frame) + # fast_natural_arity = code.fast_natural_arity if nargs == fast_natural_arity: if nargs == 0: diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -874,6 +874,12 @@ fn.add_to_table() if gateway.as_classmethod: fn = ClassMethod(space.wrap(fn)) + # + from pypy.module.sys.vm import exc_info + if code._bltin is exc_info: + assert space._code_of_sys_exc_info is None + space._code_of_sys_exc_info = code + # return fn diff --git a/pypy/module/sys/test/test_sysmodule.py b/pypy/module/sys/test/test_sysmodule.py --- a/pypy/module/sys/test/test_sysmodule.py +++ b/pypy/module/sys/test/test_sysmodule.py @@ 
-595,3 +595,121 @@ assert len(frames) == 1 _, other_frame = frames.popitem() assert other_frame.f_code.co_name in ('other_thread', '?') + + +class AppTestSysExcInfoDirect: + + def setup_method(self, meth): + self.seen = [] + from pypy.module.sys import vm + def exc_info_with_tb(*args): + self.seen.append("n") # not optimized + return self.old[0](*args) + def exc_info_without_tb(*args): + self.seen.append("y") # optimized + return self.old[1](*args) + self.old = [vm.exc_info_with_tb, vm.exc_info_without_tb] + vm.exc_info_with_tb = exc_info_with_tb + vm.exc_info_without_tb = exc_info_without_tb + # + from pypy.rlib import jit + self.old2 = [jit.we_are_jitted] + jit.we_are_jitted = lambda: True + + def teardown_method(self, meth): + from pypy.module.sys import vm + from pypy.rlib import jit + vm.exc_info_with_tb = self.old[0] + vm.exc_info_without_tb = self.old[1] + jit.we_are_jitted = self.old2[0] + # + assert ''.join(self.seen) == meth.expected + + def test_returns_none(self): + import sys + assert sys.exc_info() == (None, None, None) + assert sys.exc_info()[0] is None + assert sys.exc_info()[1] is None + assert sys.exc_info()[2] is None + assert sys.exc_info()[:2] == (None, None) + assert sys.exc_info()[:3] == (None, None, None) + assert sys.exc_info()[0:2] == (None, None) + assert sys.exc_info()[2:4] == (None,) + test_returns_none.expected = 'nnnnnnnn' + + def test_returns_subscr(self): + import sys + e = KeyError("boom") + try: + raise e + except: + assert sys.exc_info()[0] is KeyError # y + assert sys.exc_info()[1] is e # y + assert sys.exc_info()[2] is not None # n + assert sys.exc_info()[-3] is KeyError # y + assert sys.exc_info()[-2] is e # y + assert sys.exc_info()[-1] is not None # n + test_returns_subscr.expected = 'yynyyn' + + def test_returns_slice_2(self): + import sys + e = KeyError("boom") + try: + raise e + except: + foo = sys.exc_info() # n + assert sys.exc_info()[:0] == () # y + assert sys.exc_info()[:1] == foo[:1] # y + assert sys.exc_info()[:2] 
== foo[:2] # y + assert sys.exc_info()[:3] == foo # n + assert sys.exc_info()[:4] == foo # n + assert sys.exc_info()[:-1] == foo[:2] # y + assert sys.exc_info()[:-2] == foo[:1] # y + assert sys.exc_info()[:-3] == () # y + test_returns_slice_2.expected = 'nyyynnyyy' + + def test_returns_slice_3(self): + import sys + e = KeyError("boom") + try: + raise e + except: + foo = sys.exc_info() # n + assert sys.exc_info()[2:2] == () # y + assert sys.exc_info()[0:1] == foo[:1] # y + assert sys.exc_info()[1:2] == foo[1:2] # y + assert sys.exc_info()[0:3] == foo # n + assert sys.exc_info()[2:4] == foo[2:] # n + assert sys.exc_info()[0:-1] == foo[:2] # y + assert sys.exc_info()[0:-2] == foo[:1] # y + assert sys.exc_info()[5:-3] == () # y + test_returns_slice_3.expected = 'nyyynnyyy' + + def test_strange_invocation(self): + import sys + e = KeyError("boom") + try: + raise e + except: + a = []; k = {} + assert sys.exc_info(*a)[:0] == () + assert sys.exc_info(**k)[:0] == () + test_strange_invocation.expected = 'nn' + + def test_call_in_subfunction(self): + import sys + def g(): + # this case is not optimized, because we need to search the + # frame chain. 
it's probably not worth the complications + return sys.exc_info()[1] + e = KeyError("boom") + try: + raise e + except: + assert g() is e + test_call_in_subfunction.expected = 'n' + + +class AppTestSysExcInfoDirectCallMethod(AppTestSysExcInfoDirect): + def setup_class(cls): + cls.space = gettestobjspace(**{"objspace.opcodes.CALL_METHOD": True}) diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py --- a/pypy/module/sys/vm.py +++ b/pypy/module/sys/vm.py @@ -89,6 +89,9 @@ """Return the (type, value, traceback) of the most recent exception caught by an except clause in the current stack frame or in an older stack frame.""" + return exc_info_with_tb(space) # indirection for the tests + +def exc_info_with_tb(space): operror = space.getexecutioncontext().sys_exc_info() if operror is None: return space.newtuple([space.w_None,space.w_None,space.w_None]) @@ -96,6 +99,59 @@ return space.newtuple([operror.w_type, operror.get_w_value(space), space.wrap(operror.get_traceback())]) +def exc_info_without_tb(space, frame): + operror = frame.last_exception + return space.newtuple([operror.w_type, operror.get_w_value(space), + space.w_None]) + +def exc_info_direct(space, frame): + from pypy.tool import stdlib_opcode + # In order to make the JIT happy, we try to return (exc, val, None) + # instead of (exc, val, tb). 
We can do that only if we recognize + # the following pattern in the bytecode: + # CALL_FUNCTION/CALL_METHOD <-- invoking me + # LOAD_CONST 0, 1, -2 or -3 + # BINARY_SUBSCR + # or: + # CALL_FUNCTION/CALL_METHOD + # LOAD_CONST <=2 + # SLICE_2 + # or: + # CALL_FUNCTION/CALL_METHOD + # LOAD_CONST any integer + # LOAD_CONST <=2 + # SLICE_3 + need_all_three_args = True + co = frame.getcode().co_code + p = frame.last_instr + if (ord(co[p]) == stdlib_opcode.CALL_FUNCTION or + ord(co[p]) == stdlib_opcode.CALL_METHOD): + if ord(co[p+3]) == stdlib_opcode.LOAD_CONST: + lo = ord(co[p+4]) + hi = ord(co[p+5]) + w_constant = frame.getconstant_w((hi * 256) | lo) + if space.isinstance_w(w_constant, space.w_int): + constant = space.int_w(w_constant) + if ord(co[p+6]) == stdlib_opcode.BINARY_SUBSCR: + if -3 <= constant <= 1 and constant != -1: + need_all_three_args = False + elif ord(co[p+6]) == stdlib_opcode.SLICE+2: + if constant <= 2: + need_all_three_args = False + elif (ord(co[p+6]) == stdlib_opcode.LOAD_CONST and + ord(co[p+9]) == stdlib_opcode.SLICE+3): + lo = ord(co[p+7]) + hi = ord(co[p+8]) + w_constant = frame.getconstant_w((hi * 256) | lo) + if space.isinstance_w(w_constant, space.w_int): + if space.int_w(w_constant) <= 2: + need_all_three_args = False + # + if need_all_three_args or frame.last_exception is None or frame.hide(): + return exc_info_with_tb(space) + else: + return exc_info_without_tb(space, frame) + def exc_clear(space): """Clear global information on the current exception. Subsequent calls to exc_info() will return (None,None,None) until another exception is From noreply at buildbot.pypy.org Fri Mar 30 09:42:05 2012 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 30 Mar 2012 09:42:05 +0200 (CEST) Subject: [pypy-commit] pypy default: Add a test for 42c400cba53a. 
Message-ID: <20120330074205.2DF3782252@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r54092:30ba296a852d Date: 2012-03-30 09:37 +0200 http://bitbucket.org/pypy/pypy/changeset/30ba296a852d/ Log: Add a test for 42c400cba53a. diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -351,3 +351,23 @@ # the following assertion fails if the loop was cancelled due # to "abort: vable escape" assert len(log.loops_by_id("eval")) == 1 + + def test_sys_exc_info(self): + def main(): + i = 1 + lst = [i] + while i < 1000: + try: + return lst[i] + except: + e = sys.exc_info()[1] # ID: exc_info + if not isinstance(e, IndexError): + raise + i += 1 + return 42 + + log = self.run(main) + assert log.result == 42 + # the following assertion fails if the loop was cancelled due + # to "abort: vable escape" + assert len(log.loops_by_id("exc_info")) == 1 From noreply at buildbot.pypy.org Fri Mar 30 09:42:06 2012 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 30 Mar 2012 09:42:06 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20120330074206.DF63F82252@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r54093:849a04adb9cb Date: 2012-03-30 09:40 +0200 http://bitbucket.org/pypy/pypy/changeset/849a04adb9cb/ Log: merge heads diff --git a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py --- a/pypy/module/cpyext/bufferobject.py +++ b/pypy/module/cpyext/bufferobject.py @@ -4,6 +4,8 @@ PyObjectFields, PyObject) from pypy.module.cpyext.pyobject import make_typedescr, Py_DecRef from pypy.interpreter.buffer import Buffer, StringBuffer, SubBuffer +from pypy.interpreter.error import OperationError +from pypy.module.array.interp_array import ArrayBuffer PyBufferObjectStruct = lltype.ForwardReference() @@ -43,10 +45,15 @@ if isinstance(w_obj, StringBuffer): 
py_buf.c_b_base = rffi.cast(PyObject, 0) # space.wrap(w_obj.value) - py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, rffi.str2charp(w_obj.as_str())) + py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, rffi.str2charp(w_obj.value)) + py_buf.c_b_size = w_obj.getlength() + elif isinstance(w_obj, ArrayBuffer): + py_buf.c_b_base = rffi.cast(PyObject, 0) # space.wrap(w_obj.value) + py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, w_obj.data) py_buf.c_b_size = w_obj.getlength() else: - raise Exception("Fail fail fail fail fail") + raise OperationError(space.w_NotImplementedError, space.wrap( + "buffer flavor not supported")) def buffer_realize(space, py_obj): diff --git a/pypy/module/cpyext/test/test_bufferobject.py b/pypy/module/cpyext/test/test_bufferobject.py --- a/pypy/module/cpyext/test/test_bufferobject.py +++ b/pypy/module/cpyext/test/test_bufferobject.py @@ -48,3 +48,17 @@ ]) b = module.buffer_new() raises(AttributeError, getattr, b, 'x') + + def test_array_buffer(self): + module = self.import_extension('foo', [ + ("roundtrip", "METH_O", + """ + PyBufferObject *buf = (PyBufferObject *)args; + return PyString_FromStringAndSize(buf->b_ptr, buf->b_size); + """), + ]) + import array + a = array.array('c', 'text') + b = buffer(a) + assert module.roundtrip(b) == 'text' + diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -99,6 +99,8 @@ ("exp2", "exp2"), ("expm1", "expm1"), ("fabs", "fabs"), + ("fmax", "fmax"), + ("fmin", "fmin"), ("fmod", "fmod"), ("floor", "floor"), ("ceil", "ceil"), @@ -122,12 +124,14 @@ ("sinh", "sinh"), ("subtract", "subtract"), ('sqrt', 'sqrt'), + ('square', 'square'), ("tan", "tan"), ("tanh", "tanh"), ('bitwise_and', 'bitwise_and'), ('bitwise_or', 'bitwise_or'), ('bitwise_xor', 'bitwise_xor'), ('bitwise_not', 'invert'), + ('invert', 'invert'), ('isnan', 'isnan'), ('isinf', 'isinf'), ('isneginf', 'isneginf'), diff --git a/pypy/module/micronumpy/interp_ufuncs.py 
b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -541,6 +541,8 @@ ("reciprocal", "reciprocal", 1), ("fabs", "fabs", 1, {"promote_to_float": True}), + ("fmax", "fmax", 2, {"promote_to_float": True}), + ("fmin", "fmin", 2, {"promote_to_float": True}), ("fmod", "fmod", 2, {"promote_to_float": True}), ("floor", "floor", 1, {"promote_to_float": True}), ("ceil", "ceil", 1, {"promote_to_float": True}), @@ -549,6 +551,7 @@ ("expm1", "expm1", 1, {"promote_to_float": True}), ('sqrt', 'sqrt', 1, {'promote_to_float': True}), + ('square', 'square', 1, {'promote_to_float': True}), ("sin", "sin", 1, {"promote_to_float": True}), ("cos", "cos", 1, {"promote_to_float": True}), diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -135,6 +135,38 @@ assert fabs(float('-inf')) == float('inf') assert isnan(fabs(float('nan'))) + def test_fmax(self): + from _numpypy import fmax + import math + + nnan, nan, inf, ninf = float('-nan'), float('nan'), float('inf'), float('-inf') + + a = [ninf, -5, 0, 5, inf] + assert (fmax(a, [ninf]*5) == a).all() + assert (fmax(a, [inf]*5) == [inf]*5).all() + assert (fmax(a, [1]*5) == [1, 1, 1, 5, inf]).all() + assert math.isnan(fmax(nan, 0)) + assert math.isnan(fmax(0, nan)) + assert math.isnan(fmax(nan, nan)) + # The numpy docs specify that the FIRST NaN should be used if both are NaN + assert math.copysign(1.0, fmax(nnan, nan)) == -1.0 + + def test_fmin(self): + from _numpypy import fmin + import math + + nnan, nan, inf, ninf = float('-nan'), float('nan'), float('inf'), float('-inf') + + a = [ninf, -5, 0, 5, inf] + assert (fmin(a, [ninf]*5) == [ninf]*5).all() + assert (fmin(a, [inf]*5) == a).all() + assert (fmin(a, [1]*5) == [ninf, -5, 0, 1, 1]).all() + assert math.isnan(fmin(nan, 0)) + assert math.isnan(fmin(0, nan)) + 
assert math.isnan(fmin(nan, nan)) + # The numpy docs specify that the FIRST NaN should be used if both are NaN + assert math.copysign(1.0, fmin(nnan, nan)) == -1.0 + def test_fmod(self): from _numpypy import fmod import math @@ -455,6 +487,19 @@ assert math.isnan(sqrt(-1)) assert math.isnan(sqrt(nan)) + def test_square(self): + import math + from _numpypy import square + + nan, inf, ninf = float("nan"), float("inf"), float("-inf") + + assert math.isnan(square(nan)) + assert math.isinf(square(inf)) + assert math.isinf(square(ninf)) + assert square(ninf) > 0 + assert [square(x) for x in range(-5, 5)] == [x*x for x in range(-5, 5)] + assert math.isinf(square(1e300)) + def test_radians(self): import math from _numpypy import radians, array @@ -546,10 +591,11 @@ raises(TypeError, 'array([1.0]) & 1') def test_unary_bitops(self): - from _numpypy import bitwise_not, array + from _numpypy import bitwise_not, invert, array a = array([1, 2, 3, 4]) assert (~a == [-2, -3, -4, -5]).all() assert (bitwise_not(a) == ~a).all() + assert (invert(a) == ~a).all() def test_comparisons(self): import operator diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -631,6 +631,22 @@ return math.fabs(v) @simple_binary_op + def fmax(self, v1, v2): + if math.isnan(v1): + return v1 + elif math.isnan(v2): + return v2 + return max(v1, v2) + + @simple_binary_op + def fmin(self, v1, v2): + if math.isnan(v1): + return v1 + elif math.isnan(v2): + return v2 + return min(v1, v2) + + @simple_binary_op def fmod(self, v1, v2): try: return math.fmod(v1, v2) @@ -741,6 +757,10 @@ except ValueError: return rfloat.NAN + @simple_unary_op + def square(self, v): + return v*v + @raw_unary_op def isnan(self, v): return rfloat.isnan(v) From noreply at buildbot.pypy.org Fri Mar 30 12:17:02 2012 From: noreply at buildbot.pypy.org (ctismer) Date: Fri, 30 Mar 2012 12:17:02 +0200 (CEST) Subject: [pypy-commit] pypy 
win64-stage1: hg merge default Message-ID: <20120330101702.8F2B282252@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r54094:f8828970a964 Date: 2012-03-30 12:16 +0200 http://bitbucket.org/pypy/pypy/changeset/f8828970a964/ Log: hg merge default diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -149,6 +149,22 @@ exported. This would give us a one-size-fits-all generic .so file to be imported by any application that wants to load .so files :-) +Optimising cpyext (CPython C-API compatibility layer) +----------------------------------------------------- + +A lot of work has gone into PyPy's implementation of CPython's C-API over +the last years to let it reach a practical level of compatibility, so that +C extensions for CPython work on PyPy without major rewrites. However, +there are still many edges and corner cases where it misbehaves, and it has +not received any substantial optimisation so far. + +The objective of this project is to fix bugs in cpyext and to optimise +several performance critical parts of it, such as the reference counting +support and other heavily used C-API functions. The net result would be to +have CPython extensions run much faster on PyPy than they currently do, or +to make them work at all if they currently don't. A part of this work would +be to get cpyext into a shape where it supports running Cython generated +extensions. .. _`issue tracker`: http://bugs.pypy.org .. 
_`mailing list`: http://mail.python.org/mailman/listinfo/pypy-dev diff --git a/pypy/doc/stackless.rst b/pypy/doc/stackless.rst --- a/pypy/doc/stackless.rst +++ b/pypy/doc/stackless.rst @@ -199,17 +199,11 @@ The following features (present in some past Stackless version of PyPy) are for the time being not supported any more: -* Tasklets and channels (currently ``stackless.py`` seems to import, - but you have tasklets on top of coroutines on top of greenlets on - top of continulets on top of stacklets, and it's probably not too - hard to cut two of these levels by adapting ``stackless.py`` to - use directly continulets) - * Coroutines (could be rewritten at app-level) -* Pickling and unpickling continulets (*) - -* Continuing execution of a continulet in a different thread (*) +* Continuing execution of a continulet in a different thread + (but if it is "simple enough", you can pickle it and unpickle it + in the other thread). * Automatic unlimited stack (must be emulated__ so far) @@ -217,15 +211,6 @@ .. __: `recursion depth limit`_ -(*) Pickling, as well as changing threads, could be implemented by using -a "soft" stack switching mode again. We would get either "hard" or -"soft" switches, similarly to Stackless Python 3rd version: you get a -"hard" switch (like now) when the C stack contains non-trivial C frames -to save, and a "soft" switch (like previously) when it contains only -simple calls from Python to Python. Soft-switched continulets would -also consume a bit less RAM, and the switch might be a bit faster too -(unsure about that; what is the Stackless Python experience?). 
- Recursion depth limit +++++++++++++++++++++ diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -296,6 +296,7 @@ self.check_signal_action = None # changed by the signal module self.user_del_action = UserDelAction(self) self.frame_trace_action = FrameTraceAction(self) + self._code_of_sys_exc_info = None from pypy.interpreter.pycode import cpython_magic, default_magic self.our_magic = default_magic @@ -467,9 +468,9 @@ if name not in modules: modules.append(name) - # a bit of custom logic: time2 or rctime take precedence over time + # a bit of custom logic: rctime take precedence over time # XXX this could probably be done as a "requires" in the config - if ('time2' in modules or 'rctime' in modules) and 'time' in modules: + if 'rctime' in modules and 'time' in modules: modules.remove('time') if not self.config.objspace.nofaking: diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -154,6 +154,7 @@ #operationerr.print_detailed_traceback(self.space) def _convert_exc(self, operr): + # Only for the flow object space return operr def sys_exc_info(self): # attn: the result is not the wrapped sys.exc_info() !!! 
diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -113,6 +113,12 @@ from pypy.interpreter.pycode import PyCode code = self.getcode() # hook for the jit + # + if (jit.we_are_jitted() and code is self.space._code_of_sys_exc_info + and nargs == 0): + from pypy.module.sys.vm import exc_info_direct + return exc_info_direct(self.space, frame) + # fast_natural_arity = code.fast_natural_arity if nargs == fast_natural_arity: if nargs == 0: diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -874,6 +874,12 @@ fn.add_to_table() if gateway.as_classmethod: fn = ClassMethod(space.wrap(fn)) + # + from pypy.module.sys.vm import exc_info + if code._bltin is exc_info: + assert space._code_of_sys_exc_info is None + space._code_of_sys_exc_info = code + # return fn diff --git a/pypy/interpreter/pyparser/parsestring.py b/pypy/interpreter/pyparser/parsestring.py --- a/pypy/interpreter/pyparser/parsestring.py +++ b/pypy/interpreter/pyparser/parsestring.py @@ -2,34 +2,39 @@ from pypy.interpreter import unicodehelper from pypy.rlib.rstring import StringBuilder -def parsestr(space, encoding, s, unicode_literals=False): - # compiler.transformer.Transformer.decode_literal depends on what - # might seem like minor details of this function -- changes here - # must be reflected there. +def parsestr(space, encoding, s, unicode_literal=False): + """Parses a string or unicode literal, and return a wrapped value. + + If encoding=iso8859-1, the source string is also in this encoding. + If encoding=None, the source string is ascii only. + In other cases, the source string is in utf-8 encoding. + + When a bytes string is returned, it will be encoded with the + original encoding. + + Yes, it's very inefficient. + Yes, CPython has very similar code. 
+ """ # we use ps as "pointer to s" # q is the virtual last char index of the string ps = 0 quote = s[ps] rawmode = False - unicode = unicode_literals # string decoration handling - o = ord(quote) - isalpha = (o>=97 and o<=122) or (o>=65 and o<=90) - if isalpha or quote == '_': - if quote == 'b' or quote == 'B': - ps += 1 - quote = s[ps] - unicode = False - elif quote == 'u' or quote == 'U': - ps += 1 - quote = s[ps] - unicode = True - if quote == 'r' or quote == 'R': - ps += 1 - quote = s[ps] - rawmode = True + if quote == 'b' or quote == 'B': + ps += 1 + quote = s[ps] + unicode_literal = False + elif quote == 'u' or quote == 'U': + ps += 1 + quote = s[ps] + unicode_literal = True + if quote == 'r' or quote == 'R': + ps += 1 + quote = s[ps] + rawmode = True if quote != "'" and quote != '"': raise_app_valueerror(space, 'Internal error: parser passed unquoted literal') @@ -46,21 +51,28 @@ 'unmatched triple quotes in literal') q -= 2 - if unicode: # XXX Py_UnicodeFlag is ignored for now + if unicode_literal: # XXX Py_UnicodeFlag is ignored for now if encoding is None or encoding == "iso-8859-1": + # 'unicode_escape' expects latin-1 bytes, string is ready. buf = s bufp = ps bufq = q u = None else: - # "\XX" may become "\u005c\uHHLL" (12 bytes) + # String is utf8-encoded, but 'unicode_escape' expects + # latin-1; So multibyte sequences must be escaped. lis = [] # using a list to assemble the value end = q + # Worst case: "\XX" may become "\u005c\uHHLL" (12 bytes) while ps < end: if s[ps] == '\\': lis.append(s[ps]) ps += 1 if ord(s[ps]) & 0x80: + # A multibyte sequence will follow, it will be + # escaped like \u1234. To avoid confusion with + # the backslash we just wrote, we emit "\u005c" + # instead. 
lis.append("u005c") if ord(s[ps]) & 0x80: # XXX inefficient w, ps = decode_utf8(space, s, ps, end, "utf-16-be") @@ -86,13 +98,11 @@ need_encoding = (encoding is not None and encoding != "utf-8" and encoding != "iso-8859-1") - # XXX add strchr like interface to rtyper assert 0 <= ps <= q substr = s[ps : q] if rawmode or '\\' not in s[ps:]: if need_encoding: w_u = space.wrap(unicodehelper.PyUnicode_DecodeUTF8(space, substr)) - #w_v = space.wrap(space.unwrap(w_u).encode(encoding)) this works w_v = unicodehelper.PyUnicode_AsEncodedString(space, w_u, space.wrap(encoding)) return w_v else: diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -27,6 +27,12 @@ def constfloat(x): return ConstFloat(longlong.getfloatstorage(x)) +def boxlonglong(ll): + if longlong.is_64_bit: + return BoxInt(ll) + else: + return BoxFloat(ll) + class Runner(object): @@ -1623,6 +1629,11 @@ [boxfloat(2.5)], t).value assert res == longlong2float.float2longlong(2.5) + bytes = longlong2float.float2longlong(2.5) + res = self.execute_operation(rop.CONVERT_LONGLONG_BYTES_TO_FLOAT, + [boxlonglong(res)], 'float').value + assert longlong.getrealfloat(res) == 2.5 + def test_ooops_non_gc(self): x = lltype.malloc(lltype.Struct('x'), flavor='raw') v = heaptracker.adr2int(llmemory.cast_ptr_to_adr(x)) diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py --- a/pypy/jit/backend/test/test_random.py +++ b/pypy/jit/backend/test/test_random.py @@ -328,6 +328,15 @@ def produce_into(self, builder, r): self.put(builder, [r.choice(builder.intvars)]) +class CastLongLongToFloatOperation(AbstractFloatOperation): + def produce_into(self, builder, r): + if longlong.is_64_bit: + self.put(builder, [r.choice(builder.intvars)]) + else: + if not builder.floatvars: + raise CannotProduceOperation + self.put(builder, [r.choice(builder.floatvars)]) + class 
CastFloatToIntOperation(AbstractFloatOperation): def produce_into(self, builder, r): if not builder.floatvars: @@ -450,6 +459,7 @@ OPERATIONS.append(CastFloatToIntOperation(rop.CAST_FLOAT_TO_INT)) OPERATIONS.append(CastIntToFloatOperation(rop.CAST_INT_TO_FLOAT)) OPERATIONS.append(CastFloatToIntOperation(rop.CONVERT_FLOAT_BYTES_TO_LONGLONG)) +OPERATIONS.append(CastLongLongToFloatOperation(rop.CONVERT_LONGLONG_BYTES_TO_FLOAT)) OperationBuilder.OPERATIONS = OPERATIONS diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -1251,6 +1251,15 @@ else: self.mov(loc0, resloc) + def genop_convert_longlong_bytes_to_float(self, op, arglocs, resloc): + loc0, = arglocs + if longlong.is_64_bit: + assert isinstance(resloc, RegLoc) + assert isinstance(loc0, RegLoc) + self.mc.MOVD(resloc, loc0) + else: + self.mov(loc0, resloc) + def genop_guard_int_is_true(self, op, guard_op, guard_token, arglocs, resloc): guard_opnum = guard_op.getopnum() self.mc.CMP(arglocs[0], imm0) diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -773,10 +773,24 @@ self.Perform(op, [loc0], loc1) self.xrm.possibly_free_var(op.getarg(0)) else: - loc0 = self.xrm.loc(op.getarg(0)) + arg0 = op.getarg(0) + loc0 = self.xrm.loc(arg0) + loc1 = self.xrm.force_allocate_reg(op.result, forbidden_vars=[arg0]) + self.Perform(op, [loc0], loc1) + self.xrm.possibly_free_var(arg0) + + def consider_convert_longlong_bytes_to_float(self, op): + if longlong.is_64_bit: + loc0 = self.rm.make_sure_var_in_reg(op.getarg(0)) loc1 = self.xrm.force_allocate_reg(op.result) self.Perform(op, [loc0], loc1) - self.xrm.possibly_free_var(op.getarg(0)) + self.rm.possibly_free_var(op.getarg(0)) + else: + arg0 = op.getarg(0) + loc0 = self.xrm.make_sure_var_in_reg(arg0) + loc1 = self.xrm.force_allocate_reg(op.result, 
forbidden_vars=[arg0]) + self.Perform(op, [loc0], loc1) + self.xrm.possibly_free_var(arg0) def _consider_llong_binop_xx(self, op): # must force both arguments into xmm registers, because we don't diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -295,6 +295,7 @@ return op rewrite_op_convert_float_bytes_to_longlong = _noop_rewrite + rewrite_op_convert_longlong_bytes_to_float = _noop_rewrite # ---------- # Various kinds of calls diff --git a/pypy/jit/codewriter/test/test_flatten.py b/pypy/jit/codewriter/test/test_flatten.py --- a/pypy/jit/codewriter/test/test_flatten.py +++ b/pypy/jit/codewriter/test/test_flatten.py @@ -968,20 +968,22 @@ int_return %i2 """, transform=True) - def test_convert_float_bytes_to_int(self): - from pypy.rlib.longlong2float import float2longlong + def test_convert_float_bytes(self): + from pypy.rlib.longlong2float import float2longlong, longlong2float def f(x): - return float2longlong(x) + ll = float2longlong(x) + return longlong2float(ll) if longlong.is_64_bit: - result_var = "%i0" - return_op = "int_return" + tmp_var = "%i0" + result_var = "%f1" else: - result_var = "%f1" - return_op = "float_return" + tmp_var = "%f1" + result_var = "%f2" self.encoding_test(f, [25.0], """ - convert_float_bytes_to_longlong %%f0 -> %(result_var)s - %(return_op)s %(result_var)s - """ % {"result_var": result_var, "return_op": return_op}) + convert_float_bytes_to_longlong %%f0 -> %(tmp_var)s + convert_longlong_bytes_to_float %(tmp_var)s -> %(result_var)s + float_return %(result_var)s + """ % {"result_var": result_var, "tmp_var": tmp_var}, transform=True) def check_force_cast(FROM, TO, operations, value): diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -672,6 +672,11 @@ a = longlong.getrealfloat(a) return longlong2float.float2longlong(a) 
+ @arguments(LONGLONG_TYPECODE, returns="f") + def bhimpl_convert_longlong_bytes_to_float(a): + a = longlong2float.longlong2float(a) + return longlong.getfloatstorage(a) + # ---------- # control flow operations diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -224,6 +224,7 @@ 'float_neg', 'float_abs', 'cast_ptr_to_int', 'cast_int_to_ptr', 'convert_float_bytes_to_longlong', + 'convert_longlong_bytes_to_float', ]: exec py.code.Source(''' @arguments("box") diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -420,6 +420,7 @@ 'CAST_FLOAT_TO_SINGLEFLOAT/1', 'CAST_SINGLEFLOAT_TO_FLOAT/1', 'CONVERT_FLOAT_BYTES_TO_LONGLONG/1', + 'CONVERT_LONGLONG_BYTES_TO_FLOAT/1', # 'INT_LT/2b', 'INT_LE/2b', diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -1,3 +1,4 @@ +import math import sys import py @@ -15,7 +16,7 @@ loop_invariant, elidable, promote, jit_debug, assert_green, AssertGreenFailed, unroll_safe, current_trace_length, look_inside_iff, isconstant, isvirtual, promote_string, set_param, record_known_class) -from pypy.rlib.longlong2float import float2longlong +from pypy.rlib.longlong2float import float2longlong, longlong2float from pypy.rlib.rarithmetic import ovfcheck, is_valid_int from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rpython.ootypesystem import ootype @@ -3795,15 +3796,15 @@ res = self.interp_operations(g, [1]) assert res == 3 - def test_float2longlong(self): + def test_float_bytes(self): def f(n): - return float2longlong(n) + ll = float2longlong(n) + return longlong2float(ll) for x in [2.5, float("nan"), -2.5, float("inf")]: # There are tests elsewhere to verify the correctness of this. 
- expected = float2longlong(x) res = self.interp_operations(f, [x]) - assert longlong.getfloatstorage(res) == expected + assert res == x or math.isnan(x) and math.isnan(res) class TestLLtype(BaseLLtypeTests, LLJitMixin): diff --git a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py --- a/pypy/module/cpyext/bufferobject.py +++ b/pypy/module/cpyext/bufferobject.py @@ -4,6 +4,8 @@ PyObjectFields, PyObject) from pypy.module.cpyext.pyobject import make_typedescr, Py_DecRef from pypy.interpreter.buffer import Buffer, StringBuffer, SubBuffer +from pypy.interpreter.error import OperationError +from pypy.module.array.interp_array import ArrayBuffer PyBufferObjectStruct = lltype.ForwardReference() @@ -43,10 +45,15 @@ if isinstance(w_obj, StringBuffer): py_buf.c_b_base = rffi.cast(PyObject, 0) # space.wrap(w_obj.value) - py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, rffi.str2charp(w_obj.as_str())) + py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, rffi.str2charp(w_obj.value)) + py_buf.c_b_size = w_obj.getlength() + elif isinstance(w_obj, ArrayBuffer): + py_buf.c_b_base = rffi.cast(PyObject, 0) # space.wrap(w_obj.value) + py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, w_obj.data) py_buf.c_b_size = w_obj.getlength() else: - raise Exception("Fail fail fail fail fail") + raise OperationError(space.w_NotImplementedError, space.wrap( + "buffer flavor not supported")) def buffer_realize(space, py_obj): diff --git a/pypy/module/cpyext/test/test_bufferobject.py b/pypy/module/cpyext/test/test_bufferobject.py --- a/pypy/module/cpyext/test/test_bufferobject.py +++ b/pypy/module/cpyext/test/test_bufferobject.py @@ -48,3 +48,17 @@ ]) b = module.buffer_new() raises(AttributeError, getattr, b, 'x') + + def test_array_buffer(self): + module = self.import_extension('foo', [ + ("roundtrip", "METH_O", + """ + PyBufferObject *buf = (PyBufferObject *)args; + return PyString_FromStringAndSize(buf->b_ptr, buf->b_size); + """), + ]) + import array + a = array.array('c', 'text') + b = buffer(a) + assert 
module.roundtrip(b) == 'text' + diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -99,6 +99,8 @@ ("exp2", "exp2"), ("expm1", "expm1"), ("fabs", "fabs"), + ("fmax", "fmax"), + ("fmin", "fmin"), ("fmod", "fmod"), ("floor", "floor"), ("ceil", "ceil"), @@ -122,12 +124,14 @@ ("sinh", "sinh"), ("subtract", "subtract"), ('sqrt', 'sqrt'), + ('square', 'square'), ("tan", "tan"), ("tanh", "tanh"), ('bitwise_and', 'bitwise_and'), ('bitwise_or', 'bitwise_or'), ('bitwise_xor', 'bitwise_xor'), ('bitwise_not', 'invert'), + ('invert', 'invert'), ('isnan', 'isnan'), ('isinf', 'isinf'), ('isneginf', 'isneginf'), diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -3,9 +3,11 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec, NoneNotWrapped from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty from pypy.module.micronumpy import interp_boxes, interp_dtype, support, loop +from pypy.rlib import jit from pypy.rlib.rarithmetic import LONG_BIT from pypy.tool.sourcetools import func_with_new_name + class W_Ufunc(Wrappable): _attrs_ = ["name", "promote_to_float", "promote_bools", "identity"] _immutable_fields_ = ["promote_to_float", "promote_bools", "name"] @@ -28,7 +30,7 @@ return self.identity def descr_call(self, space, __args__): - from interp_numarray import BaseArray + from interp_numarray import BaseArray args_w, kwds_w = __args__.unpack() # it occurs to me that we don't support any datatypes that # require casting, change it later when we do @@ -179,7 +181,7 @@ elif out.shape != shape: raise operationerrfmt(space.w_ValueError, 'output parameter shape mismatch, expecting [%s]' + - ' , got [%s]', + ' , got [%s]', ",".join([str(x) for x in shape]), ",".join([str(x) for x in out.shape]), ) @@ 
-204,7 +206,7 @@ else: arr = ReduceArray(self.func, self.name, self.identity, obj, dtype) val = loop.compute(arr) - return val + return val def do_axis_reduce(self, obj, dtype, axis, result): from pypy.module.micronumpy.interp_numarray import AxisReduce @@ -253,7 +255,7 @@ if isinstance(w_obj, Scalar): arr = self.func(calc_dtype, w_obj.value.convert_to(calc_dtype)) if isinstance(out,Scalar): - out.value=arr + out.value = arr elif isinstance(out, BaseArray): out.fill(space, arr) else: @@ -265,7 +267,7 @@ if not broadcast_shape or broadcast_shape != out.shape: raise operationerrfmt(space.w_ValueError, 'output parameter shape mismatch, could not broadcast [%s]' + - ' to [%s]', + ' to [%s]', ",".join([str(x) for x in w_obj.shape]), ",".join([str(x) for x in out.shape]), ) @@ -292,10 +294,11 @@ self.func = func self.comparison_func = comparison_func + @jit.unroll_safe def call(self, space, args_w): from pypy.module.micronumpy.interp_numarray import (Call2, convert_to_array, Scalar, shape_agreement, BaseArray) - if len(args_w)>2: + if len(args_w) > 2: [w_lhs, w_rhs, w_out] = args_w else: [w_lhs, w_rhs] = args_w @@ -326,7 +329,7 @@ w_rhs.value.convert_to(calc_dtype) ) if isinstance(out,Scalar): - out.value=arr + out.value = arr elif isinstance(out, BaseArray): out.fill(space, arr) else: @@ -337,7 +340,7 @@ if out and out.shape != shape_agreement(space, new_shape, out.shape): raise operationerrfmt(space.w_ValueError, 'output parameter shape mismatch, could not broadcast [%s]' + - ' to [%s]', + ' to [%s]', ",".join([str(x) for x in new_shape]), ",".join([str(x) for x in out.shape]), ) @@ -347,7 +350,6 @@ w_lhs.add_invalidates(w_res) w_rhs.add_invalidates(w_res) if out: - #out.add_invalidates(w_res) #causes a recursion loop w_res.get_concrete() return w_res @@ -539,6 +541,8 @@ ("reciprocal", "reciprocal", 1), ("fabs", "fabs", 1, {"promote_to_float": True}), + ("fmax", "fmax", 2, {"promote_to_float": True}), + ("fmin", "fmin", 2, {"promote_to_float": True}), ("fmod", "fmod", 
2, {"promote_to_float": True}), ("floor", "floor", 1, {"promote_to_float": True}), ("ceil", "ceil", 1, {"promote_to_float": True}), @@ -547,6 +551,7 @@ ("expm1", "expm1", 1, {"promote_to_float": True}), ('sqrt', 'sqrt', 1, {'promote_to_float': True}), + ('square', 'square', 1, {'promote_to_float': True}), ("sin", "sin", 1, {"promote_to_float": True}), ("cos", "cos", 1, {"promote_to_float": True}), diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -1,6 +1,7 @@ from pypy.rlib import jit from pypy.interpreter.error import OperationError + at jit.look_inside_iff(lambda chunks: jit.isconstant(len(chunks))) def enumerate_chunks(chunks): result = [] i = -1 @@ -85,9 +86,9 @@ space.isinstance_w(w_item_or_slice, space.w_slice)): raise OperationError(space.w_IndexError, space.wrap('unsupported iterator index')) - + start, stop, step, lngth = space.decode_index4(w_item_or_slice, size) - + coords = [0] * len(shape) i = start if order == 'C': diff --git a/pypy/module/micronumpy/support.py b/pypy/module/micronumpy/support.py --- a/pypy/module/micronumpy/support.py +++ b/pypy/module/micronumpy/support.py @@ -1,3 +1,7 @@ +from pypy.rlib import jit + + + at jit.look_inside_iff(lambda s: jit.isconstant(len(s))) def product(s): i = 1 for x in s: diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -135,6 +135,38 @@ assert fabs(float('-inf')) == float('inf') assert isnan(fabs(float('nan'))) + def test_fmax(self): + from _numpypy import fmax + import math + + nnan, nan, inf, ninf = float('-nan'), float('nan'), float('inf'), float('-inf') + + a = [ninf, -5, 0, 5, inf] + assert (fmax(a, [ninf]*5) == a).all() + assert (fmax(a, [inf]*5) == [inf]*5).all() + assert (fmax(a, [1]*5) == [1, 1, 1, 5, inf]).all() + assert 
math.isnan(fmax(nan, 0)) + assert math.isnan(fmax(0, nan)) + assert math.isnan(fmax(nan, nan)) + # The numpy docs specify that the FIRST NaN should be used if both are NaN + assert math.copysign(1.0, fmax(nnan, nan)) == -1.0 + + def test_fmin(self): + from _numpypy import fmin + import math + + nnan, nan, inf, ninf = float('-nan'), float('nan'), float('inf'), float('-inf') + + a = [ninf, -5, 0, 5, inf] + assert (fmin(a, [ninf]*5) == [ninf]*5).all() + assert (fmin(a, [inf]*5) == a).all() + assert (fmin(a, [1]*5) == [ninf, -5, 0, 1, 1]).all() + assert math.isnan(fmin(nan, 0)) + assert math.isnan(fmin(0, nan)) + assert math.isnan(fmin(nan, nan)) + # The numpy docs specify that the FIRST NaN should be used if both are NaN + assert math.copysign(1.0, fmin(nnan, nan)) == -1.0 + def test_fmod(self): from _numpypy import fmod import math @@ -455,6 +487,19 @@ assert math.isnan(sqrt(-1)) assert math.isnan(sqrt(nan)) + def test_square(self): + import math + from _numpypy import square + + nan, inf, ninf = float("nan"), float("inf"), float("-inf") + + assert math.isnan(square(nan)) + assert math.isinf(square(inf)) + assert math.isinf(square(ninf)) + assert square(ninf) > 0 + assert [square(x) for x in range(-5, 5)] == [x*x for x in range(-5, 5)] + assert math.isinf(square(1e300)) + def test_radians(self): import math from _numpypy import radians, array @@ -546,10 +591,11 @@ raises(TypeError, 'array([1.0]) & 1') def test_unary_bitops(self): - from _numpypy import bitwise_not, array + from _numpypy import bitwise_not, invert, array a = array([1, 2, 3, 4]) assert (~a == [-2, -3, -4, -5]).all() assert (bitwise_not(a) == ~a).all() + assert (invert(a) == ~a).all() def test_comparisons(self): import operator diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -631,6 +631,22 @@ return math.fabs(v) @simple_binary_op + def fmax(self, v1, v2): + if math.isnan(v1): + return v1 + elif 
math.isnan(v2): + return v2 + return max(v1, v2) + + @simple_binary_op + def fmin(self, v1, v2): + if math.isnan(v1): + return v1 + elif math.isnan(v2): + return v2 + return min(v1, v2) + + @simple_binary_op def fmod(self, v1, v2): try: return math.fmod(v1, v2) @@ -741,6 +757,10 @@ except ValueError: return rfloat.NAN + @simple_unary_op + def square(self, v): + return v*v + @raw_unary_op def isnan(self, v): return rfloat.isnan(v) diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -351,3 +351,23 @@ # the following assertion fails if the loop was cancelled due # to "abort: vable escape" assert len(log.loops_by_id("eval")) == 1 + + def test_sys_exc_info(self): + def main(): + i = 1 + lst = [i] + while i < 1000: + try: + return lst[i] + except: + e = sys.exc_info()[1] # ID: exc_info + if not isinstance(e, IndexError): + raise + i += 1 + return 42 + + log = self.run(main) + assert log.result == 42 + # the following assertion fails if the loop was cancelled due + # to "abort: vable escape" + assert len(log.loops_by_id("exc_info")) == 1 diff --git a/pypy/module/sys/test/test_sysmodule.py b/pypy/module/sys/test/test_sysmodule.py --- a/pypy/module/sys/test/test_sysmodule.py +++ b/pypy/module/sys/test/test_sysmodule.py @@ -595,3 +595,121 @@ assert len(frames) == 1 _, other_frame = frames.popitem() assert other_frame.f_code.co_name in ('other_thread', '?') + + +class AppTestSysExcInfoDirect: + + def setup_method(self, meth): + self.seen = [] + from pypy.module.sys import vm + def exc_info_with_tb(*args): + self.seen.append("n") # not optimized + return self.old[0](*args) + def exc_info_without_tb(*args): + self.seen.append("y") # optimized + return self.old[1](*args) + self.old = [vm.exc_info_with_tb, vm.exc_info_without_tb] + vm.exc_info_with_tb = exc_info_with_tb + vm.exc_info_without_tb = exc_info_without_tb + # + 
from pypy.rlib import jit + self.old2 = [jit.we_are_jitted] + jit.we_are_jitted = lambda: True + + def teardown_method(self, meth): + from pypy.module.sys import vm + from pypy.rlib import jit + vm.exc_info_with_tb = self.old[0] + vm.exc_info_without_tb = self.old[1] + jit.we_are_jitted = self.old2[0] + # + assert ''.join(self.seen) == meth.expected + + def test_returns_none(self): + import sys + assert sys.exc_info() == (None, None, None) + assert sys.exc_info()[0] is None + assert sys.exc_info()[1] is None + assert sys.exc_info()[2] is None + assert sys.exc_info()[:2] == (None, None) + assert sys.exc_info()[:3] == (None, None, None) + assert sys.exc_info()[0:2] == (None, None) + assert sys.exc_info()[2:4] == (None,) + test_returns_none.expected = 'nnnnnnnn' + + def test_returns_subscr(self): + import sys + e = KeyError("boom") + try: + raise e + except: + assert sys.exc_info()[0] is KeyError # y + assert sys.exc_info()[1] is e # y + assert sys.exc_info()[2] is not None # n + assert sys.exc_info()[-3] is KeyError # y + assert sys.exc_info()[-2] is e # y + assert sys.exc_info()[-1] is not None # n + test_returns_subscr.expected = 'yynyyn' + + def test_returns_slice_2(self): + import sys + e = KeyError("boom") + try: + raise e + except: + foo = sys.exc_info() # n + assert sys.exc_info()[:0] == () # y + assert sys.exc_info()[:1] == foo[:1] # y + assert sys.exc_info()[:2] == foo[:2] # y + assert sys.exc_info()[:3] == foo # n + assert sys.exc_info()[:4] == foo # n + assert sys.exc_info()[:-1] == foo[:2] # y + assert sys.exc_info()[:-2] == foo[:1] # y + assert sys.exc_info()[:-3] == () # y + test_returns_slice_2.expected = 'nyyynnyyy' + + def test_returns_slice_3(self): + import sys + e = KeyError("boom") + try: + raise e + except: + foo = sys.exc_info() # n + assert sys.exc_info()[2:2] == () # y + assert sys.exc_info()[0:1] == foo[:1] # y + assert sys.exc_info()[1:2] == foo[1:2] # y + assert sys.exc_info()[0:3] == foo # n + assert sys.exc_info()[2:4] == foo[2:] # n + 
assert sys.exc_info()[0:-1] == foo[:2] # y + assert sys.exc_info()[0:-2] == foo[:1] # y + assert sys.exc_info()[5:-3] == () # y + test_returns_slice_3.expected = 'nyyynnyyy' + + def test_strange_invocation(self): + import sys + e = KeyError("boom") + try: + raise e + except: + a = []; k = {} + assert sys.exc_info(*a)[:0] == () + assert sys.exc_info(**k)[:0] == () + test_strange_invocation.expected = 'nn' + + def test_call_in_subfunction(self): + import sys + def g(): + # this case is not optimized, because we need to search the + # frame chain. it's probably not worth the complications + return sys.exc_info()[1] + e = KeyError("boom") + try: + raise e + except: + assert g() is e + test_call_in_subfunction.expected = 'n' + + +class AppTestSysExcInfoDirectCallMethod(AppTestSysExcInfoDirect): + def setup_class(cls): + cls.space = gettestobjspace(**{"objspace.opcodes.CALL_METHOD": True}) diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py --- a/pypy/module/sys/vm.py +++ b/pypy/module/sys/vm.py @@ -89,6 +89,9 @@ """Return the (type, value, traceback) of the most recent exception caught by an except clause in the current stack frame or in an older stack frame.""" + return exc_info_with_tb(space) # indirection for the tests + +def exc_info_with_tb(space): operror = space.getexecutioncontext().sys_exc_info() if operror is None: return space.newtuple([space.w_None,space.w_None,space.w_None]) @@ -96,6 +99,59 @@ return space.newtuple([operror.w_type, operror.get_w_value(space), space.wrap(operror.get_traceback())]) +def exc_info_without_tb(space, frame): + operror = frame.last_exception + return space.newtuple([operror.w_type, operror.get_w_value(space), + space.w_None]) + +def exc_info_direct(space, frame): + from pypy.tool import stdlib_opcode + # In order to make the JIT happy, we try to return (exc, val, None) + # instead of (exc, val, tb). 
We can do that only if we recognize + # the following pattern in the bytecode: + # CALL_FUNCTION/CALL_METHOD <-- invoking me + # LOAD_CONST 0, 1, -2 or -3 + # BINARY_SUBSCR + # or: + # CALL_FUNCTION/CALL_METHOD + # LOAD_CONST <=2 + # SLICE_2 + # or: + # CALL_FUNCTION/CALL_METHOD + # LOAD_CONST any integer + # LOAD_CONST <=2 + # SLICE_3 + need_all_three_args = True + co = frame.getcode().co_code + p = frame.last_instr + if (ord(co[p]) == stdlib_opcode.CALL_FUNCTION or + ord(co[p]) == stdlib_opcode.CALL_METHOD): + if ord(co[p+3]) == stdlib_opcode.LOAD_CONST: + lo = ord(co[p+4]) + hi = ord(co[p+5]) + w_constant = frame.getconstant_w((hi * 256) | lo) + if space.isinstance_w(w_constant, space.w_int): + constant = space.int_w(w_constant) + if ord(co[p+6]) == stdlib_opcode.BINARY_SUBSCR: + if -3 <= constant <= 1 and constant != -1: + need_all_three_args = False + elif ord(co[p+6]) == stdlib_opcode.SLICE+2: + if constant <= 2: + need_all_three_args = False + elif (ord(co[p+6]) == stdlib_opcode.LOAD_CONST and + ord(co[p+9]) == stdlib_opcode.SLICE+3): + lo = ord(co[p+7]) + hi = ord(co[p+8]) + w_constant = frame.getconstant_w((hi * 256) | lo) + if space.isinstance_w(w_constant, space.w_int): + if space.int_w(w_constant) <= 2: + need_all_three_args = False + # + if need_all_three_args or frame.last_exception is None or frame.hide(): + return exc_info_with_tb(space) + else: + return exc_info_without_tb(space, frame) + def exc_clear(space): """Clear global information on the current exception. 
Subsequent calls to exc_info() will return (None,None,None) until another exception is diff --git a/pypy/rlib/longlong2float.py b/pypy/rlib/longlong2float.py --- a/pypy/rlib/longlong2float.py +++ b/pypy/rlib/longlong2float.py @@ -21,7 +21,7 @@ FLOAT_ARRAY_PTR = lltype.Ptr(lltype.Array(rffi.FLOAT)) # these definitions are used only in tests, when not translated -def longlong2float_emulator(llval): +def longlong2float(llval): with lltype.scoped_alloc(DOUBLE_ARRAY_PTR.TO, 1) as d_array: ll_array = rffi.cast(LONGLONG_ARRAY_PTR, d_array) ll_array[0] = llval @@ -51,12 +51,6 @@ eci = ExternalCompilationInfo(includes=['string.h', 'assert.h'], post_include_bits=[""" -static double pypy__longlong2float(long long x) { - double dd; - assert(sizeof(double) == 8 && sizeof(long long) == 8); - memcpy(&dd, &x, 8); - return dd; -} static float pypy__uint2singlefloat(unsigned int x) { float ff; assert(sizeof(float) == 4 && sizeof(unsigned int) == 4); @@ -71,12 +65,6 @@ } """]) -longlong2float = rffi.llexternal( - "pypy__longlong2float", [rffi.LONGLONG], rffi.DOUBLE, - _callable=longlong2float_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True, sandboxsafe=True, - oo_primitive="pypy__longlong2float") - uint2singlefloat = rffi.llexternal( "pypy__uint2singlefloat", [rffi.UINT], rffi.FLOAT, _callable=uint2singlefloat_emulator, compilation_info=eci, @@ -99,4 +87,17 @@ def specialize_call(self, hop): [v_float] = hop.inputargs(lltype.Float) - return hop.genop("convert_float_bytes_to_longlong", [v_float], resulttype=hop.r_result) + hop.exception_cannot_occur() + return hop.genop("convert_float_bytes_to_longlong", [v_float], resulttype=lltype.SignedLongLong) + +class LongLong2FloatEntry(ExtRegistryEntry): + _about_ = longlong2float + + def compute_result_annotation(self, s_longlong): + assert annmodel.SomeInteger(knowntype=r_int64).contains(s_longlong) + return annmodel.SomeFloat() + + def specialize_call(self, hop): + [v_longlong] = hop.inputargs(lltype.SignedLongLong) 
+ hop.exception_cannot_occur() + return hop.genop("convert_longlong_bytes_to_float", [v_longlong], resulttype=lltype.Float) diff --git a/pypy/rlib/test/test_longlong2float.py b/pypy/rlib/test/test_longlong2float.py --- a/pypy/rlib/test/test_longlong2float.py +++ b/pypy/rlib/test/test_longlong2float.py @@ -2,6 +2,7 @@ from pypy.rlib.longlong2float import longlong2float, float2longlong from pypy.rlib.longlong2float import uint2singlefloat, singlefloat2uint from pypy.rlib.rarithmetic import r_singlefloat +from pypy.rpython.test.test_llinterp import interpret def fn(f1): @@ -31,6 +32,18 @@ res = fn2(x) assert repr(res) == repr(x) +def test_interpreted(): + def f(f1): + try: + ll = float2longlong(f1) + return longlong2float(ll) + except Exception: + return 500 + + for x in enum_floats(): + res = interpret(f, [x]) + assert repr(res) == repr(x) + # ____________________________________________________________ def fnsingle(f1): diff --git a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py --- a/pypy/rpython/lltypesystem/lloperation.py +++ b/pypy/rpython/lltypesystem/lloperation.py @@ -350,6 +350,7 @@ 'truncate_longlong_to_int':LLOp(canfold=True), 'force_cast': LLOp(sideeffects=False), # only for rffi.cast() 'convert_float_bytes_to_longlong': LLOp(canfold=True), + 'convert_longlong_bytes_to_float': LLOp(canfold=True), # __________ pointer operations __________ diff --git a/pypy/rpython/lltypesystem/opimpl.py b/pypy/rpython/lltypesystem/opimpl.py --- a/pypy/rpython/lltypesystem/opimpl.py +++ b/pypy/rpython/lltypesystem/opimpl.py @@ -431,6 +431,10 @@ from pypy.rlib.longlong2float import float2longlong return float2longlong(a) +def op_convert_longlong_bytes_to_float(a): + from pypy.rlib.longlong2float import longlong2float + return longlong2float(a) + def op_unichar_eq(x, y): assert isinstance(x, unicode) and len(x) == 1 diff --git a/pypy/rpython/lltypesystem/rstr.py b/pypy/rpython/lltypesystem/rstr.py --- a/pypy/rpython/lltypesystem/rstr.py 
+++ b/pypy/rpython/lltypesystem/rstr.py @@ -765,7 +765,11 @@ def _ll_stringslice(s1, start, stop): lgt = stop - start assert start >= 0 - assert lgt >= 0 + # If start > stop, return a empty string. This can happen if the start + # is greater than the length of the string. Use < instead of <= to avoid + # creating another path for the JIT when start == stop. + if lgt < 0: + return s1.empty() newstr = s1.malloc(lgt) s1.copy_contents(s1, newstr, start, 0, lgt) return newstr diff --git a/pypy/rpython/ootypesystem/rstr.py b/pypy/rpython/ootypesystem/rstr.py --- a/pypy/rpython/ootypesystem/rstr.py +++ b/pypy/rpython/ootypesystem/rstr.py @@ -222,6 +222,10 @@ length = s.ll_strlen() if stop > length: stop = length + # If start > stop, return a empty string. This can happen if the start + # is greater than the length of the string. + if start > stop: + start = stop return s.ll_substring(start, stop-start) def ll_stringslice_minusone(s): diff --git a/pypy/rpython/test/test_rstr.py b/pypy/rpython/test/test_rstr.py --- a/pypy/rpython/test/test_rstr.py +++ b/pypy/rpython/test/test_rstr.py @@ -477,7 +477,11 @@ s1 = s[:3] s2 = s[3:] s3 = s[3:10] - return s1+s2 == s and s2+s1 == const('lohel') and s1+s3 == s + s4 = s[42:44] + return (s1+s2 == s and + s2+s1 == const('lohel') and + s1+s3 == s and + s4 == const('')) res = self.interpret(fn, [0]) assert res diff --git a/pypy/tool/clean_old_branches.py b/pypy/tool/clean_old_branches.py --- a/pypy/tool/clean_old_branches.py +++ b/pypy/tool/clean_old_branches.py @@ -38,7 +38,7 @@ closed_heads.reverse() for head, branch in closed_heads: - print '\t', branch + print '\t', head, '\t', branch print print 'The branches listed above will be merged to "closed-branches".' 
print 'You need to run this script in a clean working copy where you' diff --git a/pypy/translator/c/src/float.h b/pypy/translator/c/src/float.h --- a/pypy/translator/c/src/float.h +++ b/pypy/translator/c/src/float.h @@ -43,5 +43,6 @@ #define OP_CAST_FLOAT_TO_LONGLONG(x,r) r = (long long)(x) #define OP_CAST_FLOAT_TO_ULONGLONG(x,r) r = (unsigned long long)(x) #define OP_CONVERT_FLOAT_BYTES_TO_LONGLONG(x,r) memcpy(&r, &x, sizeof(double)) +#define OP_CONVERT_LONGLONG_BYTES_TO_FLOAT(x,r) memcpy(&r, &x, sizeof(long long)) #endif diff --git a/pypy/translator/jvm/opcodes.py b/pypy/translator/jvm/opcodes.py --- a/pypy/translator/jvm/opcodes.py +++ b/pypy/translator/jvm/opcodes.py @@ -243,4 +243,5 @@ 'force_cast': [PushAllArgs, CastPrimitive, StoreResult], 'convert_float_bytes_to_longlong': jvm.PYPYDOUBLEBYTESTOLONG, + 'convert_longlong_bytes_to_float': jvm.PYPYLONGBYTESTODOUBLE, }) diff --git a/pypy/translator/jvm/typesystem.py b/pypy/translator/jvm/typesystem.py --- a/pypy/translator/jvm/typesystem.py +++ b/pypy/translator/jvm/typesystem.py @@ -942,6 +942,7 @@ PYPYULONGTODOUBLE = Method.s(jPyPy, 'ulong_to_double', (jLong,), jDouble) PYPYLONGBITWISENEGATE = Method.v(jPyPy, 'long_bitwise_negate', (jLong,), jLong) PYPYDOUBLEBYTESTOLONG = Method.v(jPyPy, 'pypy__float2longlong', (jDouble,), jLong) +PYPYLONGBYTESTODOUBLE = Method.v(jPyPy, 'pypy__longlong2float', (jLong,), jDouble) PYPYSTRTOINT = Method.v(jPyPy, 'str_to_int', (jString,), jInt) PYPYSTRTOUINT = Method.v(jPyPy, 'str_to_uint', (jString,), jInt) PYPYSTRTOLONG = Method.v(jPyPy, 'str_to_long', (jString,), jLong) From noreply at buildbot.pypy.org Fri Mar 30 13:14:48 2012 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 30 Mar 2012 13:14:48 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix keepalive issues in buffer(array(..)). 
Message-ID: <20120330111448.5B38182252@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r54095:47c0f9458312 Date: 2012-03-30 13:09 +0200 http://bitbucket.org/pypy/pypy/changeset/47c0f9458312/ Log: Fix keepalive issues in buffer(array(..)). diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -11,7 +11,7 @@ from pypy.objspace.std.register_all import register_all from pypy.rlib.rarithmetic import ovfcheck from pypy.rlib.unroll import unrolling_iterable -from pypy.rlib.objectmodel import specialize +from pypy.rlib.objectmodel import specialize, keepalive_until_here from pypy.rpython.lltypesystem import lltype, rffi @@ -145,18 +145,24 @@ unroll_typecodes = unrolling_iterable(types.keys()) class ArrayBuffer(RWBuffer): - def __init__(self, data, bytes): - self.data = data - self.len = bytes + def __init__(self, array): + self.array = array def getlength(self): - return self.len + return self.array.len * self.array.itemsize def getitem(self, index): - return self.data[index] + array = self.array + data = array._charbuf_start() + char = data[index] + array._charbuf_stop() + return char def setitem(self, index, char): - self.data[index] = char + array = self.array + data = array._charbuf_start() + data[index] = char + array._charbuf_stop() def make_array(mytype): @@ -278,9 +284,10 @@ oldlen = self.len new = len(s) / mytype.bytes self.setlen(oldlen + new) - cbuf = self.charbuf() + cbuf = self._charbuf_start() for i in range(len(s)): cbuf[oldlen * mytype.bytes + i] = s[i] + self._charbuf_stop() def fromlist(self, w_lst): s = self.len @@ -310,8 +317,11 @@ else: self.fromsequence(w_iterable) - def charbuf(self): - return rffi.cast(rffi.CCHARP, self.buffer) + def _charbuf_start(self): + return rffi.cast(rffi.CCHARP, self.buffer) + + def _charbuf_stop(self): + keepalive_until_here(self) def w_getitem(self, space, idx): item = self.buffer[idx] @@ 
-530,8 +540,10 @@ self.fromstring(space.str_w(w_s)) def array_tostring__Array(space, self): - cbuf = self.charbuf() - return self.space.wrap(rffi.charpsize2str(cbuf, self.len * mytype.bytes)) + cbuf = self._charbuf_start() + s = rffi.charpsize2str(cbuf, self.len * mytype.bytes) + self._charbuf_stop() + return self.space.wrap(s) def array_fromfile__Array_ANY_ANY(space, self, w_f, w_n): if not isinstance(w_f, W_File): @@ -613,8 +625,7 @@ # Misc methods def buffer__Array(space, self): - b = ArrayBuffer(self.charbuf(), self.len * mytype.bytes) - return space.wrap(b) + return space.wrap(ArrayBuffer(self)) def array_buffer_info__Array(space, self): w_ptr = space.wrap(rffi.cast(lltype.Unsigned, self.buffer)) @@ -649,7 +660,7 @@ raise OperationError(space.w_RuntimeError, space.wrap(msg)) if self.len == 0: return - bytes = self.charbuf() + bytes = self._charbuf_start() tmp = [bytes[0]] * mytype.bytes for start in range(0, self.len * mytype.bytes, mytype.bytes): stop = start + mytype.bytes - 1 @@ -657,6 +668,7 @@ tmp[i] = bytes[start + i] for i in range(mytype.bytes): bytes[stop - i] = tmp[i] + self._charbuf_stop() def repr__Array(space, self): if self.len == 0: diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -433,7 +433,25 @@ a = self.array('h', 'Hi') buf = buffer(a) assert buf[1] == 'i' - #raises(TypeError, buf.__setitem__, 1, 'o') + + def test_buffer_write(self): + a = self.array('c', 'hello') + buf = buffer(a) + print repr(buf) + try: + buf[3] = 'L' + except TypeError: + skip("buffer(array) returns a read-only buffer on CPython") + assert a.tostring() == 'helLo' + + def test_buffer_keepalive(self): + buf = buffer(self.array('c', 'text')) + assert buf[2] == 'x' + # + a = self.array('c', 'foobarbaz') + buf = buffer(a) + a.fromstring('some extra text') + assert buf[:] == 'foobarbazsome extra text' def test_list_methods(self): assert 
repr(self.array('i')) == "array('i')" From noreply at buildbot.pypy.org Fri Mar 30 13:21:55 2012 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 30 Mar 2012 13:21:55 +0200 (CEST) Subject: [pypy-commit] pypy win32-cleanup2: change branch name Message-ID: <20120330112155.ACFAC82252@wyvern.cs.uni-duesseldorf.de> Author: Matti Picus Branch: win32-cleanup2 Changeset: r54096:5002715399b0 Date: 2012-03-27 09:49 +0200 http://bitbucket.org/pypy/pypy/changeset/5002715399b0/ Log: change branch name From noreply at buildbot.pypy.org Fri Mar 30 13:21:56 2012 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 30 Mar 2012 13:21:56 +0200 (CEST) Subject: [pypy-commit] pypy win32-cleanup_2: rename branch Message-ID: <20120330112156.D97378236B@wyvern.cs.uni-duesseldorf.de> Author: Matti Picus Branch: win32-cleanup_2 Changeset: r54097:92711c1d6eef Date: 2012-03-27 09:50 +0200 http://bitbucket.org/pypy/pypy/changeset/92711c1d6eef/ Log: rename branch From noreply at buildbot.pypy.org Fri Mar 30 13:21:58 2012 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 30 Mar 2012 13:21:58 +0200 (CEST) Subject: [pypy-commit] pypy win32-cleanup2: merge to close branch Message-ID: <20120330112158.1226F82E47@wyvern.cs.uni-duesseldorf.de> Author: Matti Picus Branch: win32-cleanup2 Changeset: r54098:95c15fe476c5 Date: 2012-03-27 09:51 +0200 http://bitbucket.org/pypy/pypy/changeset/95c15fe476c5/ Log: merge to close branch From noreply at buildbot.pypy.org Fri Mar 30 13:22:10 2012 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 30 Mar 2012 13:22:10 +0200 (CEST) Subject: [pypy-commit] pypy win32-cleanup2: merge from default Message-ID: <20120330112210.EE58982252@wyvern.cs.uni-duesseldorf.de> Author: Matti Picus Branch: win32-cleanup2 Changeset: r54099:ddd8b35460f5 Date: 2012-03-27 11:29 +0200 http://bitbucket.org/pypy/pypy/changeset/ddd8b35460f5/ Log: merge from default diff too long, truncating to 10000 out of 22022 lines diff --git a/lib-python/2.7/SimpleXMLRPCServer.py 
b/lib-python/2.7/SimpleXMLRPCServer.py --- a/lib-python/2.7/SimpleXMLRPCServer.py +++ b/lib-python/2.7/SimpleXMLRPCServer.py @@ -486,7 +486,10 @@ L = [] while size_remaining: chunk_size = min(size_remaining, max_chunk_size) - L.append(self.rfile.read(chunk_size)) + chunk = self.rfile.read(chunk_size) + if not chunk: + break + L.append(chunk) size_remaining -= len(L[-1]) data = ''.join(L) diff --git a/lib-python/2.7/test/test_xmlrpc.py b/lib-python/2.7/test/test_xmlrpc.py --- a/lib-python/2.7/test/test_xmlrpc.py +++ b/lib-python/2.7/test/test_xmlrpc.py @@ -308,7 +308,7 @@ global ADDR, PORT, URL ADDR, PORT = serv.socket.getsockname() #connect to IP address directly. This avoids socket.create_connection() - #trying to connect to to "localhost" using all address families, which + #trying to connect to "localhost" using all address families, which #causes slowdown e.g. on vista which supports AF_INET6. The server listens #on AF_INET only. URL = "http://%s:%d"%(ADDR, PORT) @@ -367,7 +367,7 @@ global ADDR, PORT, URL ADDR, PORT = serv.socket.getsockname() #connect to IP address directly. This avoids socket.create_connection() - #trying to connect to to "localhost" using all address families, which + #trying to connect to "localhost" using all address families, which #causes slowdown e.g. on vista which supports AF_INET6. The server listens #on AF_INET only. URL = "http://%s:%d"%(ADDR, PORT) @@ -472,6 +472,9 @@ # protocol error; provide additional information in test output self.fail("%s\n%s" % (e, getattr(e, "headers", ""))) + def test_unicode_host(self): + server = xmlrpclib.ServerProxy(u"http://%s:%d/RPC2"%(ADDR, PORT)) + self.assertEqual(server.add("a", u"\xe9"), u"a\xe9") # [ch] The test 404 is causing lots of false alarms. def XXXtest_404(self): @@ -586,6 +589,12 @@ # This avoids waiting for the socket timeout. self.test_simple1() + def test_partial_post(self): + # Check that a partial POST doesn't make the server loop: issue #14001. 
+ conn = httplib.HTTPConnection(ADDR, PORT) + conn.request('POST', '/RPC2 HTTP/1.0\r\nContent-Length: 100\r\n\r\nbye') + conn.close() + class MultiPathServerTestCase(BaseServerTestCase): threadFunc = staticmethod(http_multi_server) request_count = 2 diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -311,7 +311,7 @@ RegrTest('test_mimetypes.py'), RegrTest('test_MimeWriter.py', core=False), RegrTest('test_minidom.py'), - RegrTest('test_mmap.py'), + RegrTest('test_mmap.py', usemodules="mmap"), RegrTest('test_module.py', core=True), RegrTest('test_modulefinder.py'), RegrTest('test_msilib.py', skip=only_win32), diff --git a/lib-python/modified-2.7/distutils/command/bdist_wininst.py b/lib-python/modified-2.7/distutils/command/bdist_wininst.py --- a/lib-python/modified-2.7/distutils/command/bdist_wininst.py +++ b/lib-python/modified-2.7/distutils/command/bdist_wininst.py @@ -298,7 +298,8 @@ bitmaplen, # number of bytes in bitmap ) file.write(header) - file.write(open(arcname, "rb").read()) + with open(arcname, "rb") as arcfile: + file.write(arcfile.read()) # create_exe() diff --git a/lib-python/modified-2.7/distutils/sysconfig_pypy.py b/lib-python/modified-2.7/distutils/sysconfig_pypy.py --- a/lib-python/modified-2.7/distutils/sysconfig_pypy.py +++ b/lib-python/modified-2.7/distutils/sysconfig_pypy.py @@ -60,6 +60,7 @@ g['EXE'] = "" g['SO'] = _get_so_extension() or ".so" g['SOABI'] = g['SO'].rsplit('.')[0] + g['LIBDIR'] = os.path.join(sys.prefix, 'lib') global _config_vars _config_vars = g diff --git a/lib-python/modified-2.7/opcode.py b/lib-python/modified-2.7/opcode.py --- a/lib-python/modified-2.7/opcode.py +++ b/lib-python/modified-2.7/opcode.py @@ -192,5 +192,6 @@ def_op('LOOKUP_METHOD', 201) # Index in name list hasname.append(201) def_op('CALL_METHOD', 202) # #args not including 'self' +def_op('BUILD_LIST_FROM_ARG', 203) del def_op, name_op, jrel_op, jabs_op diff --git 
a/lib-python/modified-2.7/site.py b/lib-python/modified-2.7/site.py --- a/lib-python/modified-2.7/site.py +++ b/lib-python/modified-2.7/site.py @@ -550,9 +550,18 @@ "'import usercustomize' failed; use -v for traceback" +def import_builtin_stuff(): + """PyPy specific: pre-import a few built-in modules, because + some programs actually rely on them to be in sys.modules :-(""" + import exceptions + if 'zipimport' in sys.builtin_module_names: + import zipimport + + def main(): global ENABLE_USER_SITE + import_builtin_stuff() abs__file__() known_paths = removeduppaths() if (os.name == "posix" and sys.path and diff --git a/lib-python/modified-2.7/test/test_dis.py b/lib-python/modified-2.7/test/test_dis.py new file mode 100644 --- /dev/null +++ b/lib-python/modified-2.7/test/test_dis.py @@ -0,0 +1,150 @@ +# Minimal tests for dis module + +from test.test_support import run_unittest +import unittest +import sys +import dis +import StringIO + + +def _f(a): + print a + return 1 + +dis_f = """\ + %-4d 0 LOAD_FAST 0 (a) + 3 PRINT_ITEM + 4 PRINT_NEWLINE + + %-4d 5 LOAD_CONST 1 (1) + 8 RETURN_VALUE +"""%(_f.func_code.co_firstlineno + 1, + _f.func_code.co_firstlineno + 2) + + +def bug708901(): + for res in range(1, + 10): + pass + +dis_bug708901 = """\ + %-4d 0 SETUP_LOOP 23 (to 26) + 3 LOAD_GLOBAL 0 (range) + 6 LOAD_CONST 1 (1) + + %-4d 9 LOAD_CONST 2 (10) + 12 CALL_FUNCTION 2 + 15 GET_ITER + >> 16 FOR_ITER 6 (to 25) + 19 STORE_FAST 0 (res) + + %-4d 22 JUMP_ABSOLUTE 16 + >> 25 POP_BLOCK + >> 26 LOAD_CONST 0 (None) + 29 RETURN_VALUE +"""%(bug708901.func_code.co_firstlineno + 1, + bug708901.func_code.co_firstlineno + 2, + bug708901.func_code.co_firstlineno + 3) + + +def bug1333982(x=[]): + assert 0, ([s for s in x] + + 1) + pass + +dis_bug1333982 = """\ + %-4d 0 LOAD_CONST 1 (0) + 3 POP_JUMP_IF_TRUE 38 + 6 LOAD_GLOBAL 0 (AssertionError) + 9 LOAD_FAST 0 (x) + 12 BUILD_LIST_FROM_ARG 0 + 15 GET_ITER + >> 16 FOR_ITER 12 (to 31) + 19 STORE_FAST 1 (s) + 22 LOAD_FAST 1 (s) + 25 
LIST_APPEND 2 + 28 JUMP_ABSOLUTE 16 + + %-4d >> 31 LOAD_CONST 2 (1) + 34 BINARY_ADD + 35 RAISE_VARARGS 2 + + %-4d >> 38 LOAD_CONST 0 (None) + 41 RETURN_VALUE +"""%(bug1333982.func_code.co_firstlineno + 1, + bug1333982.func_code.co_firstlineno + 2, + bug1333982.func_code.co_firstlineno + 3) + +_BIG_LINENO_FORMAT = """\ +%3d 0 LOAD_GLOBAL 0 (spam) + 3 POP_TOP + 4 LOAD_CONST 0 (None) + 7 RETURN_VALUE +""" + +class DisTests(unittest.TestCase): + def do_disassembly_test(self, func, expected): + s = StringIO.StringIO() + save_stdout = sys.stdout + sys.stdout = s + dis.dis(func) + sys.stdout = save_stdout + got = s.getvalue() + # Trim trailing blanks (if any). + lines = got.split('\n') + lines = [line.rstrip() for line in lines] + expected = expected.split("\n") + import difflib + if expected != lines: + self.fail( + "events did not match expectation:\n" + + "\n".join(difflib.ndiff(expected, + lines))) + + def test_opmap(self): + self.assertEqual(dis.opmap["STOP_CODE"], 0) + self.assertIn(dis.opmap["LOAD_CONST"], dis.hasconst) + self.assertIn(dis.opmap["STORE_NAME"], dis.hasname) + + def test_opname(self): + self.assertEqual(dis.opname[dis.opmap["LOAD_FAST"]], "LOAD_FAST") + + def test_boundaries(self): + self.assertEqual(dis.opmap["EXTENDED_ARG"], dis.EXTENDED_ARG) + self.assertEqual(dis.opmap["STORE_NAME"], dis.HAVE_ARGUMENT) + + def test_dis(self): + self.do_disassembly_test(_f, dis_f) + + def test_bug_708901(self): + self.do_disassembly_test(bug708901, dis_bug708901) + + def test_bug_1333982(self): + # This one is checking bytecodes generated for an `assert` statement, + # so fails if the tests are run with -O. Skip this test then. 
+ if __debug__: + self.do_disassembly_test(bug1333982, dis_bug1333982) + + def test_big_linenos(self): + def func(count): + namespace = {} + func = "def foo():\n " + "".join(["\n "] * count + ["spam\n"]) + exec func in namespace + return namespace['foo'] + + # Test all small ranges + for i in xrange(1, 300): + expected = _BIG_LINENO_FORMAT % (i + 2) + self.do_disassembly_test(func(i), expected) + + # Test some larger ranges too + for i in xrange(300, 5000, 10): + expected = _BIG_LINENO_FORMAT % (i + 2) + self.do_disassembly_test(func(i), expected) + +def test_main(): + run_unittest(DisTests) + + +if __name__ == "__main__": + test_main() diff --git a/lib-python/modified-2.7/test/test_set.py b/lib-python/modified-2.7/test/test_set.py --- a/lib-python/modified-2.7/test/test_set.py +++ b/lib-python/modified-2.7/test/test_set.py @@ -1568,7 +1568,7 @@ for meth in (s.union, s.intersection, s.difference, s.symmetric_difference, s.isdisjoint): for g in (G, I, Ig, L, R): expected = meth(data) - actual = meth(G(data)) + actual = meth(g(data)) if isinstance(expected, bool): self.assertEqual(actual, expected) else: diff --git a/lib_pypy/_csv.py b/lib_pypy/_csv.py --- a/lib_pypy/_csv.py +++ b/lib_pypy/_csv.py @@ -414,7 +414,7 @@ def _parse_add_char(self, c): if len(self.field) + len(c) > _field_limit: - raise Error("field larget than field limit (%d)" % (_field_limit)) + raise Error("field larger than field limit (%d)" % (_field_limit)) self.field += c diff --git a/lib_pypy/_ctypes/builtin.py b/lib_pypy/_ctypes/builtin.py --- a/lib_pypy/_ctypes/builtin.py +++ b/lib_pypy/_ctypes/builtin.py @@ -31,24 +31,20 @@ arg = cobj._get_buffer_value() return _rawffi.wcharp2rawunicode(arg, lgt) -class ErrorObject(local): - def __init__(self): - self.errno = 0 - self.winerror = 0 -_error_object = ErrorObject() +_err = local() def get_errno(): - return _error_object.errno + return getattr(_err, "errno", 0) def set_errno(errno): - old_errno = _error_object.errno - _error_object.errno = errno + 
old_errno = get_errno() + _err.errno = errno return old_errno def get_last_error(): - return _error_object.winerror + return getattr(_err, "winerror", 0) def set_last_error(winerror): - old_winerror = _error_object.winerror - _error_object.winerror = winerror + old_winerror = get_last_error() + _err.winerror = winerror return old_winerror diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -3,7 +3,7 @@ from _ctypes.primitive import SimpleType, _SimpleCData from _ctypes.basics import ArgumentError, keepalive_key from _ctypes.basics import is_struct_shape -from _ctypes.builtin import set_errno, set_last_error +from _ctypes.builtin import get_errno, set_errno, get_last_error, set_last_error import _rawffi import _ffi import sys @@ -350,16 +350,24 @@ def _call_funcptr(self, funcptr, *newargs): if self._flags_ & _rawffi.FUNCFLAG_USE_ERRNO: - set_errno(_rawffi.get_errno()) + tmp = _rawffi.get_errno() + _rawffi.set_errno(get_errno()) + set_errno(tmp) if self._flags_ & _rawffi.FUNCFLAG_USE_LASTERROR: - set_last_error(_rawffi.get_last_error()) + tmp = _rawffi.get_last_error() + _rawffi.set_last_error(get_last_error()) + set_last_error(tmp) try: result = funcptr(*newargs) finally: if self._flags_ & _rawffi.FUNCFLAG_USE_ERRNO: - set_errno(_rawffi.get_errno()) + tmp = _rawffi.get_errno() + _rawffi.set_errno(get_errno()) + set_errno(tmp) if self._flags_ & _rawffi.FUNCFLAG_USE_LASTERROR: - set_last_error(_rawffi.get_last_error()) + tmp = _rawffi.get_last_error() + _rawffi.set_last_error(get_last_error()) + set_last_error(tmp) # try: return self._build_result(self._restype_, result, newargs) diff --git a/lib_pypy/_locale.py b/lib_pypy/_locale.py deleted file mode 100644 --- a/lib_pypy/_locale.py +++ /dev/null @@ -1,337 +0,0 @@ -# ctypes implementation of _locale module by Victor Stinner, 2008-03-27 - -# ------------------------------------------------------------ -# Note that we also 
have our own interp-level implementation -# ------------------------------------------------------------ - -""" -Support for POSIX locales. -""" - -from ctypes import (Structure, POINTER, create_string_buffer, - c_ubyte, c_int, c_char_p, c_wchar_p, c_size_t) -from ctypes_support import standard_c_lib as libc -from ctypes_support import get_errno - -# load the platform-specific cache made by running locale.ctc.py -from ctypes_config_cache._locale_cache import * - -try: from __pypy__ import builtinify -except ImportError: builtinify = lambda f: f - - -# Ubuntu Gusty i386 structure -class lconv(Structure): - _fields_ = ( - # Numeric (non-monetary) information. - ("decimal_point", c_char_p), # Decimal point character. - ("thousands_sep", c_char_p), # Thousands separator. - - # Each element is the number of digits in each group; - # elements with higher indices are farther left. - # An element with value CHAR_MAX means that no further grouping is done. - # An element with value 0 means that the previous element is used - # for all groups farther left. */ - ("grouping", c_char_p), - - # Monetary information. - - # First three chars are a currency symbol from ISO 4217. - # Fourth char is the separator. Fifth char is '\0'. - ("int_curr_symbol", c_char_p), - ("currency_symbol", c_char_p), # Local currency symbol. - ("mon_decimal_point", c_char_p), # Decimal point character. - ("mon_thousands_sep", c_char_p), # Thousands separator. - ("mon_grouping", c_char_p), # Like `grouping' element (above). - ("positive_sign", c_char_p), # Sign for positive values. - ("negative_sign", c_char_p), # Sign for negative values. - ("int_frac_digits", c_ubyte), # Int'l fractional digits. - ("frac_digits", c_ubyte), # Local fractional digits. - # 1 if currency_symbol precedes a positive value, 0 if succeeds. - ("p_cs_precedes", c_ubyte), - # 1 iff a space separates currency_symbol from a positive value. 
- ("p_sep_by_space", c_ubyte), - # 1 if currency_symbol precedes a negative value, 0 if succeeds. - ("n_cs_precedes", c_ubyte), - # 1 iff a space separates currency_symbol from a negative value. - ("n_sep_by_space", c_ubyte), - - # Positive and negative sign positions: - # 0 Parentheses surround the quantity and currency_symbol. - # 1 The sign string precedes the quantity and currency_symbol. - # 2 The sign string follows the quantity and currency_symbol. - # 3 The sign string immediately precedes the currency_symbol. - # 4 The sign string immediately follows the currency_symbol. - ("p_sign_posn", c_ubyte), - ("n_sign_posn", c_ubyte), - # 1 if int_curr_symbol precedes a positive value, 0 if succeeds. - ("int_p_cs_precedes", c_ubyte), - # 1 iff a space separates int_curr_symbol from a positive value. - ("int_p_sep_by_space", c_ubyte), - # 1 if int_curr_symbol precedes a negative value, 0 if succeeds. - ("int_n_cs_precedes", c_ubyte), - # 1 iff a space separates int_curr_symbol from a negative value. - ("int_n_sep_by_space", c_ubyte), - # Positive and negative sign positions: - # 0 Parentheses surround the quantity and int_curr_symbol. - # 1 The sign string precedes the quantity and int_curr_symbol. - # 2 The sign string follows the quantity and int_curr_symbol. - # 3 The sign string immediately precedes the int_curr_symbol. - # 4 The sign string immediately follows the int_curr_symbol. 
- ("int_p_sign_posn", c_ubyte), - ("int_n_sign_posn", c_ubyte), - ) - -_setlocale = libc.setlocale -_setlocale.argtypes = (c_int, c_char_p) -_setlocale.restype = c_char_p - -_localeconv = libc.localeconv -_localeconv.argtypes = None -_localeconv.restype = POINTER(lconv) - -_strcoll = libc.strcoll -_strcoll.argtypes = (c_char_p, c_char_p) -_strcoll.restype = c_int - -_wcscoll = libc.wcscoll -_wcscoll.argtypes = (c_wchar_p, c_wchar_p) -_wcscoll.restype = c_int - -_strxfrm = libc.strxfrm -_strxfrm.argtypes = (c_char_p, c_char_p, c_size_t) -_strxfrm.restype = c_size_t - -HAS_LIBINTL = hasattr(libc, 'gettext') -if HAS_LIBINTL: - _gettext = libc.gettext - _gettext.argtypes = (c_char_p,) - _gettext.restype = c_char_p - - _dgettext = libc.dgettext - _dgettext.argtypes = (c_char_p, c_char_p) - _dgettext.restype = c_char_p - - _dcgettext = libc.dcgettext - _dcgettext.argtypes = (c_char_p, c_char_p, c_int) - _dcgettext.restype = c_char_p - - _textdomain = libc.textdomain - _textdomain.argtypes = (c_char_p,) - _textdomain.restype = c_char_p - - _bindtextdomain = libc.bindtextdomain - _bindtextdomain.argtypes = (c_char_p, c_char_p) - _bindtextdomain.restype = c_char_p - - HAS_BIND_TEXTDOMAIN_CODESET = hasattr(libc, 'bindtextdomain_codeset') - if HAS_BIND_TEXTDOMAIN_CODESET: - _bind_textdomain_codeset = libc.bindtextdomain_codeset - _bind_textdomain_codeset.argtypes = (c_char_p, c_char_p) - _bind_textdomain_codeset.restype = c_char_p - -class Error(Exception): - pass - -def fixup_ulcase(): - import string - #import strop - - # create uppercase map string - ul = [] - for c in xrange(256): - c = chr(c) - if c.isupper(): - ul.append(c) - ul = ''.join(ul) - string.uppercase = ul - #strop.uppercase = ul - - # create lowercase string - ul = [] - for c in xrange(256): - c = chr(c) - if c.islower(): - ul.append(c) - ul = ''.join(ul) - string.lowercase = ul - #strop.lowercase = ul - - # create letters string - ul = [] - for c in xrange(256): - c = chr(c) - if c.isalpha(): - ul.append(c) 
- ul = ''.join(ul) - string.letters = ul - - at builtinify -def setlocale(category, locale=None): - "(integer,string=None) -> string. Activates/queries locale processing." - if locale: - # set locale - result = _setlocale(category, locale) - if not result: - raise Error("unsupported locale setting") - - # record changes to LC_CTYPE - if category in (LC_CTYPE, LC_ALL): - fixup_ulcase() - else: - # get locale - result = _setlocale(category, None) - if not result: - raise Error("locale query failed") - return result - -def _copy_grouping(text): - groups = [ ord(group) for group in text ] - if groups: - groups.append(0) - return groups - - at builtinify -def localeconv(): - "() -> dict. Returns numeric and monetary locale-specific parameters." - - # if LC_NUMERIC is different in the C library, use saved value - lp = _localeconv() - l = lp.contents - - # hopefully, the localeconv result survives the C library calls - # involved herein - - # Numeric information - result = { - "decimal_point": l.decimal_point, - "thousands_sep": l.thousands_sep, - "grouping": _copy_grouping(l.grouping), - "int_curr_symbol": l.int_curr_symbol, - "currency_symbol": l.currency_symbol, - "mon_decimal_point": l.mon_decimal_point, - "mon_thousands_sep": l.mon_thousands_sep, - "mon_grouping": _copy_grouping(l.mon_grouping), - "positive_sign": l.positive_sign, - "negative_sign": l.negative_sign, - "int_frac_digits": l.int_frac_digits, - "frac_digits": l.frac_digits, - "p_cs_precedes": l.p_cs_precedes, - "p_sep_by_space": l.p_sep_by_space, - "n_cs_precedes": l.n_cs_precedes, - "n_sep_by_space": l.n_sep_by_space, - "p_sign_posn": l.p_sign_posn, - "n_sign_posn": l.n_sign_posn, - } - return result - - at builtinify -def strcoll(s1, s2): - "string,string -> int. Compares two strings according to the locale." - - # If both arguments are byte strings, use strcoll. - if isinstance(s1, str) and isinstance(s2, str): - return _strcoll(s1, s2) - - # If neither argument is unicode, it's an error. 
- if not isinstance(s1, unicode) and not isinstance(s2, unicode): - raise ValueError("strcoll arguments must be strings") - - # Convert the non-unicode argument to unicode. - s1 = unicode(s1) - s2 = unicode(s2) - - # Collate the strings. - return _wcscoll(s1, s2) - - at builtinify -def strxfrm(s): - "string -> string. Returns a string that behaves for cmp locale-aware." - - # assume no change in size, first - n1 = len(s) + 1 - buf = create_string_buffer(n1) - n2 = _strxfrm(buf, s, n1) + 1 - if n2 > n1: - # more space needed - buf = create_string_buffer(n2) - _strxfrm(buf, s, n2) - return buf.value - - at builtinify -def getdefaultlocale(): - # TODO: Port code from CPython for Windows and Mac OS - raise NotImplementedError() - -if HAS_LANGINFO: - _nl_langinfo = libc.nl_langinfo - _nl_langinfo.argtypes = (nl_item,) - _nl_langinfo.restype = c_char_p - - def nl_langinfo(key): - """nl_langinfo(key) -> string - Return the value for the locale information associated with key.""" - # Check whether this is a supported constant. GNU libc sometimes - # returns numeric values in the char* return value, which would - # crash PyString_FromString. 
- result = _nl_langinfo(key) - if result is not None: - return result - raise ValueError("unsupported langinfo constant") - -if HAS_LIBINTL: - @builtinify - def gettext(msg): - """gettext(msg) -> string - Return translation of msg.""" - return _gettext(msg) - - @builtinify - def dgettext(domain, msg): - """dgettext(domain, msg) -> string - Return translation of msg in domain.""" - return _dgettext(domain, msg) - - @builtinify - def dcgettext(domain, msg, category): - """dcgettext(domain, msg, category) -> string - Return translation of msg in domain and category.""" - return _dcgettext(domain, msg, category) - - @builtinify - def textdomain(domain): - """textdomain(domain) -> string - Set the C library's textdomain to domain, returning the new domain.""" - return _textdomain(domain) - - @builtinify - def bindtextdomain(domain, dir): - """bindtextdomain(domain, dir) -> string - Bind the C library's domain to dir.""" - dirname = _bindtextdomain(domain, dir) - if not dirname: - errno = get_errno() - raise OSError(errno) - return dirname - - if HAS_BIND_TEXTDOMAIN_CODESET: - @builtinify - def bind_textdomain_codeset(domain, codeset): - """bind_textdomain_codeset(domain, codeset) -> string - Bind the C library's domain to codeset.""" - codeset = _bind_textdomain_codeset(domain, codeset) - if codeset: - return codeset - return None - -__all__ = ( - 'Error', - 'setlocale', 'localeconv', 'strxfrm', 'strcoll', -) + ALL_CONSTANTS -if HAS_LIBINTL: - __all__ += ('gettext', 'dgettext', 'dcgettext', 'textdomain', - 'bindtextdomain') - if HAS_BIND_TEXTDOMAIN_CODESET: - __all__ += ('bind_textdomain_codeset',) -if HAS_LANGINFO: - __all__ += ('nl_langinfo',) diff --git a/lib_pypy/array.py b/lib_pypy/array.py deleted file mode 100644 --- a/lib_pypy/array.py +++ /dev/null @@ -1,531 +0,0 @@ -"""This module defines an object type which can efficiently represent -an array of basic values: characters, integers, floating point -numbers. 
Arrays are sequence types and behave very much like lists, -except that the type of objects stored in them is constrained. The -type is specified at object creation time by using a type code, which -is a single character. The following type codes are defined: - - Type code C Type Minimum size in bytes - 'c' character 1 - 'b' signed integer 1 - 'B' unsigned integer 1 - 'u' Unicode character 2 - 'h' signed integer 2 - 'H' unsigned integer 2 - 'i' signed integer 2 - 'I' unsigned integer 2 - 'l' signed integer 4 - 'L' unsigned integer 4 - 'f' floating point 4 - 'd' floating point 8 - -The constructor is: - -array(typecode [, initializer]) -- create a new array -""" - -from struct import calcsize, pack, pack_into, unpack_from -import operator - -# the buffer-like object to use internally: trying from -# various places in order... -try: - import _rawffi # a reasonable implementation based - _RAWARRAY = _rawffi.Array('c') # on raw_malloc, and providing a - def bytebuffer(size): # real address - return _RAWARRAY(size, autofree=True) - def getbufaddress(buf): - return buf.buffer -except ImportError: - try: - from __pypy__ import bytebuffer # a reasonable implementation - def getbufaddress(buf): # compatible with oo backends, - return 0 # but no address - except ImportError: - # not running on PyPy. Fall back to ctypes... - import ctypes - bytebuffer = ctypes.create_string_buffer - def getbufaddress(buf): - voidp = ctypes.cast(ctypes.pointer(buf), ctypes.c_void_p) - return voidp.value - -# ____________________________________________________________ - -TYPECODES = "cbBuhHiIlLfd" - -class array(object): - """array(typecode [, initializer]) -> array - - Return a new array whose items are restricted by typecode, and - initialized from the optional initializer value, which must be a list, - string. or iterable over elements of the appropriate type. - - Arrays represent basic values and behave very much like lists, except - the type of objects stored in them is constrained. 
- - Methods: - - append() -- append a new item to the end of the array - buffer_info() -- return information giving the current memory info - byteswap() -- byteswap all the items of the array - count() -- return number of occurences of an object - extend() -- extend array by appending multiple elements from an iterable - fromfile() -- read items from a file object - fromlist() -- append items from the list - fromstring() -- append items from the string - index() -- return index of first occurence of an object - insert() -- insert a new item into the array at a provided position - pop() -- remove and return item (default last) - read() -- DEPRECATED, use fromfile() - remove() -- remove first occurence of an object - reverse() -- reverse the order of the items in the array - tofile() -- write all items to a file object - tolist() -- return the array converted to an ordinary list - tostring() -- return the array converted to a string - write() -- DEPRECATED, use tofile() - - Attributes: - - typecode -- the typecode character used to create the array - itemsize -- the length in bytes of one array item - """ - __slots__ = ["typecode", "itemsize", "_data", "_descriptor", "__weakref__"] - - def __new__(cls, typecode, initializer=[], **extrakwds): - self = object.__new__(cls) - if cls is array and extrakwds: - raise TypeError("array() does not take keyword arguments") - if not isinstance(typecode, str) or len(typecode) != 1: - raise TypeError( - "array() argument 1 must be char, not %s" % type(typecode)) - if typecode not in TYPECODES: - raise ValueError( - "bad typecode (must be one of %s)" % ', '.join(TYPECODES)) - self._data = bytebuffer(0) - self.typecode = typecode - self.itemsize = calcsize(typecode) - if isinstance(initializer, list): - self.fromlist(initializer) - elif isinstance(initializer, str): - self.fromstring(initializer) - elif isinstance(initializer, unicode) and self.typecode == "u": - self.fromunicode(initializer) - else: - self.extend(initializer) - 
return self - - def _clear(self): - self._data = bytebuffer(0) - - ##### array-specific operations - - def fromfile(self, f, n): - """Read n objects from the file object f and append them to the end of - the array. Also called as read.""" - if not isinstance(f, file): - raise TypeError("arg1 must be open file") - size = self.itemsize * n - item = f.read(size) - if len(item) < size: - raise EOFError("not enough items in file") - self.fromstring(item) - - def fromlist(self, l): - """Append items to array from list.""" - if not isinstance(l, list): - raise TypeError("arg must be list") - self._fromiterable(l) - - def fromstring(self, s): - """Appends items from the string, interpreting it as an array of machine - values, as if it had been read from a file using the fromfile() - method.""" - if isinstance(s, unicode): - s = str(s) - self._frombuffer(s) - - def _frombuffer(self, s): - length = len(s) - if length % self.itemsize != 0: - raise ValueError("string length not a multiple of item size") - boundary = len(self._data) - newdata = bytebuffer(boundary + length) - newdata[:boundary] = self._data - newdata[boundary:] = s - self._data = newdata - - def fromunicode(self, ustr): - """Extends this array with data from the unicode string ustr. The array - must be a type 'u' array; otherwise a ValueError is raised. Use - array.fromstring(ustr.encode(...)) to append Unicode data to an array of - some other type.""" - if not self.typecode == "u": - raise ValueError( - "fromunicode() may only be called on type 'u' arrays") - # XXX the following probable bug is not emulated: - # CPython accepts a non-unicode string or a buffer, and then - # behaves just like fromstring(), except that it strangely truncates - # string arguments at multiples of the unicode byte size. - # Let's only accept unicode arguments for now. 
- if not isinstance(ustr, unicode): - raise TypeError("fromunicode() argument should probably be " - "a unicode string") - # _frombuffer() does the currect thing using - # the buffer behavior of unicode objects - self._frombuffer(buffer(ustr)) - - def tofile(self, f): - """Write all items (as machine values) to the file object f. Also - called as write.""" - if not isinstance(f, file): - raise TypeError("arg must be open file") - f.write(self.tostring()) - - def tolist(self): - """Convert array to an ordinary list with the same items.""" - count = len(self._data) // self.itemsize - return list(unpack_from('%d%s' % (count, self.typecode), self._data)) - - def tostring(self): - return self._data[:] - - def __buffer__(self): - return buffer(self._data) - - def tounicode(self): - """Convert the array to a unicode string. The array must be a type 'u' - array; otherwise a ValueError is raised. Use array.tostring().decode() - to obtain a unicode string from an array of some other type.""" - if self.typecode != "u": - raise ValueError("tounicode() may only be called on type 'u' arrays") - # XXX performance is not too good - return u"".join(self.tolist()) - - def byteswap(self): - """Byteswap all items of the array. If the items in the array are not - 1, 2, 4, or 8 bytes in size, RuntimeError is raised.""" - if self.itemsize not in [1, 2, 4, 8]: - raise RuntimeError("byteswap not supported for this array") - # XXX slowish - itemsize = self.itemsize - bytes = self._data - for start in range(0, len(bytes), itemsize): - stop = start + itemsize - bytes[start:stop] = bytes[start:stop][::-1] - - def buffer_info(self): - """Return a tuple (address, length) giving the current memory address - and the length in items of the buffer used to hold array's contents. The - length should be multiplied by the itemsize attribute to calculate the - buffer length in bytes. 
On PyPy the address might be meaningless - (returned as 0), depending on the available modules.""" - return (getbufaddress(self._data), len(self)) - - read = fromfile - - write = tofile - - ##### general object protocol - - def __repr__(self): - if len(self._data) == 0: - return "array('%s')" % self.typecode - elif self.typecode == "c": - return "array('%s', %s)" % (self.typecode, repr(self.tostring())) - elif self.typecode == "u": - return "array('%s', %s)" % (self.typecode, repr(self.tounicode())) - else: - return "array('%s', %s)" % (self.typecode, repr(self.tolist())) - - def __copy__(self): - a = array(self.typecode) - a._data = bytebuffer(len(self._data)) - a._data[:] = self._data - return a - - def __eq__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) == buffer(other._data) - else: - return self.tolist() == other.tolist() - - def __ne__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) != buffer(other._data) - else: - return self.tolist() != other.tolist() - - def __lt__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) < buffer(other._data) - else: - return self.tolist() < other.tolist() - - def __gt__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) > buffer(other._data) - else: - return self.tolist() > other.tolist() - - def __le__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) <= buffer(other._data) - else: - return self.tolist() <= other.tolist() - - def __ge__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) >= buffer(other._data) - else: - return self.tolist() >= other.tolist() - - 
def __reduce__(self): - dict = getattr(self, '__dict__', None) - data = self.tostring() - if data: - initargs = (self.typecode, data) - else: - initargs = (self.typecode,) - return (type(self), initargs, dict) - - ##### list methods - - def append(self, x): - """Append new value x to the end of the array.""" - self._frombuffer(pack(self.typecode, x)) - - def count(self, x): - """Return number of occurences of x in the array.""" - return operator.countOf(self, x) - - def extend(self, iterable): - """Append items to the end of the array.""" - if isinstance(iterable, array) \ - and not self.typecode == iterable.typecode: - raise TypeError("can only extend with array of same kind") - self._fromiterable(iterable) - - def index(self, x): - """Return index of first occurence of x in the array.""" - return operator.indexOf(self, x) - - def insert(self, i, x): - """Insert a new item x into the array before position i.""" - seqlength = len(self) - if i < 0: - i += seqlength - if i < 0: - i = 0 - elif i > seqlength: - i = seqlength - boundary = i * self.itemsize - data = pack(self.typecode, x) - newdata = bytebuffer(len(self._data) + len(data)) - newdata[:boundary] = self._data[:boundary] - newdata[boundary:boundary+self.itemsize] = data - newdata[boundary+self.itemsize:] = self._data[boundary:] - self._data = newdata - - def pop(self, i=-1): - """Return the i-th element and delete it from the array. 
i defaults to - -1.""" - seqlength = len(self) - if i < 0: - i += seqlength - if not (0 <= i < seqlength): - raise IndexError(i) - boundary = i * self.itemsize - result = unpack_from(self.typecode, self._data, boundary)[0] - newdata = bytebuffer(len(self._data) - self.itemsize) - newdata[:boundary] = self._data[:boundary] - newdata[boundary:] = self._data[boundary+self.itemsize:] - self._data = newdata - return result - - def remove(self, x): - """Remove the first occurence of x in the array.""" - self.pop(self.index(x)) - - def reverse(self): - """Reverse the order of the items in the array.""" - lst = self.tolist() - lst.reverse() - self._clear() - self.fromlist(lst) - - ##### list protocol - - def __len__(self): - return len(self._data) // self.itemsize - - def __add__(self, other): - if not isinstance(other, array): - raise TypeError("can only append array to array") - if self.typecode != other.typecode: - raise TypeError("bad argument type for built-in operation") - return array(self.typecode, buffer(self._data) + buffer(other._data)) - - def __mul__(self, repeat): - return array(self.typecode, buffer(self._data) * repeat) - - __rmul__ = __mul__ - - def __getitem__(self, i): - seqlength = len(self) - if isinstance(i, slice): - start, stop, step = i.indices(seqlength) - if step != 1: - sublist = self.tolist()[i] # fall-back - return array(self.typecode, sublist) - if start < 0: - start = 0 - if stop < start: - stop = start - assert stop <= seqlength - return array(self.typecode, self._data[start * self.itemsize : - stop * self.itemsize]) - else: - if i < 0: - i += seqlength - if self.typecode == 'c': # speed trick - return self._data[i] - if not (0 <= i < seqlength): - raise IndexError(i) - boundary = i * self.itemsize - return unpack_from(self.typecode, self._data, boundary)[0] - - def __getslice__(self, i, j): - return self.__getitem__(slice(i, j)) - - def __setitem__(self, i, x): - if isinstance(i, slice): - if (not isinstance(x, array) - or self.typecode != 
x.typecode): - raise TypeError("can only assign array of same kind" - " to array slice") - seqlength = len(self) - start, stop, step = i.indices(seqlength) - if step != 1: - sublist = self.tolist() # fall-back - sublist[i] = x.tolist() - self._clear() - self.fromlist(sublist) - return - if start < 0: - start = 0 - if stop < start: - stop = start - assert stop <= seqlength - boundary1 = start * self.itemsize - boundary2 = stop * self.itemsize - boundary2new = boundary1 + len(x._data) - if boundary2 == boundary2new: - self._data[boundary1:boundary2] = x._data - else: - newdata = bytebuffer(len(self._data) + boundary2new-boundary2) - newdata[:boundary1] = self._data[:boundary1] - newdata[boundary1:boundary2new] = x._data - newdata[boundary2new:] = self._data[boundary2:] - self._data = newdata - else: - seqlength = len(self) - if i < 0: - i += seqlength - if self.typecode == 'c': # speed trick - self._data[i] = x - return - if not (0 <= i < seqlength): - raise IndexError(i) - boundary = i * self.itemsize - pack_into(self.typecode, self._data, boundary, x) - - def __setslice__(self, i, j, x): - self.__setitem__(slice(i, j), x) - - def __delitem__(self, i): - if isinstance(i, slice): - seqlength = len(self) - start, stop, step = i.indices(seqlength) - if start < 0: - start = 0 - if stop < start: - stop = start - assert stop <= seqlength - if step != 1: - sublist = self.tolist() # fall-back - del sublist[i] - self._clear() - self.fromlist(sublist) - return - dellength = stop - start - boundary1 = start * self.itemsize - boundary2 = stop * self.itemsize - newdata = bytebuffer(len(self._data) - (boundary2-boundary1)) - newdata[:boundary1] = self._data[:boundary1] - newdata[boundary1:] = self._data[boundary2:] - self._data = newdata - else: - seqlength = len(self) - if i < 0: - i += seqlength - if not (0 <= i < seqlength): - raise IndexError(i) - boundary = i * self.itemsize - newdata = bytebuffer(len(self._data) - self.itemsize) - newdata[:boundary] = self._data[:boundary] 
- newdata[boundary:] = self._data[boundary+self.itemsize:] - self._data = newdata - - def __delslice__(self, i, j): - self.__delitem__(slice(i, j)) - - def __contains__(self, item): - for x in self: - if x == item: - return True - return False - - def __iadd__(self, other): - if not isinstance(other, array): - raise TypeError("can only extend array with array") - self.extend(other) - return self - - def __imul__(self, repeat): - newdata = buffer(self._data) * repeat - self._data = bytebuffer(len(newdata)) - self._data[:] = newdata - return self - - def __iter__(self): - p = 0 - typecode = self.typecode - itemsize = self.itemsize - while p < len(self._data): - yield unpack_from(typecode, self._data, p)[0] - p += itemsize - - ##### internal methods - - def _fromiterable(self, iterable): - iterable = tuple(iterable) - n = len(iterable) - boundary = len(self._data) - newdata = bytebuffer(boundary + n * self.itemsize) - newdata[:boundary] = self._data - pack_into('%d%s' % (n, self.typecode), newdata, boundary, *iterable) - self._data = newdata - -ArrayType = array diff --git a/lib_pypy/binascii.py b/lib_pypy/binascii.py deleted file mode 100644 --- a/lib_pypy/binascii.py +++ /dev/null @@ -1,720 +0,0 @@ -"""A pure Python implementation of binascii. - -Rather slow and buggy in corner cases. -PyPy provides an RPython version too. 
-""" - -class Error(Exception): - pass - -class Done(Exception): - pass - -class Incomplete(Exception): - pass - -def a2b_uu(s): - if not s: - return '' - - length = (ord(s[0]) - 0x20) % 64 - - def quadruplets_gen(s): - while s: - try: - yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3]) - except IndexError: - s += ' ' - yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3]) - return - s = s[4:] - - try: - result = [''.join( - [chr((A - 0x20) << 2 | (((B - 0x20) >> 4) & 0x3)), - chr(((B - 0x20) & 0xf) << 4 | (((C - 0x20) >> 2) & 0xf)), - chr(((C - 0x20) & 0x3) << 6 | ((D - 0x20) & 0x3f)) - ]) for A, B, C, D in quadruplets_gen(s[1:].rstrip())] - except ValueError: - raise Error('Illegal char') - result = ''.join(result) - trailingdata = result[length:] - if trailingdata.strip('\x00'): - raise Error('Trailing garbage') - result = result[:length] - if len(result) < length: - result += ((length - len(result)) * '\x00') - return result - - -def b2a_uu(s): - length = len(s) - if length > 45: - raise Error('At most 45 bytes at once') - - def triples_gen(s): - while s: - try: - yield ord(s[0]), ord(s[1]), ord(s[2]) - except IndexError: - s += '\0\0' - yield ord(s[0]), ord(s[1]), ord(s[2]) - return - s = s[3:] - - result = [''.join( - [chr(0x20 + (( A >> 2 ) & 0x3F)), - chr(0x20 + (((A << 4) | ((B >> 4) & 0xF)) & 0x3F)), - chr(0x20 + (((B << 2) | ((C >> 6) & 0x3)) & 0x3F)), - chr(0x20 + (( C ) & 0x3F))]) - for A, B, C in triples_gen(s)] - return chr(ord(' ') + (length & 077)) + ''.join(result) + '\n' - - -table_a2b_base64 = { - 'A': 0, - 'B': 1, - 'C': 2, - 'D': 3, - 'E': 4, - 'F': 5, - 'G': 6, - 'H': 7, - 'I': 8, - 'J': 9, - 'K': 10, - 'L': 11, - 'M': 12, - 'N': 13, - 'O': 14, - 'P': 15, - 'Q': 16, - 'R': 17, - 'S': 18, - 'T': 19, - 'U': 20, - 'V': 21, - 'W': 22, - 'X': 23, - 'Y': 24, - 'Z': 25, - 'a': 26, - 'b': 27, - 'c': 28, - 'd': 29, - 'e': 30, - 'f': 31, - 'g': 32, - 'h': 33, - 'i': 34, - 'j': 35, - 'k': 36, - 'l': 37, - 'm': 38, - 'n': 39, - 'o': 40, - 'p': 41, - 'q': 42, - 
'r': 43, - 's': 44, - 't': 45, - 'u': 46, - 'v': 47, - 'w': 48, - 'x': 49, - 'y': 50, - 'z': 51, - '0': 52, - '1': 53, - '2': 54, - '3': 55, - '4': 56, - '5': 57, - '6': 58, - '7': 59, - '8': 60, - '9': 61, - '+': 62, - '/': 63, - '=': 0, -} - - -def a2b_base64(s): - if not isinstance(s, (str, unicode)): - raise TypeError("expected string or unicode, got %r" % (s,)) - s = s.rstrip() - # clean out all invalid characters, this also strips the final '=' padding - # check for correct padding - - def next_valid_char(s, pos): - for i in range(pos + 1, len(s)): - c = s[i] - if c < '\x7f': - try: - table_a2b_base64[c] - return c - except KeyError: - pass - return None - - quad_pos = 0 - leftbits = 0 - leftchar = 0 - res = [] - for i, c in enumerate(s): - if c > '\x7f' or c == '\n' or c == '\r' or c == ' ': - continue - if c == '=': - if quad_pos < 2 or (quad_pos == 2 and next_valid_char(s, i) != '='): - continue - else: - leftbits = 0 - break - try: - next_c = table_a2b_base64[c] - except KeyError: - continue - quad_pos = (quad_pos + 1) & 0x03 - leftchar = (leftchar << 6) | next_c - leftbits += 6 - if leftbits >= 8: - leftbits -= 8 - res.append((leftchar >> leftbits & 0xff)) - leftchar &= ((1 << leftbits) - 1) - if leftbits != 0: - raise Error('Incorrect padding') - - return ''.join([chr(i) for i in res]) - -table_b2a_base64 = \ -"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" - -def b2a_base64(s): - length = len(s) - final_length = length % 3 - - def triples_gen(s): - while s: - try: - yield ord(s[0]), ord(s[1]), ord(s[2]) - except IndexError: - s += '\0\0' - yield ord(s[0]), ord(s[1]), ord(s[2]) - return - s = s[3:] - - - a = triples_gen(s[ :length - final_length]) - - result = [''.join( - [table_b2a_base64[( A >> 2 ) & 0x3F], - table_b2a_base64[((A << 4) | ((B >> 4) & 0xF)) & 0x3F], - table_b2a_base64[((B << 2) | ((C >> 6) & 0x3)) & 0x3F], - table_b2a_base64[( C ) & 0x3F]]) - for A, B, C in a] - - final = s[length - final_length:] - if final_length == 
0: - snippet = '' - elif final_length == 1: - a = ord(final[0]) - snippet = table_b2a_base64[(a >> 2 ) & 0x3F] + \ - table_b2a_base64[(a << 4 ) & 0x3F] + '==' - else: - a = ord(final[0]) - b = ord(final[1]) - snippet = table_b2a_base64[(a >> 2) & 0x3F] + \ - table_b2a_base64[((a << 4) | (b >> 4) & 0xF) & 0x3F] + \ - table_b2a_base64[(b << 2) & 0x3F] + '=' - return ''.join(result) + snippet + '\n' - -def a2b_qp(s, header=False): - inp = 0 - odata = [] - while inp < len(s): - if s[inp] == '=': - inp += 1 - if inp >= len(s): - break - # Soft line breaks - if (s[inp] == '\n') or (s[inp] == '\r'): - if s[inp] != '\n': - while inp < len(s) and s[inp] != '\n': - inp += 1 - if inp < len(s): - inp += 1 - elif s[inp] == '=': - # broken case from broken python qp - odata.append('=') - inp += 1 - elif s[inp] in hex_numbers and s[inp + 1] in hex_numbers: - ch = chr(int(s[inp:inp+2], 16)) - inp += 2 - odata.append(ch) - else: - odata.append('=') - elif header and s[inp] == '_': - odata.append(' ') - inp += 1 - else: - odata.append(s[inp]) - inp += 1 - return ''.join(odata) - -def b2a_qp(data, quotetabs=False, istext=True, header=False): - """quotetabs=True means that tab and space characters are always - quoted. - istext=False means that \r and \n are treated as regular characters - header=True encodes space characters with '_' and requires - real '_' characters to be quoted. - """ - MAXLINESIZE = 76 - - # See if this string is using CRLF line ends - lf = data.find('\n') - crlf = lf > 0 and data[lf-1] == '\r' - - inp = 0 - linelen = 0 - odata = [] - while inp < len(data): - c = data[inp] - if (c > '~' or - c == '=' or - (header and c == '_') or - (c == '.' 
and linelen == 0 and (inp+1 == len(data) or - data[inp+1] == '\n' or - data[inp+1] == '\r')) or - (not istext and (c == '\r' or c == '\n')) or - ((c == '\t' or c == ' ') and (inp + 1 == len(data))) or - (c <= ' ' and c != '\r' and c != '\n' and - (quotetabs or (not quotetabs and (c != '\t' and c != ' '))))): - linelen += 3 - if linelen >= MAXLINESIZE: - odata.append('=') - if crlf: odata.append('\r') - odata.append('\n') - linelen = 3 - odata.append('=' + two_hex_digits(ord(c))) - inp += 1 - else: - if (istext and - (c == '\n' or (inp+1 < len(data) and c == '\r' and - data[inp+1] == '\n'))): - linelen = 0 - # Protect against whitespace on end of line - if (len(odata) > 0 and - (odata[-1] == ' ' or odata[-1] == '\t')): - ch = ord(odata[-1]) - odata[-1] = '=' - odata.append(two_hex_digits(ch)) - - if crlf: odata.append('\r') - odata.append('\n') - if c == '\r': - inp += 2 - else: - inp += 1 - else: - if (inp + 1 < len(data) and - data[inp+1] != '\n' and - (linelen + 1) >= MAXLINESIZE): - odata.append('=') - if crlf: odata.append('\r') - odata.append('\n') - linelen = 0 - - linelen += 1 - if header and c == ' ': - c = '_' - odata.append(c) - inp += 1 - return ''.join(odata) - -hex_numbers = '0123456789ABCDEF' -def hex(n): - if n == 0: - return '0' - - if n < 0: - n = -n - sign = '-' - else: - sign = '' - arr = [] - - def hex_gen(n): - """ Yield a nibble at a time. 
""" - while n: - yield n % 0x10 - n = n / 0x10 - - for nibble in hex_gen(n): - arr = [hex_numbers[nibble]] + arr - return sign + ''.join(arr) - -def two_hex_digits(n): - return hex_numbers[n / 0x10] + hex_numbers[n % 0x10] - - -def strhex_to_int(s): - i = 0 - for c in s: - i = i * 0x10 + hex_numbers.index(c) - return i - -hqx_encoding = '!"#$%&\'()*+,-012345689 at ABCDEFGHIJKLMNPQRSTUVXYZ[`abcdefhijklmpqr' - -DONE = 0x7f -SKIP = 0x7e -FAIL = 0x7d - -table_a2b_hqx = [ - #^@ ^A ^B ^C ^D ^E ^F ^G - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - #\b \t \n ^K ^L \r ^N ^O - FAIL, FAIL, SKIP, FAIL, FAIL, SKIP, FAIL, FAIL, - #^P ^Q ^R ^S ^T ^U ^V ^W - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - #^X ^Y ^Z ^[ ^\ ^] ^^ ^_ - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - # ! " # $ % & ' - FAIL, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, - #( ) * + , - . / - 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, FAIL, FAIL, - #0 1 2 3 4 5 6 7 - 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, FAIL, - #8 9 : ; < = > ? - 0x14, 0x15, DONE, FAIL, FAIL, FAIL, FAIL, FAIL, - #@ A B C D E F G - 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, - #H I J K L M N O - 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, FAIL, - #P Q R S T U V W - 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, FAIL, - #X Y Z [ \ ] ^ _ - 0x2C, 0x2D, 0x2E, 0x2F, FAIL, FAIL, FAIL, FAIL, - #` a b c d e f g - 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, FAIL, - #h i j k l m n o - 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, FAIL, FAIL, - #p q r s t u v w - 0x3D, 0x3E, 0x3F, FAIL, FAIL, FAIL, FAIL, FAIL, - #x y z { | } ~ ^? 
- FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, -] - -def a2b_hqx(s): - result = [] - - def quadruples_gen(s): - t = [] - for c in s: - res = table_a2b_hqx[ord(c)] - if res == SKIP: - continue - elif res == FAIL: - raise Error('Illegal character') - elif res == DONE: - yield t - raise Done - else: - t.append(res) - if len(t) == 4: - yield t - t = [] - yield t - - done = 0 - try: - for snippet in quadruples_gen(s): - length = len(snippet) - if length == 4: - result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4))) - result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2))) - result.append(chr(((snippet[2] & 0x03) << 6) | (snippet[3]))) - elif length == 3: - result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4))) - result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2))) - elif length == 2: - result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4))) - except Done: - done = 1 - except Error: - raise - return (''.join(result), done) - -def b2a_hqx(s): - result =[] - - def triples_gen(s): - while s: - try: - yield ord(s[0]), ord(s[1]), ord(s[2]) - except IndexError: - yield tuple([ord(c) for c in s]) - s = s[3:] - - for snippet in 
triples_gen(s): - length = len(snippet) - if length == 3: - result.append( - hqx_encoding[(snippet[0] & 0xfc) >> 2]) - result.append(hqx_encoding[ - ((snippet[0] & 0x03) << 4) | ((snippet[1] & 0xf0) >> 4)]) - result.append(hqx_encoding[ - (snippet[1] & 0x0f) << 2 | ((snippet[2] & 0xc0) >> 6)]) - result.append(hqx_encoding[snippet[2] & 0x3f]) - elif length == 2: - result.append( - hqx_encoding[(snippet[0] & 0xfc) >> 2]) - result.append(hqx_encoding[ - ((snippet[0] & 0x03) << 4) | ((snippet[1] & 0xf0) >> 4)]) - result.append(hqx_encoding[ - (snippet[1] & 0x0f) << 2]) - elif length == 1: - result.append( - hqx_encoding[(snippet[0] & 0xfc) >> 2]) - result.append(hqx_encoding[ - ((snippet[0] & 0x03) << 4)]) - return ''.join(result) - -crctab_hqx = [ - 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, - 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, - 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, - 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, - 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, - 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, - 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, - 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, - 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, - 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, - 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12, - 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, - 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, - 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, - 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, - 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, - 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, - 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, - 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 
0xe37f, 0xf35e, - 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, - 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, - 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, - 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, - 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, - 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, - 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, - 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, - 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, - 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, - 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, - 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, - 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0, -] - -def crc_hqx(s, crc): - for c in s: - crc = ((crc << 8) & 0xff00) ^ crctab_hqx[((crc >> 8) & 0xff) ^ ord(c)] - - return crc - -def rlecode_hqx(s): - """ - Run length encoding for binhex4. - The CPython implementation does not do run length encoding - of \x90 characters. This implementation does. - """ - if not s: - return '' - result = [] - prev = s[0] - count = 1 - # Add a dummy character to get the loop to go one extra round. - # The dummy must be different from the last character of s. - # In the same step we remove the first character, which has - # already been stored in prev. - if s[-1] == '!': - s = s[1:] + '?' - else: - s = s[1:] + '!' 
- - for c in s: - if c == prev and count < 255: - count += 1 - else: - if count == 1: - if prev != '\x90': - result.append(prev) - else: - result.extend(['\x90', '\x00']) - elif count < 4: - if prev != '\x90': - result.extend([prev] * count) - else: - result.extend(['\x90', '\x00'] * count) - else: - if prev != '\x90': - result.extend([prev, '\x90', chr(count)]) - else: - result.extend(['\x90', '\x00', '\x90', chr(count)]) - count = 1 - prev = c - - return ''.join(result) - -def rledecode_hqx(s): - s = s.split('\x90') - result = [s[0]] - prev = s[0] - for snippet in s[1:]: - count = ord(snippet[0]) - if count > 0: - result.append(prev[-1] * (count-1)) - prev = snippet - else: - result. append('\x90') - prev = '\x90' - result.append(snippet[1:]) - - return ''.join(result) - -crc_32_tab = [ - 0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L, - 0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L, - 0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L, - 0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL, - 0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L, - 0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L, - 0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L, - 0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL, - 0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L, - 0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL, - 0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L, - 0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L, - 0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L, - 0x01db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x06b6b51fL, - 0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0x0f00f934L, 0x9609a88eL, - 0xe10e9818L, 0x7f6a0dbbL, 0x086d3d2dL, 0x91646c97L, 0xe6635c01L, - 0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL, - 0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 
0x12b7e950L, - 0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L, - 0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L, - 0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL, - 0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L, - 0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L, - 0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL, - 0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L, - 0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L, - 0x03b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x04db2615L, - 0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0x0d6d6a3eL, 0x7a6a5aa8L, - 0xe40ecf0bL, 0x9309ff9dL, 0x0a00ae27L, 0x7d079eb1L, 0xf00f9344L, - 0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL, - 0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL, - 0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L, - 0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L, - 0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL, - 0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL, - 0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L, - 0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL, - 0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L, - 0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL, - 0x026d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x05005713L, - 0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0x0cb61b38L, 0x92d28e9bL, - 0xe5d5be0dL, 0x7cdcefb7L, 0x0bdbdf21L, 0x86d3d2d4L, 0xf1d4e242L, - 0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L, - 0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL, - 0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L, - 0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L, - 0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L, - 0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 
0x47b2cf7fL, 0x30b5ffe9L, - 0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L, - 0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L, - 0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL, - 0x2d02ef8dL -] - -def crc32(s, crc=0): - result = 0 - crc = ~long(crc) & 0xffffffffL - for c in s: - crc = crc_32_tab[(crc ^ long(ord(c))) & 0xffL] ^ (crc >> 8) - #/* Note: (crc >> 8) MUST zero fill on left - - result = crc ^ 0xffffffffL - - if result > 2**31: - result = ((result + 2**31) % 2**32) - 2**31 - - return result - -def b2a_hex(s): - result = [] - for char in s: - c = (ord(char) >> 4) & 0xf - if c > 9: - c = c + ord('a') - 10 - else: - c = c + ord('0') - result.append(chr(c)) - c = ord(char) & 0xf - if c > 9: - c = c + ord('a') - 10 - else: - c = c + ord('0') - result.append(chr(c)) - return ''.join(result) - -hexlify = b2a_hex - -table_hex = [ - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,-1,-1, -1,-1,-1,-1, - -1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1 -] - - -def a2b_hex(t): - result = [] - - def pairs_gen(s): - while s: - try: - yield table_hex[ord(s[0])], table_hex[ord(s[1])] - except IndexError: - if len(s): - raise TypeError('Odd-length string') - return - s = s[2:] - - for a, b in pairs_gen(t): - if a < 0 or b < 0: - raise TypeError('Non-hexadecimal digit found') - result.append(chr((a << 4) + b)) - return ''.join(result) - - -unhexlify = a2b_hex diff --git a/lib_pypy/numpypy/core/numeric.py b/lib_pypy/numpypy/core/numeric.py --- a/lib_pypy/numpypy/core/numeric.py +++ b/lib_pypy/numpypy/core/numeric.py @@ -6,7 +6,7 @@ import _numpypy as multiarray # ARGH from numpypy.core.arrayprint import array2string - +newaxis 
= None def asanyarray(a, dtype=None, order=None, maskna=None, ownmaskna=False): """ @@ -306,6 +306,125 @@ else: return multiarray.set_string_function(f, repr) +def array_equal(a1, a2): + """ + True if two arrays have the same shape and elements, False otherwise. + + Parameters + ---------- + a1, a2 : array_like + Input arrays. + + Returns + ------- + b : bool + Returns True if the arrays are equal. + + See Also + -------- + allclose: Returns True if two arrays are element-wise equal within a + tolerance. + array_equiv: Returns True if input arrays are shape consistent and all + elements equal. + + Examples + -------- + >>> np.array_equal([1, 2], [1, 2]) + True + >>> np.array_equal(np.array([1, 2]), np.array([1, 2])) + True + >>> np.array_equal([1, 2], [1, 2, 3]) + False + >>> np.array_equal([1, 2], [1, 4]) + False + + """ + try: + a1, a2 = asarray(a1), asarray(a2) + except: + return False + if a1.shape != a2.shape: + return False + return bool((a1 == a2).all()) + +def asarray(a, dtype=None, order=None, maskna=None, ownmaskna=False): + """ + Convert the input to an array. + + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to an array. This + includes lists, lists of tuples, tuples, tuples of tuples, tuples + of lists and ndarrays. + dtype : data-type, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F'}, optional + Whether to use row-major ('C') or column-major ('F' for FORTRAN) + memory representation. Defaults to 'C'. + maskna : bool or None, optional + If this is set to True, it forces the array to have an NA mask. + If this is set to False, it forces the array to not have an NA + mask. + ownmaskna : bool, optional + If this is set to True, forces the array to have a mask which + it owns. + + Returns + ------- + out : ndarray + Array interpretation of `a`. No copy is performed if the input + is already an ndarray. If `a` is a subclass of ndarray, a base + class ndarray is returned. 
+ + See Also + -------- + asanyarray : Similar function which passes through subclasses. + ascontiguousarray : Convert input to a contiguous array. + asfarray : Convert input to a floating point ndarray. + asfortranarray : Convert input to an ndarray with column-major + memory order. + asarray_chkfinite : Similar function which checks input for NaNs and Infs. + fromiter : Create an array from an iterator. + fromfunction : Construct an array by executing a function on grid + positions. + + Examples + -------- + Convert a list into an array: + + >>> a = [1, 2] + >>> np.asarray(a) + array([1, 2]) + + Existing arrays are not copied: + + >>> a = np.array([1, 2]) + >>> np.asarray(a) is a + True + + If `dtype` is set, array is copied only if dtype does not match: + + >>> a = np.array([1, 2], dtype=np.float32) + >>> np.asarray(a, dtype=np.float32) is a + True + >>> np.asarray(a, dtype=np.float64) is a + False + + Contrary to `asanyarray`, ndarray subclasses are not passed through: + + >>> issubclass(np.matrix, np.ndarray) + True + >>> a = np.matrix([[1, 2]]) + >>> np.asarray(a) is a + False + >>> np.asanyarray(a) is a + True + + """ + return array(a, dtype, copy=False, order=order, + maskna=maskna, ownmaskna=ownmaskna) + set_string_function(array_str, 0) set_string_function(array_repr, 1) @@ -319,4 +438,4 @@ False_ = bool_(False) True_ = bool_(True) e = math.e -pi = math.pi \ No newline at end of file +pi = math.pi diff --git a/lib_pypy/pypy_test/test_binascii.py b/lib_pypy/pypy_test/test_binascii.py deleted file mode 100644 --- a/lib_pypy/pypy_test/test_binascii.py +++ /dev/null @@ -1,168 +0,0 @@ -from __future__ import absolute_import -import py -from lib_pypy import binascii - -# Create binary test data -data = "The quick brown fox jumps over the lazy dog.\r\n" -# Be slow so we don't depend on other modules -data += "".join(map(chr, xrange(256))) -data += "\r\nHello world.\n" - -def test_exceptions(): - # Check module exceptions - assert issubclass(binascii.Error, 
Exception) - assert issubclass(binascii.Incomplete, Exception) - -def test_functions(): - # Check presence of all functions - funcs = [] - for suffix in "base64", "hqx", "uu", "hex": - prefixes = ["a2b_", "b2a_"] - if suffix == "hqx": - prefixes.extend(["crc_", "rlecode_", "rledecode_"]) - for prefix in prefixes: - name = prefix + suffix - assert callable(getattr(binascii, name)) - py.test.raises(TypeError, getattr(binascii, name)) - for name in ("hexlify", "unhexlify"): - assert callable(getattr(binascii, name)) - py.test.raises(TypeError, getattr(binascii, name)) - -def test_base64valid(): - # Test base64 with valid data - MAX_BASE64 = 57 - lines = [] - for i in range(0, len(data), MAX_BASE64): - b = data[i:i+MAX_BASE64] - a = binascii.b2a_base64(b) - lines.append(a) - res = "" - for line in lines: - b = binascii.a2b_base64(line) - res = res + b - assert res == data - -def test_base64invalid(): - # Test base64 with random invalid characters sprinkled throughout - # (This requires a new version of binascii.) - MAX_BASE64 = 57 - lines = [] - for i in range(0, len(data), MAX_BASE64): - b = data[i:i+MAX_BASE64] - a = binascii.b2a_base64(b) - lines.append(a) - - fillers = "" - valid = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+/" - for i in xrange(256): - c = chr(i) - if c not in valid: - fillers += c - def addnoise(line): - noise = fillers - ratio = len(line) // len(noise) - res = "" - while line and noise: - if len(line) // len(noise) > ratio: - c, line = line[0], line[1:] - else: - c, noise = noise[0], noise[1:] - res += c - return res + noise + line - res = "" - for line in map(addnoise, lines): - b = binascii.a2b_base64(line) - res += b - assert res == data - - # Test base64 with just invalid characters, which should return - # empty strings. TBD: shouldn't it raise an exception instead ? 
- assert binascii.a2b_base64(fillers) == '' - -def test_uu(): - MAX_UU = 45 - lines = [] - for i in range(0, len(data), MAX_UU): - b = data[i:i+MAX_UU] - a = binascii.b2a_uu(b) - lines.append(a) - res = "" - for line in lines: - b = binascii.a2b_uu(line) - res += b - assert res == data - - assert binascii.a2b_uu("\x7f") == "\x00"*31 - assert binascii.a2b_uu("\x80") == "\x00"*32 - assert binascii.a2b_uu("\xff") == "\x00"*31 - py.test.raises(binascii.Error, binascii.a2b_uu, "\xff\x00") - py.test.raises(binascii.Error, binascii.a2b_uu, "!!!!") - - py.test.raises(binascii.Error, binascii.b2a_uu, 46*"!") - -def test_crc32(): - crc = binascii.crc32("Test the CRC-32 of") - crc = binascii.crc32(" this string.", crc) - assert crc == 1571220330 - - crc = binascii.crc32('frotz\n', 0) - assert crc == -372923920 - - py.test.raises(TypeError, binascii.crc32) - -def test_hex(): - # test hexlification - s = '{s\005\000\000\000worldi\002\000\000\000s\005\000\000\000helloi\001\000\000\0000' - t = binascii.b2a_hex(s) - u = binascii.a2b_hex(t) - assert s == u - py.test.raises(TypeError, binascii.a2b_hex, t[:-1]) - py.test.raises(TypeError, binascii.a2b_hex, t[:-1] + 'q') - - # Verify the treatment of Unicode strings - assert binascii.hexlify(unicode('a', 'ascii')) == '61' - -def test_qp(): - # A test for SF bug 534347 (segfaults without the proper fix) - try: - binascii.a2b_qp("", **{1:1}) - except TypeError: - pass - else: - fail("binascii.a2b_qp(**{1:1}) didn't raise TypeError") - assert binascii.a2b_qp("= ") == "= " - assert binascii.a2b_qp("==") == "=" - assert binascii.a2b_qp("=AX") == "=AX" - py.test.raises(TypeError, binascii.b2a_qp, foo="bar") - assert binascii.a2b_qp("=00\r\n=00") == "\x00\r\n\x00" - assert binascii.b2a_qp("\xff\r\n\xff\n\xff") == "=FF\r\n=FF\r\n=FF" - target = "0"*75+"=\r\n=FF\r\n=FF\r\n=FF" - assert binascii.b2a_qp("0"*75+"\xff\r\n\xff\r\n\xff") == target - -def test_empty_string(): - # A test for SF bug #1022953. Make sure SystemError is not raised. 
- for n in ['b2a_qp', 'a2b_hex', 'b2a_base64', 'a2b_uu', 'a2b_qp', - 'b2a_hex', 'unhexlify', 'hexlify', 'crc32', 'b2a_hqx', - 'a2b_hqx', 'a2b_base64', 'rlecode_hqx', 'b2a_uu', - 'rledecode_hqx']: - f = getattr(binascii, n) - f('') - binascii.crc_hqx('', 0) - -def test_qp_bug_case(): - assert binascii.b2a_qp('y'*77, False, False) == 'y'*75 + '=\nyy' - assert binascii.b2a_qp(' '*77, False, False) == ' '*75 + '=\n =20' - assert binascii.b2a_qp('y'*76, False, False) == 'y'*76 - assert binascii.b2a_qp(' '*76, False, False) == ' '*75 + '=\n=20' - -def test_wrong_padding(): - s = 'CSixpLDtKSC/7Liuvsax4iC6uLmwMcijIKHaILzSwd/H0SC8+LCjwLsgv7W/+Mj3IQ' - py.test.raises(binascii.Error, binascii.a2b_base64, s) - -def test_crap_after_padding(): - s = 'xxx=axxxx' - assert binascii.a2b_base64(s) == '\xc7\x1c' - -def test_wrong_args(): - # this should grow as a way longer list - py.test.raises(TypeError, binascii.a2b_base64, 42) diff --git a/lib_pypy/pypy_test/test_locale.py b/lib_pypy/pypy_test/test_locale.py deleted file mode 100644 --- a/lib_pypy/pypy_test/test_locale.py +++ /dev/null @@ -1,79 +0,0 @@ -from __future__ import absolute_import -import py -import sys - -from lib_pypy.ctypes_config_cache import rebuild -rebuild.rebuild_one('locale.ctc.py') - -from lib_pypy import _locale - - -def setup_module(mod): - if sys.platform == 'darwin': - py.test.skip("Locale support on MacOSX is minimal and cannot be tested") - -class TestLocale: - def setup_class(cls): - cls.oldlocale = _locale.setlocale(_locale.LC_NUMERIC) - if sys.platform.startswith("win"): - cls.tloc = "en" - elif sys.platform.startswith("freebsd"): - cls.tloc = "en_US.US-ASCII" - else: - cls.tloc = "en_US.UTF8" - try: - _locale.setlocale(_locale.LC_NUMERIC, cls.tloc) - except _locale.Error: - py.test.skip("test locale %s not supported" % cls.tloc) - - def teardown_class(cls): - _locale.setlocale(_locale.LC_NUMERIC, cls.oldlocale) - - def test_format(self): - py.test.skip("XXX fix or kill me") - - def 
testformat(formatstr, value, grouping = 0, output=None): - if output: - print "%s %% %s =? %s ..." %\ - (repr(formatstr), repr(value), repr(output)), - else: - print "%s %% %s works? ..." % (repr(formatstr), repr(value)), - result = locale.format(formatstr, value, grouping = grouping) - assert result == output - - testformat("%f", 1024, grouping=1, output='1,024.000000') - testformat("%f", 102, grouping=1, output='102.000000') - testformat("%f", -42, grouping=1, output='-42.000000') - testformat("%+f", -42, grouping=1, output='-42.000000') - testformat("%20.f", -42, grouping=1, output=' -42') - testformat("%+10.f", -4200, grouping=1, output=' -4,200') - testformat("%-10.f", 4200, grouping=1, output='4,200 ') - - def test_getpreferredencoding(self): - py.test.skip("XXX fix or kill me") - # Invoke getpreferredencoding to make sure it does not cause exceptions - _locale.getpreferredencoding() - - # Test BSD Rune locale's bug for isctype functions. - def test_bsd_bug(self): - def teststrop(s, method, output): - print "%s.%s() =? %s ..." 
% (repr(s), method, repr(output)), - result = getattr(s, method)() - assert result == output - - oldlocale = _locale.setlocale(_locale.LC_CTYPE) - _locale.setlocale(_locale.LC_CTYPE, self.tloc) - try: - teststrop('\x20', 'isspace', True) - teststrop('\xa0', 'isspace', False) - teststrop('\xa1', 'isspace', False) - teststrop('\xc0', 'isalpha', False) - teststrop('\xc0', 'isalnum', False) - teststrop('\xc0', 'isupper', False) - teststrop('\xc0', 'islower', False) - teststrop('\xec\xa0\xbc', 'split', ['\xec\xa0\xbc']) - teststrop('\xed\x95\xa0', 'strip', '\xed\x95\xa0') - teststrop('\xcc\x85', 'lower', '\xcc\x85') - teststrop('\xed\x95\xa0', 'upper', '\xed\x95\xa0') - finally: - _locale.setlocale(_locale.LC_CTYPE, oldlocale) diff --git a/lib_pypy/pypy_test/test_site_extra.py b/lib_pypy/pypy_test/test_site_extra.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pypy_test/test_site_extra.py @@ -0,0 +1,13 @@ +import sys, os + + +def test_preimported_modules(): + lst = ['__builtin__', '_codecs', '_warnings', 'codecs', 'encodings', + 'exceptions', 'signal', 'sys', 'zipimport'] + g = os.popen("'%s' -c 'import sys; print sorted(sys.modules)'" % + (sys.executable,)) + real_data = g.read() + g.close() + for name in lst: + quoted_name = repr(name) + assert quoted_name in real_data diff --git a/lib_pypy/pypy_test/test_struct_extra.py b/lib_pypy/pypy_test/test_struct_extra.py deleted file mode 100644 --- a/lib_pypy/pypy_test/test_struct_extra.py +++ /dev/null @@ -1,25 +0,0 @@ -from __future__ import absolute_import -from lib_pypy import struct - -def test_simple(): - morezeros = '\x00' * (struct.calcsize('l')-4) - assert struct.pack(': big-endian, std. 
size & alignment - !: same as > - -The remaining chars indicate types of args and must match exactly; -these can be preceded by a decimal repeat count: - x: pad byte (no data); - c:char; - b:signed byte; - B:unsigned byte; - h:short; - H:unsigned short; - i:int; - I:unsigned int; - l:long; - L:unsigned long; - f:float; - d:double. -Special cases (preceding decimal count indicates length): - s:string (array of char); p: pascal string (with count byte). -Special case (only available in native format): - P:an integer type that is wide enough to hold a pointer. -Special case (not in native mode unless 'long long' in platform C): - q:long long; - Q:unsigned long long -Whitespace between formats is ignored. - -The variable struct.error is an exception raised on errors.""" - -import math, sys - -# TODO: XXX Find a way to get information on native sizes and alignments -class StructError(Exception): - pass -error = StructError -def unpack_int(data,index,size,le): - bytes = [ord(b) for b in data[index:index+size]] - if le == 'little': - bytes.reverse() - number = 0L - for b in bytes: - number = number << 8 | b - return int(number) - -def unpack_signed_int(data,index,size,le): - number = unpack_int(data,index,size,le) - max = 2**(size*8) - if number > 2**(size*8 - 1) - 1: - number = int(-1*(max - number)) - return number - -INFINITY = 1e200 * 1e200 -NAN = INFINITY / INFINITY - -def unpack_char(data,index,size,le): - return data[index:index+size] - -def pack_int(number,size,le): - x=number - res=[] - for i in range(size): - res.append(chr(x&0xff)) - x >>= 8 - if le == 'big': - res.reverse() - return ''.join(res) - -def pack_signed_int(number,size,le): - if not isinstance(number, (int,long)): - raise StructError,"argument for i,I,l,L,q,Q,h,H must be integer" - if number > 2**(8*size-1)-1 or number < -1*2**(8*size-1): - raise OverflowError,"Number:%i too large to convert" % number - return pack_int(number,size,le) - -def pack_unsigned_int(number,size,le): - if not 
isinstance(number, (int,long)): - raise StructError,"argument for i,I,l,L,q,Q,h,H must be integer" - if number < 0: - raise TypeError,"can't convert negative long to unsigned" - if number > 2**(8*size)-1: - raise OverflowError,"Number:%i too large to convert" % number - return pack_int(number,size,le) - -def pack_char(char,size,le): - return str(char) - -def isinf(x): - return x != 0.0 and x / 2 == x -def isnan(v): - return v != v*1.0 or (v == 1.0 and v == 2.0) - -def pack_float(x, size, le): - unsigned = float_pack(x, size) - result = [] - for i in range(8): - result.append(chr((unsigned >> (i * 8)) & 0xFF)) - if le == "big": - result.reverse() - return ''.join(result) - -def unpack_float(data, index, size, le): - binary = [data[i] for i in range(index, index + 8)] - if le == "big": - binary.reverse() - unsigned = 0 - for i in range(8): - unsigned |= ord(binary[i]) << (i * 8) - return float_unpack(unsigned, size, le) - -def round_to_nearest(x): - """Python 3 style round: round a float x to the nearest int, but - unlike the builtin Python 2.x round function: - - - return an int, not a float - - do round-half-to-even, not round-half-away-from-zero. - - We assume that x is finite and nonnegative; except wrong results - if you use this for negative x. 
- - """ - int_part = int(x) - frac_part = x - int_part - if frac_part > 0.5 or frac_part == 0.5 and int_part & 1 == 1: - int_part += 1 - return int_part - -def float_unpack(Q, size, le): - """Convert a 32-bit or 64-bit integer created - by float_pack into a Python float.""" - - if size == 8: - MIN_EXP = -1021 # = sys.float_info.min_exp - MAX_EXP = 1024 # = sys.float_info.max_exp - MANT_DIG = 53 # = sys.float_info.mant_dig - BITS = 64 - elif size == 4: - MIN_EXP = -125 # C's FLT_MIN_EXP - MAX_EXP = 128 # FLT_MAX_EXP - MANT_DIG = 24 # FLT_MANT_DIG - BITS = 32 - else: - raise ValueError("invalid size value") - - if Q >> BITS: - raise ValueError("input out of range") - - # extract pieces - sign = Q >> BITS - 1 - exp = (Q & ((1 << BITS - 1) - (1 << MANT_DIG - 1))) >> MANT_DIG - 1 - mant = Q & ((1 << MANT_DIG - 1) - 1) - - if exp == MAX_EXP - MIN_EXP + 2: - # nan or infinity - result = float('nan') if mant else float('inf') - elif exp == 0: - # subnormal or zero - result = math.ldexp(float(mant), MIN_EXP - MANT_DIG) - else: - # normal - mant += 1 << MANT_DIG - 1 - result = math.ldexp(float(mant), exp + MIN_EXP - MANT_DIG - 1) - return -result if sign else result - - -def float_pack(x, size): - """Convert a Python float x into a 64-bit unsigned integer - with the same byte representation.""" - - if size == 8: - MIN_EXP = -1021 # = sys.float_info.min_exp - MAX_EXP = 1024 # = sys.float_info.max_exp - MANT_DIG = 53 # = sys.float_info.mant_dig - BITS = 64 - elif size == 4: - MIN_EXP = -125 # C's FLT_MIN_EXP - MAX_EXP = 128 # FLT_MAX_EXP - MANT_DIG = 24 # FLT_MANT_DIG - BITS = 32 - else: - raise ValueError("invalid size value") - - sign = math.copysign(1.0, x) < 0.0 - if math.isinf(x): - mant = 0 - exp = MAX_EXP - MIN_EXP + 2 - elif math.isnan(x): - mant = 1 << (MANT_DIG-2) # other values possible - exp = MAX_EXP - MIN_EXP + 2 - elif x == 0.0: - mant = 0 - exp = 0 - else: - m, e = math.frexp(abs(x)) # abs(x) == m * 2**e - exp = e - (MIN_EXP - 1) - if exp > 0: - # Normal case. 
- mant = round_to_nearest(m * (1 << MANT_DIG)) - mant -= 1 << MANT_DIG - 1 - else: - # Subnormal case. - if exp + MANT_DIG - 1 >= 0: - mant = round_to_nearest(m * (1 << exp + MANT_DIG - 1)) - else: - mant = 0 - exp = 0 - - # Special case: rounding produced a MANT_DIG-bit mantissa. - assert 0 <= mant <= 1 << MANT_DIG - 1 - if mant == 1 << MANT_DIG - 1: - mant = 0 - exp += 1 - - # Raise on overflow (in some circumstances, may want to return - # infinity instead). - if exp >= MAX_EXP - MIN_EXP + 2: - raise OverflowError("float too large to pack in this format") - - # check constraints - assert 0 <= mant < 1 << MANT_DIG - 1 - assert 0 <= exp <= MAX_EXP - MIN_EXP + 2 - assert 0 <= sign <= 1 - return ((sign << BITS - 1) | (exp << MANT_DIG - 1)) | mant - - -big_endian_format = { - 'x':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None}, - 'b':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'B':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'c':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_char, 'unpack' : unpack_char}, - 's':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None}, - 'p':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None}, - 'h':{ 'size' : 2, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'H':{ 'size' : 2, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'i':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'I':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'l':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'L':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'q':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'Q':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'f':{ 
'size' : 4, 'alignment' : 0, 'pack' : pack_float, 'unpack' : unpack_float}, - 'd':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_float, 'unpack' : unpack_float}, - } -default = big_endian_format -formatmode={ '<' : (default, 'little'), - '>' : (default, 'big'), - '!' : (default, 'big'), - '=' : (default, sys.byteorder), - '@' : (default, sys.byteorder) - } - -def getmode(fmt): - try: - formatdef,endianness = formatmode[fmt[0]] - index = 1 - except KeyError: - formatdef,endianness = formatmode['@'] - index = 0 - return formatdef,endianness,index -def getNum(fmt,i): - num=None - cur = fmt[i] - while ('0'<= cur ) and ( cur <= '9'): - if num == None: - num = int(cur) - else: - num = 10*num + int(cur) - i += 1 - cur = fmt[i] - return num,i - -def calcsize(fmt): - """calcsize(fmt) -> int - Return size of C struct described by format string fmt. - See struct.__doc__ for more on format strings.""" - - formatdef,endianness,i = getmode(fmt) - num = 0 - result = 0 - while i string - Return string containing values v1, v2, ... packed according to fmt. - See struct.__doc__ for more on format strings.""" - formatdef,endianness,i = getmode(fmt) - args = list(args) - n_args = len(args) - result = [] - while i 0: - result += [chr(len(args[0])) + args[0][:num-1] + '\0'*padding] - else: - if num<255: - result += [chr(num-1) + args[0][:num-1]] - else: - result += [chr(255) + args[0][:num-1]] - args.pop(0) - else: - raise StructError,"arg for string format not a string" - - else: - if len(args) < num: - raise StructError,"insufficient arguments to pack" - for var in args[:num]: - result += [format['pack'](var,format['size'],endianness)] - args=args[num:] - num = None - i += 1 - if len(args) != 0: - raise StructError,"too many arguments for pack format" - return ''.join(result) - -def unpack(fmt,data): - """unpack(fmt, string) -> (v1, v2, ...) - Unpack the string, containing packed C structure data, according - to fmt. Requires len(string)==calcsize(fmt). 
- See struct.__doc__ for more on format strings.""" - formatdef,endianness,i = getmode(fmt) - j = 0 - num = 0 - result = [] - length= calcsize(fmt) - if length != len (data): - raise StructError,"unpack str size does not match format" - while i= num: - n = num-1 - result.append(data[j+1:j+n+1]) - j += num - else: - for n in range(num): - result += [format['unpack'](data,j,format['size'],endianness)] - j += format['size'] - - return tuple(result) - -def pack_into(fmt, buf, offset, *args): - data = pack(fmt, *args) - buffer(buf)[offset:offset+len(data)] = data - -def unpack_from(fmt, buf, offset=0): - size = calcsize(fmt) - data = buffer(buf)[offset:offset+size] - if len(data) != size: - raise error("unpack_from requires a buffer of at least %d bytes" - % (size,)) - return unpack(fmt, data) diff --git a/pypy/__init__.py b/pypy/__init__.py --- a/pypy/__init__.py +++ b/pypy/__init__.py @@ -1,1 +1,16 @@ # Empty + +# XXX Should be empty again, soon. +# XXX hack for win64: +# This patch must stay here until the END OF STAGE 1 +# When all tests work, this branch will be merged +# and the branch stage 2 is started, where we remove this patch. +import sys +if hasattr(sys, "maxsize"): + if sys.maxint != sys.maxsize: + sys.maxint = sys.maxsize + import warnings + warnings.warn("""\n +---> This win64 port is now in stage 1: sys.maxint was modified. +---> When pypy/__init__.py becomes empty again, we have reached stage 2. +""") diff --git a/pypy/annotation/builtin.py b/pypy/annotation/builtin.py --- a/pypy/annotation/builtin.py +++ b/pypy/annotation/builtin.py @@ -37,7 +37,11 @@ try: realresult = func(*args) except (ValueError, OverflowError): - return s_ImpossibleValue # no possible answer for this precise input + # no possible answer for this precise input. Be conservative + # and keep the computation non-constant. Example: + # unichr(constant-that-doesn't-fit-16-bits) on platforms where + # the underlying Python has sys.maxunicode == 0xffff. 
+ return s_result s_realresult = immutablevalue(realresult) if not s_result.contains(s_realresult): raise Exception("%s%r returned %r, which is not contained in %s" % ( @@ -163,7 +167,7 @@ r.const = False return r - assert not issubclass(typ, (int,long)) or typ in (bool, int), ( + assert not issubclass(typ, (int, long)) or typ in (bool, int, long), ( "for integers only isinstance(.,int|r_uint) are supported") if s_obj.is_constant(): @@ -297,7 +301,7 @@ def robjmodel_instantiate(s_clspbc): assert isinstance(s_clspbc, SomePBC) clsdef = None - more_than_one = len(s_clspbc.descriptions) + more_than_one = len(s_clspbc.descriptions) > 1 for desc in s_clspbc.descriptions: cdef = desc.getuniqueclassdef() if more_than_one: diff --git a/pypy/annotation/classdef.py b/pypy/annotation/classdef.py --- a/pypy/annotation/classdef.py +++ b/pypy/annotation/classdef.py @@ -134,13 +134,19 @@ if self.name not in homedef.classdesc.all_enforced_attrs: self.attr_allowed = False if not self.readonly: - raise NoSuchAttrError(homedef, self.name) + raise NoSuchAttrError( + "setting forbidden attribute %r on %r" % ( + self.name, homedef)) def modified(self, classdef='?'): self.readonly = False if not self.attr_allowed: - raise NoSuchAttrError(classdef, self.name) - + raise NoSuchAttrError( + "Attribute %r on %r should be read-only.\n" % (self.name, + classdef) + + "This error can be caused by another 'getattr' that promoted\n" + "the attribute here; the list of read locations is:\n" + + '\n'.join([str(loc[0]) for loc in self.read_locations])) class ClassDef(object): "Wraps a user class." 
diff --git a/pypy/annotation/description.py b/pypy/annotation/description.py --- a/pypy/annotation/description.py +++ b/pypy/annotation/description.py @@ -398,7 +398,6 @@ cls = pyobj base = object baselist = list(cls.__bases__) - baselist.reverse() # special case: skip BaseException in Python 2.5, and pretend # that all exceptions ultimately inherit from Exception instead @@ -408,17 +407,27 @@ elif baselist == [py.builtin.BaseException]: baselist = [Exception] + mixins_before = [] + mixins_after = [] for b1 in baselist: if b1 is object: continue if b1.__dict__.get('_mixin_', False): - self.add_mixin(b1) + if base is object: + mixins_before.append(b1) + else: + mixins_after.append(b1) else: assert base is object, ("multiple inheritance only supported " "with _mixin_: %r" % (cls,)) base = b1 + if mixins_before and mixins_after: + raise Exception("unsupported: class %r has mixin bases both" + " before and after the regular base" % (self,)) + self.add_mixins(mixins_after, check_not_in=base) + self.add_mixins(mixins_before) + self.add_sources_for_class(cls) - self.add_sources_for_class(cls) if base is not object: self.basedesc = bookkeeper.getdesc(base) @@ -480,14 +489,30 @@ return self.classdict[name] = Constant(value) - def add_mixin(self, base): - for subbase in base.__bases__: - if subbase is object: - continue - assert subbase.__dict__.get("_mixin_", False), ("Mixin class %r has non" - "mixin base class %r" % (base, subbase)) - self.add_mixin(subbase) - self.add_sources_for_class(base, mixin=True) + def add_mixins(self, mixins, check_not_in=object): + if not mixins: + return + A = type('tmp', tuple(mixins) + (object,), {}) + mro = A.__mro__ + assert mro[0] is A and mro[-1] is object + mro = mro[1:-1] + # + skip = set() + def add(cls): + if cls is not object: + for base in cls.__bases__: + add(base) + for name in cls.__dict__: + skip.add(name) + add(check_not_in) + # + for base in reversed(mro): + assert base.__dict__.get("_mixin_", False), ("Mixin class %r has non" 
+ "mixin base class %r" % (mixins, base)) + for name, value in base.__dict__.items(): + if name in skip: + continue + self.add_source_attribute(name, value, mixin=True) def add_sources_for_class(self, cls, mixin=False): for name, value in cls.__dict__.items(): diff --git a/pypy/annotation/model.py b/pypy/annotation/model.py --- a/pypy/annotation/model.py +++ b/pypy/annotation/model.py @@ -786,12 +786,15 @@ # # safety check that no-one is trying to make annotation and translation # faster by providing the -O option to Python. -try: - assert False -except AssertionError: - pass # fine -else: - raise RuntimeError("The annotator relies on 'assert' statements from the\n" +import os +if "WINGDB_PYTHON" not in os.environ: + # ...but avoiding this boring check in the IDE + try: + assert False + except AssertionError: + pass # fine + else: + raise RuntimeError("The annotator relies on 'assert' statements from the\n" "\tannotated program: you cannot run it with 'python -O'.") # this has the side-effect of registering the unary and binary operations diff --git a/pypy/annotation/test/test_annrpython.py b/pypy/annotation/test/test_annrpython.py --- a/pypy/annotation/test/test_annrpython.py +++ b/pypy/annotation/test/test_annrpython.py @@ -1,15 +1,12 @@ from __future__ import with_statement -import autopath import py.test import sys from pypy import conftest -from pypy.tool.udir import udir from pypy.annotation import model as annmodel from pypy.annotation.annrpython import RPythonAnnotator as _RPythonAnnotator from pypy.translator.translator import graphof as tgraphof from pypy.annotation import policy -from pypy.annotation import specialize from pypy.annotation.listdef import ListDef, ListChangeUnallowed from pypy.annotation.dictdef import DictDef from pypy.objspace.flow.model import * @@ -2431,6 +2428,52 @@ assert isinstance(s.items[1], annmodel.SomeChar) assert isinstance(s.items[2], annmodel.SomeChar) + def test_mixin_first(self): + class Mixin(object): + _mixin_ = True + 
def foo(self): return 4 + class Base(object): + def foo(self): return 5 + class Concrete(Mixin, Base): + pass + def f(): + return Concrete().foo() + + assert f() == 4 + a = self.RPythonAnnotator() + s = a.build_types(f, []) + assert s.const == 4 + + def test_mixin_last(self): + class Mixin(object): + _mixin_ = True + def foo(self): return 4 + class Base(object): + def foo(self): return 5 + class Concrete(Base, Mixin): + pass + def f(): + return Concrete().foo() + + assert f() == 5 + a = self.RPythonAnnotator() + s = a.build_types(f, []) + assert s.const == 5 + + def test_mixin_concrete(self): + class Mixin(object): + _mixin_ = True + def foo(self): return 4 + class Concrete(Mixin): + def foo(self): return 5 + def f(): + return Concrete().foo() + + assert f() == 5 + a = self.RPythonAnnotator() + s = a.build_types(f, []) + assert s.const == 5 + def test_multiple_mixins_mro(self): # an obscure situation, but it occurred in module/micronumpy/types.py class A(object): @@ -2510,6 +2553,26 @@ s = a.build_types(f, [int]) assert s.knowntype == int + def test_slots_reads(self): + class A(object): + __slots__ = () + class B(A): + def __init__(self, x): + self.x = x + def f(x): + if x: + a = A() + else: + a = B(x) + return a.x # should explode here + + a = self.RPythonAnnotator() + e = py.test.raises(Exception, a.build_types, f, [int]) + # this should explode on reading the attribute 'a.x', but it can + # sometimes explode on 'self.x = x', which does not make much sense. + # But it looks hard to fix in general: we don't know yet during 'a.x' + # if the attribute x will be read-only or read-write. 
+ def test_unboxed_value(self): class A(object): __slots__ = () diff --git a/pypy/bin/rpython b/pypy/bin/rpython new file mode 100644 --- /dev/null +++ b/pypy/bin/rpython @@ -0,0 +1,18 @@ +#!/usr/bin/env pypy + +"""RPython translation usage: + +rpython target + +run with --help for more information +""" + +import sys +from pypy.translator.goal.translate import main + +# no implicit targets +if len(sys.argv) == 1: + print __doc__ + sys.exit(1) + +main() diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -176,9 +176,6 @@ cmdline="--translationmodules", suggests=[("objspace.allworkingmodules", False)]), - BoolOption("geninterp", "specify whether geninterp should be used", - default=False), - BoolOption("logbytecodes", "keep track of bytecode usage", default=False), @@ -392,10 +389,6 @@ config.objspace.std.suggest(withsmalllong=True) # xxx other options? ropes maybe? - # completely disable geninterp in a level 0 translation - if level == '0': - config.objspace.suggest(geninterp=False) - # some optimizations have different effects depending on the typesystem if type_system == 'ootype': config.objspace.std.suggest(multimethods="doubledispatch") diff --git a/pypy/config/translationoption.py b/pypy/config/translationoption.py --- a/pypy/config/translationoption.py +++ b/pypy/config/translationoption.py @@ -182,11 +182,6 @@ # Flags of the TranslationContext: BoolOption("simplifying", "Simplify flow graphs", default=True), - BoolOption("builtins_can_raise_exceptions", - "When true, assume any call to a 'simple' builtin such as " - "'hex' can raise an arbitrary exception", - default=False, - cmdline=None), BoolOption("list_comprehension_operations", "When true, look for and special-case the sequence of " "operations that results from a list comprehension and " diff --git a/pypy/doc/discussion/win64_todo.txt b/pypy/doc/discussion/win64_todo.txt new file mode 100644 --- /dev/null +++ 
b/pypy/doc/discussion/win64_todo.txt @@ -0,0 +1,9 @@ +2011-11-04 +ll_os.py has a problem with the file rwin32.py. +Temporarily disabled for the win64_gborg branch. This needs to be +investigated and re-enabled. +Resolved, enabled. + +2011-11-05 +test_typed.py needs explicit tests to ensure that we +handle word sizes right. \ No newline at end of file diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -103,21 +103,13 @@ * A concurrent garbage collector (a lot of work) -Remove the GIL --------------- +STM, a.k.a. "remove the GIL" +---------------------------- -This is a major task that requires lots of thinking. However, few subprojects -can be potentially specified, unless a better plan can be thought out: +Removing the GIL --- or more precisely, a GIL-less thread-less solution --- +is `now work in progress.`__ Contributions welcome. -* A thread-aware garbage collector - -* Better RPython primitives for dealing with concurrency - -* JIT passes to remove locks on objects - -* (maybe) implement locking in Python interpreter - -* alternatively, look at Software Transactional Memory +.. __: http://pypy.org/tmdonate.html Introduce new benchmarks ------------------------ diff --git a/pypy/doc/sandbox.rst b/pypy/doc/sandbox.rst --- a/pypy/doc/sandbox.rst +++ b/pypy/doc/sandbox.rst @@ -82,7 +82,10 @@ In pypy/translator/goal:: - ./translate.py --sandbox targetpypystandalone.py + ./translate.py -O2 --sandbox targetpypystandalone.py + +If you don't have a regular PyPy installed, you should, because it's +faster to translate, but you can also run ``python translate.py`` instead. To run it, use the tools in the pypy/translator/sandbox directory:: diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -18,7 +18,8 @@ Edition. Other configurations may work as well. 
The translation scripts will set up the appropriate environment variables -for the compiler. They will attempt to locate the same compiler version that +for the compiler, so you do not need to run vcvars before translation. +They will attempt to locate the same compiler version that was used to build the Python interpreter doing the translation. Failing that, they will pick the most recent Visual Studio compiler they can find. In addition, the target architecture @@ -26,7 +27,7 @@ using a 32 bit Python and vice versa. **Note:** PyPy is currently not supported for 64 bit Windows, and translation -will be aborted in this case. +will fail in this case. The compiler is all you need to build pypy-c, but it will miss some modules that relies on third-party libraries. See below how to get @@ -57,7 +58,8 @@ install third-party libraries. We chose to install them in the parent directory of the pypy checkout. For example, if you installed pypy in ``d:\pypy\trunk\`` (This directory contains a README file), the base -directory is ``d:\pypy``. +directory is ``d:\pypy``. You may choose different values by setting the +INCLUDE, LIB and PATH (for DLLs) The Boehm garbage collector ~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -126,18 +128,54 @@ ------------------------ You can compile pypy with the mingw compiler, using the --cc=mingw32 option; -mingw.exe must be on the PATH. +gcc.exe must be on the PATH. If the -cc flag does not begin with "ming", it should be +the name of a valid gcc-derivative compiler, i.e. x86_64-w64-mingw32-gcc for the 64 bit +compiler creating a 64 bit target. -libffi for the mingw32 compiler +libffi for the mingw compiler ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -To enable the _rawffi (and ctypes) module, you need to compile a mingw32 -version of libffi. I downloaded the `libffi source files`_, and extracted -them in the base directory. Then run:: +To enable the _rawffi (and ctypes) module, you need to compile a mingw +version of libffi. 
Here is one way to do this, wich should allow you to try +to build for win64 or win32: + +#. Download and unzip a `mingw32 build`_ or `mingw64 build`_, say into c:\mingw +#. If you do not use cygwin, you will need msys to provide make, + autoconf tools and other goodies. + + #. Download and unzip a `msys for mingw`_, say into c:\msys + #. Edit the c:\msys\etc\fstab file to mount c:\mingw + +#. Download and unzip the `libffi source files`_, and extract + them in the base directory. +#. Run c:\msys\msys.bat or a cygwin shell which should make you + feel better since it is a shell prompt with shell tools. +#. From inside the shell, cd to the libffi directory and do:: sh ./configure make cp .libs/libffi-5.dll +If you can't find the dll, and the libtool issued a warning about +"undefined symbols not allowed", you will need to edit the libffi +Makefile in the toplevel directory. Add the flag -no-undefined to +the definition of libffi_la_LDFLAGS + +If you wish to experiment with win64, you must run configure with flags:: + + sh ./configure --build=x86_64-w64-mingw32 --host=x86_64-w64-mingw32 + +or such, depending on your mingw64 download. + +hacking on Pypy with the mingw compiler +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Since hacking on Pypy means running tests, you will need a way to specify +the mingw compiler when hacking (as opposed to translating). As of +March 2012, --cc is not a valid option for pytest.py. However if you set an +environment variable CC it will allow you to choose a compiler. + +.. _'mingw32 build': http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win32/Automated%20Builds +.. _`mingw64 build`: http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win64/Automated%20Builds +.. _`msys for mingw`: http://sourceforge.net/projects/mingw-w64/files/External%20binary%20packages%20%28Win64%20hosted%29/MSYS%20%2832-bit%29 .. _`libffi source files`: http://sourceware.org/libffi/ .. 
_`RPython translation toolchain`: translation.html diff --git a/pypy/doc/you-want-to-help.rst b/pypy/doc/you-want-to-help.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/you-want-to-help.rst @@ -0,0 +1,86 @@ + +You want to help with PyPy, now what? +===================================== + +PyPy is a very large project that has a reputation of being hard to dive into. +Some of this fame is warranted, some of it is purely accidental. There are three +important lessons that everyone willing to contribute should learn: + +* PyPy has layers. There are many pieces of architecture that are very well + separated from each other. More about this below, but often the manifestation + of this is that things are at a different layer than you would expect them + to be. For example if you are looking for the JIT implementation, you will + not find it in the implementation of the Python programming language. + +* Because of the above, we are very serious about Test Driven Development. + It's not only what we believe in, but also that PyPy's architecture is + working very well with TDD in mind and not so well without it. Often + the development means progressing in an unrelated corner, one unittest + at a time; and then flipping a giant switch, bringing it all together. + (It generally works out of the box. If it doesn't, then we didn't + write enough unit tests.) It's worth repeating - PyPy + approach is great if you do TDD, not so great otherwise. + +* PyPy uses an entirely different set of tools - most of them included + in the PyPy repository. There is no Makefile, nor autoconf. More below + +Architecture +============ + +PyPy has layers. The 100 miles view: + +* `RPython`_ is the language in which we write interpreters. Not the entire + PyPy project is written in RPython, only the parts that are compiled in + the translation process. 
The interesting point is that RPython has no parser, + it's compiled from the live python objects, which make it possible to do + all kinds of metaprogramming during import time. In short, Python is a meta + programming language for RPython. + + The RPython standard library is to be found in the ``rlib`` subdirectory. + +.. _`RPython`: coding-guide.html#RPython + +* The translation toolchain - this is the part that takes care about translating + RPython to flow graphs and then to C. There is more in the `architecture`_ + document written about it. + + It mostly lives in ``rpython``, ``annotator`` and ``objspace/flow``. + +.. _`architecture`: architecture.html + +* Python Interpreter + + xxx + +* Python modules + + xxx + +* Just-in-Time Compiler (JIT): `we have a tracing JIT`_ that traces the + interpreter written in RPython, rather than the user program that it + interprets. As a result it applies to any interpreter, i.e. any + language. But getting it to work correctly is not trivial: it + requires a small number of precise "hints" and possibly some small + refactorings of the interpreter. The JIT itself also has several + almost-independent parts: the tracer itself in ``jit/metainterp``, the + optimizer in ``jit/metainterp/optimizer`` that optimizes a list of + residual operations, and the backend in ``jit/backend/`` + that turns it into machine code. Writing a new backend is a + traditional way to get into the project. + +.. _`we have a tracing JIT`: jit/index.html + +* Garbage Collectors (GC): as you can notice if you are used to CPython's + C code, there are no ``Py_INCREF/Py_DECREF`` equivalents in RPython code. + `Garbage collection in PyPy`_ is inserted + during translation. Moreover, this is not reference counting; it is a real + GC written as more RPython code. The best one we have so far is in + ``rpython/memory/gc/minimark.py``. + +.. 
_`Garbage collection in PyPy`: garbage_collection.html + + +Toolset +======= + +xxx diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py --- a/pypy/interpreter/astcompiler/assemble.py +++ b/pypy/interpreter/astcompiler/assemble.py @@ -610,6 +610,8 @@ ops.JUMP_IF_FALSE_OR_POP : 0, ops.POP_JUMP_IF_TRUE : -1, ops.POP_JUMP_IF_FALSE : -1, + + ops.BUILD_LIST_FROM_ARG: 1, } diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -965,7 +965,7 @@ self.emit_op_arg(ops.CALL_METHOD, (kwarg_count << 8) | arg_count) return True - def _listcomp_generator(self, gens, gen_index, elt): + def _listcomp_generator(self, gens, gen_index, elt, single=False): start = self.new_block() skip = self.new_block() if_cleanup = self.new_block() @@ -973,6 +973,8 @@ gen = gens[gen_index] assert isinstance(gen, ast.comprehension) gen.iter.walkabout(self) + if single: + self.emit_op_arg(ops.BUILD_LIST_FROM_ARG, 0) self.emit_op(ops.GET_ITER) self.use_next_block(start) self.emit_jump(ops.FOR_ITER, anchor) @@ -998,8 +1000,12 @@ def visit_ListComp(self, lc): self.update_position(lc.lineno) - self.emit_op_arg(ops.BUILD_LIST, 0) - self._listcomp_generator(lc.generators, 0, lc.elt) + if len(lc.generators) != 1 or lc.generators[0].ifs: + single = False + self.emit_op_arg(ops.BUILD_LIST, 0) + else: + single = True + self._listcomp_generator(lc.generators, 0, lc.elt, single=single) def _comp_generator(self, node, generators, gen_index): start = self.new_block() diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py --- a/pypy/interpreter/astcompiler/test/test_astbuilder.py +++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py @@ -10,16 +10,6 @@ from pypy.interpreter.astcompiler import ast, consts -try: - all -except NameError: - def all(iterable): - for x in 
iterable: - if not x: - return False - return True - - class TestAstBuilder: def setup_class(cls): diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -58,7 +58,8 @@ w_res = pyco_expr.exec_host_bytecode(w_dict, w_dict) res = space.str_w(space.repr(w_res)) if not isinstance(expected, float): - assert res == repr(expected) + noL = lambda expr: expr.replace('L', '') + assert noL(res) == noL(repr(expected)) else: # Float representation can vary a bit between interpreter # versions, compare the numbers instead. @@ -908,3 +909,17 @@ return d['f'](5) """) assert 'generator' in space.str_w(space.repr(w_generator)) + + def test_list_comprehension(self): + source = "def f(): [i for i in l]" + source2 = "def f(): [i for i in l for j in l]" + source3 = "def f(): [i for i in l if i]" + counts = self.count_instructions(source) + assert ops.BUILD_LIST not in counts + assert counts[ops.BUILD_LIST_FROM_ARG] == 1 + counts = self.count_instructions(source2) + assert counts[ops.BUILD_LIST] == 1 + assert ops.BUILD_LIST_FROM_ARG not in counts + counts = self.count_instructions(source3) + assert counts[ops.BUILD_LIST] == 1 + assert ops.BUILD_LIST_FROM_ARG not in counts diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -7,7 +7,8 @@ from pypy.interpreter.miscutils import ThreadLocals from pypy.tool.cache import Cache from pypy.tool.uid import HUGEVAL_BYTES -from pypy.rlib.objectmodel import we_are_translated, newlist, compute_unique_id +from pypy.rlib.objectmodel import we_are_translated, newlist_hint,\ + compute_unique_id from pypy.rlib.debug import make_sure_not_resized from pypy.rlib.timer import DummyTimer, Timer from pypy.rlib.rarithmetic import r_uint @@ -833,7 +834,7 @@ items = [] else: try: - 
items = newlist(lgt_estimate) + items = newlist_hint(lgt_estimate) except MemoryError: items = [] # it might have lied # @@ -1335,7 +1336,7 @@ if not self.is_true(self.isinstance(w_obj, self.w_str)): raise OperationError(self.w_TypeError, self.wrap('argument must be a string')) - return self.str_w(w_obj) + return self.str_w(w_obj) def unicode_w(self, w_obj): return w_obj.unicode_w(self) diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -47,6 +47,11 @@ def async(self, space): "Check if this is an exception that should better not be caught." + if not space.full_exceptions: + # flow objspace does not support such exceptions and more + # importantly, raises KeyboardInterrupt if you try to access + # space.w_KeyboardInterrupt + return False return (self.match(space, space.w_SystemExit) or self.match(space, space.w_KeyboardInterrupt)) diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -901,24 +901,20 @@ def __init__(self, source, filename=None, modname='__builtin__'): # HAAACK (but a good one) + self.filename = filename + self.source = str(py.code.Source(source).deindent()) + self.modname = modname if filename is None: f = sys._getframe(1) filename = '<%s:%d>' % (f.f_code.co_filename, f.f_lineno) + if not os.path.exists(filename): + # make source code available for tracebacks + lines = [x + "\n" for x in source.split("\n")] + py.std.linecache.cache[filename] = (1, None, lines, filename) self.filename = filename - self.source = str(py.code.Source(source).deindent()) - self.modname = modname - # look at the first three lines for a NOT_RPYTHON tag - first = "\n".join(source.split("\n", 3)[:3]) - if "NOT_RPYTHON" in first: - self.can_use_geninterp = False - else: - self.can_use_geninterp = True - # make source code available for tracebacks - lines = [x + "\n" for x in source.split("\n")] - 
py.std.linecache.cache[filename] = (1, None, lines, filename) def __repr__(self): - return "" % (self.filename, self.can_use_geninterp) + return "" % (self.filename,) def getwdict(self, space): return space.fromcache(ApplevelCache).getorbuild(self) @@ -979,10 +975,7 @@ def build(self, app): "NOT_RPYTHON. Called indirectly by Applevel.getwdict()." - if self.space.config.objspace.geninterp and app.can_use_geninterp: - return PyPyCacheDir.build_applevelinterp_dict(app, self.space) - else: - return build_applevel_dict(app, self.space) + return build_applevel_dict(app, self.space) # __________ pure applevel version __________ @@ -996,157 +989,6 @@ filename=self.filename) return w_glob -# __________ geninterplevel version __________ - -class PyPyCacheDir: - "NOT_RPYTHON" - # similar to applevel, but using translation to interp-level. - # This version maintains a cache folder with single files. - - def build_applevelinterp_dict(cls, self, space): - "NOT_RPYTHON" - # N.B. 'self' is the ApplevelInterp; this is a class method, - # just so that we have a convenient place to store the global state. 
- if not cls._setup_done: - cls._setup() - - from pypy.translator.geninterplevel import translate_as_module - import marshal - scramble = md5(cls.seed) - scramble.update(marshal.dumps(self.source)) - key = scramble.hexdigest() - initfunc = cls.known_code.get(key) - if not initfunc: - # try to get it from file - name = key - if self.filename: - prename = os.path.splitext(os.path.basename(self.filename))[0] - else: - prename = 'zznoname' - name = "%s_%s" % (prename, name) - try: - __import__("pypy._cache."+name) - except ImportError, x: - # print x - pass - else: - initfunc = cls.known_code[key] - if not initfunc: - # build it and put it into a file - initfunc, newsrc = translate_as_module( - self.source, self.filename, self.modname) - fname = cls.cache_path.join(name+".py").strpath - f = file(get_tmp_file_name(fname), "w") - print >> f, """\ -# self-destruct on double-click: -if __name__ == "__main__": - from pypy import _cache - import os - namestart = os.path.join(os.path.split(_cache.__file__)[0], '%s') - for ending in ('.py', '.pyc', '.pyo'): - try: - os.unlink(namestart+ending) - except os.error: - pass""" % name - print >> f - print >> f, newsrc - print >> f, "from pypy._cache import known_code" - print >> f, "known_code[%r] = %s" % (key, initfunc.__name__) - f.close() - rename_tmp_to_eventual_file_name(fname) - w_glob = initfunc(space) - return w_glob - build_applevelinterp_dict = classmethod(build_applevelinterp_dict) - - _setup_done = False - - def _setup(cls): - """NOT_RPYTHON""" - lp = py.path.local - import pypy, os - p = lp(pypy.__file__).new(basename='_cache').ensure(dir=1) - cls.cache_path = p - ini = p.join('__init__.py') - try: - if not ini.check(): - raise ImportError # don't import if only a .pyc file left!!! 
- from pypy._cache import known_code, \ - GI_VERSION_RENDERED - except ImportError: - GI_VERSION_RENDERED = 0 - from pypy.translator.geninterplevel import GI_VERSION - cls.seed = md5(str(GI_VERSION)).digest() - if GI_VERSION != GI_VERSION_RENDERED or GI_VERSION is None: - for pth in p.listdir(): - if pth.check(file=1): - try: - pth.remove() - except: pass - f = file(get_tmp_file_name(str(ini)), "w") - f.write("""\ -# This folder acts as a cache for code snippets which have been -# compiled by compile_as_module(). -# It will get a new entry for every piece of code that has -# not been seen, yet. -# -# Caution! Only the code snippet is checked. If something -# is imported, changes are not detected. Also, changes -# to geninterplevel or gateway are also not checked. -# Exception: There is a checked version number in geninterplevel.py -# -# If in doubt, remove this file from time to time. - -GI_VERSION_RENDERED = %r - -known_code = {} - -# self-destruct on double-click: -def harakiri(): - import pypy._cache as _c - import py - lp = py.path.local - for pth in lp(_c.__file__).dirpath().listdir(): - try: - pth.remove() - except: pass - -if __name__ == "__main__": - harakiri() - -del harakiri -""" % GI_VERSION) - f.close() - rename_tmp_to_eventual_file_name(str(ini)) - import pypy._cache - cls.known_code = pypy._cache.known_code - cls._setup_done = True - _setup = classmethod(_setup) - - -def gethostname(_cache=[]): - if not _cache: - try: - import socket - hostname = socket.gethostname() - except: - hostname = '' - _cache.append(hostname) - return _cache[0] - -def get_tmp_file_name(fname): - return '%s~%s~%d' % (fname, gethostname(), os.getpid()) - -def rename_tmp_to_eventual_file_name(fname): - # generated files are first written to the host- and process-specific - # file 'tmpname', and then atomically moved to their final 'fname' - # to avoid problems if py.py is started several times in parallel - tmpname = get_tmp_file_name(fname) - try: - os.rename(tmpname, fname) - 
except (OSError, IOError): - os.unlink(fname) # necessary on Windows - os.rename(tmpname, fname) - # ____________________________________________________________ def appdef(source, applevel=ApplevelClass, filename=None): @@ -1184,11 +1026,6 @@ return build_applevel_dict(self, space) -class applevelinterp_temp(ApplevelClass): - hidden_applevel = False - def getwdict(self, space): # no cache - return PyPyCacheDir.build_applevelinterp_dict(self, space) - # app2interp_temp is used for testing mainly def app2interp_temp(func, applevel_temp=applevel_temp, filename=None): """ NOT_RPYTHON """ diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -15,9 +15,8 @@ from pypy.rlib.rarithmetic import r_uint, intmask from pypy.rlib.unroll import unrolling_iterable from pypy.rlib.debug import check_nonneg -from pypy.tool.stdlib_opcode import (bytecode_spec, host_bytecode_spec, - unrolling_all_opcode_descs, opmap, - host_opmap) +from pypy.tool.stdlib_opcode import (bytecode_spec, + unrolling_all_opcode_descs) def unaryoperation(operationname): """NOT_RPYTHON""" @@ -713,6 +712,19 @@ w_list = self.space.newlist(items) self.pushvalue(w_list) + def BUILD_LIST_FROM_ARG(self, _, next_instr): + # this is a little dance, because list has to be before the + # value + last_val = self.popvalue() + try: + lgt = self.space.len_w(last_val) + except OperationError, e: + if e.async(self.space): + raise + lgt = 0 # oh well + self.pushvalue(self.space.newlist([], sizehint=lgt)) + self.pushvalue(last_val) + def LOAD_ATTR(self, nameindex, next_instr): "obj.attributename" w_obj = self.popvalue() @@ -1419,11 +1431,9 @@ if lastchar.isspace() and lastchar != ' ': return file_softspace(stream, True) - print_item_to._annspecialcase_ = "specialize:argtype(0)" def print_item(x): print_item_to(x, sys_stdout()) - print_item._annspecialcase_ = "flowspace:print_item" def print_newline_to(stream): stream.write("\n") @@ 
-1431,7 +1441,6 @@ def print_newline(): print_newline_to(sys_stdout()) - print_newline._annspecialcase_ = "flowspace:print_newline" def file_softspace(file, newflag): try: diff --git a/pypy/interpreter/test/test_appinterp.py b/pypy/interpreter/test/test_appinterp.py --- a/pypy/interpreter/test/test_appinterp.py +++ b/pypy/interpreter/test/test_appinterp.py @@ -1,6 +1,6 @@ import py -from pypy.interpreter.gateway import appdef, ApplevelClass, applevel_temp, applevelinterp_temp +from pypy.interpreter.gateway import appdef, ApplevelClass, applevel_temp from pypy.interpreter.error import OperationError def test_execwith_novars(space): @@ -82,9 +82,6 @@ w_res = g(space, space.wrap(10), space.wrap(1)) assert space.eq_w(w_res, space.wrap(-9)) -def test_applevelinterp_functions(space): - test_applevel_functions(space, applevel_temp = applevelinterp_temp) - def test_applevel_class(space, applevel_temp = applevel_temp): app = applevel_temp(''' class C(object): @@ -99,9 +96,6 @@ w_clsattr = space.getattr(c, space.wrap('attr')) assert space.eq_w(w_clsattr, space.wrap(17)) -def test_applevelinterp_class(space): - test_applevel_class(space, applevel_temp = applevelinterp_temp) - def app_test_something_at_app_level(): x = 2 assert x/2 == 1 @@ -161,7 +155,7 @@ w_str = space1.getattr(w_mymod1, space1.wrap("hi")) assert space1.str_w(w_str) == "hello" - def test_geninterp_can_unfreeze(self): + def test_random_stuff_can_unfreeze(self): # When a module contains an "import" statement in applevel code, the # imported module is initialized, possibly after it has been already # frozen. 
diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -101,14 +101,6 @@ g3 = gateway.app2interp_temp(noapp_g3, gateway.applevel_temp) assert self.space.eq_w(g3(self.space, w('foo'), w('bar')), w('foobar')) - def test_app2interp2(self): - """same but using transformed code""" - w = self.space.wrap - def noapp_g3(a, b): - return a+b - g3 = gateway.app2interp_temp(noapp_g3, gateway.applevelinterp_temp) - assert self.space.eq_w(g3(self.space, w('foo'), w('bar')), w('foobar')) - def test_app2interp_general_args(self): w = self.space.wrap def app_general(x, *args, **kwds): diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py --- a/pypy/interpreter/test/test_objspace.py +++ b/pypy/interpreter/test/test_objspace.py @@ -312,8 +312,8 @@ mods = space.get_builtinmodule_to_install() assert '__pypy__' in mods # real builtin - assert 'array' not in mods # in lib_pypy - assert 'faked+array' not in mods # in lib_pypy + assert '_functools' not in mods # in lib_pypy + assert 'faked+_functools' not in mods # in lib_pypy assert 'this_doesnt_exist' not in mods # not in lib_pypy assert 'faked+this_doesnt_exist' in mods # not in lib_pypy, but in # ALL_BUILTIN_MODULES diff --git a/pypy/interpreter/test/test_zzpickle_and_slow.py b/pypy/interpreter/test/test_zzpickle_and_slow.py --- a/pypy/interpreter/test/test_zzpickle_and_slow.py +++ b/pypy/interpreter/test/test_zzpickle_and_slow.py @@ -75,6 +75,7 @@ class AppTestInterpObjectPickling: pytestmark = py.test.mark.skipif("config.option.runappdirect") def setup_class(cls): + cls.space = gettestobjspace(usemodules=['struct']) _attach_helpers(cls.space) def teardown_class(cls): diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -599,7 +599,7 @@ # if convenient for the 
backend, we compute the info about # the flag as (byte-offset, single-byte-flag). import struct - value = struct.pack("l", flag_word) + value = struct.pack(lltype.SignedFmt, flag_word) assert value.count('\x00') == len(value) - 1 # only one byte is != 0 i = 0 while value[i] == '\x00': i += 1 diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -321,7 +321,7 @@ except KeyError: pass # 'var' is already not in a register - def loc(self, box): + def loc(self, box, must_exist=False): """ Return the location of 'box'. """ self._check_type(box) @@ -332,6 +332,8 @@ except KeyError: if box in self.bindings_to_frame_reg: return self.frame_reg + if must_exist: + return self.frame_manager.bindings[box] return self.frame_manager.loc(box) def return_constant(self, v, forbidden_vars=[], selected_reg=None): @@ -360,7 +362,7 @@ self._check_type(v) if isinstance(v, Const): return self.return_constant(v, forbidden_vars, selected_reg) - prev_loc = self.loc(v) + prev_loc = self.loc(v, must_exist=True) if prev_loc is self.frame_reg and selected_reg is None: return prev_loc loc = self.force_allocate_reg(v, forbidden_vars, selected_reg, diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py --- a/pypy/jit/backend/llsupport/test/test_descr.py +++ b/pypy/jit/backend/llsupport/test/test_descr.py @@ -148,7 +148,7 @@ # def get_alignment(code): # Retrieve default alignment for the compiler/platform - return struct.calcsize('l' + code) - struct.calcsize(code) + return struct.calcsize(lltype.SignedFmt + code) - struct.calcsize(code) assert descr1.basesize == get_alignment('c') assert descr2.basesize == get_alignment('p') assert descr3.basesize == get_alignment('p') diff --git a/pypy/jit/backend/llsupport/test/test_ffisupport.py b/pypy/jit/backend/llsupport/test/test_ffisupport.py --- 
a/pypy/jit/backend/llsupport/test/test_ffisupport.py +++ b/pypy/jit/backend/llsupport/test/test_ffisupport.py @@ -2,6 +2,7 @@ from pypy.jit.codewriter.longlong import is_64_bit from pypy.jit.backend.llsupport.descr import * from pypy.jit.backend.llsupport.ffisupport import * +from pypy.rlib.rarithmetic import is_emulated_long class FakeCPU: @@ -43,7 +44,7 @@ assert descr.result_flag == FLAG_UNSIGNED assert descr.is_result_signed() == False - if not is_64_bit: + if not is_64_bit or is_emulated_long: descr = get_call_descr_dynamic(FakeCPU(), [], types.slonglong, None, 42) assert descr is None # missing longlongs diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -11,6 +11,7 @@ from pypy.jit.tool.oparser import parse from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE from pypy.jit.metainterp.optimizeopt.util import equaloplists +from pypy.rlib.rarithmetic import is_valid_int def test_boehm(): gc_ll_descr = GcLLDescr_boehm(None, None, None) @@ -103,7 +104,7 @@ gcrootmap.put(retaddr, shapeaddr) assert gcrootmap._gcmap[0] == retaddr assert gcrootmap._gcmap[1] == shapeaddr - p = rffi.cast(rffi.LONGP, gcrootmap.gcmapstart()) + p = rffi.cast(rffi.SIGNEDP, gcrootmap.gcmapstart()) assert p[0] == retaddr assert (gcrootmap.gcmapend() == gcrootmap.gcmapstart() + rffi.sizeof(lltype.Signed) * 2) @@ -419,9 +420,9 @@ assert newops[0].getarg(1) == v_value assert newops[0].result is None wbdescr = newops[0].getdescr() - assert isinstance(wbdescr.jit_wb_if_flag, int) - assert isinstance(wbdescr.jit_wb_if_flag_byteofs, int) - assert isinstance(wbdescr.jit_wb_if_flag_singlebyte, int) + assert is_valid_int(wbdescr.jit_wb_if_flag) + assert is_valid_int(wbdescr.jit_wb_if_flag_byteofs) + assert is_valid_int(wbdescr.jit_wb_if_flag_singlebyte) def test_get_rid_of_debug_merge_point(self): operations = [ diff --git 
a/pypy/jit/backend/llsupport/test/test_regalloc.py b/pypy/jit/backend/llsupport/test/test_regalloc.py --- a/pypy/jit/backend/llsupport/test/test_regalloc.py +++ b/pypy/jit/backend/llsupport/test/test_regalloc.py @@ -1,4 +1,4 @@ - +import py from pypy.jit.metainterp.history import BoxInt, ConstInt, BoxFloat, INT, FLOAT from pypy.jit.backend.llsupport.regalloc import FrameManager from pypy.jit.backend.llsupport.regalloc import RegisterManager as BaseRegMan @@ -236,6 +236,16 @@ assert isinstance(loc, FakeFramePos) assert len(asm.moves) == 1 + def test_bogus_make_sure_var_in_reg(self): + b0, = newboxes(0) + longevity = {b0: (0, 1)} + fm = TFrameManager() + asm = MockAsm() + rm = RegisterManager(longevity, frame_manager=fm, assembler=asm) + rm.next_instruction() + # invalid call to make_sure_var_in_reg(): box unknown so far + py.test.raises(KeyError, rm.make_sure_var_in_reg, b0) + def test_return_constant(self): asm = MockAsm() boxes, longevity = boxes_and_longevity(5) diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -16,9 +16,11 @@ from pypy.rpython.annlowlevel import llhelper from pypy.rpython.llinterp import LLException from pypy.jit.codewriter import heaptracker, longlong -from pypy.rlib.rarithmetic import intmask +from pypy.rlib import longlong2float +from pypy.rlib.rarithmetic import intmask, is_valid_int from pypy.jit.backend.detect_cpu import autodetect_main_model_and_size + def boxfloat(x): return BoxFloat(longlong.getfloatstorage(x)) @@ -493,7 +495,7 @@ if cpu.supports_floats: def func(f, i): assert isinstance(f, float) - assert isinstance(i, int) + assert is_valid_int(i) return f - float(i) FPTR = self.Ptr(self.FuncType([lltype.Float, lltype.Signed], lltype.Float)) @@ -1496,13 +1498,30 @@ c_nest, c_nest], 'void') def test_read_timestamp(self): + if sys.platform == 'win32': + # windows quite often is very inexact (like the old Intel 
8259 PIC), + # so we stretch the time a little bit. + # On my virtual Parallels machine in a 2GHz Core i7 Mac Mini, + # the test starts working at delay == 21670 and stops at 20600000. + # We take the geometric mean value. + from math import log, exp + delay_min = 21670 + delay_max = 20600000 + delay = int(exp((log(delay_min)+log(delay_max))/2)) + def wait_a_bit(): + for i in xrange(delay): pass + else: + def wait_a_bit(): + pass if longlong.is_64_bit: got1 = self.execute_operation(rop.READ_TIMESTAMP, [], 'int') + wait_a_bit() got2 = self.execute_operation(rop.READ_TIMESTAMP, [], 'int') res1 = got1.getint() res2 = got2.getint() else: got1 = self.execute_operation(rop.READ_TIMESTAMP, [], 'float') + wait_a_bit() got2 = self.execute_operation(rop.READ_TIMESTAMP, [], 'float') res1 = got1.getlonglong() res2 = got2.getlonglong() @@ -1598,6 +1617,12 @@ [BoxPtr(x)], 'int').value assert res == -19 + def test_convert_float_bytes(self): + t = 'int' if longlong.is_64_bit else 'float' + res = self.execute_operation(rop.CONVERT_FLOAT_BYTES_TO_LONGLONG, + [boxfloat(2.5)], t).value + assert res == longlong2float.float2longlong(2.5) + def test_ooops_non_gc(self): x = lltype.malloc(lltype.Struct('x'), flavor='raw') v = heaptracker.adr2int(llmemory.cast_ptr_to_adr(x)) diff --git a/pypy/jit/backend/test/support.py b/pypy/jit/backend/test/support.py --- a/pypy/jit/backend/test/support.py +++ b/pypy/jit/backend/test/support.py @@ -3,6 +3,7 @@ from pypy.rlib.debug import debug_print from pypy.translator.translator import TranslationContext, graphof from pypy.jit.metainterp.optimizeopt import ALL_OPTS_NAMES +from pypy.rlib.rarithmetic import is_valid_int class BaseCompiledMixin(object): @@ -24,7 +25,7 @@ from pypy.annotation import model as annmodel for arg in args: - assert isinstance(arg, int) + assert is_valid_int(arg) self.pre_translation_hook() t = self._get_TranslationContext() diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py --- 
a/pypy/jit/backend/test/test_random.py +++ b/pypy/jit/backend/test/test_random.py @@ -449,6 +449,7 @@ OPERATIONS.append(CastFloatToIntOperation(rop.CAST_FLOAT_TO_INT)) OPERATIONS.append(CastIntToFloatOperation(rop.CAST_INT_TO_FLOAT)) +OPERATIONS.append(CastFloatToIntOperation(rop.CONVERT_FLOAT_BYTES_TO_LONGLONG)) OperationBuilder.OPERATIONS = OPERATIONS @@ -502,11 +503,11 @@ else: assert 0, "unknown backend %r" % pytest.config.option.backend -# ____________________________________________________________ +# ____________________________________________________________ class RandomLoop(object): dont_generate_more = False - + def __init__(self, cpu, builder_factory, r, startvars=None): self.cpu = cpu if startvars is None: diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -606,7 +606,7 @@ else: assert token struct.number = compute_unique_id(token) - self.loop_run_counters.append(struct) + self.loop_run_counters.append(struct) return struct def _find_failure_recovery_bytecode(self, faildescr): @@ -665,7 +665,7 @@ ResOperation(rop.SETFIELD_RAW, [c_adr, box2], None, descr=self.debug_counter_descr)] operations.extend(ops) - + @specialize.argtype(1) def _inject_debugging_code(self, looptoken, operations, tp, number): if self._debug: @@ -836,8 +836,8 @@ self.mc.MOVSD_sx(0, loc.value) elif WORD == 4 and isinstance(loc, StackLoc) and loc.get_width() == 8: # XXX evil trick - self.mc.PUSH_b(get_ebp_ofs(loc.position)) - self.mc.PUSH_b(get_ebp_ofs(loc.position + 1)) + self.mc.PUSH_b(loc.value + 4) + self.mc.PUSH_b(loc.value) else: self.mc.PUSH(loc) @@ -847,8 +847,8 @@ self.mc.ADD_ri(esp.value, 8) # = size of doubles elif WORD == 4 and isinstance(loc, StackLoc) and loc.get_width() == 8: # XXX evil trick - self.mc.POP_b(get_ebp_ofs(loc.position + 1)) - self.mc.POP_b(get_ebp_ofs(loc.position)) + self.mc.POP_b(loc.value) + self.mc.POP_b(loc.value + 4) else: 
self.mc.POP(loc) @@ -1242,6 +1242,15 @@ self.mc.MOVD_xr(resloc.value, loc0.value) self.mc.CVTSS2SD_xx(resloc.value, resloc.value) + def genop_convert_float_bytes_to_longlong(self, op, arglocs, resloc): + loc0, = arglocs + if longlong.is_64_bit: + assert isinstance(resloc, RegLoc) + assert isinstance(loc0, RegLoc) + self.mc.MOVD(resloc, loc0) + else: + self.mov(loc0, resloc) + def genop_guard_int_is_true(self, op, guard_op, guard_token, arglocs, resloc): guard_opnum = guard_op.getopnum() self.mc.CMP(arglocs[0], imm0) @@ -1954,8 +1963,6 @@ mc.PUSH_r(ebx.value) elif IS_X86_64: mc.MOV_rr(edi.value, ebx.value) - # XXX: Correct to only align the stack on 64-bit? - mc.AND_ri(esp.value, -16) else: raise AssertionError("Shouldn't happen") @@ -2117,9 +2124,12 @@ # First, we need to save away the registers listed in # 'save_registers' that are not callee-save. XXX We assume that # the XMM registers won't be modified. We store them in - # [ESP+4], [ESP+8], etc., leaving enough room in [ESP] for the - # single argument to closestack_addr below. - p = WORD + # [ESP+4], [ESP+8], etc.; on x86-32 we leave enough room in [ESP] + # for the single argument to closestack_addr below. + if IS_X86_32: + p = WORD + elif IS_X86_64: + p = 0 for reg in self._regalloc.rm.save_around_call_regs: if reg in save_registers: self.mc.MOV_sr(p, reg.value) @@ -2174,7 +2184,10 @@ # self._emit_call(-1, imm(self.releasegil_addr), args) # Finally, restore the registers saved above. 
- p = WORD + if IS_X86_32: + p = WORD + elif IS_X86_64: + p = 0 for reg in self._regalloc.rm.save_around_call_regs: if reg in save_registers: self.mc.MOV_rs(reg.value, p) diff --git a/pypy/jit/backend/x86/codebuf.py b/pypy/jit/backend/x86/codebuf.py --- a/pypy/jit/backend/x86/codebuf.py +++ b/pypy/jit/backend/x86/codebuf.py @@ -19,8 +19,8 @@ class MachineCodeBlockWrapper(BlockBuilderMixin, - codebuilder_cls, - LocationCodeBuilder): + LocationCodeBuilder, + codebuilder_cls): def __init__(self): self.init_block_builder() # a list of relative positions; for each position p, the bytes diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -766,6 +766,19 @@ consider_cast_singlefloat_to_float = consider_cast_int_to_float + def consider_convert_float_bytes_to_longlong(self, op): + if longlong.is_64_bit: + loc0 = self.xrm.make_sure_var_in_reg(op.getarg(0)) + loc1 = self.rm.force_allocate_reg(op.result) + self.Perform(op, [loc0], loc1) + self.xrm.possibly_free_var(op.getarg(0)) + else: + arg0 = op.getarg(0) + loc0 = self.xrm.loc(arg0) + loc1 = self.xrm.force_allocate_reg(op.result, forbidden_vars=[arg0]) + self.Perform(op, [loc0], loc1) + self.xrm.possibly_free_var(op.getarg(0)) + def _consider_llong_binop_xx(self, op): # must force both arguments into xmm registers, because we don't # know if they will be suitably aligned. 
Exception: if the second diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py --- a/pypy/jit/backend/x86/rx86.py +++ b/pypy/jit/backend/x86/rx86.py @@ -601,9 +601,12 @@ CVTSS2SD_xb = xmminsn('\xF3', rex_nw, '\x0F\x5A', register(1, 8), stack_bp(2)) - MOVD_rx = xmminsn('\x66', rex_nw, '\x0F\x7E', register(2, 8), register(1), '\xC0') - MOVD_xr = xmminsn('\x66', rex_nw, '\x0F\x6E', register(1, 8), register(2), '\xC0') - MOVD_xb = xmminsn('\x66', rex_nw, '\x0F\x6E', register(1, 8), stack_bp(2)) + # These work on machine sized registers, so MOVD is actually MOVQ + # when running on 64 bits. Note a bug in the Intel documentation: + # http://lists.gnu.org/archive/html/bug-binutils/2007-07/msg00095.html + MOVD_rx = xmminsn('\x66', rex_w, '\x0F\x7E', register(2, 8), register(1), '\xC0') + MOVD_xr = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), register(2), '\xC0') + MOVD_xb = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), stack_bp(2)) PSRAD_xi = xmminsn('\x66', rex_nw, '\x0F\x72', register(1), '\xE0', immediate(2, 'b')) diff --git a/pypy/jit/backend/x86/support.py b/pypy/jit/backend/x86/support.py --- a/pypy/jit/backend/x86/support.py +++ b/pypy/jit/backend/x86/support.py @@ -36,15 +36,15 @@ # ____________________________________________________________ -if sys.platform == 'win32': - ensure_sse2_floats = lambda : None - # XXX check for SSE2 on win32 too +if WORD == 4: + extra = ['-DPYPY_X86_CHECK_SSE2'] else: - if WORD == 4: - extra = ['-DPYPY_X86_CHECK_SSE2'] - else: - extra = [] - ensure_sse2_floats = rffi.llexternal_use_eci(ExternalCompilationInfo( - compile_extra = ['-msse2', '-mfpmath=sse', - '-DPYPY_CPU_HAS_STANDARD_PRECISION'] + extra, - )) + extra = [] + +if sys.platform != 'win32': + extra = ['-msse2', '-mfpmath=sse', + '-DPYPY_CPU_HAS_STANDARD_PRECISION'] + extra + +ensure_sse2_floats = rffi.llexternal_use_eci(ExternalCompilationInfo( + compile_extra = extra, +)) diff --git a/pypy/jit/backend/x86/test/conftest.py 
b/pypy/jit/backend/x86/test/conftest.py --- a/pypy/jit/backend/x86/test/conftest.py +++ b/pypy/jit/backend/x86/test/conftest.py @@ -1,4 +1,4 @@ -import py +import py, os from pypy.jit.backend import detect_cpu cpu = detect_cpu.autodetect() @@ -6,5 +6,7 @@ if cpu not in ('x86', 'x86_64'): py.test.skip("x86/x86_64 tests skipped: cpu is %r" % (cpu,)) if cpu == 'x86_64': + if os.name == "nt": + py.test.skip("Windows cannot allocate non-reserved memory") from pypy.rpython.lltypesystem import ll2ctypes ll2ctypes.do_allocation_in_far_regions() diff --git a/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py b/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py --- a/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py +++ b/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py @@ -182,6 +182,12 @@ filename = str(testdir.join(FILENAME % methname)) g = open(inputname, 'w') g.write('\x09.string "%s"\n' % BEGIN_TAG) + # + if instrname == 'MOVD' and self.WORD == 8: + instrname = 'MOVQ' + if argmodes == 'xb': + py.test.skip('"as" uses an undocumented alternate encoding??') + # for args in args_lists: suffix = "" ## all = instr.as_all_suffixes @@ -229,9 +235,6 @@ # movq $xxx, %rax => movl $xxx, %eax suffix = 'l' ops[1] = reduce_to_32bit(ops[1]) - if instrname.lower() == 'movd': - ops[0] = reduce_to_32bit(ops[0]) - ops[1] = reduce_to_32bit(ops[1]) # op = '\t%s%s %s%s' % (instrname.lower(), suffix, ', '.join(ops), following) diff --git a/pypy/jit/backend/x86/test/test_zmath.py b/pypy/jit/backend/x86/test/test_zmath.py --- a/pypy/jit/backend/x86/test/test_zmath.py +++ b/pypy/jit/backend/x86/test/test_zmath.py @@ -6,6 +6,8 @@ from pypy.translator.c.test.test_genc import compile from pypy.jit.backend.x86.support import ensure_sse2_floats from pypy.rlib import rfloat +from pypy.rlib.unroll import unrolling_iterable +from pypy.rlib.debug import debug_print def get_test_case((fnname, args, expected)): @@ -16,16 +18,32 @@ expect_valueerror = (expected == ValueError) 
expect_overflowerror = (expected == OverflowError) check = test_direct.get_tester(expected) + unroll_args = unrolling_iterable(args) # def testfn(): + debug_print('calling', fnname, 'with arguments:') + for arg in unroll_args: + debug_print('\t', arg) try: got = fn(*args) except ValueError: - return expect_valueerror + if expect_valueerror: + return True + else: + debug_print('unexpected ValueError!') + return False except OverflowError: - return expect_overflowerror + if expect_overflowerror: + return True + else: + debug_print('unexpected OverflowError!') + return False else: - return check(got) + if check(got): + return True + else: + debug_print('unexpected result:', got) + return False # testfn.func_name = 'test_' + fnname return testfn diff --git a/pypy/jit/backend/x86/tool/viewcode.py b/pypy/jit/backend/x86/tool/viewcode.py --- a/pypy/jit/backend/x86/tool/viewcode.py +++ b/pypy/jit/backend/x86/tool/viewcode.py @@ -34,7 +34,7 @@ # I am porting it in a lazy fashion... See py-utils/xam.py if sys.platform == "win32": - XXX # lots more in Psyco + pass # lots more in Psyco def machine_code_dump(data, originaddr, backend_name, label_list=None): objdump_backend_option = { diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -291,6 +291,11 @@ op1 = SpaceOperation('-live-', [], None) return [op, op1] + def _noop_rewrite(self, op): + return op + + rewrite_op_convert_float_bytes_to_longlong = _noop_rewrite + # ---------- # Various kinds of calls @@ -365,7 +370,7 @@ def handle_builtin_call(self, op): oopspec_name, args = support.decode_builtin_call(op) # dispatch to various implementations depending on the oopspec_name - if oopspec_name.startswith('list.') or oopspec_name == 'newlist': + if oopspec_name.startswith('list.') or oopspec_name.startswith('newlist'): prepare = self._handle_list_call elif oopspec_name.startswith('stroruni.'): prepare = 
self._handle_stroruni_call @@ -1494,6 +1499,14 @@ arraydescr, v_length], op.result) + def do_resizable_newlist_hint(self, op, args, arraydescr, lengthdescr, + itemsdescr, structdescr): + v_hint = self._get_initial_newlist_length(op, args) + return SpaceOperation('newlist_hint', + [structdescr, lengthdescr, itemsdescr, + arraydescr, v_hint], + op.result) + def do_resizable_list_getitem(self, op, args, arraydescr, lengthdescr, itemsdescr, structdescr): v_index, extraop = self._prepare_list_getset(op, lengthdescr, args, diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -144,6 +144,10 @@ _ll_1_newlist.need_result_type = True _ll_2_newlist.need_result_type = True +def _ll_1_newlist_hint(LIST, hint): + return LIST.ll_newlist_hint(hint) +_ll_1_newlist_hint.need_result_type = True + def _ll_1_list_len(l): return l.ll_length() def _ll_2_list_getitem(l, index): diff --git a/pypy/jit/codewriter/test/test_flatten.py b/pypy/jit/codewriter/test/test_flatten.py --- a/pypy/jit/codewriter/test/test_flatten.py +++ b/pypy/jit/codewriter/test/test_flatten.py @@ -968,6 +968,21 @@ int_return %i2 """, transform=True) + def test_convert_float_bytes_to_int(self): + from pypy.rlib.longlong2float import float2longlong + def f(x): + return float2longlong(x) + if longlong.is_64_bit: + result_var = "%i0" + return_op = "int_return" + else: + result_var = "%f1" + return_op = "float_return" + self.encoding_test(f, [25.0], """ + convert_float_bytes_to_longlong %%f0 -> %(result_var)s + %(return_op)s %(result_var)s + """ % {"result_var": result_var, "return_op": return_op}) + def check_force_cast(FROM, TO, operations, value): """Check that the test is correctly written...""" diff --git a/pypy/jit/codewriter/test/test_longlong.py b/pypy/jit/codewriter/test/test_longlong.py --- a/pypy/jit/codewriter/test/test_longlong.py +++ b/pypy/jit/codewriter/test/test_longlong.py @@ -1,6 +1,6 @@ import py, sys 
-from pypy.rlib.rarithmetic import r_longlong, intmask +from pypy.rlib.rarithmetic import r_longlong, intmask, is_valid_int from pypy.objspace.flow.model import SpaceOperation, Variable, Constant from pypy.objspace.flow.model import Block, Link from pypy.translator.unsimplify import varoftype @@ -32,7 +32,7 @@ def test_functions(): xll = longlong.getfloatstorage(3.5) assert longlong.getrealfloat(xll) == 3.5 - assert isinstance(longlong.gethash(xll), int) + assert is_valid_int(longlong.gethash(xll)) class TestLongLong: diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -1,15 +1,16 @@ +from pypy.jit.codewriter import heaptracker, longlong +from pypy.jit.codewriter.jitcode import JitCode, SwitchDictDescr +from pypy.jit.metainterp.compile import ResumeAtPositionDescr +from pypy.jit.metainterp.jitexc import JitException, get_llexception, reraise +from pypy.rlib import longlong2float +from pypy.rlib.debug import debug_start, debug_stop, ll_assert, make_sure_not_resized +from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.rarithmetic import intmask, LONG_BIT, r_uint, ovfcheck +from pypy.rlib.rtimer import read_timestamp from pypy.rlib.unroll import unrolling_iterable -from pypy.rlib.rtimer import read_timestamp -from pypy.rlib.rarithmetic import intmask, LONG_BIT, r_uint, ovfcheck -from pypy.rlib.objectmodel import we_are_translated -from pypy.rlib.debug import debug_start, debug_stop, ll_assert -from pypy.rlib.debug import make_sure_not_resized from pypy.rpython.lltypesystem import lltype, llmemory, rclass from pypy.rpython.lltypesystem.lloperation import llop -from pypy.jit.codewriter.jitcode import JitCode, SwitchDictDescr -from pypy.jit.codewriter import heaptracker, longlong -from pypy.jit.metainterp.jitexc import JitException, get_llexception, reraise -from pypy.jit.metainterp.compile import ResumeAtPositionDescr + def arguments(*argtypes, 
**kwds): resulttype = kwds.pop('returns', None) @@ -20,6 +21,9 @@ return function return decorate +LONGLONG_TYPECODE = 'i' if longlong.is_64_bit else 'f' + + class LeaveFrame(JitException): pass @@ -663,6 +667,11 @@ a = float(a) return longlong.getfloatstorage(a) + @arguments("f", returns=LONGLONG_TYPECODE) + def bhimpl_convert_float_bytes_to_longlong(a): + a = longlong.getrealfloat(a) + return longlong2float.float2longlong(a) + # ---------- # control flow operations @@ -982,6 +991,15 @@ cpu.bh_setfield_gc_r(result, itemsdescr, items) return result + @arguments("cpu", "d", "d", "d", "d", "i", returns="r") + def bhimpl_newlist_hint(cpu, structdescr, lengthdescr, itemsdescr, + arraydescr, lengthhint): + result = cpu.bh_new(structdescr) + cpu.bh_setfield_gc_i(result, lengthdescr, 0) + items = cpu.bh_new_array(arraydescr, lengthhint) + cpu.bh_setfield_gc_r(result, itemsdescr, items) + return result + @arguments("cpu", "r", "d", "d", "i", returns="i") def bhimpl_getlistitem_gc_i(cpu, lst, itemsdescr, arraydescr, index): items = cpu.bh_getfield_gc_r(lst, itemsdescr) @@ -1176,14 +1194,14 @@ def bhimpl_getinteriorfield_gc_f(cpu, array, index, descr): return cpu.bh_getinteriorfield_gc_f(array, index, descr) - @arguments("cpu", "r", "i", "d", "i") - def bhimpl_setinteriorfield_gc_i(cpu, array, index, descr, value): + @arguments("cpu", "r", "i", "i", "d") + def bhimpl_setinteriorfield_gc_i(cpu, array, index, value, descr): cpu.bh_setinteriorfield_gc_i(array, index, descr, value) - @arguments("cpu", "r", "i", "d", "r") - def bhimpl_setinteriorfield_gc_r(cpu, array, index, descr, value): + @arguments("cpu", "r", "i", "r", "d") + def bhimpl_setinteriorfield_gc_r(cpu, array, index, value, descr): cpu.bh_setinteriorfield_gc_r(array, index, descr, value) - @arguments("cpu", "r", "i", "d", "f") - def bhimpl_setinteriorfield_gc_f(cpu, array, index, descr, value): + @arguments("cpu", "r", "i", "f", "d") + def bhimpl_setinteriorfield_gc_f(cpu, array, index, value, descr): 
cpu.bh_setinteriorfield_gc_f(array, index, descr, value) @arguments("cpu", "r", "d", returns="i") @@ -1300,7 +1318,7 @@ def bhimpl_copyunicodecontent(cpu, src, dst, srcstart, dststart, length): cpu.bh_copyunicodecontent(src, dst, srcstart, dststart, length) - @arguments(returns=(longlong.is_64_bit and "i" or "f")) + @arguments(returns=LONGLONG_TYPECODE) def bhimpl_ll_read_timestamp(): return read_timestamp() diff --git a/pypy/jit/metainterp/executor.py b/pypy/jit/metainterp/executor.py --- a/pypy/jit/metainterp/executor.py +++ b/pypy/jit/metainterp/executor.py @@ -2,7 +2,7 @@ """ from pypy.rpython.lltypesystem import lltype, rstr -from pypy.rlib.rarithmetic import ovfcheck, r_longlong +from pypy.rlib.rarithmetic import ovfcheck, r_longlong, is_valid_int from pypy.rlib.rtimer import read_timestamp from pypy.rlib.unroll import unrolling_iterable from pypy.jit.metainterp.history import BoxInt, BoxPtr, BoxFloat, check_descr @@ -248,7 +248,7 @@ def do_read_timestamp(cpu, _): x = read_timestamp() if longlong.is_64_bit: - assert isinstance(x, int) # 64-bit + assert is_valid_int(x) # 64-bit return BoxInt(x) else: assert isinstance(x, r_longlong) # 32-bit diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -4,7 +4,8 @@ from pypy.rpython.ootypesystem import ootype from pypy.rlib.objectmodel import we_are_translated, Symbolic from pypy.rlib.objectmodel import compute_unique_id -from pypy.rlib.rarithmetic import r_int64 +from pypy.rlib.rarithmetic import r_int64, is_valid_int + from pypy.conftest import option from pypy.jit.metainterp.resoperation import ResOperation, rop @@ -213,7 +214,7 @@ def __init__(self, value): if not we_are_translated(): - if isinstance(value, int): + if is_valid_int(value): value = int(value) # bool -> int else: assert isinstance(value, Symbolic) @@ -448,7 +449,7 @@ def __init__(self, value=0): if not we_are_translated(): - if isinstance(value, int): 
+ if is_valid_int(value): value = int(value) # bool -> int else: assert isinstance(value, Symbolic) diff --git a/pypy/jit/metainterp/optimizeopt/intutils.py b/pypy/jit/metainterp/optimizeopt/intutils.py --- a/pypy/jit/metainterp/optimizeopt/intutils.py +++ b/pypy/jit/metainterp/optimizeopt/intutils.py @@ -1,10 +1,9 @@ -from pypy.rlib.rarithmetic import ovfcheck, LONG_BIT +from pypy.rlib.rarithmetic import ovfcheck, LONG_BIT, maxint, is_valid_int from pypy.rlib.objectmodel import we_are_translated from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.metainterp.history import BoxInt, ConstInt -import sys -MAXINT = sys.maxint -MININT = -sys.maxint - 1 +MAXINT = maxint +MININT = -maxint - 1 class IntBound(object): _attrs_ = ('has_upper', 'has_lower', 'upper', 'lower') @@ -16,8 +15,8 @@ self.lower = lower # check for unexpected overflows: if not we_are_translated(): - assert type(upper) is not long - assert type(lower) is not long + assert type(upper) is not long or is_valid_int(upper) + assert type(lower) is not long or is_valid_int(lower) # Returns True if the bound was updated def make_le(self, other): diff --git a/pypy/jit/metainterp/optimizeopt/vstring.py b/pypy/jit/metainterp/optimizeopt/vstring.py --- a/pypy/jit/metainterp/optimizeopt/vstring.py +++ b/pypy/jit/metainterp/optimizeopt/vstring.py @@ -10,6 +10,8 @@ from pypy.rlib.unroll import unrolling_iterable from pypy.rpython import annlowlevel from pypy.rpython.lltypesystem import lltype, rstr +from pypy.rlib.rarithmetic import is_valid_int + class StrOrUnicode(object): @@ -730,7 +732,7 @@ for name in dir(OptString): if name.startswith(prefix): value = getattr(EffectInfo, 'OS_' + name[len(prefix):]) - assert isinstance(value, int) and value != 0 + assert is_valid_int(value) and value != 0 result.append((value, getattr(OptString, name))) return unrolling_iterable(result) opt_call_oopspec_ops = _findall_call_oopspec() diff --git a/pypy/jit/metainterp/pyjitpl.py 
b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -223,6 +223,7 @@ 'cast_float_to_singlefloat', 'cast_singlefloat_to_float', 'float_neg', 'float_abs', 'cast_ptr_to_int', 'cast_int_to_ptr', + 'convert_float_bytes_to_longlong', ]: exec py.code.Source(''' @arguments("box") @@ -509,6 +510,15 @@ self._opimpl_setfield_gc_any(sbox, itemsdescr, abox) return sbox + @arguments("descr", "descr", "descr", "descr", "box") + def opimpl_newlist_hint(self, structdescr, lengthdescr, itemsdescr, + arraydescr, sizehintbox): + sbox = self.opimpl_new(structdescr) + self._opimpl_setfield_gc_any(sbox, lengthdescr, history.CONST_FALSE) + abox = self.opimpl_new_array(arraydescr, sizehintbox) + self._opimpl_setfield_gc_any(sbox, itemsdescr, abox) + return sbox + @arguments("box", "descr", "descr", "box") def _opimpl_getlistitem_gc_any(self, listbox, itemsdescr, arraydescr, indexbox): diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -419,6 +419,7 @@ 'CAST_INT_TO_FLOAT/1', # need some messy code in the backend 'CAST_FLOAT_TO_SINGLEFLOAT/1', 'CAST_SINGLEFLOAT_TO_FLOAT/1', + 'CONVERT_FLOAT_BYTES_TO_LONGLONG/1', # 'INT_LT/2b', 'INT_LE/2b', diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -3,6 +3,7 @@ import py from pypy import conftest +from pypy.jit.codewriter import longlong from pypy.jit.codewriter.policy import JitPolicy, StopAtXPolicy from pypy.jit.metainterp import pyjitpl, history from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT @@ -14,7 +15,8 @@ loop_invariant, elidable, promote, jit_debug, assert_green, AssertGreenFailed, unroll_safe, current_trace_length, look_inside_iff, isconstant, isvirtual, promote_string, set_param, record_known_class) -from 
pypy.rlib.rarithmetic import ovfcheck +from pypy.rlib.longlong2float import float2longlong +from pypy.rlib.rarithmetic import ovfcheck, is_valid_int from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rpython.ootypesystem import ootype @@ -292,7 +294,7 @@ assert res == f(6, sys.maxint, 32, 48) res = self.meta_interp(f, [sys.maxint, 6, 32, 48]) assert res == f(sys.maxint, 6, 32, 48) - + def test_loop_invariant_intbox(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) @@ -953,7 +955,7 @@ self.meta_interp(f, [20], repeat=7) # the loop and the entry path as a single trace self.check_jitcell_token_count(1) - + # we get: # ENTER - compile the new loop and the entry bridge # ENTER - compile the leaving path @@ -1470,7 +1472,7 @@ assert res == f(299) self.check_resops(guard_class=0, guard_nonnull=4, guard_nonnull_class=4, guard_isnull=2) - + def test_merge_guardnonnull_guardvalue(self): from pypy.rlib.objectmodel import instantiate @@ -1499,7 +1501,7 @@ assert res == f(299) self.check_resops(guard_value=4, guard_class=0, guard_nonnull=4, guard_nonnull_class=0, guard_isnull=2) - + def test_merge_guardnonnull_guardvalue_2(self): from pypy.rlib.objectmodel import instantiate @@ -1528,7 +1530,7 @@ assert res == f(299) self.check_resops(guard_value=4, guard_class=0, guard_nonnull=4, guard_nonnull_class=0, guard_isnull=2) - + def test_merge_guardnonnull_guardclass_guardvalue(self): from pypy.rlib.objectmodel import instantiate @@ -2296,7 +2298,7 @@ self.check_resops(int_rshift=3) bigval = 1 - while (bigval << 3).__class__ is int: + while is_valid_int(bigval << 3): bigval = bigval << 1 assert self.meta_interp(f, [bigval, 5]) == 0 @@ -2341,7 +2343,7 @@ self.check_resops(int_rshift=3) bigval = 1 - while (bigval << 3).__class__ is int: + while is_valid_int(bigval << 3): bigval = bigval << 1 assert self.meta_interp(f, [bigval, 5]) == 0 @@ -2636,7 +2638,7 @@ return sa assert self.meta_interp(f, [20]) == f(20) self.check_resops(int_lt=6, int_le=2, 
int_ge=4, int_gt=3) - + def test_intbounds_not_generalized2(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'node']) @@ -2677,7 +2679,7 @@ assert self.meta_interp(f, [20, 3]) == f(20, 3) self.check_jitcell_token_count(1) self.check_target_token_count(5) - + def test_max_retrace_guards(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'a']) @@ -2815,7 +2817,7 @@ for cell in get_stats().get_all_jitcell_tokens(): # Initialal trace with two labels and 5 retraces assert len(cell.target_tokens) <= 7 - + def test_nested_retrace(self): myjitdriver = JitDriver(greens = ['pc'], reds = ['n', 'a', 'i', 'j', 'sa']) @@ -3784,6 +3786,25 @@ assert res == 11 * 12 * 13 self.check_operations_history(int_add=3, int_mul=2) + def test_setinteriorfield(self): + A = lltype.GcArray(lltype.Struct('S', ('x', lltype.Signed))) + a = lltype.malloc(A, 5, immortal=True) + def g(n): + a[n].x = n + 2 + return a[n].x + res = self.interp_operations(g, [1]) + assert res == 3 + + def test_float2longlong(self): + def f(n): + return float2longlong(n) + + for x in [2.5, float("nan"), -2.5, float("inf")]: + # There are tests elsewhere to verify the correctness of this. 
+ expected = float2longlong(x) + res = self.interp_operations(f, [x]) + assert longlong.getfloatstorage(res) == expected + class TestLLtype(BaseLLtypeTests, LLJitMixin): def test_tagged(self): diff --git a/pypy/jit/metainterp/test/test_list.py b/pypy/jit/metainterp/test/test_list.py --- a/pypy/jit/metainterp/test/test_list.py +++ b/pypy/jit/metainterp/test/test_list.py @@ -1,4 +1,5 @@ import py +from pypy.rlib.objectmodel import newlist_hint from pypy.rlib.jit import JitDriver from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin @@ -228,6 +229,28 @@ self.check_resops({'jump': 1, 'int_gt': 2, 'int_add': 2, 'guard_true': 2, 'int_sub': 2}) + def test_newlist_hint(self): + def f(i): + l = newlist_hint(i) + l[0] = 55 + return len(l) + + r = self.interp_operations(f, [3]) + assert r == 0 + + def test_newlist_hint_optimized(self): + driver = JitDriver(greens = [], reds = ['i']) + + def f(i): + while i > 0: + driver.jit_merge_point(i=i) + l = newlist_hint(5) + l.append(1) + i -= l[0] + + self.meta_interp(f, [10], listops=True) + self.check_resops(new_array=0, call=0) + class TestOOtype(ListTests, OOJitMixin): pass diff --git a/pypy/jit/tl/tlc.py b/pypy/jit/tl/tlc.py --- a/pypy/jit/tl/tlc.py +++ b/pypy/jit/tl/tlc.py @@ -6,6 +6,8 @@ from pypy.jit.tl.tlopcode import * from pypy.jit.tl import tlopcode from pypy.rlib.jit import JitDriver, elidable +from pypy.rlib.rarithmetic import is_valid_int + class Obj(object): @@ -219,7 +221,7 @@ class Frame(object): def __init__(self, args, pc): - assert isinstance(pc, int) + assert is_valid_int(pc) self.args = args self.pc = pc self.stack = [] @@ -239,7 +241,7 @@ return interp_eval(code, pc, args, pool).int_o() def interp_eval(code, pc, args, pool): - assert isinstance(pc, int) + assert is_valid_int(pc) frame = Frame(args, pc) pc = frame.pc diff --git a/pypy/module/__builtin__/app_inspect.py b/pypy/module/__builtin__/app_inspect.py --- a/pypy/module/__builtin__/app_inspect.py +++ b/pypy/module/__builtin__/app_inspect.py @@ 
-8,8 +8,6 @@ from __pypy__ import lookup_special def _caller_locals(): - # note: the reason why this is working is because the functions in here are - # compiled by geninterp, so they don't have a frame return sys._getframe(0).f_locals def vars(*obj): @@ -26,17 +24,6 @@ except AttributeError: raise TypeError, "vars() argument must have __dict__ attribute" -# Replaced by the interp-level helper space.callable(): -##def callable(ob): -## import __builtin__ # XXX this is insane but required for now for geninterp -## for c in type(ob).__mro__: -## if '__call__' in c.__dict__: -## if isinstance(ob, __builtin__._instance): # old style instance! -## return getattr(ob, '__call__', None) is not None -## return True -## else: -## return False - def dir(*args): """dir([object]) -> list of strings diff --git a/pypy/module/__builtin__/interp_memoryview.py b/pypy/module/__builtin__/interp_memoryview.py --- a/pypy/module/__builtin__/interp_memoryview.py +++ b/pypy/module/__builtin__/interp_memoryview.py @@ -69,6 +69,10 @@ return W_MemoryView(buf) def descr_buffer(self, space): + """Note that memoryview() objects in PyPy support buffer(), whereas + not in CPython; but CPython supports passing memoryview() to most + built-in functions that accept buffers, with the notable exception + of the buffer() built-in.""" return space.wrap(self.buf) def descr_tobytes(self, space): diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -1,5 +1,5 @@ +import sys -# Package initialisation from pypy.interpreter.mixedmodule import MixedModule from pypy.module.imp.importing import get_pyc_magic @@ -12,6 +12,21 @@ "UnicodeBuilder": "interp_builders.W_UnicodeBuilder", } +class TimeModule(MixedModule): + appleveldefs = {} + interpleveldefs = {} + if sys.platform.startswith("linux"): + from pypy.module.__pypy__ import interp_time + interpleveldefs["clock_gettime"] = "interp_time.clock_gettime" + 
interpleveldefs["clock_getres"] = "interp_time.clock_getres" + for name in [ + "CLOCK_REALTIME", "CLOCK_MONOTONIC", "CLOCK_MONOTONIC_RAW", + "CLOCK_PROCESS_CPUTIME_ID", "CLOCK_THREAD_CPUTIME_ID" + ]: + if getattr(interp_time, name) is not None: + interpleveldefs[name] = "space.wrap(interp_time.%s)" % name + + class Module(MixedModule): appleveldefs = { } @@ -32,6 +47,7 @@ submodules = { "builders": BuildersModule, + "time": TimeModule, } def setup_after_space_initialization(self): diff --git a/pypy/module/__pypy__/interp_time.py b/pypy/module/__pypy__/interp_time.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/interp_time.py @@ -0,0 +1,65 @@ +from __future__ import with_statement +import sys + +from pypy.interpreter.error import exception_from_errno +from pypy.interpreter.gateway import unwrap_spec +from pypy.rpython.lltypesystem import rffi, lltype +from pypy.rpython.tool import rffi_platform +from pypy.translator.tool.cbuild import ExternalCompilationInfo + + +class CConfig: + _compilation_info_ = ExternalCompilationInfo( + includes=["time.h"], + libraries=["rt"], + ) + + HAS_CLOCK_GETTIME = rffi_platform.Has('clock_gettime') + + CLOCK_REALTIME = rffi_platform.DefinedConstantInteger("CLOCK_REALTIME") + CLOCK_MONOTONIC = rffi_platform.DefinedConstantInteger("CLOCK_MONOTONIC") + CLOCK_MONOTONIC_RAW = rffi_platform.DefinedConstantInteger("CLOCK_MONOTONIC_RAW") + CLOCK_PROCESS_CPUTIME_ID = rffi_platform.DefinedConstantInteger("CLOCK_PROCESS_CPUTIME_ID") + CLOCK_THREAD_CPUTIME_ID = rffi_platform.DefinedConstantInteger("CLOCK_THREAD_CPUTIME_ID") + + TIMESPEC = rffi_platform.Struct("struct timespec", [ + ("tv_sec", rffi.TIME_T), + ("tv_nsec", rffi.LONG), + ]) + +cconfig = rffi_platform.configure(CConfig) + +HAS_CLOCK_GETTIME = cconfig["HAS_CLOCK_GETTIME"] + +CLOCK_REALTIME = cconfig["CLOCK_REALTIME"] +CLOCK_MONOTONIC = cconfig["CLOCK_MONOTONIC"] +CLOCK_MONOTONIC_RAW = cconfig["CLOCK_MONOTONIC_RAW"] +CLOCK_PROCESS_CPUTIME_ID = 
cconfig["CLOCK_PROCESS_CPUTIME_ID"] +CLOCK_THREAD_CPUTIME_ID = cconfig["CLOCK_THREAD_CPUTIME_ID"] + +TIMESPEC = cconfig["TIMESPEC"] + +c_clock_gettime = rffi.llexternal("clock_gettime", + [lltype.Signed, lltype.Ptr(TIMESPEC)], rffi.INT, + compilation_info=CConfig._compilation_info_, threadsafe=False +) +c_clock_getres = rffi.llexternal("clock_getres", + [lltype.Signed, lltype.Ptr(TIMESPEC)], rffi.INT, + compilation_info=CConfig._compilation_info_, threadsafe=False +) + + at unwrap_spec(clk_id="c_int") +def clock_gettime(space, clk_id): + with lltype.scoped_alloc(TIMESPEC) as tp: + ret = c_clock_gettime(clk_id, tp) + if ret != 0: + raise exception_from_errno(space, space.w_IOError) + return space.wrap(tp.c_tv_sec + tp.c_tv_nsec * 1e-9) + + at unwrap_spec(clk_id="c_int") +def clock_getres(space, clk_id): + with lltype.scoped_alloc(TIMESPEC) as tp: + ret = c_clock_getres(clk_id, tp) + if ret != 0: + raise exception_from_errno(space, space.w_IOError) + return space.wrap(tp.c_tv_sec + tp.c_tv_nsec * 1e-9) diff --git a/pypy/module/__pypy__/test/test_time.py b/pypy/module/__pypy__/test/test_time.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/test/test_time.py @@ -0,0 +1,26 @@ +import py + +from pypy.module.__pypy__.interp_time import HAS_CLOCK_GETTIME + + +class AppTestTime(object): + def setup_class(cls): + if not HAS_CLOCK_GETTIME: + py.test.skip("need time.clock_gettime") + + def test_clock_realtime(self): + from __pypy__ import time + res = time.clock_gettime(time.CLOCK_REALTIME) + assert isinstance(res, float) + + def test_clock_monotonic(self): + from __pypy__ import time + a = time.clock_gettime(time.CLOCK_MONOTONIC) + b = time.clock_gettime(time.CLOCK_MONOTONIC) + assert a <= b + + def test_clock_getres(self): + from __pypy__ import time + res = time.clock_getres(time.CLOCK_REALTIME) + assert res > 0.0 + assert res <= 1.0 diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ 
b/pypy/module/_ast/test/test_ast.py @@ -1,9 +1,10 @@ import py - +from pypy.conftest import gettestobjspace class AppTestAST: def setup_class(cls): + cls.space = gettestobjspace(usemodules=['struct']) cls.w_ast = cls.space.appexec([], """(): import _ast return _ast""") diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -4,7 +4,7 @@ class AppTestCodecs: def setup_class(cls): - space = gettestobjspace(usemodules=('unicodedata',)) + space = gettestobjspace(usemodules=('unicodedata', 'struct')) cls.space = space def test_register_noncallable(self): diff --git a/pypy/module/_continuation/test/test_zpickle.py b/pypy/module/_continuation/test/test_zpickle.py --- a/pypy/module/_continuation/test/test_zpickle.py +++ b/pypy/module/_continuation/test/test_zpickle.py @@ -106,8 +106,9 @@ version = 0 def setup_class(cls): - cls.space = gettestobjspace(usemodules=('_continuation',), + cls.space = gettestobjspace(usemodules=('_continuation', 'struct'), CALL_METHOD=True) + cls.space.config.translation.continuation = True cls.space.appexec([], """(): global continulet, A, __name__ diff --git a/pypy/module/_hashlib/test/test_hashlib.py b/pypy/module/_hashlib/test/test_hashlib.py --- a/pypy/module/_hashlib/test/test_hashlib.py +++ b/pypy/module/_hashlib/test/test_hashlib.py @@ -3,7 +3,7 @@ class AppTestHashlib: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['_hashlib']) + cls.space = gettestobjspace(usemodules=['_hashlib', 'array', 'struct']) def test_simple(self): import _hashlib diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py --- a/pypy/module/_io/test/test_io.py +++ b/pypy/module/_io/test/test_io.py @@ -158,7 +158,7 @@ class AppTestOpen: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['_io', '_locale']) + cls.space = gettestobjspace(usemodules=['_io', '_locale', 'array', 'struct']) 
tmpfile = udir.join('tmpfile').ensure() cls.w_tmpfile = cls.space.wrap(str(tmpfile)) diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -92,7 +92,8 @@ class AppTestSocketConnection(BaseConnectionTest): def setup_class(cls): - space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal')) + space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal', + 'struct', 'array')) cls.space = space cls.w_connections = space.newlist([]) diff --git a/pypy/module/_multiprocessing/test/test_semaphore.py b/pypy/module/_multiprocessing/test/test_semaphore.py --- a/pypy/module/_multiprocessing/test/test_semaphore.py +++ b/pypy/module/_multiprocessing/test/test_semaphore.py @@ -2,6 +2,7 @@ from pypy.module._multiprocessing.interp_semaphore import ( RECURSIVE_MUTEX, SEMAPHORE) + class AppTestSemaphore: def setup_class(cls): space = gettestobjspace(usemodules=('_multiprocessing', 'thread')) diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -6,7 +6,7 @@ from pypy.rpython.lltypesystem import lltype, rffi def setup_module(mod): - mod.space = gettestobjspace(usemodules=['_socket', 'array']) + mod.space = gettestobjspace(usemodules=['_socket', 'array', 'struct']) global socket import socket mod.w_socket = space.appexec([], "(): import _socket as m; return m") @@ -372,10 +372,9 @@ def test_socket_connect(self): import _socket, os s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM, 0) - # XXX temporarily we use python.org to test, will have more robust tests - # in the absence of a network connection later when more parts of the - # socket API are implemented. Currently skip the test if there is no - # connection. 
+ # it would be nice to have a test which works even if there is no + # network connection. However, this one is "good enough" for now. Skip + # it if there is no connection. try: s.connect(("www.python.org", 80)) except _socket.gaierror, ex: diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -2,6 +2,7 @@ import os import py + class AppTestSSL: def setup_class(cls): space = gettestobjspace(usemodules=('_ssl', '_socket')) @@ -29,7 +30,6 @@ assert isinstance(_ssl.SSL_ERROR_EOF, int) assert isinstance(_ssl.SSL_ERROR_INVALID_ERROR_CODE, int) - assert isinstance(_ssl.OPENSSL_VERSION_NUMBER, (int, long)) assert isinstance(_ssl.OPENSSL_VERSION_INFO, tuple) assert len(_ssl.OPENSSL_VERSION_INFO) == 5 assert isinstance(_ssl.OPENSSL_VERSION, str) @@ -90,7 +90,7 @@ class AppTestConnectedSSL: def setup_class(cls): - space = gettestobjspace(usemodules=('_ssl', '_socket')) + space = gettestobjspace(usemodules=('_ssl', '_socket', 'struct')) cls.space = space def setup_method(self, method): @@ -179,7 +179,7 @@ # to exercise the poll() calls def setup_class(cls): - space = gettestobjspace(usemodules=('_ssl', '_socket')) + space = gettestobjspace(usemodules=('_ssl', '_socket', 'struct')) cls.space = space cls.space.appexec([], """(): import socket; socket.setdefaulttimeout(1) diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -824,6 +824,8 @@ pypy_decls.append("#ifdef __cplusplus") pypy_decls.append("extern \"C\" {") pypy_decls.append("#endif\n") + pypy_decls.append('#define Signed long /* xxx temporary fix */\n') + pypy_decls.append('#define Unsigned unsigned long /* xxx temporary fix */\n') for decl in FORWARD_DECLS: pypy_decls.append("%s;" % (decl,)) @@ -855,6 +857,8 @@ typ = 'PyObject*' pypy_decls.append('PyAPI_DATA(%s) %s;' % (typ, name)) + pypy_decls.append('#undef Signed /* xxx 
temporary fix */\n') + pypy_decls.append('#undef Unsigned /* xxx temporary fix */\n') pypy_decls.append("#ifdef __cplusplus") pypy_decls.append("}") pypy_decls.append("#endif") diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -10,7 +10,7 @@ [('next', PyInterpreterState)], PyInterpreterStateStruct) PyThreadState = lltype.Ptr(cpython_struct( - "PyThreadState", + "PyThreadState", [('interp', PyInterpreterState), ('dict', PyObject), ])) @@ -19,12 +19,15 @@ def PyEval_SaveThread(space): """Release the global interpreter lock (if it has been created and thread support is enabled) and reset the thread state to NULL, returning the - previous thread state (which is not NULL except in PyPy). If the lock has been created, + previous thread state. If the lock has been created, the current thread must have acquired it. (This function is available even when thread support is disabled at compile time.)""" + state = space.fromcache(InterpreterState) if rffi.aroundstate.before: rffi.aroundstate.before() - return lltype.nullptr(PyThreadState.TO) + tstate = state.swap_thread_state( + space, lltype.nullptr(PyThreadState.TO)) + return tstate @cpython_api([PyThreadState], lltype.Void) def PyEval_RestoreThread(space, tstate): @@ -35,6 +38,8 @@ when thread support is disabled at compile time.)""" if rffi.aroundstate.after: rffi.aroundstate.after() + state = space.fromcache(InterpreterState) + state.swap_thread_state(space, tstate) @cpython_api([], lltype.Void) def PyEval_InitThreads(space): @@ -67,28 +72,91 @@ dealloc=ThreadState_dealloc) from pypy.interpreter.executioncontext import ExecutionContext + +# Keep track of the ThreadStateCapsule for a particular execution context. The +# default is for new execution contexts not to have one; it is allocated on the +# first cpyext-based request for it. 
ExecutionContext.cpyext_threadstate = ThreadStateCapsule(None) +# Also keep track of whether it has been initialized yet or not (None is a valid +# PyThreadState for an execution context to have, when the GIL has been +# released, so a check against that can't be used to determine the need for +# initialization). +ExecutionContext.cpyext_initialized_threadstate = False + +def cleanup_cpyext_state(self): + try: + del self.cpyext_threadstate + except AttributeError: + pass + self.cpyext_initialized_threadstate = False +ExecutionContext.cleanup_cpyext_state = cleanup_cpyext_state + class InterpreterState(object): def __init__(self, space): self.interpreter_state = lltype.malloc( PyInterpreterState.TO, flavor='raw', zero=True, immortal=True) def new_thread_state(self, space): + """ + Create a new ThreadStateCapsule to hold the PyThreadState for a + particular execution context. + + :param space: A space. + + :returns: A new ThreadStateCapsule holding a newly allocated + PyThreadState and referring to this interpreter state. + """ capsule = ThreadStateCapsule(space) ts = capsule.memory ts.c_interp = self.interpreter_state ts.c_dict = make_ref(space, space.newdict()) return capsule + def get_thread_state(self, space): + """ + Get the current PyThreadState for the current execution context. + + :param space: A space. + + :returns: The current PyThreadState for the current execution context, + or None if it does not have one. + """ ec = space.getexecutioncontext() return self._get_thread_state(space, ec).memory + + def swap_thread_state(self, space, tstate): + """ + Replace the current thread state of the current execution context with a + new thread state. + + :param space: The space. + + :param tstate: The new PyThreadState for the current execution context. + + :returns: The old thread state for the current execution context, either + None or a PyThreadState. 
+ """ + ec = space.getexecutioncontext() + capsule = self._get_thread_state(space, ec) + old_tstate = capsule.memory + capsule.memory = tstate + return old_tstate + def _get_thread_state(self, space, ec): - if ec.cpyext_threadstate.memory == lltype.nullptr(PyThreadState.TO): + """ + Get the ThreadStateCapsule for the given execution context, possibly + creating a new one if it does not already have one. + + :param space: The space. + :param ec: The ExecutionContext of which to get the thread state. + :returns: The ThreadStateCapsule for the given execution context. + """ + if not ec.cpyext_initialized_threadstate: ec.cpyext_threadstate = self.new_thread_state(space) - + ec.cpyext_initialized_threadstate = True return ec.cpyext_threadstate @cpython_api([], PyThreadState, error=CANNOT_FAIL) @@ -105,13 +173,8 @@ def PyThreadState_Swap(space, tstate): """Swap the current thread state with the thread state given by the argument tstate, which may be NULL. The global interpreter lock must be held.""" - # All cpyext calls release and acquire the GIL, so this function has no - # side-effects - if tstate: - return lltype.nullptr(PyThreadState.TO) - else: - state = space.fromcache(InterpreterState) - return state.get_thread_state(space) + state = space.fromcache(InterpreterState) + return state.swap_thread_state(space, tstate) @cpython_api([PyThreadState], lltype.Void) def PyEval_AcquireThread(space, tstate): diff --git a/pypy/module/cpyext/src/getargs.c b/pypy/module/cpyext/src/getargs.c --- a/pypy/module/cpyext/src/getargs.c +++ b/pypy/module/cpyext/src/getargs.c @@ -23,16 +23,33 @@ #define FLAG_COMPAT 1 #define FLAG_SIZE_T 2 +typedef int (*destr_t)(PyObject *, void *); + + +/* Keep track of "objects" that have been allocated or initialized and + which will need to be deallocated or cleaned up somehow if overall + parsing fails. 
+*/ +typedef struct { + void *item; + destr_t destructor; +} freelistentry_t; + +typedef struct { + int first_available; + freelistentry_t *entries; +} freelist_t; + /* Forward */ static int vgetargs1(PyObject *, const char *, va_list *, int); static void seterror(int, const char *, int *, const char *, const char *); static char *convertitem(PyObject *, const char **, va_list *, int, int *, - char *, size_t, PyObject **); + char *, size_t, freelist_t *); static char *converttuple(PyObject *, const char **, va_list *, int, - int *, char *, size_t, int, PyObject **); + int *, char *, size_t, int, freelist_t *); static char *convertsimple(PyObject *, const char **, va_list *, int, char *, - size_t, PyObject **); + size_t, freelist_t *); static Py_ssize_t convertbuffer(PyObject *, void **p, char **); static int getbuffer(PyObject *, Py_buffer *, char**); @@ -129,57 +146,56 @@ /* Handle cleanup of allocated memory in case of exception */ -static void -cleanup_ptr(void *ptr) +static int +cleanup_ptr(PyObject *self, void *ptr) { - PyMem_FREE(ptr); -} - -static void -cleanup_buffer(void *ptr) -{ - PyBuffer_Release((Py_buffer *) ptr); + if (ptr) { + PyMem_FREE(ptr); + } + return 0; } static int -addcleanup(void *ptr, PyObject **freelist, void (*destr)(void *)) +cleanup_buffer(PyObject *self, void *ptr) { - PyObject *cobj; - if (!*freelist) { - *freelist = PyList_New(0); - if (!*freelist) { - destr(ptr); - return -1; - } - } - cobj = PyCObject_FromVoidPtr(ptr, destr); - if (!cobj) { - destr(ptr); - return -1; - } - if (PyList_Append(*freelist, cobj)) { - Py_DECREF(cobj); - return -1; - } - Py_DECREF(cobj); - return 0; + Py_buffer *buf = (Py_buffer *)ptr; + if (buf) { + PyBuffer_Release(buf); + } + return 0; } static int -cleanreturn(int retval, PyObject *freelist) +addcleanup(void *ptr, freelist_t *freelist, destr_t destructor) { - if (freelist && retval != 0) { - /* We were successful, reset the destructors so that they - don't get called. 
*/ - Py_ssize_t len = PyList_GET_SIZE(freelist), i; - for (i = 0; i < len; i++) - ((PyCObject *) PyList_GET_ITEM(freelist, i)) - ->destructor = NULL; - } - Py_XDECREF(freelist); - return retval; + int index; + + index = freelist->first_available; + freelist->first_available += 1; + + freelist->entries[index].item = ptr; + freelist->entries[index].destructor = destructor; + + return 0; } +static int +cleanreturn(int retval, freelist_t *freelist) +{ + int index; + + if (retval == 0) { + /* A failure occurred, therefore execute all of the cleanup + functions. + */ + for (index = 0; index < freelist->first_available; ++index) { + freelist->entries[index].destructor(NULL, + freelist->entries[index].item); + } + } + PyMem_Free(freelist->entries); + return retval; +} static int vgetargs1(PyObject *args, const char *format, va_list *p_va, int flags) @@ -195,7 +211,7 @@ const char *formatsave = format; Py_ssize_t i, len; char *msg; - PyObject *freelist = NULL; + freelist_t freelist = {0, NULL}; int compat = flags & FLAG_COMPAT; assert(compat || (args != (PyObject*)NULL)); @@ -251,16 +267,18 @@ format = formatsave; + freelist.entries = PyMem_New(freelistentry_t, max); + if (compat) { if (max == 0) { if (args == NULL) - return 1; + return cleanreturn(1, &freelist); PyOS_snprintf(msgbuf, sizeof(msgbuf), "%.200s%s takes no arguments", fname==NULL ? "function" : fname, fname==NULL ? "" : "()"); PyErr_SetString(PyExc_TypeError, msgbuf); - return 0; + return cleanreturn(0, &freelist); } else if (min == 1 && max == 1) { if (args == NULL) { @@ -269,26 +287,26 @@ fname==NULL ? "function" : fname, fname==NULL ? 
"" : "()"); PyErr_SetString(PyExc_TypeError, msgbuf); - return 0; + return cleanreturn(0, &freelist); } msg = convertitem(args, &format, p_va, flags, levels, msgbuf, sizeof(msgbuf), &freelist); if (msg == NULL) - return cleanreturn(1, freelist); + return cleanreturn(1, &freelist); seterror(levels[0], msg, levels+1, fname, message); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } else { PyErr_SetString(PyExc_SystemError, "old style getargs format uses new features"); - return 0; + return cleanreturn(0, &freelist); } } if (!PyTuple_Check(args)) { PyErr_SetString(PyExc_SystemError, "new style getargs format but argument is not a tuple"); - return 0; + return cleanreturn(0, &freelist); } len = PyTuple_GET_SIZE(args); @@ -308,7 +326,7 @@ message = msgbuf; } PyErr_SetString(PyExc_TypeError, message); - return 0; + return cleanreturn(0, &freelist); } for (i = 0; i < len; i++) { @@ -319,7 +337,7 @@ sizeof(msgbuf), &freelist); if (msg) { seterror(i+1, msg, levels, fname, message); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } } @@ -328,10 +346,10 @@ *format != '|' && *format != ':' && *format != ';') { PyErr_Format(PyExc_SystemError, "bad format string: %.200s", formatsave); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } - return cleanreturn(1, freelist); + return cleanreturn(1, &freelist); } @@ -395,7 +413,7 @@ static char * converttuple(PyObject *arg, const char **p_format, va_list *p_va, int flags, int *levels, char *msgbuf, size_t bufsize, int toplevel, - PyObject **freelist) + freelist_t *freelist) { int level = 0; int n = 0; @@ -472,7 +490,7 @@ static char * convertitem(PyObject *arg, const char **p_format, va_list *p_va, int flags, - int *levels, char *msgbuf, size_t bufsize, PyObject **freelist) + int *levels, char *msgbuf, size_t bufsize, freelist_t *freelist) { char *msg; const char *format = *p_format; @@ -539,7 +557,7 @@ static char * convertsimple(PyObject *arg, const char **p_format, 
va_list *p_va, int flags, - char *msgbuf, size_t bufsize, PyObject **freelist) + char *msgbuf, size_t bufsize, freelist_t *freelist) { /* For # codes */ #define FETCH_SIZE int *q=NULL;Py_ssize_t *q2=NULL;\ @@ -1501,7 +1519,9 @@ const char *fname, *msg, *custom_msg, *keyword; int min = INT_MAX; int i, len, nargs, nkeywords; - PyObject *freelist = NULL, *current_arg; + PyObject *current_arg; + freelist_t freelist = {0, NULL}; + assert(args != NULL && PyTuple_Check(args)); assert(keywords == NULL || PyDict_Check(keywords)); @@ -1525,6 +1545,8 @@ for (len=0; kwlist[len]; len++) continue; + freelist.entries = PyMem_New(freelistentry_t, len); + nargs = PyTuple_GET_SIZE(args); nkeywords = (keywords == NULL) ? 0 : PyDict_Size(keywords); if (nargs + nkeywords > len) { @@ -1535,7 +1557,7 @@ len, (len == 1) ? "" : "s", nargs + nkeywords); - return 0; + return cleanreturn(0, &freelist); } /* convert tuple args and keyword args in same loop, using kwlist to drive process */ @@ -1549,7 +1571,7 @@ PyErr_Format(PyExc_RuntimeError, "More keyword list entries (%d) than " "format specifiers (%d)", len, i); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } current_arg = NULL; if (nkeywords) { @@ -1563,11 +1585,11 @@ "Argument given by name ('%s') " "and position (%d)", keyword, i+1); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } } else if (nkeywords && PyErr_Occurred()) - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); else if (i < nargs) current_arg = PyTuple_GET_ITEM(args, i); @@ -1576,7 +1598,7 @@ levels, msgbuf, sizeof(msgbuf), &freelist); if (msg) { seterror(i+1, msg, levels, fname, custom_msg); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } continue; } @@ -1585,14 +1607,14 @@ PyErr_Format(PyExc_TypeError, "Required argument " "'%s' (pos %d) not found", keyword, i+1); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } /* current code reports success when all 
required args * fulfilled and no keyword args left, with no further * validation. XXX Maybe skip this in debug build ? */ if (!nkeywords) - return cleanreturn(1, freelist); + return cleanreturn(1, &freelist); /* We are into optional args, skip thru to any remaining * keyword args */ @@ -1600,7 +1622,7 @@ if (msg) { PyErr_Format(PyExc_RuntimeError, "%s: '%s'", msg, format); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } } @@ -1608,7 +1630,7 @@ PyErr_Format(PyExc_RuntimeError, "more argument specifiers than keyword list entries " "(remaining format:'%s')", format); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } /* make sure there are no extraneous keyword arguments */ @@ -1621,7 +1643,7 @@ if (!PyString_Check(key)) { PyErr_SetString(PyExc_TypeError, "keywords must be strings"); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } ks = PyString_AsString(key); for (i = 0; i < len; i++) { @@ -1635,12 +1657,12 @@ "'%s' is an invalid keyword " "argument for this function", ks); - return cleanreturn(0, freelist); + return cleanreturn(0, &freelist); } } } - return cleanreturn(1, freelist); + return cleanreturn(1, &freelist); } diff --git a/pypy/module/cpyext/stringobject.py b/pypy/module/cpyext/stringobject.py --- a/pypy/module/cpyext/stringobject.py +++ b/pypy/module/cpyext/stringobject.py @@ -130,6 +130,11 @@ @cpython_api([PyObject], rffi.CCHARP, error=0) def PyString_AsString(space, ref): + if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_str: + pass # typecheck returned "ok" without forcing 'ref' at all + elif not PyString_Check(space, ref): # otherwise, use the alternate way + raise OperationError(space.w_TypeError, space.wrap( + "PyString_AsString only support strings")) ref_str = rffi.cast(PyStringObject, ref) if not ref_str.c_buffer: # copy string buffer diff --git a/pypy/module/cpyext/test/conftest.py b/pypy/module/cpyext/test/conftest.py --- 
a/pypy/module/cpyext/test/conftest.py +++ b/pypy/module/cpyext/test/conftest.py @@ -10,7 +10,7 @@ return False def pytest_funcarg__space(request): - return gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi']) + return gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array']) def pytest_funcarg__api(request): return request.cls.api diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -19,7 +19,8 @@ class BaseApiTest(LeakCheckingTest): def setup_class(cls): - cls.space = space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi']) + cls.space = space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', + 'array']) # warm up reference counts: # - the posix module allocates a HCRYPTPROV on Windows diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -1,3 +1,4 @@ +from pypy.conftest import gettestobjspace from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase import py diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -35,7 +35,7 @@ class AppTestApi: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi']) + cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array']) from pypy.rlib.libffi import get_libc_name cls.w_libc = cls.space.wrap(get_libc_name()) @@ -106,10 +106,7 @@ del obj import gc; gc.collect() - try: - del space.getexecutioncontext().cpyext_threadstate - except AttributeError: - pass + space.getexecutioncontext().cleanup_cpyext_state() for w_obj in state.non_heaptypes_w: Py_DecRef(space, w_obj) @@ -168,8 +165,9 @@ return leaking class 
AppTestCpythonExtensionBase(LeakCheckingTest): + def setup_class(cls): - cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi']) + cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array']) cls.space.getbuiltinmodule("cpyext") from pypy.module.imp.importing import importhook importhook(cls.space, "os") # warm up reference counts diff --git a/pypy/module/cpyext/test/test_import.py b/pypy/module/cpyext/test/test_import.py --- a/pypy/module/cpyext/test/test_import.py +++ b/pypy/module/cpyext/test/test_import.py @@ -19,7 +19,7 @@ space.wrap('__name__'))) == 'foobar' def test_getmoduledict(self, space, api): - testmod = "binascii" + testmod = "_functools" w_pre_dict = api.PyImport_GetModuleDict() assert not space.is_true(space.contains(w_pre_dict, space.wrap(testmod))) diff --git a/pypy/module/cpyext/test/test_longobject.py b/pypy/module/cpyext/test/test_longobject.py --- a/pypy/module/cpyext/test/test_longobject.py +++ b/pypy/module/cpyext/test/test_longobject.py @@ -101,9 +101,9 @@ space.wrap((2, 7)))): py.test.skip("unsupported before Python 2.7") - assert api._PyLong_Sign(space.wrap(0L)) == 0 - assert api._PyLong_Sign(space.wrap(2L)) == 1 - assert api._PyLong_Sign(space.wrap(-2L)) == -1 + assert api._PyLong_Sign(space.wraplong(0L)) == 0 + assert api._PyLong_Sign(space.wraplong(2L)) == 1 + assert api._PyLong_Sign(space.wraplong(-2L)) == -1 assert api._PyLong_NumBits(space.wrap(0)) == 0 assert api._PyLong_NumBits(space.wrap(1)) == 1 diff --git a/pypy/module/cpyext/test/test_number.py b/pypy/module/cpyext/test/test_number.py --- a/pypy/module/cpyext/test/test_number.py +++ b/pypy/module/cpyext/test/test_number.py @@ -6,12 +6,12 @@ class TestIterator(BaseApiTest): def test_check(self, space, api): assert api.PyIndex_Check(space.wrap(12)) - assert api.PyIndex_Check(space.wrap(-12L)) + assert api.PyIndex_Check(space.wraplong(-12L)) assert not api.PyIndex_Check(space.wrap(12.1)) assert not api.PyIndex_Check(space.wrap('12')) assert 
api.PyNumber_Check(space.wrap(12)) - assert api.PyNumber_Check(space.wrap(-12L)) + assert api.PyNumber_Check(space.wraplong(-12L)) assert api.PyNumber_Check(space.wrap(12.1)) assert not api.PyNumber_Check(space.wrap('12')) assert not api.PyNumber_Check(space.wrap(1+3j)) @@ -21,7 +21,7 @@ assert api.PyLong_CheckExact(w_l) def test_number_int(self, space, api): - w_l = api.PyNumber_Int(space.wrap(123L)) + w_l = api.PyNumber_Int(space.wraplong(123L)) assert api.PyInt_CheckExact(w_l) w_l = api.PyNumber_Int(space.wrap(2 << 65)) assert api.PyLong_CheckExact(w_l) @@ -29,7 +29,7 @@ assert api.PyInt_CheckExact(w_l) def test_number_index(self, space, api): - w_l = api.PyNumber_Index(space.wrap(123L)) + w_l = api.PyNumber_Index(space.wraplong(123L)) assert api.PyLong_CheckExact(w_l) w_l = api.PyNumber_Index(space.wrap(42.3)) assert w_l is None diff --git a/pypy/module/cpyext/test/test_pystate.py b/pypy/module/cpyext/test/test_pystate.py --- a/pypy/module/cpyext/test/test_pystate.py +++ b/pypy/module/cpyext/test/test_pystate.py @@ -3,6 +3,10 @@ from pypy.rpython.lltypesystem.lltype import nullptr from pypy.module.cpyext.pystate import PyInterpreterState, PyThreadState from pypy.module.cpyext.pyobject import from_ref +from pypy.rpython.lltypesystem import lltype +from pypy.module.cpyext.test.test_cpyext import LeakCheckingTest, freeze_refcnts +from pypy.module.cpyext.pystate import PyThreadState_Get, PyInterpreterState_Head +from pypy.tool import leakfinder class AppTestThreads(AppTestCpythonExtensionBase): def test_allow_threads(self): @@ -21,6 +25,93 @@ # Should compile at least module.test() + + def test_thread_state_get(self): + module = self.import_extension('foo', [ + ("get", "METH_NOARGS", + """ + PyThreadState *tstate = PyThreadState_Get(); + if (tstate == NULL) { + return PyLong_FromLong(0); + } + if (tstate->interp != PyInterpreterState_Head()) { + return PyLong_FromLong(1); + } + if (tstate->interp->next != NULL) { + return PyLong_FromLong(2); + } + return 
PyLong_FromLong(3); + """), + ]) + assert module.get() == 3 + + def test_basic_threadstate_dance(self): + module = self.import_extension('foo', [ + ("dance", "METH_NOARGS", + """ + PyThreadState *old_tstate, *new_tstate; + + old_tstate = PyThreadState_Swap(NULL); + if (old_tstate == NULL) { + return PyLong_FromLong(0); + } + + new_tstate = PyThreadState_Get(); + if (new_tstate != NULL) { + return PyLong_FromLong(1); + } + + new_tstate = PyThreadState_Swap(old_tstate); + if (new_tstate != NULL) { + return PyLong_FromLong(2); + } + + new_tstate = PyThreadState_Get(); + if (new_tstate != old_tstate) { + return PyLong_FromLong(3); + } + + return PyLong_FromLong(4); + """), + ]) + assert module.dance() == 4 + + def test_threadstate_dict(self): + module = self.import_extension('foo', [ + ("getdict", "METH_NOARGS", + """ + PyObject *dict = PyThreadState_GetDict(); + Py_INCREF(dict); + return dict; + """), + ]) + assert isinstance(module.getdict(), dict) + + def test_savethread(self): + module = self.import_extension('foo', [ + ("bounce", "METH_NOARGS", + """ + PyThreadState *tstate = PyEval_SaveThread(); + if (tstate == NULL) { + return PyLong_FromLong(0); + } + + if (PyThreadState_Get() != NULL) { + return PyLong_FromLong(1); + } + + PyEval_RestoreThread(tstate); + + if (PyThreadState_Get() != tstate) { + return PyLong_FromLong(2); + } + + return PyLong_FromLong(3); + """), + ]) + + + class TestInterpreterState(BaseApiTest): def test_interpreter_head(self, space, api): state = api.PyInterpreterState_Head() @@ -29,31 +120,3 @@ def test_interpreter_next(self, space, api): state = api.PyInterpreterState_Head() assert nullptr(PyInterpreterState.TO) == api.PyInterpreterState_Next(state) - -class TestThreadState(BaseApiTest): - def test_thread_state_get(self, space, api): - ts = api.PyThreadState_Get() - assert ts != nullptr(PyThreadState.TO) - - def test_thread_state_interp(self, space, api): - ts = api.PyThreadState_Get() - assert ts.c_interp == api.PyInterpreterState_Head() 
- assert ts.c_interp.c_next == nullptr(PyInterpreterState.TO) - - def test_basic_threadstate_dance(self, space, api): - # Let extension modules call these functions, - # Not sure of the semantics in pypy though. - # (cpyext always acquires and releases the GIL around calls) - tstate = api.PyThreadState_Swap(None) - assert tstate is not None - assert not api.PyThreadState_Swap(tstate) - - api.PyEval_AcquireThread(tstate) - api.PyEval_ReleaseThread(tstate) - - def test_threadstate_dict(self, space, api): - ts = api.PyThreadState_Get() - ref = ts.c_dict - assert ref == api.PyThreadState_GetDict() - w_obj = from_ref(space, ref) - assert space.isinstance_w(w_obj, space.w_dict) diff --git a/pypy/module/cpyext/test/test_stringobject.py b/pypy/module/cpyext/test/test_stringobject.py --- a/pypy/module/cpyext/test/test_stringobject.py +++ b/pypy/module/cpyext/test/test_stringobject.py @@ -105,6 +105,15 @@ )]) assert module.string_as_string("huheduwe") == "huhe" + def test_py_string_as_string_None(self): + module = self.import_extension('foo', [ + ("string_None", "METH_VARARGS", + ''' + return PyString_AsString(Py_None); + ''' + )]) + raises(TypeError, module.string_None) + def test_AsStringAndSize(self): module = self.import_extension('foo', [ ("getstring", "METH_NOARGS", diff --git a/pypy/module/fcntl/test/test_fcntl.py b/pypy/module/fcntl/test/test_fcntl.py --- a/pypy/module/fcntl/test/test_fcntl.py +++ b/pypy/module/fcntl/test/test_fcntl.py @@ -13,7 +13,7 @@ class AppTestFcntl: def setup_class(cls): - space = gettestobjspace(usemodules=('fcntl', 'array')) + space = gettestobjspace(usemodules=('fcntl', 'array', 'struct')) cls.space = space tmpprefix = str(udir.ensure('test_fcntl', dir=1).join('tmp_')) cls.w_tmp = space.wrap(tmpprefix) diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -987,6 +987,10 @@ os.environ['LANG'] = oldlang class 
AppTestImportHooks(object): + + def setup_class(cls): + cls.space = gettestobjspace(usemodules=('struct',)) + def test_meta_path(self): tried_imports = [] class Importer(object): diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py --- a/pypy/module/itertools/test/test_itertools.py +++ b/pypy/module/itertools/test/test_itertools.py @@ -891,7 +891,7 @@ class AppTestItertools27: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['itertools']) + cls.space = gettestobjspace(usemodules=['itertools', 'struct']) if cls.space.is_true(cls.space.appexec([], """(): import sys; return sys.version_info < (2, 7) """)): diff --git a/pypy/module/marshal/interp_marshal.py b/pypy/module/marshal/interp_marshal.py --- a/pypy/module/marshal/interp_marshal.py +++ b/pypy/module/marshal/interp_marshal.py @@ -327,8 +327,10 @@ # %r not supported in rpython #u.raise_exc('invalid typecode in unmarshal: %r' % tc) c = ord(tc) - if c < 32 or c > 126: - s = '\\x' + hex(c) + if c < 16: + s = '\\x0%x' % c + elif c < 32 or c > 126: + s = '\\x%x' % c elif tc == '\\': s = r'\\' else: diff --git a/pypy/module/marshal/test/make_test_marshal.py b/pypy/module/marshal/test/make_test_marshal.py deleted file mode 100644 --- a/pypy/module/marshal/test/make_test_marshal.py +++ /dev/null @@ -1,78 +0,0 @@ - -TESTCASES = """\ - None - False - True - StopIteration - Ellipsis - 42 - -17 - sys.maxint - -1.25 - -1.25 #2 - 2+5j - 2+5j #2 - 42L - -1234567890123456789012345678901234567890L - hello # not interned - "hello" - () - (1, 2) - [] - [3, 4] - {} - {5: 6, 7: 8} - func.func_code - scopefunc.func_code - u'hello' - set() - set([1, 2]) - frozenset() - frozenset([3, 4]) -""".strip().split('\n') - -def readable(s): - for c, repl in ( - ("'", '_quote_'), ('"', '_Quote_'), (':', '_colon_'), ('.', '_dot_'), - ('[', '_list_'), (']', '_tsil_'), ('{', '_dict_'), ('}', '_tcid_'), - ('-', '_minus_'), ('+', '_plus_'), - (',', '_comma_'), ('(', '_brace_'), (')', 
'_ecarb_') ): - s = s.replace(c, repl) - lis = list(s) - for i, c in enumerate(lis): - if c.isalnum() or c == '_': - continue - lis[i] = '_' - return ''.join(lis) - -print """class AppTestMarshal: -""" -for line in TESTCASES: - line = line.strip() - name = readable(line) - version = '' - extra = '' - if line.endswith('#2'): - version = ', 2' - extra = '; assert len(s) in (9, 17)' - src = '''\ - def test_%(name)s(self): - import sys - hello = "he" - hello += "llo" - def func(x): - return lambda y: x+y - scopefunc = func(42) - import marshal, StringIO - case = %(line)s - print "case: %%-30s func=%(name)s" %% (case, ) - s = marshal.dumps(case%(version)s)%(extra)s - x = marshal.loads(s) - assert x == case - f = StringIO.StringIO() - marshal.dump(case, f) - f.seek(0) - x = marshal.load(f) - assert x == case -''' % {'name': name, 'line': line, 'version' : version, 'extra': extra} - print src diff --git a/pypy/module/marshal/test/test_marshal.py b/pypy/module/marshal/test/test_marshal.py --- a/pypy/module/marshal/test/test_marshal.py +++ b/pypy/module/marshal/test/test_marshal.py @@ -174,6 +174,11 @@ pass raises(ValueError, marshal.dumps, subtype) + def test_bad_typecode(self): + import marshal + exc = raises(ValueError, marshal.loads, chr(1)) + assert r"'\x01'" in exc.value.message + class AppTestRope(AppTestMarshal): def setup_class(cls): diff --git a/pypy/module/math/test/test_direct.py b/pypy/module/math/test/test_direct.py --- a/pypy/module/math/test/test_direct.py +++ b/pypy/module/math/test/test_direct.py @@ -55,6 +55,15 @@ ('frexp', (-1.25,), lambda x: x == (-0.625, 1)), ('modf', (4.25,), lambda x: x == (0.25, 4.0)), ('modf', (-4.25,), lambda x: x == (-0.25, -4.0)), + ('copysign', (1.5, 0.0), 1.5), + ('copysign', (1.5, -0.0), -1.5), + ('copysign', (1.5, INFINITY), 1.5), + ('copysign', (1.5, -INFINITY), -1.5), + ] + if sys.platform != 'win32': # all NaNs seem to be negative there...? 
+ IRREGCASES += [ + ('copysign', (1.5, NAN), 1.5), + ('copysign', (1.75, -NAN), -1.75), # special case for -NAN here ] OVFCASES = [ diff --git a/pypy/module/math/test/test_math.py b/pypy/module/math/test/test_math.py --- a/pypy/module/math/test/test_math.py +++ b/pypy/module/math/test/test_math.py @@ -1,3 +1,4 @@ +from __future__ import with_statement import sys from pypy.conftest import gettestobjspace from pypy.module.math.test import test_direct @@ -5,7 +6,7 @@ class AppTestMath: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['math']) + cls.space = gettestobjspace(usemodules=['math', 'struct']) cls.w_cases = cls.space.wrap(test_direct.MathTests.TESTCASES) cls.w_consistent_host = cls.space.wrap(test_direct.consistent_host) @@ -268,3 +269,7 @@ def __trunc__(self): return "truncated" assert math.trunc(foo()) == "truncated" + + def test_copysign_nan(self): + import math + assert math.copysign(1.0, float('-nan')) == -1.0 diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -37,26 +37,44 @@ 'True_': 'types.Bool.True', 'False_': 'types.Bool.False', + 'typeinfo': 'interp_dtype.get_dtype_cache(space).w_typeinfo', + 'generic': 'interp_boxes.W_GenericBox', 'number': 'interp_boxes.W_NumberBox', 'integer': 'interp_boxes.W_IntegerBox', 'signedinteger': 'interp_boxes.W_SignedIntegerBox', 'unsignedinteger': 'interp_boxes.W_UnsignedIntegerBox', 'bool_': 'interp_boxes.W_BoolBox', + 'bool8': 'interp_boxes.W_BoolBox', 'int8': 'interp_boxes.W_Int8Box', + 'byte': 'interp_boxes.W_Int8Box', 'uint8': 'interp_boxes.W_UInt8Box', + 'ubyte': 'interp_boxes.W_UInt8Box', 'int16': 'interp_boxes.W_Int16Box', + 'short': 'interp_boxes.W_Int16Box', 'uint16': 'interp_boxes.W_UInt16Box', + 'ushort': 'interp_boxes.W_UInt16Box', 'int32': 'interp_boxes.W_Int32Box', + 'intc': 'interp_boxes.W_Int32Box', 'uint32': 'interp_boxes.W_UInt32Box', + 'uintc': 
'interp_boxes.W_UInt32Box', 'int64': 'interp_boxes.W_Int64Box', 'uint64': 'interp_boxes.W_UInt64Box', + 'longlong': 'interp_boxes.W_LongLongBox', + 'ulonglong': 'interp_boxes.W_ULongLongBox', 'int_': 'interp_boxes.W_LongBox', 'inexact': 'interp_boxes.W_InexactBox', 'floating': 'interp_boxes.W_FloatingBox', 'float_': 'interp_boxes.W_Float64Box', 'float32': 'interp_boxes.W_Float32Box', 'float64': 'interp_boxes.W_Float64Box', + 'intp': 'types.IntP.BoxType', + 'uintp': 'types.UIntP.BoxType', + 'flexible': 'interp_boxes.W_FlexibleBox', + 'character': 'interp_boxes.W_CharacterBox', + 'str_': 'interp_boxes.W_StringBox', + 'unicode_': 'interp_boxes.W_UnicodeBox', + 'void': 'interp_boxes.W_VoidBox', } # ufuncs @@ -67,6 +85,7 @@ ("arccos", "arccos"), ("arcsin", "arcsin"), ("arctan", "arctan"), + ("arctan2", "arctan2"), ("arccosh", "arccosh"), ("arcsinh", "arcsinh"), ("arctanh", "arctanh"), @@ -77,7 +96,10 @@ ("true_divide", "true_divide"), ("equal", "equal"), ("exp", "exp"), + ("exp2", "exp2"), + ("expm1", "expm1"), ("fabs", "fabs"), + ("fmod", "fmod"), ("floor", "floor"), ("ceil", "ceil"), ("greater", "greater"), @@ -89,8 +111,13 @@ ("multiply", "multiply"), ("negative", "negative"), ("not_equal", "not_equal"), + ("radians", "radians"), + ("degrees", "degrees"), + ("deg2rad", "radians"), + ("rad2deg", "degrees"), ("reciprocal", "reciprocal"), ("sign", "sign"), + ("signbit", "signbit"), ("sin", "sin"), ("sinh", "sinh"), ("subtract", "subtract"), @@ -103,10 +130,21 @@ ('bitwise_not', 'invert'), ('isnan', 'isnan'), ('isinf', 'isinf'), + ('isneginf', 'isneginf'), + ('isposinf', 'isposinf'), + ('isfinite', 'isfinite'), ('logical_and', 'logical_and'), ('logical_xor', 'logical_xor'), ('logical_not', 'logical_not'), ('logical_or', 'logical_or'), + ('log', 'log'), + ('log2', 'log2'), + ('log10', 'log10'), + ('log1p', 'log1p'), + ('power', 'power'), + ('floor_divide', 'floor_divide'), + ('logaddexp', 'logaddexp'), + ('logaddexp2', 'logaddexp2'), ]: interpleveldefs[exposed] = 
"interp_ufuncs.get(space).%s" % impl diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -16,7 +16,7 @@ a[i][i] = 1 return a -def sum(a,axis=None): +def sum(a,axis=None, out=None): '''sum(a, axis=None) Sum of array elements over a given axis. @@ -43,17 +43,17 @@ # TODO: add to doc (once it's implemented): cumsum : Cumulative sum of array elements. if not hasattr(a, "sum"): a = _numpypy.array(a) - return a.sum(axis) + return a.sum(axis=axis, out=out) -def min(a, axis=None): +def min(a, axis=None, out=None): if not hasattr(a, "min"): a = _numpypy.array(a) - return a.min(axis) + return a.min(axis=axis, out=out) -def max(a, axis=None): +def max(a, axis=None, out=None): if not hasattr(a, "max"): a = _numpypy.array(a) - return a.max(axis) + return a.max(axis=axis, out=out) def arange(start, stop=None, step=1, dtype=None): '''arange([start], stop[, step], dtype=None) diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -33,7 +33,7 @@ pass SINGLE_ARG_FUNCTIONS = ["sum", "prod", "max", "min", "all", "any", - "unegative", "flat"] + "unegative", "flat", "tostring"] TWO_ARG_FUNCTIONS = ["dot", 'take'] class FakeSpace(object): @@ -51,6 +51,8 @@ w_long = "long" w_tuple = 'tuple' w_slice = "slice" + w_str = "str" + w_unicode = "unicode" def __init__(self): """NOT_RPYTHON""" @@ -91,8 +93,12 @@ return BoolObject(obj) elif isinstance(obj, int): return IntObject(obj) + elif isinstance(obj, long): + return LongObject(obj) elif isinstance(obj, W_Root): return obj + elif isinstance(obj, str): + return StringObject(obj) raise NotImplementedError def newlist(self, items): @@ -120,6 +126,11 @@ return int(w_obj.floatval) raise NotImplementedError + def str_w(self, w_obj): + if isinstance(w_obj, StringObject): + return w_obj.v + raise NotImplementedError + def 
int(self, w_obj): if isinstance(w_obj, IntObject): return w_obj @@ -151,7 +162,13 @@ return instantiate(klass) def newtuple(self, list_w): - raise ValueError + return ListObject(list_w) + + def newdict(self): + return {} + + def setitem(self, dict, item, value): + dict[item] = value def len_w(self, w_obj): if isinstance(w_obj, ListObject): @@ -178,6 +195,11 @@ def __init__(self, intval): self.intval = intval +class LongObject(W_Root): + tp = FakeSpace.w_long + def __init__(self, intval): + self.intval = intval + class ListObject(W_Root): tp = FakeSpace.w_list def __init__(self, items): @@ -190,6 +212,11 @@ self.stop = stop self.step = step +class StringObject(W_Root): + tp = FakeSpace.w_str + def __init__(self, v): + self.v = v + class InterpreterState(object): def __init__(self, code): self.code = code @@ -407,6 +434,9 @@ w_res = neg.call(interp.space, [arr]) elif self.name == "flat": w_res = arr.descr_get_flatiter(interp.space) + elif self.name == "tostring": + arr.descr_tostring(interp.space) + w_res = None else: assert False # unreachable code elif self.name in TWO_ARG_FUNCTIONS: diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -1,24 +1,25 @@ from pypy.interpreter.baseobjspace import Wrappable -from pypy.interpreter.error import operationerrfmt +from pypy.interpreter.error import operationerrfmt, OperationError from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef from pypy.objspace.std.floattype import float_typedef +from pypy.objspace.std.stringtype import str_typedef +from pypy.objspace.std.unicodetype import unicode_typedef, unicode_from_object from pypy.objspace.std.inttype import int_typedef from pypy.rlib.rarithmetic import LONG_BIT from pypy.tool.sourcetools import func_with_new_name - MIXIN_32 = (int_typedef,) if LONG_BIT == 32 else () MIXIN_64 = (int_typedef,) if 
LONG_BIT == 64 else () def new_dtype_getter(name): - def get_dtype(space): + def _get_dtype(space): from pypy.module.micronumpy.interp_dtype import get_dtype_cache return getattr(get_dtype_cache(space), "w_%sdtype" % name) def new(space, w_subtype, w_value): - dtype = get_dtype(space) + dtype = _get_dtype(space) return dtype.itemtype.coerce_subtype(space, w_subtype, w_value) - return func_with_new_name(new, name + "_box_new"), staticmethod(get_dtype) + return func_with_new_name(new, name + "_box_new"), staticmethod(_get_dtype) class PrimitiveBox(object): _mixin_ = True @@ -37,6 +38,9 @@ w_subtype.getname(space, '?') ) + def get_dtype(self, space): + return self._get_dtype(space) + def descr_str(self, space): return space.wrap(self.get_dtype(space).itemtype.str_format(self)) @@ -44,12 +48,12 @@ return space.format(self.item(space), w_spec) def descr_int(self, space): - box = self.convert_to(W_LongBox.get_dtype(space)) + box = self.convert_to(W_LongBox._get_dtype(space)) assert isinstance(box, W_LongBox) return space.wrap(box.value) def descr_float(self, space): - box = self.convert_to(W_Float64Box.get_dtype(space)) + box = self.convert_to(W_Float64Box._get_dtype(space)) assert isinstance(box, W_Float64Box) return space.wrap(box.value) @@ -58,21 +62,24 @@ return space.wrap(dtype.itemtype.bool(self)) def _binop_impl(ufunc_name): - def impl(self, space, w_other): + def impl(self, space, w_other, w_out=None): from pypy.module.micronumpy import interp_ufuncs - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self, w_other]) + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [self, w_other, w_out]) return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) def _binop_right_impl(ufunc_name): - def impl(self, space, w_other): + def impl(self, space, w_other, w_out=None): from pypy.module.micronumpy import interp_ufuncs - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self]) + return 
getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [w_other, self, w_out]) return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name) def _unaryop_impl(ufunc_name): - def impl(self, space): + def impl(self, space, w_out=None): from pypy.module.micronumpy import interp_ufuncs - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self]) + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [self, w_out]) return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) descr_add = _binop_impl("add") @@ -80,6 +87,7 @@ descr_mul = _binop_impl("multiply") descr_div = _binop_impl("divide") descr_truediv = _binop_impl("true_divide") + descr_floordiv = _binop_impl("floor_divide") descr_mod = _binop_impl("mod") descr_pow = _binop_impl("power") descr_lshift = _binop_impl("left_shift") @@ -100,6 +108,7 @@ descr_rmul = _binop_right_impl("multiply") descr_rdiv = _binop_right_impl("divide") descr_rtruediv = _binop_right_impl("true_divide") + descr_rfloordiv = _binop_right_impl("floor_divide") descr_rmod = _binop_right_impl("mod") descr_rpow = _binop_right_impl("power") descr_rlshift = _binop_right_impl("left_shift") @@ -128,7 +137,7 @@ class W_BoolBox(W_GenericBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("bool") + descr__new__, _get_dtype = new_dtype_getter("bool") class W_NumberBox(W_GenericBox): _attrs_ = () @@ -144,34 +153,40 @@ pass class W_Int8Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("int8") + descr__new__, _get_dtype = new_dtype_getter("int8") class W_UInt8Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("uint8") + descr__new__, _get_dtype = new_dtype_getter("uint8") class W_Int16Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("int16") + descr__new__, _get_dtype = new_dtype_getter("int16") class W_UInt16Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = 
new_dtype_getter("uint16") + descr__new__, _get_dtype = new_dtype_getter("uint16") class W_Int32Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("int32") + descr__new__, _get_dtype = new_dtype_getter("int32") class W_UInt32Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("uint32") + descr__new__, _get_dtype = new_dtype_getter("uint32") class W_LongBox(W_SignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("long") + descr__new__, _get_dtype = new_dtype_getter("long") class W_ULongBox(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("ulong") + descr__new__, _get_dtype = new_dtype_getter("ulong") class W_Int64Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("int64") + descr__new__, _get_dtype = new_dtype_getter("int64") + +class W_LongLongBox(W_SignedIntegerBox, PrimitiveBox): + descr__new__, _get_dtype = new_dtype_getter('longlong') class W_UInt64Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("uint64") + descr__new__, _get_dtype = new_dtype_getter("uint64") + +class W_ULongLongBox(W_SignedIntegerBox, PrimitiveBox): + descr__new__, _get_dtype = new_dtype_getter('ulonglong') class W_InexactBox(W_NumberBox): _attrs_ = () @@ -180,16 +195,71 @@ _attrs_ = () class W_Float32Box(W_FloatingBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("float32") + descr__new__, _get_dtype = new_dtype_getter("float32") class W_Float64Box(W_FloatingBox, PrimitiveBox): - descr__new__, get_dtype = new_dtype_getter("float64") + descr__new__, _get_dtype = new_dtype_getter("float64") +class W_FlexibleBox(W_GenericBox): + def __init__(self, arr, ofs, dtype): + self.arr = arr # we have to keep array alive + self.ofs = ofs + self.dtype = dtype + + def get_dtype(self, space): + return self.arr.dtype + @unwrap_spec(self=W_GenericBox) def descr_index(space, self): return 
space.index(self.item(space)) +class W_VoidBox(W_FlexibleBox): + @unwrap_spec(item=str) + def descr_getitem(self, space, item): + try: + ofs, dtype = self.dtype.fields[item] + except KeyError: + raise OperationError(space.w_IndexError, + space.wrap("Field %s does not exist" % item)) + return dtype.itemtype.read(self.arr, 1, self.ofs, ofs, dtype) + + @unwrap_spec(item=str) + def descr_setitem(self, space, item, w_value): + try: + ofs, dtype = self.dtype.fields[item] + except KeyError: + raise OperationError(space.w_IndexError, + space.wrap("Field %s does not exist" % item)) + dtype.itemtype.store(self.arr, 1, self.ofs, ofs, + dtype.coerce(space, w_value)) + +class W_CharacterBox(W_FlexibleBox): + pass + +class W_StringBox(W_CharacterBox): + def descr__new__string_box(space, w_subtype, w_arg): + from pypy.module.micronumpy.interp_numarray import W_NDimArray + from pypy.module.micronumpy.interp_dtype import new_string_dtype + + arg = space.str_w(space.str(w_arg)) + arr = W_NDimArray([1], new_string_dtype(space, len(arg))) + for i in range(len(arg)): + arr.storage[i] = arg[i] + return W_StringBox(arr, 0, arr.dtype) + + +class W_UnicodeBox(W_CharacterBox): + def descr__new__unicode_box(space, w_subtype, w_arg): + from pypy.module.micronumpy.interp_numarray import W_NDimArray + from pypy.module.micronumpy.interp_dtype import new_unicode_dtype + + arg = space.unicode_w(unicode_from_object(space, w_arg)) + arr = W_NDimArray([1], new_unicode_dtype(space, len(arg))) + # XXX not this way, we need store + #for i in range(len(arg)): + # arr.storage[i] = arg[i] + return W_UnicodeBox(arr, 0, arr.dtype) W_GenericBox.typedef = TypeDef("generic", __module__ = "numpypy", @@ -208,6 +278,7 @@ __mul__ = interp2app(W_GenericBox.descr_mul), __div__ = interp2app(W_GenericBox.descr_div), __truediv__ = interp2app(W_GenericBox.descr_truediv), + __floordiv__ = interp2app(W_GenericBox.descr_floordiv), __mod__ = interp2app(W_GenericBox.descr_mod), __divmod__ = 
interp2app(W_GenericBox.descr_divmod), __pow__ = interp2app(W_GenericBox.descr_pow), @@ -222,6 +293,7 @@ __rmul__ = interp2app(W_GenericBox.descr_rmul), __rdiv__ = interp2app(W_GenericBox.descr_rdiv), __rtruediv__ = interp2app(W_GenericBox.descr_rtruediv), + __rfloordiv__ = interp2app(W_GenericBox.descr_rfloordiv), __rmod__ = interp2app(W_GenericBox.descr_rmod), __rdivmod__ = interp2app(W_GenericBox.descr_rdivmod), __rpow__ = interp2app(W_GenericBox.descr_rpow), @@ -344,3 +416,28 @@ __new__ = interp2app(W_Float64Box.descr__new__.im_func), ) + +W_FlexibleBox.typedef = TypeDef("flexible", W_GenericBox.typedef, + __module__ = "numpypy", +) + +W_VoidBox.typedef = TypeDef("void", W_FlexibleBox.typedef, + __module__ = "numpypy", + __getitem__ = interp2app(W_VoidBox.descr_getitem), + __setitem__ = interp2app(W_VoidBox.descr_setitem), +) + +W_CharacterBox.typedef = TypeDef("character", W_FlexibleBox.typedef, + __module__ = "numpypy", +) + +W_StringBox.typedef = TypeDef("string_", (str_typedef, W_CharacterBox.typedef), + __module__ = "numpypy", + __new__ = interp2app(W_StringBox.descr__new__string_box.im_func), +) + +W_UnicodeBox.typedef = TypeDef("unicode_", (unicode_typedef, W_CharacterBox.typedef), + __module__ = "numpypy", + __new__ = interp2app(W_UnicodeBox.descr__new__unicode_box.im_func), +) + diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -1,26 +1,29 @@ + +import sys from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.error import OperationError -from pypy.interpreter.gateway import interp2app +from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import (TypeDef, GetSetProperty, interp_attrproperty, interp_attrproperty_w) -from pypy.module.micronumpy import types, signature, interp_boxes +from pypy.module.micronumpy import types, interp_boxes from pypy.rlib.objectmodel 
import specialize -from pypy.rlib.rarithmetic import LONG_BIT -from pypy.rpython.lltypesystem import lltype, rffi +from pypy.rlib.rarithmetic import LONG_BIT, r_longlong, r_ulonglong UNSIGNEDLTR = "u" SIGNEDLTR = "i" BOOLLTR = "b" FLOATINGLTR = "f" - - -VOID_STORAGE = lltype.Array(lltype.Char, hints={'nolength': True, 'render_as_void': True}) +VOIDLTR = 'V' +STRINGLTR = 'S' +UNICODELTR = 'U' class W_Dtype(Wrappable): _immutable_fields_ = ["itemtype", "num", "kind"] - def __init__(self, itemtype, num, kind, name, char, w_box_type, alternate_constructors=[], aliases=[]): + def __init__(self, itemtype, num, kind, name, char, w_box_type, + alternate_constructors=[], aliases=[], + fields=None, fieldnames=None, native=True): self.itemtype = itemtype self.num = num self.kind = kind @@ -29,53 +32,28 @@ self.w_box_type = w_box_type self.alternate_constructors = alternate_constructors self.aliases = aliases - - def malloc(self, length): - # XXX find out why test_zjit explodes with tracking of allocations - return lltype.malloc(VOID_STORAGE, self.itemtype.get_element_size() * length, - zero=True, flavor="raw", - track_allocation=False, add_memory_pressure=True - ) + self.fields = fields + self.fieldnames = fieldnames + self.native = native @specialize.argtype(1) def box(self, value): return self.itemtype.box(value) def coerce(self, space, w_item): - return self.itemtype.coerce(space, w_item) + return self.itemtype.coerce(space, self, w_item) - def getitem(self, storage, i): - return self.itemtype.read(storage, self.itemtype.get_element_size(), i, 0) + def getitem(self, arr, i): + return self.itemtype.read(arr, 1, i, 0) - def getitem_bool(self, storage, i): - isize = self.itemtype.get_element_size() - return self.itemtype.read_bool(storage, isize, i, 0) + def getitem_bool(self, arr, i): + return self.itemtype.read_bool(arr, 1, i, 0) - def setitem(self, storage, i, box): - self.itemtype.store(storage, self.itemtype.get_element_size(), i, 0, box) + def setitem(self, arr, i, 
box): + self.itemtype.store(arr, 1, i, 0, box) def fill(self, storage, box, start, stop): - self.itemtype.fill(storage, self.itemtype.get_element_size(), box, start, stop, 0) - - def descr__new__(space, w_subtype, w_dtype): - cache = get_dtype_cache(space) - - if space.is_w(w_dtype, space.w_None): - return cache.w_float64dtype - elif space.isinstance_w(w_dtype, w_subtype): - return w_dtype - elif space.isinstance_w(w_dtype, space.w_str): - name = space.str_w(w_dtype) - for dtype in cache.builtin_dtypes: - if dtype.name == name or dtype.char == name or name in dtype.aliases: - return dtype - else: - for dtype in cache.builtin_dtypes: - if w_dtype in dtype.alternate_constructors: - return dtype - if w_dtype is dtype.w_box_type: - return dtype - raise OperationError(space.w_TypeError, space.wrap("data type not understood")) + self.itemtype.fill(storage, self.get_size(), box, start, stop, 0) def descr_str(self, space): return space.wrap(self.name) @@ -86,6 +64,14 @@ def descr_get_itemsize(self, space): return space.wrap(self.itemtype.get_element_size()) + def descr_get_byteorder(self, space): + if self.native: + return space.wrap('=') + return space.wrap(nonnative_byteorder_prefix) + + def descr_get_alignment(self, space): + return space.wrap(self.itemtype.alignment) + def descr_get_shape(self, space): return space.newtuple([]) @@ -99,31 +85,193 @@ def descr_ne(self, space, w_other): return space.wrap(not self.eq(space, w_other)) + def descr_get_fields(self, space): + if self.fields is None: + return space.w_None + w_d = space.newdict() + for name, (offset, subdtype) in self.fields.iteritems(): + space.setitem(w_d, space.wrap(name), space.newtuple([subdtype, + space.wrap(offset)])) + return w_d + + def descr_get_names(self, space): + if self.fieldnames is None: + return space.w_None + return space.newtuple([space.wrap(name) for name in self.fieldnames]) + + @unwrap_spec(item=str) + def descr_getitem(self, space, item): + if self.fields is None: + raise 
OperationError(space.w_KeyError, space.wrap("There are no keys in dtypes %s" % self.name)) + try: + return self.fields[item][1] + except KeyError: + raise OperationError(space.w_KeyError, space.wrap("Field named %s not found" % item)) + def is_int_type(self): return (self.kind == SIGNEDLTR or self.kind == UNSIGNEDLTR or self.kind == BOOLLTR) + def is_signed(self): + return self.kind == SIGNEDLTR + def is_bool_type(self): return self.kind == BOOLLTR + def is_record_type(self): + return self.fields is not None + + def __repr__(self): + if self.fields is not None: + return '' % self.fields + return '' % self.itemtype + + def get_size(self): + return self.itemtype.get_element_size() + +def dtype_from_list(space, w_lst): + lst_w = space.listview(w_lst) + fields = {} + offset = 0 + ofs_and_items = [] + fieldnames = [] + for w_elem in lst_w: + w_fldname, w_flddesc = space.fixedview(w_elem, 2) + subdtype = descr__new__(space, space.gettypefor(W_Dtype), w_flddesc) + fldname = space.str_w(w_fldname) + if fldname in fields: + raise OperationError(space.w_ValueError, space.wrap("two fields with the same name")) + assert isinstance(subdtype, W_Dtype) + fields[fldname] = (offset, subdtype) + ofs_and_items.append((offset, subdtype.itemtype)) + offset += subdtype.itemtype.get_element_size() + fieldnames.append(fldname) + itemtype = types.RecordType(ofs_and_items, offset) + return W_Dtype(itemtype, 20, VOIDLTR, "void" + str(8 * itemtype.get_element_size()), + "V", space.gettypefor(interp_boxes.W_VoidBox), fields=fields, + fieldnames=fieldnames) + +def dtype_from_dict(space, w_dict): + raise OperationError(space.w_NotImplementedError, space.wrap( + "dtype from dict")) + +def variable_dtype(space, name): + if name[0] in '<>=': + name = name[1:] + char = name[0] + if len(name) == 1: + size = 0 + else: + try: + size = int(name[1:]) + except ValueError: + raise OperationError(space.w_TypeError, space.wrap("data type not understood")) + if char == 'S': + itemtype = types.StringType(size) 
+ basename = 'string' + num = 18 + w_box_type = space.gettypefor(interp_boxes.W_StringBox) + elif char == 'V': + num = 20 + basename = 'void' + w_box_type = space.gettypefor(interp_boxes.W_VoidBox) + raise OperationError(space.w_NotImplementedError, space.wrap( + "pure void dtype")) + else: + assert char == 'U' + basename = 'unicode' + itemtype = types.UnicodeType(size) + num = 19 + w_box_type = space.gettypefor(interp_boxes.W_UnicodeBox) + return W_Dtype(itemtype, num, char, + basename + str(8 * itemtype.get_element_size()), + char, w_box_type) + +def dtype_from_spec(space, name): + raise OperationError(space.w_NotImplementedError, space.wrap( + "dtype from spec")) + +def descr__new__(space, w_subtype, w_dtype): + cache = get_dtype_cache(space) + + if space.is_w(w_dtype, space.w_None): + return cache.w_float64dtype + elif space.isinstance_w(w_dtype, w_subtype): + return w_dtype + elif space.isinstance_w(w_dtype, space.w_str): + name = space.str_w(w_dtype) + if ',' in name: + return dtype_from_spec(space, name) + try: + return cache.dtypes_by_name[name] + except KeyError: + pass + if name[0] in 'VSU' or name[0] in '<>=' and name[1] in 'VSU': + return variable_dtype(space, name) + elif space.isinstance_w(w_dtype, space.w_list): + return dtype_from_list(space, w_dtype) + elif space.isinstance_w(w_dtype, space.w_dict): + return dtype_from_dict(space, w_dtype) + else: + for dtype in cache.builtin_dtypes: + if w_dtype in dtype.alternate_constructors: + return dtype + if w_dtype is dtype.w_box_type: + return dtype + raise OperationError(space.w_TypeError, space.wrap("data type not understood")) + W_Dtype.typedef = TypeDef("dtype", __module__ = "numpypy", - __new__ = interp2app(W_Dtype.descr__new__.im_func), + __new__ = interp2app(descr__new__), __str__= interp2app(W_Dtype.descr_str), __repr__ = interp2app(W_Dtype.descr_repr), __eq__ = interp2app(W_Dtype.descr_eq), __ne__ = interp2app(W_Dtype.descr_ne), + __getitem__ = interp2app(W_Dtype.descr_getitem), num = 
interp_attrproperty("num", cls=W_Dtype), kind = interp_attrproperty("kind", cls=W_Dtype), + char = interp_attrproperty("char", cls=W_Dtype), type = interp_attrproperty_w("w_box_type", cls=W_Dtype), + byteorder = GetSetProperty(W_Dtype.descr_get_byteorder), itemsize = GetSetProperty(W_Dtype.descr_get_itemsize), + alignment = GetSetProperty(W_Dtype.descr_get_alignment), shape = GetSetProperty(W_Dtype.descr_get_shape), name = interp_attrproperty('name', cls=W_Dtype), + fields = GetSetProperty(W_Dtype.descr_get_fields), + names = GetSetProperty(W_Dtype.descr_get_names), ) W_Dtype.typedef.acceptable_as_base_class = False +if sys.byteorder == 'little': + byteorder_prefix = '<' + nonnative_byteorder_prefix = '>' +else: + byteorder_prefix = '>' + nonnative_byteorder_prefix = '<' + +def new_string_dtype(space, size): + return W_Dtype( + types.StringType(size), + num=18, + kind=STRINGLTR, + name='string', + char='S' + str(size), + w_box_type = space.gettypefor(interp_boxes.W_StringBox), + ) + +def new_unicode_dtype(space, size): + return W_Dtype( + types.UnicodeType(size), + num=19, + kind=UNICODELTR, + name='unicode', + char='U' + str(size), + w_box_type = space.gettypefor(interp_boxes.W_UnicodeBox), + ) + + class DtypeCache(object): def __init__(self, space): self.w_booldtype = W_Dtype( @@ -239,18 +387,134 @@ alternate_constructors=[space.w_float], aliases=["float"], ) - + self.w_stringdtype = W_Dtype( + types.StringType(1), + num=18, + kind=STRINGLTR, + name='string', + char='S', + w_box_type = space.gettypefor(interp_boxes.W_StringBox), + alternate_constructors=[space.w_str], + ) + self.w_unicodedtype = W_Dtype( + types.UnicodeType(1), + num=19, + kind=UNICODELTR, + name='unicode', + char='U', + w_box_type = space.gettypefor(interp_boxes.W_UnicodeBox), + alternate_constructors=[space.w_unicode], + ) + self.w_voiddtype = W_Dtype( + types.VoidType(0), + num=20, + kind=VOIDLTR, + name='void', + char='V', + w_box_type = space.gettypefor(interp_boxes.W_VoidBox), + 
#alternate_constructors=[space.w_buffer], + # XXX no buffer in space + ) self.builtin_dtypes = [ self.w_booldtype, self.w_int8dtype, self.w_uint8dtype, self.w_int16dtype, self.w_uint16dtype, self.w_int32dtype, self.w_uint32dtype, self.w_longdtype, self.w_ulongdtype, - self.w_int64dtype, self.w_uint64dtype, self.w_float32dtype, - self.w_float64dtype + self.w_int64dtype, self.w_uint64dtype, + self.w_float32dtype, + self.w_float64dtype, self.w_stringdtype, self.w_unicodedtype, + self.w_voiddtype, ] - self.dtypes_by_num_bytes = sorted( + self.float_dtypes_by_num_bytes = sorted( (dtype.itemtype.get_element_size(), dtype) - for dtype in self.builtin_dtypes + for dtype in [self.w_float32dtype, self.w_float64dtype] ) + self.dtypes_by_name = {} + # we reverse, so the stuff with lower numbers override stuff with + # higher numbers + for dtype in reversed(self.builtin_dtypes): + self.dtypes_by_name[dtype.name] = dtype + can_name = dtype.kind + str(dtype.itemtype.get_element_size()) + self.dtypes_by_name[can_name] = dtype + self.dtypes_by_name[byteorder_prefix + can_name] = dtype + self.dtypes_by_name['=' + can_name] = dtype + new_name = nonnative_byteorder_prefix + can_name + itemtypename = dtype.itemtype.__class__.__name__ + itemtype = getattr(types, 'NonNative' + itemtypename)() + self.dtypes_by_name[new_name] = W_Dtype( + itemtype, + dtype.num, dtype.kind, new_name, dtype.char, dtype.w_box_type, + native=False) + for alias in dtype.aliases: + self.dtypes_by_name[alias] = dtype + self.dtypes_by_name[dtype.char] = dtype + + typeinfo_full = { + 'LONGLONG': self.w_int64dtype, + 'SHORT': self.w_int16dtype, + 'VOID': self.w_voiddtype, + #'LONGDOUBLE':, + 'UBYTE': self.w_uint8dtype, + 'UINTP': self.w_ulongdtype, + 'ULONG': self.w_ulongdtype, + 'LONG': self.w_longdtype, + 'UNICODE': self.w_unicodedtype, + #'OBJECT', + 'ULONGLONG': self.w_uint64dtype, + 'STRING': self.w_stringdtype, + #'CDOUBLE', + #'DATETIME', + 'UINT': self.w_uint32dtype, + 'INTP': self.w_longdtype, + #'HALF', + 
'BYTE': self.w_int8dtype, + #'CFLOAT': , + #'TIMEDELTA', + 'INT': self.w_int32dtype, + 'DOUBLE': self.w_float64dtype, + 'USHORT': self.w_uint16dtype, + 'FLOAT': self.w_float32dtype, + 'BOOL': self.w_booldtype, + #, 'CLONGDOUBLE'] + } + typeinfo_partial = { + 'Generic': interp_boxes.W_GenericBox, + 'Character': interp_boxes.W_CharacterBox, + 'Flexible': interp_boxes.W_FlexibleBox, + 'Inexact': interp_boxes.W_InexactBox, + 'Integer': interp_boxes.W_IntegerBox, + 'SignedInteger': interp_boxes.W_SignedIntegerBox, + 'UnsignedInteger': interp_boxes.W_UnsignedIntegerBox, + #'ComplexFloating', + 'Number': interp_boxes.W_NumberBox, + 'Floating': interp_boxes.W_FloatingBox + } + w_typeinfo = space.newdict() + for k, v in typeinfo_partial.iteritems(): + space.setitem(w_typeinfo, space.wrap(k), space.gettypefor(v)) + for k, dtype in typeinfo_full.iteritems(): + itemsize = dtype.itemtype.get_element_size() + items_w = [space.wrap(dtype.char), + space.wrap(dtype.num), + space.wrap(itemsize * 8), # in case of changing + # number of bits per byte in the future + space.wrap(itemsize or 1)] + if dtype.is_int_type(): + if dtype.kind == BOOLLTR: + w_maxobj = space.wrap(1) + w_minobj = space.wrap(0) + elif dtype.is_signed(): + w_maxobj = space.wrap(r_longlong((1 << (itemsize*8 - 1)) + - 1)) + w_minobj = space.wrap(r_longlong(-1) << (itemsize*8 - 1)) + else: + w_maxobj = space.wrap(r_ulonglong(1 << (itemsize*8)) - 1) + w_minobj = space.wrap(0) + items_w = items_w + [w_maxobj, w_minobj] + items_w = items_w + [dtype.w_box_type] + + w_tuple = space.newtuple(items_w) + space.setitem(w_typeinfo, space.wrap(k), w_tuple) + self.w_typeinfo = w_typeinfo def get_dtype_cache(space): return space.fromcache(DtypeCache) diff --git a/pypy/module/micronumpy/interp_iter.py b/pypy/module/micronumpy/interp_iter.py --- a/pypy/module/micronumpy/interp_iter.py +++ b/pypy/module/micronumpy/interp_iter.py @@ -2,7 +2,7 @@ from pypy.rlib import jit from pypy.rlib.objectmodel import instantiate from 
pypy.module.micronumpy.strides import calculate_broadcast_strides,\ - calculate_slice_strides, calculate_dot_strides + calculate_slice_strides, calculate_dot_strides, enumerate_chunks """ This is a mini-tutorial on iterators, strides, and memory layout. It assumes you are familiar with the terms, see @@ -42,28 +42,81 @@ we can go faster. All the calculations happen in next() -next_step_x() tries to do the iteration for a number of steps at once, +next_skip_x() tries to do the iteration for a number of steps at once, but then we cannot gaurentee that we only overflow one single shape dimension, perhaps we could overflow times in one big step. """ # structures to describe slicing -class Chunk(object): +class BaseChunk(object): + pass + +class RecordChunk(BaseChunk): + def __init__(self, name): + self.name = name + + def apply(self, arr): + from pypy.module.micronumpy.interp_numarray import W_NDimSlice + + arr = arr.get_concrete() + ofs, subdtype = arr.dtype.fields[self.name] + # strides backstrides are identical, ofs only changes start + return W_NDimSlice(arr.start + ofs, arr.strides[:], arr.backstrides[:], + arr.shape[:], arr, subdtype) + +class Chunks(BaseChunk): + def __init__(self, l): + self.l = l + + @jit.unroll_safe + def extend_shape(self, old_shape): + shape = [] + i = -1 + for i, c in enumerate_chunks(self.l): + if c.step != 0: + shape.append(c.lgt) + s = i + 1 + assert s >= 0 + return shape[:] + old_shape[s:] + + def apply(self, arr): + from pypy.module.micronumpy.interp_numarray import W_NDimSlice,\ + VirtualSlice, ConcreteArray + + shape = self.extend_shape(arr.shape) + if not isinstance(arr, ConcreteArray): + return VirtualSlice(arr, self, shape) + r = calculate_slice_strides(arr.shape, arr.start, arr.strides, + arr.backstrides, self.l) + _, start, strides, backstrides = r + return W_NDimSlice(start, strides[:], backstrides[:], + shape[:], arr) + + +class Chunk(BaseChunk): + axis_step = 1 + def __init__(self, start, stop, step, lgt): self.start = start 
self.stop = stop self.step = step self.lgt = lgt - def extend_shape(self, shape): - if self.step != 0: - shape.append(self.lgt) - def __repr__(self): return 'Chunk(%d, %d, %d, %d)' % (self.start, self.stop, self.step, self.lgt) +class NewAxisChunk(Chunk): + start = 0 + stop = 1 + step = 1 + lgt = 1 + axis_step = 0 + + def __init__(self): + pass + class BaseTransform(object): pass @@ -95,17 +148,19 @@ raise NotImplementedError class ArrayIterator(BaseIterator): - def __init__(self, size): + def __init__(self, size, element_size): self.offset = 0 self.size = size + self.element_size = element_size def next(self, shapelen): return self.next_skip_x(1) - def next_skip_x(self, ofs): + def next_skip_x(self, x): arr = instantiate(ArrayIterator) arr.size = self.size - arr.offset = self.offset + ofs + arr.offset = self.offset + x * self.element_size + arr.element_size = self.element_size return arr def next_no_increase(self, shapelen): @@ -152,7 +207,7 @@ elif isinstance(t, ViewTransform): r = calculate_slice_strides(self.res_shape, self.offset, self.strides, - self.backstrides, t.chunks) + self.backstrides, t.chunks.l) return ViewIterator(r[1], r[2], r[3], r[0]) @jit.unroll_safe @@ -214,7 +269,7 @@ def apply_transformations(self, arr, transformations): v = BaseIterator.apply_transformations(self, arr, transformations) - if len(arr.shape) == 1: + if len(arr.shape) == 1 and len(v.res_shape) == 1: return OneDimIterator(self.offset, self.strides[0], self.res_shape[0]) return v diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -7,10 +7,10 @@ from pypy.module.micronumpy.appbridge import get_appbridge_cache from pypy.module.micronumpy.dot import multidim_dot, match_dot_shapes from pypy.module.micronumpy.interp_iter import (ArrayIterator, - SkipLastAxisIterator, Chunk, ViewIterator) -from pypy.module.micronumpy.strides import 
(calculate_slice_strides, - shape_agreement, find_shape_and_elems, get_shape_from_iterable, - calc_new_strides, to_coords) + SkipLastAxisIterator, Chunk, ViewIterator, Chunks, RecordChunk, + NewAxisChunk) +from pypy.module.micronumpy.strides import (shape_agreement, + find_shape_and_elems, get_shape_from_iterable, calc_new_strides, to_coords) from pypy.rlib import jit from pypy.rlib.rstring import StringBuilder from pypy.rpython.lltypesystem import lltype, rffi @@ -47,7 +47,7 @@ ) flat_set_driver = jit.JitDriver( greens=['shapelen', 'base'], - reds=['step', 'ai', 'lngth', 'arr', 'basei'], + reds=['step', 'lngth', 'ri', 'arr', 'basei'], name='numpy_flatset', ) @@ -79,12 +79,13 @@ dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) ) - size, shape = _find_size_and_shape(space, w_size) - return space.wrap(W_NDimArray(size, shape[:], dtype=dtype)) + shape = _find_shape(space, w_size) + return space.wrap(W_NDimArray(shape[:], dtype=dtype)) def _unaryop_impl(ufunc_name): - def impl(self, space): - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self]) + def impl(self, space, w_out=None): + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [self, w_out]) return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) descr_pos = _unaryop_impl("positive") @@ -93,8 +94,9 @@ descr_invert = _unaryop_impl("invert") def _binop_impl(ufunc_name): - def impl(self, space, w_other): - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self, w_other]) + def impl(self, space, w_other, w_out=None): + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [self, w_other, w_out]) return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) descr_add = _binop_impl("add") @@ -102,6 +104,7 @@ descr_mul = _binop_impl("multiply") descr_div = _binop_impl("divide") descr_truediv = _binop_impl("true_divide") + descr_floordiv = _binop_impl("floor_divide") descr_mod = 
_binop_impl("mod") descr_pow = _binop_impl("power") descr_lshift = _binop_impl("left_shift") @@ -123,12 +126,12 @@ return space.newtuple([w_quotient, w_remainder]) def _binop_right_impl(ufunc_name): - def impl(self, space, w_other): + def impl(self, space, w_other, w_out=None): w_other = scalar_w(space, interp_ufuncs.find_dtype_for_scalar(space, w_other, self.find_dtype()), w_other ) - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self]) + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self, w_out]) return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name) descr_radd = _binop_right_impl("add") @@ -136,6 +139,7 @@ descr_rmul = _binop_right_impl("multiply") descr_rdiv = _binop_right_impl("divide") descr_rtruediv = _binop_right_impl("true_divide") + descr_rfloordiv = _binop_right_impl("floor_divide") descr_rmod = _binop_right_impl("mod") descr_rpow = _binop_right_impl("power") descr_rlshift = _binop_right_impl("left_shift") @@ -150,13 +154,21 @@ return space.newtuple([w_quotient, w_remainder]) def _reduce_ufunc_impl(ufunc_name, promote_to_largest=False): - def impl(self, space, w_axis=None): + def impl(self, space, w_axis=None, w_out=None): if space.is_w(w_axis, space.w_None): axis = -1 else: axis = space.int_w(w_axis) + if space.is_w(w_out, space.w_None) or not w_out: + out = None + elif not isinstance(w_out, BaseArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) + else: + out = w_out return getattr(interp_ufuncs.get(space), ufunc_name).reduce(space, - self, True, promote_to_largest, axis) + self, True, promote_to_largest, axis, + False, out) return func_with_new_name(impl, "reduce_%s_impl" % ufunc_name) descr_sum = _reduce_ufunc_impl("add") @@ -211,6 +223,7 @@ def descr_dot(self, space, w_other): other = convert_to_array(space, w_other) if isinstance(other, Scalar): + #Note: w_out is not modified, this is numpy compliant. 
return self.descr_mul(space, other) elif len(self.shape) < 2 and len(other.shape) < 2: w_res = self.descr_mul(space, other) @@ -223,8 +236,7 @@ return scalar_w(space, dtype, space.wrap(0)) # Do the dims match? out_shape, other_critical_dim = match_dot_shapes(space, self, other) - out_size = support.product(out_shape) - result = W_NDimArray(out_size, out_shape, dtype) + result = W_NDimArray(out_shape, dtype) # This is the place to add fpypy and blas return multidim_dot(space, self.get_concrete(), other.get_concrete(), result, dtype, @@ -243,7 +255,7 @@ return space.wrap(self.find_dtype().itemtype.get_element_size()) def descr_get_nbytes(self, space): - return space.wrap(self.size * self.find_dtype().itemtype.get_element_size()) + return space.wrap(self.size) @jit.unroll_safe def descr_get_shape(self, space): @@ -251,13 +263,16 @@ def descr_set_shape(self, space, w_iterable): new_shape = get_shape_from_iterable(space, - self.size, w_iterable) + support.product(self.shape), w_iterable) if isinstance(self, Scalar): return self.get_concrete().setshape(space, new_shape) def descr_get_size(self, space): - return space.wrap(self.size) + return space.wrap(self.get_size()) + + def get_size(self): + return self.size // self.find_dtype().get_size() def descr_copy(self, space): return self.copy(space) @@ -277,7 +292,7 @@ def empty_copy(self, space, dtype): shape = self.shape - return W_NDimArray(support.product(shape), shape[:], dtype, 'C') + return W_NDimArray(shape[:], dtype, 'C') def descr_len(self, space): if len(self.shape): @@ -318,7 +333,16 @@ """ The result of getitem/setitem is a single item if w_idx is a list of scalars that match the size of shape """ + if space.isinstance_w(w_idx, space.w_str): + return False shape_len = len(self.shape) + if space.isinstance_w(w_idx, space.w_tuple): + for w_item in space.fixedview(w_idx): + if (space.isinstance_w(w_item, space.w_slice) or + space.is_w(w_item, space.w_None)): + return False + elif space.is_w(w_idx, space.w_None): + 
return False if shape_len == 0: raise OperationError(space.w_IndexError, space.wrap( "0-d arrays can't be indexed")) @@ -334,43 +358,55 @@ if lgt > shape_len: raise OperationError(space.w_IndexError, space.wrap("invalid index")) - if lgt < shape_len: - return False - for w_item in space.fixedview(w_idx): - if space.isinstance_w(w_item, space.w_slice): - return False - return True + return lgt == shape_len @jit.unroll_safe def _prepare_slice_args(self, space, w_idx): + if space.isinstance_w(w_idx, space.w_str): + idx = space.str_w(w_idx) + dtype = self.find_dtype() + if not dtype.is_record_type() or idx not in dtype.fields: + raise OperationError(space.w_ValueError, space.wrap( + "field named %s not defined" % idx)) + return RecordChunk(idx) if (space.isinstance_w(w_idx, space.w_int) or space.isinstance_w(w_idx, space.w_slice)): - return [Chunk(*space.decode_index4(w_idx, self.shape[0]))] - return [Chunk(*space.decode_index4(w_item, self.shape[i])) for i, w_item in - enumerate(space.fixedview(w_idx))] + return Chunks([Chunk(*space.decode_index4(w_idx, self.shape[0]))]) + elif space.is_w(w_idx, space.w_None): + return Chunks([NewAxisChunk()]) + result = [] + i = 0 + for w_item in space.fixedview(w_idx): + if space.is_w(w_item, space.w_None): + result.append(NewAxisChunk()) + else: + result.append(Chunk(*space.decode_index4(w_item, + self.shape[i]))) + i += 1 + return Chunks(result) - def count_all_true(self, arr): - sig = arr.find_sig() - frame = sig.create_frame(arr) - shapelen = len(arr.shape) + def count_all_true(self): + sig = self.find_sig() + frame = sig.create_frame(self) + shapelen = len(self.shape) s = 0 iter = None while not frame.done(): - count_driver.jit_merge_point(arr=arr, frame=frame, iter=iter, s=s, + count_driver.jit_merge_point(arr=self, frame=frame, iter=iter, s=s, shapelen=shapelen) iter = frame.get_final_iter() - s += arr.dtype.getitem_bool(arr.storage, iter.offset) + s += self.dtype.getitem_bool(self, iter.offset) frame.next(shapelen) return s 
def getitem_filter(self, space, arr): concr = arr.get_concrete() - if concr.size > self.size: + if concr.get_size() > self.get_size(): raise OperationError(space.w_IndexError, space.wrap("index out of range for array")) - size = self.count_all_true(concr) - res = W_NDimArray(size, [size], self.find_dtype()) - ri = ArrayIterator(size) + size = concr.count_all_true() + res = W_NDimArray([size], self.find_dtype()) + ri = res.create_iter() shapelen = len(self.shape) argi = concr.create_iter() sig = self.find_sig() @@ -380,7 +416,7 @@ filter_driver.jit_merge_point(concr=concr, argi=argi, ri=ri, frame=frame, v=v, res=res, sig=sig, shapelen=shapelen, self=self) - if concr.dtype.getitem_bool(concr.storage, argi.offset): + if concr.dtype.getitem_bool(concr, argi.offset): v = sig.eval(frame, self) res.setitem(ri.offset, v) ri = ri.next(1) @@ -390,23 +426,6 @@ frame.next(shapelen) return res - def setitem_filter(self, space, idx, val): - size = self.count_all_true(idx) - arr = SliceArray([size], self.dtype, self, val) - sig = arr.find_sig() - shapelen = len(self.shape) - frame = sig.create_frame(arr) - idxi = idx.create_iter() - while not frame.done(): - filter_set_driver.jit_merge_point(idx=idx, idxi=idxi, sig=sig, - frame=frame, arr=arr, - shapelen=shapelen) - if idx.dtype.getitem_bool(idx.storage, idxi.offset): - sig.eval(frame, arr) - frame.next_from_second(1) - frame.next_first(shapelen) - idxi = idxi.next(shapelen) - def descr_getitem(self, space, w_idx): if (isinstance(w_idx, BaseArray) and w_idx.shape == self.shape and w_idx.find_dtype().is_bool_type()): @@ -416,7 +435,24 @@ item = concrete._index_of_single_item(space, w_idx) return concrete.getitem(item) chunks = self._prepare_slice_args(space, w_idx) - return self.create_slice(chunks) + return chunks.apply(self) + + def setitem_filter(self, space, idx, val): + size = idx.count_all_true() + arr = SliceArray([size], self.dtype, self, val) + sig = arr.find_sig() + shapelen = len(self.shape) + frame = 
sig.create_frame(arr) + idxi = idx.create_iter() + while not frame.done(): + filter_set_driver.jit_merge_point(idx=idx, idxi=idxi, sig=sig, + frame=frame, arr=arr, + shapelen=shapelen) + if idx.dtype.getitem_bool(idx, idxi.offset): + sig.eval(frame, arr) + frame.next_from_second(1) + frame.next_first(shapelen) + idxi = idxi.next(shapelen) def descr_setitem(self, space, w_idx, w_value): self.invalidated() @@ -434,26 +470,9 @@ if not isinstance(w_value, BaseArray): w_value = convert_to_array(space, w_value) chunks = self._prepare_slice_args(space, w_idx) - view = self.create_slice(chunks).get_concrete() + view = chunks.apply(self).get_concrete() view.setslice(space, w_value) - @jit.unroll_safe - def create_slice(self, chunks): - shape = [] - i = -1 - for i, chunk in enumerate(chunks): - chunk.extend_shape(shape) - s = i + 1 - assert s >= 0 - shape += self.shape[s:] - if not isinstance(self, ConcreteArray): - return VirtualSlice(self, chunks, shape) - r = calculate_slice_strides(self.shape, self.start, self.strides, - self.backstrides, chunks) - _, start, strides, backstrides = r - return W_NDimSlice(start, strides[:], backstrides[:], - shape[:], self) - def descr_reshape(self, space, args_w): """reshape(...) a.reshape(shape) @@ -470,13 +489,16 @@ w_shape = args_w[0] else: w_shape = space.newtuple(args_w) - new_shape = get_shape_from_iterable(space, self.size, w_shape) + new_shape = get_shape_from_iterable(space, support.product(self.shape), + w_shape) return self.reshape(space, new_shape) def reshape(self, space, new_shape): concrete = self.get_concrete() # Since we got to here, prod(new_shape) == self.size - new_strides = calc_new_strides(new_shape, concrete.shape, + new_strides = None + if self.size > 0: + new_strides = calc_new_strides(new_shape, concrete.shape, concrete.strides, concrete.order) if new_strides: # We can create a view, strides somehow match up. 
@@ -503,14 +525,14 @@ ) return w_result - def descr_mean(self, space, w_axis=None): + def descr_mean(self, space, w_axis=None, w_out=None): if space.is_w(w_axis, space.w_None): w_axis = space.wrap(-1) - w_denom = space.wrap(self.size) + w_denom = space.wrap(support.product(self.shape)) else: dim = space.int_w(w_axis) w_denom = space.wrap(self.shape[dim]) - return space.div(self.descr_sum_promote(space, w_axis), w_denom) + return space.div(self.descr_sum_promote(space, w_axis, w_out), w_denom) def descr_var(self, space, w_axis=None): return get_appbridge_cache(space).call_method(space, '_var', self, @@ -525,7 +547,7 @@ concr.fill(space, w_value) def descr_nonzero(self, space): - if self.size > 1: + if self.get_size() > 1: raise OperationError(space.w_ValueError, space.wrap( "The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()")) concr = self.get_concrete_or_scalar() @@ -604,8 +626,7 @@ space.wrap("axis unsupported for take")) index_i = index.create_iter() res_shape = index.shape - size = support.product(res_shape) - res = W_NDimArray(size, res_shape[:], concr.dtype, concr.order) + res = W_NDimArray(res_shape[:], concr.dtype, concr.order) res_i = res.create_iter() shapelen = len(index.shape) sig = concr.find_sig() @@ -644,6 +665,11 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "non-int arg not supported")) + def descr_tostring(self, space): + ra = ToStringArray(self) + loop.compute(ra) + return space.wrap(ra.s.build()) + def compute_first_step(self, sig, frame): pass @@ -665,8 +691,7 @@ """ Intermediate class representing a literal. 
""" - size = 1 - _attrs_ = ["dtype", "value", "shape"] + _attrs_ = ["dtype", "value", "shape", "size"] def __init__(self, dtype, value): self.shape = [] @@ -674,6 +699,7 @@ self.dtype = dtype assert isinstance(value, interp_boxes.W_GenericBox) self.value = value + self.size = dtype.get_size() def find_dtype(self): return self.dtype @@ -691,8 +717,7 @@ return self def reshape(self, space, new_shape): - size = support.product(new_shape) - res = W_NDimArray(size, new_shape, self.dtype, 'C') + res = W_NDimArray(new_shape, self.dtype, 'C') res.setitem(0, self.value) return res @@ -700,11 +725,13 @@ """ Class for representing virtual arrays, such as binary ops or ufuncs """ - def __init__(self, name, shape, res_dtype): + def __init__(self, name, shape, res_dtype, out_arg=None): BaseArray.__init__(self, shape) self.forced_result = None self.res_dtype = res_dtype self.name = name + self.res = out_arg + self.size = support.product(self.shape) * res_dtype.get_size() def _del_sources(self): # Function for deleting references to source arrays, @@ -712,13 +739,18 @@ raise NotImplementedError def compute(self): - ra = ResultArray(self, self.size, self.shape, self.res_dtype) + ra = ResultArray(self, self.shape, self.res_dtype, self.res) loop.compute(ra) + if self.res: + broadcast_dims = len(self.res.shape) - len(self.shape) + chunks = [Chunk(0,0,0,0)] * broadcast_dims + \ + [Chunk(0, i, 1, i) for i in self.shape] + return Chunks(chunks).apply(self.res) return ra.left def force_if_needed(self): if self.forced_result is None: - self.forced_result = self.compute() + self.forced_result = self.compute().get_concrete() self._del_sources() def get_concrete(self): @@ -740,7 +772,6 @@ def __init__(self, child, chunks, shape): self.child = child self.chunks = chunks - self.size = support.product(shape) VirtualArray.__init__(self, 'slice', shape, child.find_dtype()) def create_sig(self): @@ -752,15 +783,16 @@ def force_if_needed(self): if self.forced_result is None: concr = 
self.child.get_concrete() - self.forced_result = concr.create_slice(self.chunks) + self.forced_result = self.chunks.apply(concr) def _del_sources(self): self.child = None class Call1(VirtualArray): - def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, values): - VirtualArray.__init__(self, name, shape, res_dtype) + def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, values, + out_arg=None): + VirtualArray.__init__(self, name, shape, res_dtype, out_arg) self.values = values self.size = values.size self.ufunc = ufunc @@ -772,6 +804,12 @@ def create_sig(self): if self.forced_result is not None: return self.forced_result.create_sig() + if self.shape != self.values.shape: + #This happens if out arg is used + return signature.BroadcastUfunc(self.ufunc, self.name, + self.calc_dtype, + self.values.create_sig(), + self.res.create_sig()) return signature.Call1(self.ufunc, self.name, self.calc_dtype, self.values.create_sig()) @@ -779,13 +817,13 @@ """ Intermediate class for performing binary operations. 
""" - def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, left, right): - VirtualArray.__init__(self, name, shape, res_dtype) + def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, left, right, + out_arg=None): + VirtualArray.__init__(self, name, shape, res_dtype, out_arg) self.ufunc = ufunc self.left = left self.right = right self.calc_dtype = calc_dtype - self.size = support.product(self.shape) def _del_sources(self): self.left = None @@ -813,14 +851,34 @@ self.left.create_sig(), self.right.create_sig()) class ResultArray(Call2): - def __init__(self, child, size, shape, dtype, res=None, order='C'): + def __init__(self, child, shape, dtype, res=None, order='C'): if res is None: - res = W_NDimArray(size, shape, dtype, order) + res = W_NDimArray(shape, dtype, order) Call2.__init__(self, None, 'assign', shape, dtype, dtype, res, child) def create_sig(self): - return signature.ResultSignature(self.res_dtype, self.left.create_sig(), - self.right.create_sig()) + if self.left.shape != self.right.shape: + sig = signature.BroadcastResultSignature(self.res_dtype, + self.left.create_sig(), self.right.create_sig()) + else: + sig = signature.ResultSignature(self.res_dtype, + self.left.create_sig(), self.right.create_sig()) + return sig + +class ToStringArray(Call1): + def __init__(self, child): + dtype = child.find_dtype() + self.item_size = dtype.itemtype.get_element_size() + self.s = StringBuilder(child.size * self.item_size) + Call1.__init__(self, None, 'tostring', child.shape, dtype, dtype, + child) + self.res_str = W_NDimArray([1], dtype, order='C') + self.res_str_casted = rffi.cast(rffi.CArrayPtr(lltype.Char), + self.res_str.storage) + + def create_sig(self): + return signature.ToStringSignature(self.calc_dtype, + self.values.create_sig()) def done_if_true(dtype, val): return dtype.itemtype.bool(val) @@ -893,13 +951,13 @@ """ _immutable_fields_ = ['storage'] - def __init__(self, size, shape, dtype, order='C', parent=None): - self.size = size + def 
__init__(self, shape, dtype, order='C', parent=None): self.parent = parent + self.size = support.product(shape) * dtype.get_size() if parent is not None: self.storage = parent.storage else: - self.storage = dtype.malloc(size) + self.storage = dtype.itemtype.malloc(self.size) self.order = order self.dtype = dtype if self.strides is None: @@ -918,13 +976,14 @@ return self.dtype def getitem(self, item): - return self.dtype.getitem(self.storage, item) + return self.dtype.getitem(self, item) def setitem(self, item, value): self.invalidated() - self.dtype.setitem(self.storage, item, value) + self.dtype.setitem(self, item, value.convert_to(self.dtype)) def calc_strides(self, shape): + dtype = self.find_dtype() strides = [] backstrides = [] s = 1 @@ -932,8 +991,8 @@ if self.order == 'C': shape_rev.reverse() for sh in shape_rev: - strides.append(s) - backstrides.append(s * (sh - 1)) + strides.append(s * dtype.get_size()) + backstrides.append(s * (sh - 1) * dtype.get_size()) s *= sh if self.order == 'C': strides.reverse() @@ -981,9 +1040,9 @@ shapelen = len(self.shape) if shapelen == 1: rffi.c_memcpy( - rffi.ptradd(self.storage, self.start * itemsize), - rffi.ptradd(w_value.storage, w_value.start * itemsize), - self.size * itemsize + rffi.ptradd(self.storage, self.start), + rffi.ptradd(w_value.storage, w_value.start), + self.size ) else: dest = SkipLastAxisIterator(self) @@ -998,7 +1057,7 @@ dest.next() def copy(self, space): - array = W_NDimArray(self.size, self.shape[:], self.dtype, self.order) + array = W_NDimArray(self.shape[:], self.dtype, self.order) array.setslice(space, self) return array @@ -1012,14 +1071,15 @@ class W_NDimSlice(ViewArray): - def __init__(self, start, strides, backstrides, shape, parent): + def __init__(self, start, strides, backstrides, shape, parent, dtype=None): assert isinstance(parent, ConcreteArray) if isinstance(parent, W_NDimSlice): parent = parent.parent self.strides = strides self.backstrides = backstrides - ViewArray.__init__(self, 
support.product(shape), shape, parent.dtype, - parent.order, parent) + if dtype is None: + dtype = parent.dtype + ViewArray.__init__(self, shape, dtype, parent.order, parent) self.start = start def create_iter(self, transforms=None): @@ -1029,18 +1089,19 @@ def setshape(self, space, new_shape): if len(self.shape) < 1: return - elif len(self.shape) < 2: + elif len(self.shape) < 2 or self.size < 1: # TODO: this code could be refactored into calc_strides # but then calc_strides would have to accept a stepping factor strides = [] backstrides = [] - s = self.strides[0] + dtype = self.find_dtype() + s = self.strides[0] // dtype.get_size() if self.order == 'C': new_shape.reverse() for sh in new_shape: - strides.append(s) - backstrides.append(s * (sh - 1)) - s *= sh + strides.append(s * dtype.get_size()) + backstrides.append(s * (sh - 1) * dtype.get_size()) + s *= max(1, sh) if self.order == 'C': strides.reverse() backstrides.reverse() @@ -1067,14 +1128,16 @@ """ def setitem(self, item, value): self.invalidated() - self.dtype.setitem(self.storage, item, value) + self.dtype.setitem(self, item, value) def setshape(self, space, new_shape): self.shape = new_shape self.calc_strides(new_shape) def create_iter(self, transforms=None): - return ArrayIterator(self.size).apply_transformations(self, transforms) + esize = self.find_dtype().get_size() + return ArrayIterator(self.size, esize).apply_transformations(self, + transforms) def create_sig(self): return signature.ArraySignature(self.dtype) @@ -1082,22 +1145,18 @@ def __del__(self): lltype.free(self.storage, flavor='raw', track_allocation=False) -def _find_size_and_shape(space, w_size): +def _find_shape(space, w_size): if space.isinstance_w(w_size, space.w_int): - size = space.int_w(w_size) - shape = [size] - else: - size = 1 - shape = [] - for w_item in space.fixedview(w_size): - item = space.int_w(w_item) - size *= item - shape.append(item) - return size, shape + return [space.int_w(w_size)] + shape = [] + for w_item in 
space.fixedview(w_size): + shape.append(space.int_w(w_item)) + return shape @unwrap_spec(subok=bool, copy=bool, ownmaskna=bool) def array(space, w_item_or_iterable, w_dtype=None, w_order=None, - subok=True, copy=True, w_maskna=None, ownmaskna=False): + subok=True, copy=True, w_maskna=None, ownmaskna=False, + w_ndmin=None): # find scalar if w_maskna is None: w_maskna = space.w_None @@ -1127,28 +1186,33 @@ if copy: return w_item_or_iterable.copy(space) return w_item_or_iterable - shape, elems_w = find_shape_and_elems(space, w_item_or_iterable) + if w_dtype is None or space.is_w(w_dtype, space.w_None): + dtype = None + else: + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) + shape, elems_w = find_shape_and_elems(space, w_item_or_iterable, dtype) # they come back in C order - size = len(elems_w) - if w_dtype is None or space.is_w(w_dtype, space.w_None): - w_dtype = None + if dtype is None: for w_elem in elems_w: - w_dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, - w_dtype) - if w_dtype is interp_dtype.get_dtype_cache(space).w_float64dtype: + dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, + dtype) + if dtype is interp_dtype.get_dtype_cache(space).w_float64dtype: break - if w_dtype is None: - w_dtype = space.w_None - dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) - ) - arr = W_NDimArray(size, shape[:], dtype=dtype, order=order) + if dtype is None: + dtype = interp_dtype.get_dtype_cache(space).w_float64dtype shapelen = len(shape) - arr_iter = ArrayIterator(arr.size) + if w_ndmin is not None and not space.is_w(w_ndmin, space.w_None): + ndmin = space.int_w(w_ndmin) + if ndmin > shapelen: + shape = [1] * (ndmin - shapelen) + shape + shapelen = ndmin + arr = W_NDimArray(shape[:], dtype=dtype, order=order) + arr_iter = arr.create_iter() # XXX we might want to have a jitdriver here for i in range(len(elems_w)): w_elem 
= elems_w[i] - dtype.setitem(arr.storage, arr_iter.offset, + dtype.setitem(arr, arr_iter.offset, dtype.coerce(space, w_elem)) arr_iter = arr_iter.next(shapelen) return arr @@ -1157,22 +1221,22 @@ dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) ) - size, shape = _find_size_and_shape(space, w_size) + shape = _find_shape(space, w_size) if not shape: return scalar_w(space, dtype, space.wrap(0)) - return space.wrap(W_NDimArray(size, shape[:], dtype=dtype)) + return space.wrap(W_NDimArray(shape[:], dtype=dtype)) def ones(space, w_size, w_dtype=None): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) ) - size, shape = _find_size_and_shape(space, w_size) + shape = _find_shape(space, w_size) if not shape: return scalar_w(space, dtype, space.wrap(1)) - arr = W_NDimArray(size, shape[:], dtype=dtype) + arr = W_NDimArray(shape[:], dtype=dtype) one = dtype.box(1) - arr.dtype.fill(arr.storage, one, 0, size) + arr.dtype.fill(arr.storage, one, 0, arr.size) return space.wrap(arr) @unwrap_spec(arr=BaseArray, skipna=bool, keepdims=bool) @@ -1220,13 +1284,13 @@ "array dimensions must agree except for axis being concatenated")) elif i == axis: shape[i] += axis_size - res = W_NDimArray(support.product(shape), shape, dtype, 'C') + res = W_NDimArray(shape, dtype, 'C') chunks = [Chunk(0, i, 1, i) for i in shape] axis_start = 0 for arr in args_w: chunks[axis] = Chunk(axis_start, axis_start + arr.shape[axis], 1, arr.shape[axis]) - res.create_slice(chunks).setslice(space, arr) + Chunks(chunks).apply(res).setslice(space, arr) axis_start += arr.shape[axis] return res @@ -1250,6 +1314,7 @@ __mul__ = interp2app(BaseArray.descr_mul), __div__ = interp2app(BaseArray.descr_div), __truediv__ = interp2app(BaseArray.descr_truediv), + __floordiv__ = interp2app(BaseArray.descr_floordiv), __mod__ = interp2app(BaseArray.descr_mod), __divmod__ = interp2app(BaseArray.descr_divmod), 
__pow__ = interp2app(BaseArray.descr_pow), @@ -1264,6 +1329,7 @@ __rmul__ = interp2app(BaseArray.descr_rmul), __rdiv__ = interp2app(BaseArray.descr_rdiv), __rtruediv__ = interp2app(BaseArray.descr_rtruediv), + __rfloordiv__ = interp2app(BaseArray.descr_rfloordiv), __rmod__ = interp2app(BaseArray.descr_rmod), __rdivmod__ = interp2app(BaseArray.descr_rdivmod), __rpow__ = interp2app(BaseArray.descr_rpow), @@ -1312,6 +1378,7 @@ std = interp2app(BaseArray.descr_std), fill = interp2app(BaseArray.descr_fill), + tostring = interp2app(BaseArray.descr_tostring), copy = interp2app(BaseArray.descr_copy), flatten = interp2app(BaseArray.descr_flatten), @@ -1334,7 +1401,7 @@ self.iter = sig.create_frame(arr).get_final_iter() self.base = arr self.index = 0 - ViewArray.__init__(self, arr.size, [arr.size], arr.dtype, arr.order, + ViewArray.__init__(self, [arr.get_size()], arr.dtype, arr.order, arr) def descr_next(self, space): @@ -1349,7 +1416,7 @@ return self def descr_len(self, space): - return space.wrap(self.size) + return space.wrap(self.get_size()) def descr_index(self, space): return space.wrap(self.index) @@ -1367,28 +1434,26 @@ raise OperationError(space.w_IndexError, space.wrap('unsupported iterator index')) base = self.base - start, stop, step, lngth = space.decode_index4(w_idx, base.size) + start, stop, step, lngth = space.decode_index4(w_idx, base.get_size()) # setslice would have been better, but flat[u:v] for arbitrary # shapes of array a cannot be represented as a[x1:x2, y1:y2] basei = ViewIterator(base.start, base.strides, - base.backstrides,base.shape) + base.backstrides, base.shape) shapelen = len(base.shape) basei = basei.next_skip_x(shapelen, start) if lngth <2: return base.getitem(basei.offset) - ri = ArrayIterator(lngth) - res = W_NDimArray(lngth, [lngth], base.dtype, - base.order) + res = W_NDimArray([lngth], base.dtype, base.order) + ri = res.create_iter() while not ri.done(): flat_get_driver.jit_merge_point(shapelen=shapelen, base=base, basei=basei, 
step=step, res=res, - ri=ri, - ) + ri=ri) w_val = base.getitem(basei.offset) - res.setitem(ri.offset,w_val) + res.setitem(ri.offset, w_val) basei = basei.next_skip_x(shapelen, step) ri = ri.next(shapelen) return res @@ -1399,27 +1464,28 @@ raise OperationError(space.w_IndexError, space.wrap('unsupported iterator index')) base = self.base - start, stop, step, lngth = space.decode_index4(w_idx, base.size) + start, stop, step, lngth = space.decode_index4(w_idx, base.get_size()) arr = convert_to_array(space, w_value) - ai = 0 + ri = arr.create_iter() basei = ViewIterator(base.start, base.strides, - base.backstrides,base.shape) + base.backstrides, base.shape) shapelen = len(base.shape) basei = basei.next_skip_x(shapelen, start) while lngth > 0: flat_set_driver.jit_merge_point(shapelen=shapelen, - basei=basei, - base=base, - step=step, - arr=arr, - ai=ai, - lngth=lngth, - ) - v = arr.getitem(ai).convert_to(base.dtype) + basei=basei, + base=base, + step=step, + arr=arr, + lngth=lngth, + ri=ri) + v = arr.getitem(ri.offset).convert_to(base.dtype) base.setitem(basei.offset, v) # need to repeat input values until all assignments are done - ai = (ai + 1) % arr.size basei = basei.next_skip_x(shapelen, step) + ri = ri.next(shapelen) + # WTF is numpy thinking? 
+ ri.offset %= arr.size lngth -= 1 def create_sig(self): @@ -1427,9 +1493,9 @@ def create_iter(self, transforms=None): return ViewIterator(self.base.start, self.base.strides, - self.base.backstrides, - self.base.shape).apply_transformations(self.base, - transforms) + self.base.backstrides, + self.base.shape).apply_transformations(self.base, + transforms) def descr_base(self, space): return space.wrap(self.base) diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -51,9 +51,11 @@ raise OperationError(space.w_ValueError, space.wrap( "string is smaller than requested size")) - a = W_NDimArray(num_items, [num_items], dtype=dtype) - for i, val in enumerate(items): - a.dtype.setitem(a.storage, i, val) + a = W_NDimArray([num_items], dtype=dtype) + ai = a.create_iter() + for val in items: + a.dtype.setitem(a, ai.offset, val) + ai = ai.next(1) return space.wrap(a) @@ -61,6 +63,7 @@ from pypy.module.micronumpy.interp_numarray import W_NDimArray itemsize = dtype.itemtype.get_element_size() + assert itemsize >= 0 if count == -1: count = length / itemsize if length % itemsize != 0: @@ -71,20 +74,23 @@ raise OperationError(space.w_ValueError, space.wrap( "string is smaller than requested size")) - a = W_NDimArray(count, [count], dtype=dtype) - fromstring_loop(a, count, dtype, itemsize, s) + a = W_NDimArray([count], dtype=dtype) + fromstring_loop(a, dtype, itemsize, s) return space.wrap(a) -fromstring_driver = jit.JitDriver(greens=[], reds=['count', 'i', 'itemsize', - 'dtype', 's', 'a']) +fromstring_driver = jit.JitDriver(greens=[], reds=['i', 'itemsize', + 'dtype', 'ai', 's', 'a']) -def fromstring_loop(a, count, dtype, itemsize, s): +def fromstring_loop(a, dtype, itemsize, s): i = 0 - while i < count: - fromstring_driver.jit_merge_point(a=a, count=count, dtype=dtype, - itemsize=itemsize, s=s, i=i) + ai = a.create_iter() + while not 
ai.done(): + fromstring_driver.jit_merge_point(a=a, dtype=dtype, + itemsize=itemsize, s=s, i=i, + ai=ai) val = dtype.itemtype.runpack_str(s[i*itemsize:i*itemsize + itemsize]) - a.dtype.setitem(a.storage, i, val) + a.dtype.setitem(a, ai.offset, val) + ai = ai.next(1) i += 1 @unwrap_spec(s=str, count=int, sep=str) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -28,26 +28,38 @@ return self.identity def descr_call(self, space, __args__): + from interp_numarray import BaseArray args_w, kwds_w = __args__.unpack() # it occurs to me that we don't support any datatypes that # require casting, change it later when we do kwds_w.pop('casting', None) w_subok = kwds_w.pop('subok', None) w_out = kwds_w.pop('out', space.w_None) - if ((w_subok is not None and space.is_true(w_subok)) or - not space.is_w(w_out, space.w_None)): + # Setup a default value for out + if space.is_w(w_out, space.w_None): + out = None + else: + out = w_out + if (w_subok is not None and space.is_true(w_subok)): raise OperationError(space.w_NotImplementedError, space.wrap("parameters unsupported")) if kwds_w or len(args_w) < self.argcount: raise OperationError(space.w_ValueError, space.wrap("invalid number of arguments") ) - elif len(args_w) > self.argcount: - # The extra arguments should actually be the output array, but we - # don't support that yet. 
+ elif (len(args_w) > self.argcount and out is not None) or \ + (len(args_w) > self.argcount + 1): raise OperationError(space.w_TypeError, space.wrap("invalid number of arguments") ) + # Override the default out value, if it has been provided in w_wargs + if len(args_w) > self.argcount: + out = args_w[-1] + else: + args_w = args_w[:] + [out] + if out is not None and not isinstance(out, BaseArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) return self.call(space, args_w) @unwrap_spec(skipna=bool, keepdims=bool) @@ -105,28 +117,33 @@ array([[ 1, 5], [ 9, 13]]) """ - if not space.is_w(w_out, space.w_None): - raise OperationError(space.w_NotImplementedError, space.wrap( - "out not supported")) + from pypy.module.micronumpy.interp_numarray import BaseArray if w_axis is None: axis = 0 elif space.is_w(w_axis, space.w_None): axis = -1 else: axis = space.int_w(w_axis) - return self.reduce(space, w_obj, False, False, axis, keepdims) + if space.is_w(w_out, space.w_None): + out = None + elif not isinstance(w_out, BaseArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) + else: + out = w_out + return self.reduce(space, w_obj, False, False, axis, keepdims, out) - def reduce(self, space, w_obj, multidim, promote_to_largest, dim, - keepdims=False): + def reduce(self, space, w_obj, multidim, promote_to_largest, axis, + keepdims=False, out=None): from pypy.module.micronumpy.interp_numarray import convert_to_array, \ - Scalar, ReduceArray + Scalar, ReduceArray, W_NDimArray if self.argcount != 2: raise OperationError(space.w_ValueError, space.wrap("reduce only " "supported for binary functions")) assert isinstance(self, W_Ufunc2) obj = convert_to_array(space, w_obj) - if dim >= len(obj.shape): - raise OperationError(space.w_ValueError, space.wrap("axis(=%d) out of bounds" % dim)) + if axis >= len(obj.shape): + raise OperationError(space.w_ValueError, space.wrap("axis(=%d) out of bounds" % axis)) if 
isinstance(obj, Scalar): raise OperationError(space.w_TypeError, space.wrap("cannot reduce " "on a scalar")) @@ -144,21 +161,55 @@ if self.identity is None and size == 0: raise operationerrfmt(space.w_ValueError, "zero-size array to " "%s.reduce without identity", self.name) - if shapelen > 1 and dim >= 0: - return self.do_axis_reduce(obj, dtype, dim, keepdims) - arr = ReduceArray(self.func, self.name, self.identity, obj, dtype) - return loop.compute(arr) + if shapelen > 1 and axis >= 0: + if keepdims: + shape = obj.shape[:axis] + [1] + obj.shape[axis + 1:] + else: + shape = obj.shape[:axis] + obj.shape[axis + 1:] + if out: + #Test for shape agreement + if len(out.shape) > len(shape): + raise operationerrfmt(space.w_ValueError, + 'output parameter for reduction operation %s' + + ' has too many dimensions', self.name) + elif len(out.shape) < len(shape): + raise operationerrfmt(space.w_ValueError, + 'output parameter for reduction operation %s' + + ' does not have enough dimensions', self.name) + elif out.shape != shape: + raise operationerrfmt(space.w_ValueError, + 'output parameter shape mismatch, expecting [%s]' + + ' , got [%s]', + ",".join([str(x) for x in shape]), + ",".join([str(x) for x in out.shape]), + ) + #Test for dtype agreement, perhaps create an itermediate + #if out.dtype != dtype: + # raise OperationError(space.w_TypeError, space.wrap( + # "mismatched dtypes")) + return self.do_axis_reduce(obj, out.find_dtype(), axis, out) + else: + result = W_NDimArray(shape, dtype) + return self.do_axis_reduce(obj, dtype, axis, result) + if out: + if len(out.shape)>0: + raise operationerrfmt(space.w_ValueError, "output parameter " + "for reduction operation %s has too many" + " dimensions",self.name) + arr = ReduceArray(self.func, self.name, self.identity, obj, + out.find_dtype()) + val = loop.compute(arr) + assert isinstance(out, Scalar) + out.value = val + else: + arr = ReduceArray(self.func, self.name, self.identity, obj, dtype) + val = loop.compute(arr) + 
return val - def do_axis_reduce(self, obj, dtype, dim, keepdims): - from pypy.module.micronumpy.interp_numarray import AxisReduce,\ - W_NDimArray - if keepdims: - shape = obj.shape[:dim] + [1] + obj.shape[dim + 1:] - else: - shape = obj.shape[:dim] + obj.shape[dim + 1:] - result = W_NDimArray(support.product(shape), shape, dtype) + def do_axis_reduce(self, obj, dtype, axis, result): + from pypy.module.micronumpy.interp_numarray import AxisReduce arr = AxisReduce(self.func, self.name, self.identity, obj.shape, dtype, - result, obj, dim) + result, obj, axis) loop.compute(arr) return arr.left @@ -176,24 +227,55 @@ self.bool_result = bool_result def call(self, space, args_w): - from pypy.module.micronumpy.interp_numarray import (Call1, - convert_to_array, Scalar) - - [w_obj] = args_w + from pypy.module.micronumpy.interp_numarray import (Call1, BaseArray, + convert_to_array, Scalar, shape_agreement) + if len(args_w)<2: + [w_obj] = args_w + out = None + else: + [w_obj, out] = args_w + if space.is_w(out, space.w_None): + out = None w_obj = convert_to_array(space, w_obj) calc_dtype = find_unaryop_result_dtype(space, w_obj.find_dtype(), promote_to_float=self.promote_to_float, promote_bools=self.promote_bools) - if self.bool_result: + if out: + if not isinstance(out, BaseArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) + res_dtype = out.find_dtype() + elif self.bool_result: res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype else: res_dtype = calc_dtype if isinstance(w_obj, Scalar): - return space.wrap(self.func(calc_dtype, w_obj.value.convert_to(calc_dtype))) - - w_res = Call1(self.func, self.name, w_obj.shape, calc_dtype, res_dtype, - w_obj) + arr = self.func(calc_dtype, w_obj.value.convert_to(calc_dtype)) + if isinstance(out,Scalar): + out.value=arr + elif isinstance(out, BaseArray): + out.fill(space, arr) + else: + out = arr + return space.wrap(out) + if out: + assert isinstance(out, BaseArray) # For translation + 
broadcast_shape = shape_agreement(space, w_obj.shape, out.shape) + if not broadcast_shape or broadcast_shape != out.shape: + raise operationerrfmt(space.w_ValueError, + 'output parameter shape mismatch, could not broadcast [%s]' + + ' to [%s]', + ",".join([str(x) for x in w_obj.shape]), + ",".join([str(x) for x in out.shape]), + ) + w_res = Call1(self.func, self.name, out.shape, calc_dtype, + res_dtype, w_obj, out) + #Force it immediately + w_res.get_concrete() + else: + w_res = Call1(self.func, self.name, w_obj.shape, calc_dtype, + res_dtype, w_obj) w_obj.add_invalidates(w_res) return w_res @@ -212,32 +294,61 @@ def call(self, space, args_w): from pypy.module.micronumpy.interp_numarray import (Call2, - convert_to_array, Scalar, shape_agreement) - - [w_lhs, w_rhs] = args_w + convert_to_array, Scalar, shape_agreement, BaseArray) + if len(args_w)>2: + [w_lhs, w_rhs, w_out] = args_w + else: + [w_lhs, w_rhs] = args_w + w_out = None w_lhs = convert_to_array(space, w_lhs) w_rhs = convert_to_array(space, w_rhs) - calc_dtype = find_binop_result_dtype(space, - w_lhs.find_dtype(), w_rhs.find_dtype(), - int_only=self.int_only, - promote_to_float=self.promote_to_float, - promote_bools=self.promote_bools, - ) + if space.is_w(w_out, space.w_None) or w_out is None: + out = None + calc_dtype = find_binop_result_dtype(space, + w_lhs.find_dtype(), w_rhs.find_dtype(), + int_only=self.int_only, + promote_to_float=self.promote_to_float, + promote_bools=self.promote_bools, + ) + elif not isinstance(w_out, BaseArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) + else: + out = w_out + calc_dtype = out.find_dtype() if self.comparison_func: res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype else: res_dtype = calc_dtype if isinstance(w_lhs, Scalar) and isinstance(w_rhs, Scalar): - return space.wrap(self.func(calc_dtype, + arr = self.func(calc_dtype, w_lhs.value.convert_to(calc_dtype), w_rhs.value.convert_to(calc_dtype) - )) + ) + if 
isinstance(out,Scalar): + out.value=arr + elif isinstance(out, BaseArray): + out.fill(space, arr) + else: + out = arr + return space.wrap(out) new_shape = shape_agreement(space, w_lhs.shape, w_rhs.shape) + # Test correctness of out.shape + if out and out.shape != shape_agreement(space, new_shape, out.shape): + raise operationerrfmt(space.w_ValueError, + 'output parameter shape mismatch, could not broadcast [%s]' + + ' to [%s]', + ",".join([str(x) for x in new_shape]), + ",".join([str(x) for x in out.shape]), + ) w_res = Call2(self.func, self.name, new_shape, calc_dtype, - res_dtype, w_lhs, w_rhs) + res_dtype, w_lhs, w_rhs, out) w_lhs.add_invalidates(w_res) w_rhs.add_invalidates(w_res) + if out: + #out.add_invalidates(w_res) #causes a recursion loop + w_res.get_concrete() return w_res @@ -314,7 +425,7 @@ return dt if dt.num >= 5: return interp_dtype.get_dtype_cache(space).w_float64dtype - for bytes, dtype in interp_dtype.get_dtype_cache(space).dtypes_by_num_bytes: + for bytes, dtype in interp_dtype.get_dtype_cache(space).float_dtypes_by_num_bytes: if (dtype.kind == interp_dtype.FLOATINGLTR and dtype.itemtype.get_element_size() > dt.itemtype.get_element_size()): return dtype @@ -388,6 +499,7 @@ "int_only": True}), ("bitwise_xor", "bitwise_xor", 2, {"int_only": True}), ("invert", "invert", 1, {"int_only": True}), + ("floor_divide", "floordiv", 2, {"promote_bools": True}), ("divide", "div", 2, {"promote_bools": True}), ("true_divide", "div", 2, {"promote_to_float": True}), ("mod", "mod", 2, {"promote_bools": True}), @@ -403,6 +515,9 @@ ("greater_equal", "ge", 2, {"comparison_func": True}), ("isnan", "isnan", 1, {"bool_result": True}), ("isinf", "isinf", 1, {"bool_result": True}), + ("isneginf", "isneginf", 1, {"bool_result": True}), + ("isposinf", "isposinf", 1, {"bool_result": True}), + ("isfinite", "isfinite", 1, {"bool_result": True}), ('logical_and', 'logical_and', 2, {'comparison_func': True, 'identity': 1}), @@ -420,12 +535,16 @@ ("negative", "neg", 1), 
("absolute", "abs", 1), ("sign", "sign", 1, {"promote_bools": True}), + ("signbit", "signbit", 1, {"bool_result": True}), ("reciprocal", "reciprocal", 1), ("fabs", "fabs", 1, {"promote_to_float": True}), + ("fmod", "fmod", 2, {"promote_to_float": True}), ("floor", "floor", 1, {"promote_to_float": True}), ("ceil", "ceil", 1, {"promote_to_float": True}), ("exp", "exp", 1, {"promote_to_float": True}), + ("exp2", "exp2", 1, {"promote_to_float": True}), + ("expm1", "expm1", 1, {"promote_to_float": True}), ('sqrt', 'sqrt', 1, {'promote_to_float': True}), @@ -435,12 +554,23 @@ ("arcsin", "arcsin", 1, {"promote_to_float": True}), ("arccos", "arccos", 1, {"promote_to_float": True}), ("arctan", "arctan", 1, {"promote_to_float": True}), + ("arctan2", "arctan2", 2, {"promote_to_float": True}), ("sinh", "sinh", 1, {"promote_to_float": True}), ("cosh", "cosh", 1, {"promote_to_float": True}), ("tanh", "tanh", 1, {"promote_to_float": True}), ("arcsinh", "arcsinh", 1, {"promote_to_float": True}), ("arccosh", "arccosh", 1, {"promote_to_float": True}), ("arctanh", "arctanh", 1, {"promote_to_float": True}), + + ("radians", "radians", 1, {"promote_to_float": True}), + ("degrees", "degrees", 1, {"promote_to_float": True}), + + ("log", "log", 1, {"promote_to_float": True}), + ("log2", "log2", 1, {"promote_to_float": True}), + ("log10", "log10", 1, {"promote_to_float": True}), + ("log1p", "log1p", 1, {"promote_to_float": True}), + ("logaddexp", "logaddexp", 2, {"promote_to_float": True}), + ("logaddexp2", "logaddexp2", 2, {"promote_to_float": True}), ]: self.add_ufunc(space, *ufunc_def) diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -4,6 +4,7 @@ ViewTransform, BroadcastTransform from pypy.tool.pairtype import extendabletype from pypy.module.micronumpy.loop import ComputationDone +from pypy.rlib import jit """ Signature specifies both the numpy expression that has 
been constructed and the assembler to be compiled. This is a very important observation - @@ -142,11 +143,10 @@ from pypy.module.micronumpy.interp_numarray import ConcreteArray concr = arr.get_concrete() assert isinstance(concr, ConcreteArray) - storage = concr.storage if self.iter_no >= len(iterlist): iterlist.append(concr.create_iter(transforms)) if self.array_no >= len(arraylist): - arraylist.append(storage) + arraylist.append(concr) def eval(self, frame, arr): iter = frame.iterators[self.iter_no] @@ -216,13 +216,14 @@ return self.child.eval(frame, arr.child) class Call1(Signature): - _immutable_fields_ = ['unfunc', 'name', 'child', 'dtype'] + _immutable_fields_ = ['unfunc', 'name', 'child', 'res', 'dtype'] - def __init__(self, func, name, dtype, child): + def __init__(self, func, name, dtype, child, res=None): self.unfunc = func self.child = child self.name = name self.dtype = dtype + self.res = res def hash(self): return compute_hash(self.name) ^ intmask(self.child.hash() << 1) @@ -256,6 +257,29 @@ v = self.child.eval(frame, arr.values).convert_to(arr.calc_dtype) return self.unfunc(arr.calc_dtype, v) + +class BroadcastUfunc(Call1): + def _invent_numbering(self, cache, allnumbers): + self.res._invent_numbering(cache, allnumbers) + self.child._invent_numbering(new_cache(), allnumbers) + + def debug_repr(self): + return 'BroadcastUfunc(%s, %s)' % (self.name, self.child.debug_repr()) + + def _create_iter(self, iterlist, arraylist, arr, transforms): + from pypy.module.micronumpy.interp_numarray import Call1 + + assert isinstance(arr, Call1) + vtransforms = transforms + [BroadcastTransform(arr.values.shape)] + self.child._create_iter(iterlist, arraylist, arr.values, vtransforms) + self.res._create_iter(iterlist, arraylist, arr.res, transforms) + + def eval(self, frame, arr): + from pypy.module.micronumpy.interp_numarray import Call1 + assert isinstance(arr, Call1) + v = self.child.eval(frame, arr.values).convert_to(arr.calc_dtype) + return 
self.unfunc(arr.calc_dtype, v) + class Call2(Signature): _immutable_fields_ = ['binfunc', 'name', 'calc_dtype', 'left', 'right'] @@ -316,7 +340,31 @@ assert isinstance(arr, ResultArray) offset = frame.get_final_iter().offset - arr.left.setitem(offset, self.right.eval(frame, arr.right)) + val = self.right.eval(frame, arr.right) + arr.left.setitem(offset, val) + +class BroadcastResultSignature(ResultSignature): + def _create_iter(self, iterlist, arraylist, arr, transforms): + from pypy.module.micronumpy.interp_numarray import ResultArray + + assert isinstance(arr, ResultArray) + rtransforms = transforms + [BroadcastTransform(arr.left.shape)] + self.left._create_iter(iterlist, arraylist, arr.left, transforms) + self.right._create_iter(iterlist, arraylist, arr.right, rtransforms) + +class ToStringSignature(Call1): + def __init__(self, dtype, child): + Call1.__init__(self, None, 'tostring', dtype, child) + + @jit.unroll_safe + def eval(self, frame, arr): + from pypy.module.micronumpy.interp_numarray import ToStringArray + + assert isinstance(arr, ToStringArray) + arr.res_str.setitem(0, self.child.eval(frame, arr.values).convert_to( + self.dtype)) + for i in range(arr.item_size): + arr.s.append(arr.res_str_casted[i]) class BroadcastLeft(Call2): def _invent_numbering(self, cache, allnumbers): @@ -441,6 +489,5 @@ cur = arr.left.getitem(iterator.offset) value = self.binfunc(self.calc_dtype, cur, v) arr.left.setitem(iterator.offset, value) - def debug_repr(self): return 'AxisReduceSig(%s, %s)' % (self.name, self.right.debug_repr()) diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -1,6 +1,14 @@ from pypy.rlib import jit from pypy.interpreter.error import OperationError +def enumerate_chunks(chunks): + result = [] + i = -1 + for chunk in chunks: + i += chunk.axis_step + result.append((i, chunk)) + return result + @jit.look_inside_iff(lambda shape, start, 
strides, backstrides, chunks: jit.isconstant(len(chunks)) ) @@ -10,7 +18,7 @@ rstart = start rshape = [] i = -1 - for i, chunk in enumerate(chunks): + for i, chunk in enumerate_chunks(chunks): if chunk.step != 0: rstrides.append(strides[i] * chunk.step) rbackstrides.append(strides[i] * (chunk.lgt - 1) * chunk.step) @@ -38,22 +46,31 @@ rbackstrides = [0] * (len(res_shape) - len(orig_shape)) + rbackstrides return rstrides, rbackstrides -def find_shape_and_elems(space, w_iterable): +def is_single_elem(space, w_elem, is_rec_type): + if (is_rec_type and space.isinstance_w(w_elem, space.w_tuple)): + return True + if space.issequence_w(w_elem): + return False + return True + +def find_shape_and_elems(space, w_iterable, dtype): shape = [space.len_w(w_iterable)] batch = space.listview(w_iterable) + is_rec_type = dtype is not None and dtype.is_record_type() while True: new_batch = [] if not batch: return shape, [] - if not space.issequence_w(batch[0]): - for elem in batch: - if space.issequence_w(elem): + if is_single_elem(space, batch[0], is_rec_type): + for w_elem in batch: + if not is_single_elem(space, w_elem, is_rec_type): raise OperationError(space.w_ValueError, space.wrap( "setting an array element with a sequence")) return shape, batch size = space.len_w(batch[0]) for w_elem in batch: - if not space.issequence_w(w_elem) or space.len_w(w_elem) != size: + if (is_single_elem(space, w_elem, is_rec_type) or + space.len_w(w_elem) != size): raise OperationError(space.w_ValueError, space.wrap( "setting an array element with a sequence")) new_batch += space.listview(w_elem) diff --git a/pypy/module/micronumpy/test/test_base.py b/pypy/module/micronumpy/test/test_base.py --- a/pypy/module/micronumpy/test/test_base.py +++ b/pypy/module/micronumpy/test/test_base.py @@ -4,6 +4,8 @@ from pypy.module.micronumpy.interp_ufuncs import (find_binop_result_dtype, find_unaryop_result_dtype) from pypy.module.micronumpy.interp_boxes import W_Float64Box +from 
pypy.module.micronumpy.interp_dtype import nonnative_byteorder_prefix,\ + byteorder_prefix from pypy.conftest import option import sys @@ -15,14 +17,16 @@ sys.modules['numpypy'] = numpy sys.modules['_numpypy'] = numpy cls.space = gettestobjspace(usemodules=['micronumpy']) + cls.w_non_native_prefix = cls.space.wrap(nonnative_byteorder_prefix) + cls.w_native_prefix = cls.space.wrap(byteorder_prefix) class TestSignature(object): def test_binop_signature(self, space): float64_dtype = get_dtype_cache(space).w_float64dtype bool_dtype = get_dtype_cache(space).w_booldtype - ar = W_NDimArray(10, [10], dtype=float64_dtype) - ar2 = W_NDimArray(10, [10], dtype=float64_dtype) + ar = W_NDimArray([10], dtype=float64_dtype) + ar2 = W_NDimArray([10], dtype=float64_dtype) v1 = ar.descr_add(space, ar) v2 = ar.descr_add(space, Scalar(float64_dtype, W_Float64Box(2.0))) sig1 = v1.find_sig() @@ -40,7 +44,7 @@ v4 = ar.descr_add(space, ar) assert v1.find_sig() is v4.find_sig() - bool_ar = W_NDimArray(10, [10], dtype=bool_dtype) + bool_ar = W_NDimArray([10], dtype=bool_dtype) v5 = ar.descr_add(space, bool_ar) assert v5.find_sig() is not v1.find_sig() assert v5.find_sig() is not v2.find_sig() @@ -57,7 +61,7 @@ def test_slice_signature(self, space): float64_dtype = get_dtype_cache(space).w_float64dtype - ar = W_NDimArray(10, [10], dtype=float64_dtype) + ar = W_NDimArray([10], dtype=float64_dtype) v1 = ar.descr_getitem(space, space.wrap(slice(1, 3, 1))) v2 = ar.descr_getitem(space, space.wrap(slice(4, 6, 1))) assert v1.find_sig() is v2.find_sig() diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -1,5 +1,7 @@ +import py +from pypy.conftest import option from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest - +from pypy.interpreter.gateway import interp2app class AppTestDtypes(BaseNumpyAppTest): def test_dtype(self): @@ -12,7 +14,10 
@@ assert dtype(d) is d assert dtype(None) is dtype(float) assert dtype('int8').name == 'int8' + assert dtype(int).fields is None + assert dtype(int).names is None raises(TypeError, dtype, 1042) + raises(KeyError, 'dtype(int)["asdasd"]') def test_dtype_eq(self): from _numpypy import dtype @@ -53,13 +58,13 @@ assert a[i] is True_ def test_copy_array_with_dtype(self): - from _numpypy import array, False_, True_, int64 + from _numpypy import array, False_, longlong a = array([0, 1, 2, 3], dtype=long) # int on 64-bit, long in 32-bit - assert isinstance(a[0], int64) + assert isinstance(a[0], longlong) b = a.copy() - assert isinstance(b[0], int64) + assert isinstance(b[0], longlong) a = array([0, 1, 2, 3], dtype=bool) assert a[0] is False_ @@ -81,17 +86,17 @@ assert a[i] is True_ def test_zeros_long(self): - from _numpypy import zeros, int64 + from _numpypy import zeros, longlong a = zeros(10, dtype=long) for i in range(10): - assert isinstance(a[i], int64) + assert isinstance(a[i], longlong) assert a[1] == 0 def test_ones_long(self): - from _numpypy import ones, int64 + from _numpypy import ones, longlong a = ones(10, dtype=long) for i in range(10): - assert isinstance(a[i], int64) + assert isinstance(a[i], longlong) assert a[1] == 1 def test_overflow(self): @@ -181,17 +186,18 @@ assert dtype("float") is dtype(float) -class AppTestTypes(BaseNumpyAppTest): +class AppTestTypes(BaseNumpyAppTest): def test_abstract_types(self): import _numpypy as numpy raises(TypeError, numpy.generic, 0) raises(TypeError, numpy.number, 0) raises(TypeError, numpy.integer, 0) exc = raises(TypeError, numpy.signedinteger, 0) - assert str(exc.value) == "cannot create 'signedinteger' instances" + assert 'cannot create' in str(exc.value) + assert 'signedinteger' in str(exc.value) exc = raises(TypeError, numpy.unsignedinteger, 0) - assert str(exc.value) == "cannot create 'unsignedinteger' instances" - + assert 'cannot create' in str(exc.value) + assert 'unsignedinteger' in str(exc.value) 
raises(TypeError, numpy.floating, 0) raises(TypeError, numpy.inexact, 0) @@ -296,6 +302,7 @@ else: raises(OverflowError, numpy.int32, 2147483648) raises(OverflowError, numpy.int32, '2147483648') + assert numpy.dtype('int32') is numpy.dtype(numpy.int32) def test_uint32(self): import sys @@ -327,15 +334,11 @@ assert numpy.dtype(numpy.int64).type is numpy.int64 assert numpy.int64(3) == 3 - if sys.maxint >= 2 ** 63 - 1: - assert numpy.int64(9223372036854775807) == 9223372036854775807 - assert numpy.int64('9223372036854775807') == 9223372036854775807 - else: - raises(OverflowError, numpy.int64, 9223372036854775807) - raises(OverflowError, numpy.int64, '9223372036854775807') + assert numpy.int64(9223372036854775807) == 9223372036854775807 + assert numpy.int64(9223372036854775807) == 9223372036854775807 raises(OverflowError, numpy.int64, 9223372036854775808) - raises(OverflowError, numpy.int64, '9223372036854775808') + raises(OverflowError, numpy.int64, 9223372036854775808L) def test_uint64(self): import sys @@ -404,10 +407,29 @@ assert issubclass(int64, int) assert int_ is int64 + def test_various_types(self): + import _numpypy as numpy + import sys + + assert numpy.int16 is numpy.short + assert numpy.int8 is numpy.byte + assert numpy.bool_ is numpy.bool8 + if sys.maxint == (1 << 63) - 1: + assert numpy.intp is numpy.int64 + else: + assert numpy.intp is numpy.int32 + + def test_mro(self): + import _numpypy as numpy + + assert numpy.int16.__mro__ == (numpy.int16, numpy.signedinteger, + numpy.integer, numpy.number, + numpy.generic, object) + assert numpy.bool_.__mro__ == (numpy.bool_, numpy.generic, object) + def test_operators(self): from operator import truediv from _numpypy import float64, int_, True_, False_ - assert 5 / int_(2) == int_(2) assert truediv(int_(3), int_(2)) == float64(1.5) assert truediv(3, int_(2)) == float64(1.5) @@ -427,9 +449,115 @@ assert int_(3) ^ int_(5) == int_(6) assert True_ ^ False_ is True_ assert 5 ^ int_(3) == int_(6) - assert +int_(3) == 
int_(3) assert ~int_(3) == int_(-4) - raises(TypeError, lambda: float64(3) & 1) + def test_alternate_constructs(self): + from _numpypy import dtype + nnp = self.non_native_prefix + byteorder = self.native_prefix + assert dtype('i8') == dtype(byteorder + 'i8') == dtype('=i8') # XXX should be equal == dtype(long) + assert dtype(nnp + 'i8') != dtype('i8') + assert dtype(nnp + 'i8').byteorder == nnp + assert dtype('=i8').byteorder == '=' + assert dtype(byteorder + 'i8').byteorder == '=' + + def test_alignment(self): + from _numpypy import dtype + assert dtype('i4').alignment == 4 + + def test_typeinfo(self): + from _numpypy import typeinfo, void, number, int64, bool_ + assert typeinfo['Number'] == number + assert typeinfo['LONGLONG'] == ('q', 9, 64, 8, 9223372036854775807L, -9223372036854775808L, int64) + assert typeinfo['VOID'] == ('V', 20, 0, 1, void) + assert typeinfo['BOOL'] == ('?', 0, 8, 1, 1, 0, bool_) + +class AppTestStrUnicodeDtypes(BaseNumpyAppTest): + def test_str_unicode(self): + from _numpypy import str_, unicode_, character, flexible, generic + + assert str_.mro() == [str_, str, basestring, character, flexible, generic, object] + assert unicode_.mro() == [unicode_, unicode, basestring, character, flexible, generic, object] + + def test_str_dtype(self): + from _numpypy import dtype, str_ + + raises(TypeError, "dtype('Sx')") + d = dtype('S8') + assert d.itemsize == 8 + assert dtype(str) == dtype('S') + assert d.kind == 'S' + assert d.type is str_ + assert d.name == "string64" + assert d.num == 18 + + def test_unicode_dtype(self): + from _numpypy import dtype, unicode_ + + raises(TypeError, "dtype('Ux')") + d = dtype('U8') + assert d.itemsize == 8 * 4 + assert dtype(unicode) == dtype('U') + assert d.kind == 'U' + assert d.type is unicode_ + assert d.name == "unicode256" + assert d.num == 19 + + def test_string_boxes(self): + from _numpypy import str_ + assert isinstance(str_(3), str_) + + def test_unicode_boxes(self): + from _numpypy import unicode_ + assert 
isinstance(unicode_(3), unicode) + +class AppTestRecordDtypes(BaseNumpyAppTest): + def test_create(self): + from _numpypy import dtype, void + + raises(ValueError, "dtype([('x', int), ('x', float)])") + d = dtype([("x", "int32"), ("y", "int32"), ("z", "int32"), ("value", float)]) + assert d.fields['x'] == (dtype('int32'), 0) + assert d.fields['value'] == (dtype(float), 12) + assert d['x'] == dtype('int32') + assert d.name == "void160" + assert d.num == 20 + assert d.itemsize == 20 + assert d.kind == 'V' + assert d.type is void + assert d.char == 'V' + assert d.names == ("x", "y", "z", "value") + raises(KeyError, 'd["xyz"]') + raises(KeyError, 'd.fields["xyz"]') + + def test_create_from_dict(self): + skip("not yet") + from _numpypy import dtype + d = dtype({'names': ['a', 'b', 'c'], + }) + +class AppTestNotDirect(BaseNumpyAppTest): + def setup_class(cls): + BaseNumpyAppTest.setup_class.im_func(cls) + def check_non_native(w_obj, w_obj2): + assert w_obj.storage[0] == w_obj2.storage[1] + assert w_obj.storage[1] == w_obj2.storage[0] + if w_obj.storage[0] == '\x00': + assert w_obj2.storage[1] == '\x00' + assert w_obj2.storage[0] == '\x01' + else: + assert w_obj2.storage[1] == '\x01' + assert w_obj2.storage[0] == '\x00' + cls.w_check_non_native = cls.space.wrap(interp2app(check_non_native)) + if option.runappdirect: + py.test.skip("not a direct test") + + def test_non_native(self): + from _numpypy import array + a = array([1, 2, 3], dtype=self.non_native_prefix + 'i2') + assert a[0] == 1 + assert (a + a)[1] == 4 + self.check_non_native(a, array([1, 2, 3], 'i2')) + diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -5,15 +5,23 @@ from pypy.interpreter.error import OperationError from pypy.module.micronumpy import signature from pypy.module.micronumpy.appbridge import get_appbridge_cache -from 
pypy.module.micronumpy.interp_iter import Chunk +from pypy.module.micronumpy.interp_iter import Chunk, Chunks from pypy.module.micronumpy.interp_numarray import W_NDimArray, shape_agreement from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest class MockDtype(object): - def malloc(self, size): - return None + class itemtype(object): + @staticmethod + def malloc(size): + return None + def get_size(self): + return 1 + + +def create_slice(a, chunks): + return Chunks(chunks).apply(a) class TestNumArrayDirect(object): def newslice(self, *args): @@ -29,116 +37,116 @@ return self.space.newtuple(args_w) def test_strides_f(self): - a = W_NDimArray(100, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray([10, 5, 3], MockDtype(), 'F') assert a.strides == [1, 10, 50] assert a.backstrides == [9, 40, 100] def test_strides_c(self): - a = W_NDimArray(100, [10, 5, 3], MockDtype(), 'C') + a = W_NDimArray([10, 5, 3], MockDtype(), 'C') assert a.strides == [15, 3, 1] assert a.backstrides == [135, 12, 2] def test_create_slice_f(self): - a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') - s = a.create_slice([Chunk(3, 0, 0, 1)]) + a = W_NDimArray([10, 5, 3], MockDtype(), 'F') + s = create_slice(a, [Chunk(3, 0, 0, 1)]) assert s.start == 3 assert s.strides == [10, 50] assert s.backstrides == [40, 100] - s = a.create_slice([Chunk(1, 9, 2, 4)]) + s = create_slice(a, [Chunk(1, 9, 2, 4)]) assert s.start == 1 assert s.strides == [2, 10, 50] assert s.backstrides == [6, 40, 100] - s = a.create_slice([Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), Chunk(1, 0, 0, 1)]) + s = create_slice(a, [Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), Chunk(1, 0, 0, 1)]) assert s.shape == [2, 1] assert s.strides == [3, 10] assert s.backstrides == [3, 0] - s = a.create_slice([Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) + s = create_slice(a, [Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) assert s.start == 20 assert s.shape == [10, 3] def test_create_slice_c(self): - a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') - 
s = a.create_slice([Chunk(3, 0, 0, 1)]) + a = W_NDimArray([10, 5, 3], MockDtype(), 'C') + s = create_slice(a, [Chunk(3, 0, 0, 1)]) assert s.start == 45 assert s.strides == [3, 1] assert s.backstrides == [12, 2] - s = a.create_slice([Chunk(1, 9, 2, 4)]) + s = create_slice(a, [Chunk(1, 9, 2, 4)]) assert s.start == 15 assert s.strides == [30, 3, 1] assert s.backstrides == [90, 12, 2] - s = a.create_slice([Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), + s = create_slice(a, [Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), Chunk(1, 0, 0, 1)]) assert s.start == 19 assert s.shape == [2, 1] assert s.strides == [45, 3] assert s.backstrides == [45, 0] - s = a.create_slice([Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) + s = create_slice(a, [Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) assert s.start == 6 assert s.shape == [10, 3] def test_slice_of_slice_f(self): - a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') - s = a.create_slice([Chunk(5, 0, 0, 1)]) + a = W_NDimArray([10, 5, 3], MockDtype(), 'F') + s = create_slice(a, [Chunk(5, 0, 0, 1)]) assert s.start == 5 - s2 = s.create_slice([Chunk(3, 0, 0, 1)]) + s2 = create_slice(s, [Chunk(3, 0, 0, 1)]) assert s2.shape == [3] assert s2.strides == [50] assert s2.parent is a assert s2.backstrides == [100] assert s2.start == 35 - s = a.create_slice([Chunk(1, 5, 3, 2)]) - s2 = s.create_slice([Chunk(0, 2, 1, 2), Chunk(2, 0, 0, 1)]) + s = create_slice(a, [Chunk(1, 5, 3, 2)]) + s2 = create_slice(s, [Chunk(0, 2, 1, 2), Chunk(2, 0, 0, 1)]) assert s2.shape == [2, 3] assert s2.strides == [3, 50] assert s2.backstrides == [3, 100] assert s2.start == 1 * 15 + 2 * 3 def test_slice_of_slice_c(self): - a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') - s = a.create_slice([Chunk(5, 0, 0, 1)]) + a = W_NDimArray([10, 5, 3], MockDtype(), order='C') + s = create_slice(a, [Chunk(5, 0, 0, 1)]) assert s.start == 15 * 5 - s2 = s.create_slice([Chunk(3, 0, 0, 1)]) + s2 = create_slice(s, [Chunk(3, 0, 0, 1)]) assert s2.shape == [3] assert s2.strides == [1] assert 
s2.parent is a assert s2.backstrides == [2] assert s2.start == 5 * 15 + 3 * 3 - s = a.create_slice([Chunk(1, 5, 3, 2)]) - s2 = s.create_slice([Chunk(0, 2, 1, 2), Chunk(2, 0, 0, 1)]) + s = create_slice(a, [Chunk(1, 5, 3, 2)]) + s2 = create_slice(s, [Chunk(0, 2, 1, 2), Chunk(2, 0, 0, 1)]) assert s2.shape == [2, 3] assert s2.strides == [45, 1] assert s2.backstrides == [45, 2] assert s2.start == 1 * 15 + 2 * 3 def test_negative_step_f(self): - a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') - s = a.create_slice([Chunk(9, -1, -2, 5)]) + a = W_NDimArray([10, 5, 3], MockDtype(), 'F') + s = create_slice(a, [Chunk(9, -1, -2, 5)]) assert s.start == 9 assert s.strides == [-2, 10, 50] assert s.backstrides == [-8, 40, 100] def test_negative_step_c(self): - a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') - s = a.create_slice([Chunk(9, -1, -2, 5)]) + a = W_NDimArray([10, 5, 3], MockDtype(), order='C') + s = create_slice(a, [Chunk(9, -1, -2, 5)]) assert s.start == 135 assert s.strides == [-30, 3, 1] assert s.backstrides == [-120, 12, 2] def test_index_of_single_item_f(self): - a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray([10, 5, 3], MockDtype(), 'F') r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) assert r == 1 + 2 * 10 + 2 * 50 - s = a.create_slice([Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) + s = create_slice(a, [Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) r = s._index_of_single_item(self.space, self.newtuple(1, 0)) assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 0)) r = s._index_of_single_item(self.space, self.newtuple(1, 1)) assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 1)) def test_index_of_single_item_c(self): - a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') + a = W_NDimArray([10, 5, 3], MockDtype(), 'C') r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) assert r == 1 * 3 * 5 + 2 * 3 + 2 - s = a.create_slice([Chunk(0, 10, 1, 10), Chunk(2, 0, 
0, 1)]) + s = create_slice(a, [Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) r = s._index_of_single_item(self.space, self.newtuple(1, 0)) assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 0)) r = s._index_of_single_item(self.space, self.newtuple(1, 1)) @@ -203,6 +211,18 @@ assert a.shape == (3,) assert a.dtype is dtype(int) + def test_ndmin(self): + from _numpypy import array + + arr = array([[[1]]], ndmin=1) + assert arr.shape == (1, 1, 1) + + def test_noop_ndmin(self): + from _numpypy import array + + arr = array([1], ndmin=3) + assert arr.shape == (1, 1, 1) + def test_type(self): from _numpypy import array ar = array(range(5)) @@ -374,6 +394,58 @@ assert a[1] == 0. assert a[3] == 0. + def test_newaxis(self): + from _numpypy import array + from numpypy.core.numeric import newaxis + a = array(range(5)) + b = array([range(5)]) + assert (a[newaxis] == b).all() + + def test_newaxis_slice(self): + from _numpypy import array + from numpypy.core.numeric import newaxis + + a = array(range(5)) + b = array(range(1,5)) + c = array([range(1,5)]) + d = array([[x] for x in range(1,5)]) + + assert (a[1:] == b).all() + assert (a[1:,newaxis] == d).all() + assert (a[newaxis,1:] == c).all() + + def test_newaxis_assign(self): + from _numpypy import array + from numpypy.core.numeric import newaxis + + a = array(range(5)) + a[newaxis,1] = [2] + assert a[1] == 2 + + def test_newaxis_virtual(self): + from _numpypy import array + from numpypy.core.numeric import newaxis + + a = array(range(5)) + b = (a + a)[newaxis] + c = array([[0, 2, 4, 6, 8]]) + assert (b == c).all() + + def test_newaxis_then_slice(self): + from _numpypy import array + from numpypy.core.numeric import newaxis + a = array(range(5)) + b = a[newaxis] + assert b.shape == (1, 5) + assert (b[0,1:] == a[1:]).all() + + def test_slice_then_newaxis(self): + from _numpypy import array + from numpypy.core.numeric import newaxis + a = array(range(5)) + b = a[2:] + assert (b[newaxis] == [[2, 3, 4]]).all() + def 
test_scalar(self): from _numpypy import array, dtype a = array(3) @@ -434,6 +506,8 @@ a = zeros((4, 2, 3)) a.shape = (12, 2) (a + a).reshape(2, 12) # assert did not explode + a = array([[[[]]]]) + assert a.reshape((0,)).shape == (0,) def test_slice_reshape(self): from _numpypy import zeros, arange @@ -625,6 +699,56 @@ for i in range(5): assert b[i] == i / 5.0 + def test_floordiv(self): + from math import isnan + from _numpypy import array, dtype + + a = array(range(1, 6)) + b = a // a + assert (b == [1, 1, 1, 1, 1]).all() + + a = array(range(1, 6), dtype=bool) + b = a // a + assert b.dtype is dtype("int8") + assert (b == [1, 1, 1, 1, 1]).all() + + a = array([-1, 0, 1]) + b = array([0, 0, 0]) + c = a // b + assert (c == [0, 0, 0]).all() + + a = array([-1.0, 0.0, 1.0]) + b = array([0.0, 0.0, 0.0]) + c = a // b + assert c[0] == float('-inf') + assert isnan(c[1]) + assert c[2] == float('inf') + + b = array([-0.0, -0.0, -0.0]) + c = a // b + assert c[0] == float('inf') + assert isnan(c[1]) + assert c[2] == float('-inf') + + def test_floordiv_other(self): + from _numpypy import array + a = array(range(5)) + b = array([2, 2, 2, 2, 2], float) + c = a // b + assert (c == [0, 0, 1, 1, 2]).all() + + def test_rfloordiv(self): + from _numpypy import array + a = array(range(1, 6)) + b = 3 // a + assert (b == [3, 1, 1, 0, 0]).all() + + def test_floordiv_constant(self): + from _numpypy import array + a = array(range(5)) + b = a // 2 + assert (b == [0, 0, 1, 1, 2]).all() + def test_truediv(self): from operator import truediv from _numpypy import arange @@ -871,6 +995,10 @@ assert a.sum() == 5 raises(TypeError, 'a.sum(2, 3)') + d = array(0.) 
+ b = a.sum(out=d) + assert b == d + assert isinstance(b, float) def test_reduce_nd(self): from numpypy import arange, array, multiply @@ -1037,7 +1165,7 @@ assert array([True, False]).dtype is dtype(bool) assert array([True, 1]).dtype is dtype(int) assert array([1, 2, 3]).dtype is dtype(int) - assert array([1L, 2, 3]).dtype is dtype(long) + #assert array([1L, 2, 3]).dtype is dtype(long) assert array([1.2, True]).dtype is dtype(float) assert array([1.2, 5]).dtype is dtype(float) assert array([]).dtype is dtype(float) @@ -1371,8 +1499,6 @@ a = array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14]]) b = a[::2] - print a - print b assert (b == [[1, 2], [5, 6], [9, 10], [13, 14]]).all() c = b + b assert c[1][1] == 12 @@ -1544,6 +1670,7 @@ a = arange(12).reshape(3,4) b = a.T.flat b[6::2] = [-1, -2] + print a == [[0, 1, -1, 3], [4, 5, 6, -1], [8, 9, -2, 11]] assert (a == [[0, 1, -1, 3], [4, 5, 6, -1], [8, 9, -2, 11]]).all() b[0:2] = [[[100]]] assert(a[0,0] == 100) @@ -1818,6 +1945,12 @@ #5 bytes is larger than 3 bytes raises(ValueError, fromstring, "\x01\x02\x03", count=5, dtype=uint8) + def test_tostring(self): + from _numpypy import array + assert array([1, 2, 3], 'i2').tostring() == '\x01\x00\x02\x00\x03\x00' + assert array([1, 2, 3], 'i2')[::2].tostring() == '\x01\x00\x03\x00' + assert array([1, 2, 3], 'i2')[::2].tostring() == '\x00\x01\x00\x03' class AppTestRanges(BaseNumpyAppTest): def test_arange(self): @@ -1863,3 +1996,57 @@ cache = get_appbridge_cache(cls.space) cache.w_array_repr = cls.old_array_repr cache.w_array_str = cls.old_array_str + +class AppTestRecordDtype(BaseNumpyAppTest): + def test_zeros(self): + from _numpypy import zeros + a = zeros(2, dtype=[('x', int), ('y', float)]) + raises(IndexError, 'a[0]["xyz"]') + assert a[0]['x'] == 0 + assert a[0]['y'] == 0 + raises(ValueError, "a[0] = (1, 2, 3)") + a[0]['x'] = 13 + assert a[0]['x'] == 13 + a[1] = (1, 2) + assert a[1]['y'] == 2 + b = zeros(2, dtype=[('x', int), ('y', float)]) + b[1] = a[1] 
+ assert a[1]['y'] == 2 + + def test_views(self): + from _numpypy import array + a = array([(1, 2), (3, 4)], dtype=[('x', int), ('y', float)]) + raises(ValueError, 'array([1])["x"]') + raises(ValueError, 'a["z"]') + assert a['x'][1] == 3 + assert a['y'][1] == 4 + a['x'][0] = 15 + assert a['x'][0] == 15 + b = a['x'] + a['y'] + assert (b == [15+2, 3+4]).all() + assert b.dtype == float + + def test_assign_tuple(self): + from _numpypy import zeros + a = zeros((2, 3), dtype=[('x', int), ('y', float)]) + a[1, 2] = (1, 2) + assert a['x'][1, 2] == 1 + assert a['y'][1, 2] == 2 + + def test_creation_and_repr(self): + from _numpypy import array + a = array([(1, 2), (3, 4)], dtype=[('x', int), ('y', float)]) + assert repr(a[0]) == '(1, 2.0)' + + def test_nested_dtype(self): + from _numpypy import zeros + a = [('x', int), ('y', float)] + b = [('x', int), ('y', a)] + arr = zeros(3, dtype=b) + arr[1]['x'] = 15 + assert arr[1]['x'] == 15 + arr[1]['y']['y'] = 3.5 + assert arr[1]['y']['y'] == 3.5 + assert arr[1]['y']['x'] == 0.0 + assert arr[1]['x'] == 15 + diff --git a/pypy/module/micronumpy/test/test_outarg.py b/pypy/module/micronumpy/test/test_outarg.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/test/test_outarg.py @@ -0,0 +1,126 @@ +import py +from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest + +class AppTestOutArg(BaseNumpyAppTest): + def test_reduce_out(self): + from numpypy import arange, zeros, array + a = arange(15).reshape(5, 3) + b = arange(12).reshape(4,3) + c = a.sum(0, out=b[1]) + assert (c == [30, 35, 40]).all() + assert (c == b[1]).all() + raises(ValueError, 'a.prod(0, out=arange(10))') + a=arange(12).reshape(3,2,2) + raises(ValueError, 'a.sum(0, out=arange(12).reshape(3,2,2))') + raises(ValueError, 'a.sum(0, out=arange(3))') + c = array([-1, 0, 1]).sum(out=zeros([], dtype=bool)) + #You could argue that this should product False, but + # that would require an itermediate result. Cpython numpy + # gives True. 
+ assert c == True + a = array([[-1, 0, 1], [1, 0, -1]]) + c = a.sum(0, out=zeros((3,), dtype=bool)) + assert (c == [True, False, True]).all() + c = a.sum(1, out=zeros((2,), dtype=bool)) + assert (c == [True, True]).all() + + def test_reduce_intermediary(self): + from numpypy import arange, array + a = arange(15).reshape(5, 3) + b = array(range(3), dtype=bool) + c = a.prod(0, out=b) + assert(b == [False, True, True]).all() + + def test_ufunc_out(self): + from _numpypy import array, negative, zeros, sin + from math import sin as msin + a = array([[1, 2], [3, 4]]) + c = zeros((2,2,2)) + b = negative(a + a, out=c[1]) + #test for view, and also test that forcing out also forces b + assert (c[:, :, 1] == [[0, 0], [-4, -8]]).all() + assert (b == [[-2, -4], [-6, -8]]).all() + #Test broadcast, type promotion + b = negative(3, out=a) + assert (a == -3).all() + c = zeros((2, 2), dtype=float) + b = negative(3, out=c) + assert b.dtype.kind == c.dtype.kind + assert b.shape == c.shape + a = array([1, 2]) + b = sin(a, out=c) + assert(c == [[msin(1), msin(2)]] * 2).all() + b = sin(a, out=c+c) + assert (c == b).all() + + #Test shape agreement + a = zeros((3,4)) + b = zeros((3,5)) + raises(ValueError, 'negative(a, out=b)') + b = zeros((1,4)) + raises(ValueError, 'negative(a, out=b)') + + def test_binfunc_out(self): + from _numpypy import array, add + a = array([[1, 2], [3, 4]]) + out = array([[1, 2], [3, 4]]) + c = add(a, a, out=out) + assert (c == out).all() + assert c.shape == a.shape + assert c.dtype is a.dtype + c[0,0] = 100 + assert out[0, 0] == 100 + out[:] = 100 + raises(ValueError, 'c = add(a, a, out=out[1])') + c = add(a[0], a[1], out=out[1]) + assert (c == out[1]).all() + assert (c == [4, 6]).all() + assert (out[0] == 100).all() + c = add(a[0], a[1], out=out) + assert (c == out[1]).all() + assert (c == out[0]).all() + out = array(16, dtype=int) + b = add(10, 10, out=out) + assert b==out + assert b.dtype == out.dtype + + def test_applevel(self): + from _numpypy import 
array, sum, max, min + a = array([[1, 2], [3, 4]]) + out = array([[0, 0], [0, 0]]) + c = sum(a, axis=0, out=out[0]) + assert (c == [4, 6]).all() + assert (c == out[0]).all() + assert (c != out[1]).all() + c = max(a, axis=1, out=out[0]) + assert (c == [2, 4]).all() + assert (c == out[0]).all() + assert (c != out[1]).all() + + def test_ufunc_cast(self): + from _numpypy import array, negative, add, sum + a = array(16, dtype = int) + c = array(0, dtype = float) + b = negative(a, out=c) + assert b == c + b = add(a, a, out=c) + assert b == c + d = array([16, 16], dtype=int) + b = sum(d, out=c) + assert b == c + try: + from _numpypy import version + v = version.version.split('.') + except: + v = ['1', '6', '0'] # numpypy is api compatable to what version? + if v[0]<'2': + b = negative(c, out=a) + assert b == a + b = add(c, c, out=a) + assert b == a + b = sum(array([16, 16], dtype=float), out=a) + assert b == a + else: + cast_error = raises(TypeError, negative, c, a) + assert str(cast_error.value) == \ + "Cannot cast ufunc negative output from dtype('float64') to dtype('int64') with casting rule 'same_kind'" diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -113,14 +113,37 @@ assert (divide(array([-10]), array([2])) == array([-5])).all() + def test_true_divide(self): + from _numpypy import array, true_divide + + a = array([0, 1, 2, 3, 4, 1, -1]) + b = array([4, 4, 4, 4, 4, 0, 0]) + c = true_divide(a, b) + assert (c == [0.0, 0.25, 0.5, 0.75, 1.0, float('inf'), float('-inf')]).all() + + assert math.isnan(true_divide(0, 0)) + def test_fabs(self): from _numpypy import array, fabs - from math import fabs as math_fabs + from math import fabs as math_fabs, isnan a = array([-5.0, -0.0, 1.0]) b = fabs(a) for i in range(3): assert b[i] == math_fabs(a[i]) + assert fabs(float('inf')) == float('inf') + assert fabs(float('-inf')) == 
float('inf') + assert isnan(fabs(float('nan'))) + + def test_fmod(self): + from _numpypy import fmod + import math + + assert fmod(-1e-100, 1e100) == -1e-100 + assert fmod(3, float('inf')) == 3 + assert (fmod([-3, -2, -1, 1, 2, 3], 2) == [-1, 0, -1, 1, 0, 1]).all() + for v in [float('inf'), float('-inf'), float('nan'), float('-nan')]: + assert math.isnan(fmod(v, 2)) def test_minimum(self): from _numpypy import array, minimum @@ -172,6 +195,14 @@ assert a[0] == 1 assert a[1] == 0 + def test_signbit(self): + from _numpypy import signbit, copysign + + assert (signbit([0, 0.0, 1, 1.0, float('inf'), float('nan')]) == + [False, False, False, False, False, False]).all() From noreply at buildbot.pypy.org Fri Mar 30 13:22:12 2012 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 30 Mar 2012 13:22:12 +0200 (CEST) Subject: [pypy-commit] pypy win32-cleanup2: refactor compiler search a bit Message-ID: <20120330112212.46F8F82252@wyvern.cs.uni-duesseldorf.de> Author: Matti Picus Branch: win32-cleanup2 Changeset: r54100:a86eea58d38b Date: 2012-03-27 18:34 +0200 http://bitbucket.org/pypy/pypy/changeset/a86eea58d38b/ Log: refactor compiler search a bit diff --git a/lib-python/modified-2.7/ctypes/__init__.py b/lib-python/modified-2.7/ctypes/__init__.py --- a/lib-python/modified-2.7/ctypes/__init__.py +++ b/lib-python/modified-2.7/ctypes/__init__.py @@ -451,6 +451,7 @@ GetLastError = windll.kernel32.GetLastError else: GetLastError = windll.coredll.GetLastError + GetLastError.argtypes=[] from _ctypes import get_last_error, set_last_error def WinError(code=None, descr=None): diff --git a/pypy/translator/platform/__init__.py b/pypy/translator/platform/__init__.py --- a/pypy/translator/platform/__init__.py +++ b/pypy/translator/platform/__init__.py @@ -299,10 +299,11 @@ def set_platform(new_platform, cc): global platform - log.msg("Setting platform to %r cc=%s" % (new_platform,cc)) platform = pick_platform(new_platform, cc) if not platform: - raise ValueError("pick_platform failed") 
+ raise ValueError("pick_platform(%r, %s) failed"%(new_platform, cc)) + log.msg("Set platform with %r cc=%s, using cc=%r" % (new_platform, cc, + getattr(platform, 'cc','Unknown'))) if new_platform == 'host': global host diff --git a/pypy/translator/platform/windows.py b/pypy/translator/platform/windows.py --- a/pypy/translator/platform/windows.py +++ b/pypy/translator/platform/windows.py @@ -83,13 +83,9 @@ if env is not None: return env - log.error("Could not find a Microsoft Compiler") # Assume that the compiler is already part of the environment -msvc_compiler_environ32 = find_msvc_env(False) -msvc_compiler_environ64 = find_msvc_env(True) - class MsvcPlatform(Platform): name = "msvc" so_ext = 'dll' @@ -108,10 +104,7 @@ def __init__(self, cc=None, x64=False): self.x64 = x64 - if x64: - msvc_compiler_environ = msvc_compiler_environ64 - else: - msvc_compiler_environ = msvc_compiler_environ32 + msvc_compiler_environ = find_msvc_env(x64) Platform.__init__(self, 'cl.exe') if msvc_compiler_environ: self.c_environ = os.environ.copy() From noreply at buildbot.pypy.org Fri Mar 30 13:22:14 2012 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 30 Mar 2012 13:22:14 +0200 (CEST) Subject: [pypy-commit] pypy win32-cleanup2: validate fp by using os.read in fread Message-ID: <20120330112214.1575182252@wyvern.cs.uni-duesseldorf.de> Author: Matti Picus Branch: win32-cleanup2 Changeset: r54101:537c9b127d8f Date: 2012-03-30 14:19 +0300 http://bitbucket.org/pypy/pypy/changeset/537c9b127d8f/ Log: validate fp by using os.read in fread diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -79,8 +79,16 @@ # FILE* interface FILEP = rffi.COpaquePtr('FILE') + fopen = rffi.llexternal('fopen', [CONST_STRING, CONST_STRING], FILEP) -fclose = rffi.llexternal('fclose', [FILEP], rffi.INT) +#fclose = rffi.llexternal('fclose', [FILEP], rffi.INT) +def fclose(fp): + try: + fd = fileno(fp) + return os.close(fd) + 
except: + return -1 + fwrite = rffi.llexternal('fwrite', [rffi.VOIDP, rffi.SIZE_T, rffi.SIZE_T, FILEP], rffi.SIZE_T) diff --git a/pypy/module/cpyext/eval.py b/pypy/module/cpyext/eval.py --- a/pypy/module/cpyext/eval.py +++ b/pypy/module/cpyext/eval.py @@ -2,13 +2,15 @@ from pypy.interpreter.astcompiler import consts from pypy.rpython.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( - cpython_api, CANNOT_FAIL, CONST_STRING, FILEP, fread, feof, Py_ssize_tP, + cpython_api, CANNOT_FAIL, CONST_STRING, FILEP, fileno, feof, Py_ssize_tP, cpython_struct) from pypy.module.cpyext.pyobject import PyObject, borrow_from from pypy.module.cpyext.pyerrors import PyErr_SetFromErrno from pypy.module.cpyext.funcobject import PyCodeObject from pypy.module.__builtin__ import compiling +import os + PyCompilerFlags = cpython_struct( "PyCompilerFlags", (("cf_flags", rffi.INT),)) PyCompilerFlagsPtr = lltype.Ptr(PyCompilerFlags) @@ -152,19 +154,23 @@ closeit set to 0 and flags set to NULL.""" BUF_SIZE = 8192 source = "" + try: + fd = fileno(fp) + count = os.fstat(fd).st_size + while len(source) < count: + buf = os.read(fd, BUF_SIZE) + source += buf + if len(buf) < BUF_SIZE: + break + else: + PyErr_SetFromErrno(space, space.w_IOError) + except: + PyErr_SetFromErrno(space, space.w_IOError) + return None + finally: + pass + #lltype.free(buf, flavor='raw') filename = rffi.charp2str(filename) - buf = lltype.malloc(rffi.CCHARP.TO, BUF_SIZE, flavor='raw') - try: - while True: - count = fread(buf, 1, BUF_SIZE, fp) - count = rffi.cast(lltype.Signed, count) - source += rffi.charpsize2str(buf, count) - if count < BUF_SIZE: - if feof(fp): - break - PyErr_SetFromErrno(space, space.w_IOError) - finally: - lltype.free(buf, flavor='raw') return run_string(space, source, filename, start, w_globals, w_locals) # Undocumented function! 
From noreply at buildbot.pypy.org Fri Mar 30 20:21:28 2012 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 30 Mar 2012 20:21:28 +0200 (CEST) Subject: [pypy-commit] pypy default: always look inside the product - we kind of need it. Message-ID: <20120330182128.2543882252@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r54102:8ea355410395 Date: 2012-03-30 19:56 +0200 http://bitbucket.org/pypy/pypy/changeset/8ea355410395/ Log: always look inside the product - we kind of need it. diff --git a/pypy/module/micronumpy/support.py b/pypy/module/micronumpy/support.py --- a/pypy/module/micronumpy/support.py +++ b/pypy/module/micronumpy/support.py @@ -1,9 +1,9 @@ from pypy.rlib import jit - at jit.look_inside_iff(lambda s: jit.isconstant(len(s))) + at jit.unroll_safe def product(s): i = 1 for x in s: i *= x - return i \ No newline at end of file + return i From noreply at buildbot.pypy.org Fri Mar 30 20:21:29 2012 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 30 Mar 2012 20:21:29 +0200 (CEST) Subject: [pypy-commit] pypy default: merge Message-ID: <20120330182129.6DC7F82252@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r54103:4198f84765dd Date: 2012-03-30 20:21 +0200 http://bitbucket.org/pypy/pypy/changeset/4198f84765dd/ Log: merge diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -296,6 +296,7 @@ self.check_signal_action = None # changed by the signal module self.user_del_action = UserDelAction(self) self.frame_trace_action = FrameTraceAction(self) + self._code_of_sys_exc_info = None from pypy.interpreter.pycode import cpython_magic, default_magic self.our_magic = default_magic diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -154,6 +154,7 @@ 
#operationerr.print_detailed_traceback(self.space) def _convert_exc(self, operr): + # Only for the flow object space return operr def sys_exc_info(self): # attn: the result is not the wrapped sys.exc_info() !!! diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -113,6 +113,12 @@ from pypy.interpreter.pycode import PyCode code = self.getcode() # hook for the jit + # + if (jit.we_are_jitted() and code is self.space._code_of_sys_exc_info + and nargs == 0): + from pypy.module.sys.vm import exc_info_direct + return exc_info_direct(self.space, frame) + # fast_natural_arity = code.fast_natural_arity if nargs == fast_natural_arity: if nargs == 0: diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -874,6 +874,12 @@ fn.add_to_table() if gateway.as_classmethod: fn = ClassMethod(space.wrap(fn)) + # + from pypy.module.sys.vm import exc_info + if code._bltin is exc_info: + assert space._code_of_sys_exc_info is None + space._code_of_sys_exc_info = code + # return fn diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -11,7 +11,7 @@ from pypy.objspace.std.register_all import register_all from pypy.rlib.rarithmetic import ovfcheck from pypy.rlib.unroll import unrolling_iterable -from pypy.rlib.objectmodel import specialize +from pypy.rlib.objectmodel import specialize, keepalive_until_here from pypy.rpython.lltypesystem import lltype, rffi @@ -145,18 +145,24 @@ unroll_typecodes = unrolling_iterable(types.keys()) class ArrayBuffer(RWBuffer): - def __init__(self, data, bytes): - self.data = data - self.len = bytes + def __init__(self, array): + self.array = array def getlength(self): - return self.len + return self.array.len * self.array.itemsize def getitem(self, index): - return 
self.data[index] + array = self.array + data = array._charbuf_start() + char = data[index] + array._charbuf_stop() + return char def setitem(self, index, char): - self.data[index] = char + array = self.array + data = array._charbuf_start() + data[index] = char + array._charbuf_stop() def make_array(mytype): @@ -278,9 +284,10 @@ oldlen = self.len new = len(s) / mytype.bytes self.setlen(oldlen + new) - cbuf = self.charbuf() + cbuf = self._charbuf_start() for i in range(len(s)): cbuf[oldlen * mytype.bytes + i] = s[i] + self._charbuf_stop() def fromlist(self, w_lst): s = self.len @@ -310,8 +317,11 @@ else: self.fromsequence(w_iterable) - def charbuf(self): - return rffi.cast(rffi.CCHARP, self.buffer) + def _charbuf_start(self): + return rffi.cast(rffi.CCHARP, self.buffer) + + def _charbuf_stop(self): + keepalive_until_here(self) def w_getitem(self, space, idx): item = self.buffer[idx] @@ -530,8 +540,10 @@ self.fromstring(space.str_w(w_s)) def array_tostring__Array(space, self): - cbuf = self.charbuf() - return self.space.wrap(rffi.charpsize2str(cbuf, self.len * mytype.bytes)) + cbuf = self._charbuf_start() + s = rffi.charpsize2str(cbuf, self.len * mytype.bytes) + self._charbuf_stop() + return self.space.wrap(s) def array_fromfile__Array_ANY_ANY(space, self, w_f, w_n): if not isinstance(w_f, W_File): @@ -613,8 +625,7 @@ # Misc methods def buffer__Array(space, self): - b = ArrayBuffer(self.charbuf(), self.len * mytype.bytes) - return space.wrap(b) + return space.wrap(ArrayBuffer(self)) def array_buffer_info__Array(space, self): w_ptr = space.wrap(rffi.cast(lltype.Unsigned, self.buffer)) @@ -649,7 +660,7 @@ raise OperationError(space.w_RuntimeError, space.wrap(msg)) if self.len == 0: return - bytes = self.charbuf() + bytes = self._charbuf_start() tmp = [bytes[0]] * mytype.bytes for start in range(0, self.len * mytype.bytes, mytype.bytes): stop = start + mytype.bytes - 1 @@ -657,6 +668,7 @@ tmp[i] = bytes[start + i] for i in range(mytype.bytes): bytes[stop - i] = tmp[i] + 
self._charbuf_stop() def repr__Array(space, self): if self.len == 0: diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -433,7 +433,25 @@ a = self.array('h', 'Hi') buf = buffer(a) assert buf[1] == 'i' - #raises(TypeError, buf.__setitem__, 1, 'o') + + def test_buffer_write(self): + a = self.array('c', 'hello') + buf = buffer(a) + print repr(buf) + try: + buf[3] = 'L' + except TypeError: + skip("buffer(array) returns a read-only buffer on CPython") + assert a.tostring() == 'helLo' + + def test_buffer_keepalive(self): + buf = buffer(self.array('c', 'text')) + assert buf[2] == 'x' + # + a = self.array('c', 'foobarbaz') + buf = buffer(a) + a.fromstring('some extra text') + assert buf[:] == 'foobarbazsome extra text' def test_list_methods(self): assert repr(self.array('i')) == "array('i')" diff --git a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py --- a/pypy/module/cpyext/bufferobject.py +++ b/pypy/module/cpyext/bufferobject.py @@ -4,6 +4,8 @@ PyObjectFields, PyObject) from pypy.module.cpyext.pyobject import make_typedescr, Py_DecRef from pypy.interpreter.buffer import Buffer, StringBuffer, SubBuffer +from pypy.interpreter.error import OperationError +from pypy.module.array.interp_array import ArrayBuffer PyBufferObjectStruct = lltype.ForwardReference() @@ -43,10 +45,15 @@ if isinstance(w_obj, StringBuffer): py_buf.c_b_base = rffi.cast(PyObject, 0) # space.wrap(w_obj.value) - py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, rffi.str2charp(w_obj.as_str())) + py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, rffi.str2charp(w_obj.value)) + py_buf.c_b_size = w_obj.getlength() + elif isinstance(w_obj, ArrayBuffer): + py_buf.c_b_base = rffi.cast(PyObject, 0) # space.wrap(w_obj.value) + py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, w_obj.data) py_buf.c_b_size = w_obj.getlength() else: - raise Exception("Fail fail fail fail fail") + raise 
OperationError(space.w_NotImplementedError, space.wrap( + "buffer flavor not supported")) def buffer_realize(space, py_obj): diff --git a/pypy/module/cpyext/test/test_bufferobject.py b/pypy/module/cpyext/test/test_bufferobject.py --- a/pypy/module/cpyext/test/test_bufferobject.py +++ b/pypy/module/cpyext/test/test_bufferobject.py @@ -48,3 +48,17 @@ ]) b = module.buffer_new() raises(AttributeError, getattr, b, 'x') + + def test_array_buffer(self): + module = self.import_extension('foo', [ + ("roundtrip", "METH_O", + """ + PyBufferObject *buf = (PyBufferObject *)args; + return PyString_FromStringAndSize(buf->b_ptr, buf->b_size); + """), + ]) + import array + a = array.array('c', 'text') + b = buffer(a) + assert module.roundtrip(b) == 'text' + diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -99,6 +99,8 @@ ("exp2", "exp2"), ("expm1", "expm1"), ("fabs", "fabs"), + ("fmax", "fmax"), + ("fmin", "fmin"), ("fmod", "fmod"), ("floor", "floor"), ("ceil", "ceil"), @@ -122,12 +124,14 @@ ("sinh", "sinh"), ("subtract", "subtract"), ('sqrt', 'sqrt'), + ('square', 'square'), ("tan", "tan"), ("tanh", "tanh"), ('bitwise_and', 'bitwise_and'), ('bitwise_or', 'bitwise_or'), ('bitwise_xor', 'bitwise_xor'), ('bitwise_not', 'invert'), + ('invert', 'invert'), ('isnan', 'isnan'), ('isinf', 'isinf'), ('isneginf', 'isneginf'), diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -541,6 +541,8 @@ ("reciprocal", "reciprocal", 1), ("fabs", "fabs", 1, {"promote_to_float": True}), + ("fmax", "fmax", 2, {"promote_to_float": True}), + ("fmin", "fmin", 2, {"promote_to_float": True}), ("fmod", "fmod", 2, {"promote_to_float": True}), ("floor", "floor", 1, {"promote_to_float": True}), ("ceil", "ceil", 1, {"promote_to_float": True}), @@ -549,6 +551,7 
@@ ("expm1", "expm1", 1, {"promote_to_float": True}), ('sqrt', 'sqrt', 1, {'promote_to_float': True}), + ('square', 'square', 1, {'promote_to_float': True}), ("sin", "sin", 1, {"promote_to_float": True}), ("cos", "cos", 1, {"promote_to_float": True}), diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -135,6 +135,38 @@ assert fabs(float('-inf')) == float('inf') assert isnan(fabs(float('nan'))) + def test_fmax(self): + from _numpypy import fmax + import math + + nnan, nan, inf, ninf = float('-nan'), float('nan'), float('inf'), float('-inf') + + a = [ninf, -5, 0, 5, inf] + assert (fmax(a, [ninf]*5) == a).all() + assert (fmax(a, [inf]*5) == [inf]*5).all() + assert (fmax(a, [1]*5) == [1, 1, 1, 5, inf]).all() + assert math.isnan(fmax(nan, 0)) + assert math.isnan(fmax(0, nan)) + assert math.isnan(fmax(nan, nan)) + # The numpy docs specify that the FIRST NaN should be used if both are NaN + assert math.copysign(1.0, fmax(nnan, nan)) == -1.0 + + def test_fmin(self): + from _numpypy import fmin + import math + + nnan, nan, inf, ninf = float('-nan'), float('nan'), float('inf'), float('-inf') + + a = [ninf, -5, 0, 5, inf] + assert (fmin(a, [ninf]*5) == [ninf]*5).all() + assert (fmin(a, [inf]*5) == a).all() + assert (fmin(a, [1]*5) == [ninf, -5, 0, 1, 1]).all() + assert math.isnan(fmin(nan, 0)) + assert math.isnan(fmin(0, nan)) + assert math.isnan(fmin(nan, nan)) + # The numpy docs specify that the FIRST NaN should be used if both are NaN + assert math.copysign(1.0, fmin(nnan, nan)) == -1.0 + def test_fmod(self): from _numpypy import fmod import math @@ -455,6 +487,19 @@ assert math.isnan(sqrt(-1)) assert math.isnan(sqrt(nan)) + def test_square(self): + import math + from _numpypy import square + + nan, inf, ninf = float("nan"), float("inf"), float("-inf") + + assert math.isnan(square(nan)) + assert math.isinf(square(inf)) + 
assert math.isinf(square(ninf)) + assert square(ninf) > 0 + assert [square(x) for x in range(-5, 5)] == [x*x for x in range(-5, 5)] + assert math.isinf(square(1e300)) + def test_radians(self): import math from _numpypy import radians, array @@ -546,10 +591,11 @@ raises(TypeError, 'array([1.0]) & 1') def test_unary_bitops(self): - from _numpypy import bitwise_not, array + from _numpypy import bitwise_not, invert, array a = array([1, 2, 3, 4]) assert (~a == [-2, -3, -4, -5]).all() assert (bitwise_not(a) == ~a).all() + assert (invert(a) == ~a).all() def test_comparisons(self): import operator diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -631,6 +631,22 @@ return math.fabs(v) @simple_binary_op + def fmax(self, v1, v2): + if math.isnan(v1): + return v1 + elif math.isnan(v2): + return v2 + return max(v1, v2) + + @simple_binary_op + def fmin(self, v1, v2): + if math.isnan(v1): + return v1 + elif math.isnan(v2): + return v2 + return min(v1, v2) + + @simple_binary_op def fmod(self, v1, v2): try: return math.fmod(v1, v2) @@ -741,6 +757,10 @@ except ValueError: return rfloat.NAN + @simple_unary_op + def square(self, v): + return v*v + @raw_unary_op def isnan(self, v): return rfloat.isnan(v) diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -351,3 +351,23 @@ # the following assertion fails if the loop was cancelled due # to "abort: vable escape" assert len(log.loops_by_id("eval")) == 1 + + def test_sys_exc_info(self): + def main(): + i = 1 + lst = [i] + while i < 1000: + try: + return lst[i] + except: + e = sys.exc_info()[1] # ID: exc_info + if not isinstance(e, IndexError): + raise + i += 1 + return 42 + + log = self.run(main) + assert log.result == 42 + # the following assertion fails if the loop was 
cancelled due + # to "abort: vable escape" + assert len(log.loops_by_id("exc_info")) == 1 diff --git a/pypy/module/sys/test/test_sysmodule.py b/pypy/module/sys/test/test_sysmodule.py --- a/pypy/module/sys/test/test_sysmodule.py +++ b/pypy/module/sys/test/test_sysmodule.py @@ -595,3 +595,121 @@ assert len(frames) == 1 _, other_frame = frames.popitem() assert other_frame.f_code.co_name in ('other_thread', '?') + + +class AppTestSysExcInfoDirect: + + def setup_method(self, meth): + self.seen = [] + from pypy.module.sys import vm + def exc_info_with_tb(*args): + self.seen.append("n") # not optimized + return self.old[0](*args) + def exc_info_without_tb(*args): + self.seen.append("y") # optimized + return self.old[1](*args) + self.old = [vm.exc_info_with_tb, vm.exc_info_without_tb] + vm.exc_info_with_tb = exc_info_with_tb + vm.exc_info_without_tb = exc_info_without_tb + # + from pypy.rlib import jit + self.old2 = [jit.we_are_jitted] + jit.we_are_jitted = lambda: True + + def teardown_method(self, meth): + from pypy.module.sys import vm + from pypy.rlib import jit + vm.exc_info_with_tb = self.old[0] + vm.exc_info_without_tb = self.old[1] + jit.we_are_jitted = self.old2[0] + # + assert ''.join(self.seen) == meth.expected + + def test_returns_none(self): + import sys + assert sys.exc_info() == (None, None, None) + assert sys.exc_info()[0] is None + assert sys.exc_info()[1] is None + assert sys.exc_info()[2] is None + assert sys.exc_info()[:2] == (None, None) + assert sys.exc_info()[:3] == (None, None, None) + assert sys.exc_info()[0:2] == (None, None) + assert sys.exc_info()[2:4] == (None,) + test_returns_none.expected = 'nnnnnnnn' + + def test_returns_subscr(self): + import sys + e = KeyError("boom") + try: + raise e + except: + assert sys.exc_info()[0] is KeyError # y + assert sys.exc_info()[1] is e # y + assert sys.exc_info()[2] is not None # n + assert sys.exc_info()[-3] is KeyError # y + assert sys.exc_info()[-2] is e # y + assert sys.exc_info()[-1] is not None # n + 
test_returns_subscr.expected = 'yynyyn' + + def test_returns_slice_2(self): + import sys + e = KeyError("boom") + try: + raise e + except: + foo = sys.exc_info() # n + assert sys.exc_info()[:0] == () # y + assert sys.exc_info()[:1] == foo[:1] # y + assert sys.exc_info()[:2] == foo[:2] # y + assert sys.exc_info()[:3] == foo # n + assert sys.exc_info()[:4] == foo # n + assert sys.exc_info()[:-1] == foo[:2] # y + assert sys.exc_info()[:-2] == foo[:1] # y + assert sys.exc_info()[:-3] == () # y + test_returns_slice_2.expected = 'nyyynnyyy' + + def test_returns_slice_3(self): + import sys + e = KeyError("boom") + try: + raise e + except: + foo = sys.exc_info() # n + assert sys.exc_info()[2:2] == () # y + assert sys.exc_info()[0:1] == foo[:1] # y + assert sys.exc_info()[1:2] == foo[1:2] # y + assert sys.exc_info()[0:3] == foo # n + assert sys.exc_info()[2:4] == foo[2:] # n + assert sys.exc_info()[0:-1] == foo[:2] # y + assert sys.exc_info()[0:-2] == foo[:1] # y + assert sys.exc_info()[5:-3] == () # y + test_returns_slice_3.expected = 'nyyynnyyy' + + def test_strange_invocation(self): + import sys + e = KeyError("boom") + try: + raise e + except: + a = []; k = {} + assert sys.exc_info(*a)[:0] == () + assert sys.exc_info(**k)[:0] == () + test_strange_invocation.expected = 'nn' + + def test_call_in_subfunction(self): + import sys + def g(): + # this case is not optimized, because we need to search the + # frame chain. 
it's probably not worth the complications + return sys.exc_info()[1] + e = KeyError("boom") + try: + raise e + except: + assert g() is e + test_call_in_subfunction.expected = 'n' + + +class AppTestSysExcInfoDirectCallMethod(AppTestSysExcInfoDirect): + def setup_class(cls): + cls.space = gettestobjspace(**{"objspace.opcodes.CALL_METHOD": True}) diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py --- a/pypy/module/sys/vm.py +++ b/pypy/module/sys/vm.py @@ -89,6 +89,9 @@ """Return the (type, value, traceback) of the most recent exception caught by an except clause in the current stack frame or in an older stack frame.""" + return exc_info_with_tb(space) # indirection for the tests + +def exc_info_with_tb(space): operror = space.getexecutioncontext().sys_exc_info() if operror is None: return space.newtuple([space.w_None,space.w_None,space.w_None]) @@ -96,6 +99,59 @@ return space.newtuple([operror.w_type, operror.get_w_value(space), space.wrap(operror.get_traceback())]) +def exc_info_without_tb(space, frame): + operror = frame.last_exception + return space.newtuple([operror.w_type, operror.get_w_value(space), + space.w_None]) + +def exc_info_direct(space, frame): + from pypy.tool import stdlib_opcode + # In order to make the JIT happy, we try to return (exc, val, None) + # instead of (exc, val, tb). 
We can do that only if we recognize + # the following pattern in the bytecode: + # CALL_FUNCTION/CALL_METHOD <-- invoking me + # LOAD_CONST 0, 1, -2 or -3 + # BINARY_SUBSCR + # or: + # CALL_FUNCTION/CALL_METHOD + # LOAD_CONST <=2 + # SLICE_2 + # or: + # CALL_FUNCTION/CALL_METHOD + # LOAD_CONST any integer + # LOAD_CONST <=2 + # SLICE_3 + need_all_three_args = True + co = frame.getcode().co_code + p = frame.last_instr + if (ord(co[p]) == stdlib_opcode.CALL_FUNCTION or + ord(co[p]) == stdlib_opcode.CALL_METHOD): + if ord(co[p+3]) == stdlib_opcode.LOAD_CONST: + lo = ord(co[p+4]) + hi = ord(co[p+5]) + w_constant = frame.getconstant_w((hi * 256) | lo) + if space.isinstance_w(w_constant, space.w_int): + constant = space.int_w(w_constant) + if ord(co[p+6]) == stdlib_opcode.BINARY_SUBSCR: + if -3 <= constant <= 1 and constant != -1: + need_all_three_args = False + elif ord(co[p+6]) == stdlib_opcode.SLICE+2: + if constant <= 2: + need_all_three_args = False + elif (ord(co[p+6]) == stdlib_opcode.LOAD_CONST and + ord(co[p+9]) == stdlib_opcode.SLICE+3): + lo = ord(co[p+7]) + hi = ord(co[p+8]) + w_constant = frame.getconstant_w((hi * 256) | lo) + if space.isinstance_w(w_constant, space.w_int): + if space.int_w(w_constant) <= 2: + need_all_three_args = False + # + if need_all_three_args or frame.last_exception is None or frame.hide(): + return exc_info_with_tb(space) + else: + return exc_info_without_tb(space, frame) + def exc_clear(space): """Clear global information on the current exception. Subsequent calls to exc_info() will return (None,None,None) until another exception is diff --git a/pypy/tool/clean_old_branches.py b/pypy/tool/clean_old_branches.py --- a/pypy/tool/clean_old_branches.py +++ b/pypy/tool/clean_old_branches.py @@ -38,7 +38,7 @@ closed_heads.reverse() for head, branch in closed_heads: - print '\t', branch + print '\t', head, '\t', branch print print 'The branches listed above will be merged to "closed-branches".' 
print 'You need to run this script in a clean working copy where you' From noreply at buildbot.pypy.org Sat Mar 31 10:13:16 2012 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 31 Mar 2012 10:13:16 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix cpyext after recent changes to the array module. Message-ID: <20120331081316.535A282252@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: r54104:0a2c76961534 Date: 2012-03-31 10:12 +0200 http://bitbucket.org/pypy/pypy/changeset/0a2c76961534/ Log: Fix cpyext after recent changes to the array module. diff --git a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py --- a/pypy/module/cpyext/bufferobject.py +++ b/pypy/module/cpyext/bufferobject.py @@ -2,7 +2,7 @@ from pypy.module.cpyext.api import ( cpython_api, Py_ssize_t, cpython_struct, bootstrap_function, PyObjectFields, PyObject) -from pypy.module.cpyext.pyobject import make_typedescr, Py_DecRef +from pypy.module.cpyext.pyobject import make_typedescr, Py_DecRef, make_ref from pypy.interpreter.buffer import Buffer, StringBuffer, SubBuffer from pypy.interpreter.error import OperationError from pypy.module.array.interp_array import ArrayBuffer @@ -43,13 +43,18 @@ py_buf.c_b_offset = w_obj.offset w_obj = w_obj.buffer + # If w_obj already allocated a fixed buffer, use it, and keep a + # reference to w_obj. + # Otherwise, b_base stays NULL, and we own the b_ptr. 
+ if isinstance(w_obj, StringBuffer): - py_buf.c_b_base = rffi.cast(PyObject, 0) # space.wrap(w_obj.value) + py_buf.c_b_base = lltype.nullptr(PyObject.TO) py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, rffi.str2charp(w_obj.value)) py_buf.c_b_size = w_obj.getlength() elif isinstance(w_obj, ArrayBuffer): - py_buf.c_b_base = rffi.cast(PyObject, 0) # space.wrap(w_obj.value) - py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, w_obj.data) + w_base = w_obj.array + py_buf.c_b_base = make_ref(space, w_base) + py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, w_obj.array._charbuf_start()) py_buf.c_b_size = w_obj.getlength() else: raise OperationError(space.w_NotImplementedError, space.wrap( @@ -60,14 +65,16 @@ """ Creates the buffer in the PyPy interpreter from a cpyext representation. """ - raise Exception("realize fail fail fail") - + raise OperationError(space.w_NotImplementedError, space.wrap( + "Don't know how to realize a buffer")) @cpython_api([PyObject], lltype.Void, external=False) def buffer_dealloc(space, py_obj): py_buf = rffi.cast(PyBufferObject, py_obj) - Py_DecRef(space, py_buf.c_b_base) - rffi.free_charp(rffi.cast(rffi.CCHARP, py_buf.c_b_ptr)) + if py_buf.c_b_base: + Py_DecRef(space, py_buf.c_b_base) + else: + rffi.free_charp(rffi.cast(rffi.CCHARP, py_buf.c_b_ptr)) from pypy.module.cpyext.object import PyObject_dealloc PyObject_dealloc(space, py_obj) From noreply at buildbot.pypy.org Sat Mar 31 11:28:28 2012 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 31 Mar 2012 11:28:28 +0200 (CEST) Subject: [pypy-commit] pypy default: issue1101: Even if only string keys are allowed in type dicts, Message-ID: <20120331092828.6521982252@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: r54105:c07e2a2ba2f8 Date: 2012-03-31 11:27 +0200 http://bitbucket.org/pypy/pypy/changeset/c07e2a2ba2f8/ Log: issue1101: Even if only string keys are allowed in type dicts, it should be possible to fetch with a unicode key: str.__dict__[u'decode'] diff --git 
a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py --- a/pypy/objspace/std/dictproxyobject.py +++ b/pypy/objspace/std/dictproxyobject.py @@ -20,7 +20,17 @@ def getitem(self, w_dict, w_key): space = self.space w_lookup_type = space.type(w_key) - if space.is_w(w_lookup_type, space.w_str): + if (space.is_w(w_lookup_type, space.w_str) or # Most common path first + space.abstract_issubclass_w(w_lookup_type, space.w_str)): + return self.getitem_str(w_dict, space.str_w(w_key)) + elif space.abstract_issubclass_w(w_lookup_type, space.w_unicode): + try: + w_key = space.str(w_key) + except OperationError, e: + if not e.match(space, space.w_UnicodeEncodeError): + raise + # non-ascii unicode is never equal to a byte string + return None return self.getitem_str(w_dict, space.str_w(w_key)) else: return None diff --git a/pypy/objspace/std/test/test_dictproxy.py b/pypy/objspace/std/test/test_dictproxy.py --- a/pypy/objspace/std/test/test_dictproxy.py +++ b/pypy/objspace/std/test/test_dictproxy.py @@ -25,6 +25,16 @@ key, value = NotEmpty.__dict__.popitem() assert (key == 'a' and value == 1) or (key == 'b' and value == 4) + def test_dictproxy_getitem(self): + class NotEmpty(object): + a = 1 + assert 'a' in NotEmpty.__dict__ + class substr(str): pass + assert substr('a') in NotEmpty.__dict__ + assert u'a' in NotEmpty.__dict__ + assert NotEmpty.__dict__[u'a'] == 1 + assert u'\xe9' not in NotEmpty.__dict__ + def test_dictproxyeq(self): class a(object): pass From noreply at buildbot.pypy.org Sat Mar 31 12:31:44 2012 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 31 Mar 2012 12:31:44 +0200 (CEST) Subject: [pypy-commit] pypy default: Windows fix. Message-ID: <20120331103144.8DD2582252@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r54106:39ba7e60ebca Date: 2012-03-31 12:30 +0200 http://bitbucket.org/pypy/pypy/changeset/39ba7e60ebca/ Log: Windows fix. 
diff --git a/dotviewer/graphparse.py b/dotviewer/graphparse.py --- a/dotviewer/graphparse.py +++ b/dotviewer/graphparse.py @@ -93,6 +93,7 @@ return result def parse_plain(graph_id, plaincontent, links={}, fixedfont=False): + plaincontent = plaincontent.replace('\r\n', '\n') # fix Windows EOL lines = plaincontent.splitlines(True) for i in range(len(lines)-2, -1, -1): if lines[i].endswith('\\\n'): # line ending in '\' From noreply at buildbot.pypy.org Sat Mar 31 14:27:43 2012 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 31 Mar 2012 14:27:43 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Update with the current state, the next things to work on, and more "to Message-ID: <20120331122743.54E9A82252@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r4166:d80dd2d3c300 Date: 2012-03-30 18:20 +0200 http://bitbucket.org/pypy/extradoc/changeset/d80dd2d3c300/ Log: Update with the current state, the next things to work on, and more "to do later". diff --git a/planning/stm.txt b/planning/stm.txt --- a/planning/stm.txt +++ b/planning/stm.txt @@ -2,7 +2,10 @@ STM planning ============ -Comments in << >> describe the next thing to work on. +| +| Bars on the left describe the next thing to work on. +| On the other hand, "TODO" means "to do later". +| Overview @@ -23,34 +26,43 @@ access to a global object, we need to make a whole copy of it into our nursery. -The RPython program should have at least one hint: "force local copy", +| The "global area" should be implemented by reusing gc/minimarkpage.py. + +The RPython program can use this hint: 'x = hint(x, stm_write=True)', which is like writing to an object in the sense that it forces a local copy. -We need annotator support to track which variables contain objects that -are known to be local. It lets us avoid the run-time check. 
That's -useful for all freshly malloc'ed objects, which we know are always -local; and that's useful for special cases like the PyFrames, on which -we would use the "force local copy" hint before running the -interpreter. In both cases the result is: no STM code is needed any -more. +In translator.stm.transform, we track which variables contain objects +that are known to be local. It lets us avoid the run-time check. +That's useful for all freshly malloc'ed objects, which we know are +always local; and that's useful for special cases like the PyFrames, on +which we used the "stm_write=True" hint before running the interpreter. +In both cases the result is: no STM code is needed any more. When a transaction commits, we do a "minor collection"-like process, called an "end-of-transaction collection": we move all surviving objects -from the nursery to the global area, either as new objects, or as -overwrites of their previous version. Unlike the minor collections in -other GCs, this one occurs at a well-defined time, with no stack roots -to scan. +from the nursery to the global area, either as new objects (first step +done by stmgc.py), or as overwrites of their previous version (second +step done by et.c). Unlike the minor collections in other GCs, this one +occurs at a well-defined time, with no stack roots to scan. -Later we'll need to consider what occurs if a nursery grows too big -while the transaction is still not finished. Probably somehow run a -collection of the nursery itself, not touching the global area. - -Of course we also need to do from time to time a major collection. We -will need at some point some concurrency here, to be able to run the -major collection in a random thread t but detecting changes done by the -other threads overwriting objects during their own end-of-transaction -collections. +| We also need to consider what occurs if a nursery grows too big while +| the transaction is still not finished. 
In this case we need to run a +| similar collection of the nursery, but with stack roots to scan. We +| call this a local collection. +| +| This can also occur before or after we call transaction.run(), when +| there is only the main thread running. In this mode, we run the main +| thread with a nursery too. It can fill up, needing a local collection. +| When transaction.run() is called, we also do a local collection to +| ensure that the nursery of the main thread is empty while the +| transactions execute. +| +| Of course we also need to do from time to time a major collection. We +| will need at some point some concurrency here, to be able to run the +| major collection in a random thread t but detecting changes done by the +| other threads overwriting objects during their own end-of-transaction +| collections. See below. GC flags @@ -68,16 +80,13 @@ (Optimization: objects declared immutable don't need a version number.) -GC_WAS_COPIED should rather be some counter, counting how many threads +TODO: GC_WAS_COPIED should rather be some counter, counting how many threads have a local copy; something like 2 or 3 bits, where the maximum value means "overflowed" and is sticky (maybe until some global synchronization point, if we have one). Or, we can be more advanced and use 4-5 bits, where in addition we use some "thread hash" value if there is only one copy. -<< NOW: implemented a minimal GC model with these properties. We have -GC_GLOBAL, a single bit of GC_WAS_COPIED, and the version number. >> - stm_read -------- @@ -102,8 +111,8 @@ depending on cases). And if the read is accepted then we need to remember in a local list that we've read that object. -<< NOW: the thread's local dictionary is in C, as a search tree. -The rest of the logic here is straightforward. >> +For now the thread's local dictionary is in C, as a widely-branching +search tree. stm_write @@ -123,10 +132,9 @@ consistent copy (i.e. nobody changed the object in the middle of us reading it). 
If it is too recent, then we might have to abort. -<< NOW: done, straightforward >> - TODO: how do we handle MemoryErrors when making a local copy?? Maybe force the transaction to abort, and then re-raise MemoryError +--- for now it's just a fatal error. End-of-transaction collections @@ -146,61 +154,73 @@ We need to check that each of these global objects' versions have not been modified in the meantime. -<< NOW: done, kind of easy >> - -Annotator support ------------------ +Static analysis support +----------------------- To get good performance, we should as much as possible use the 'localobj' version of every object instead of the 'obj' one. At least after a write barrier we should replace the local variable 'obj' with -'localobj', and someone (the annotator? or later?) should propagate the +'localobj', and translator.stm.transform propagates the fact that it is now a localobj that doesn't need special stm support any longer. Similarly, all mallocs return a localobj. -The "force local copy" hint should be used on PyFrame before the main +The "stm_write=True" hint is used on PyFrame before the main interpreter loop, so that we can then be sure that all accesses to -'frame' are to a local obj. Ideally, we could even track which fields +'frame' are to a local obj. + +TODO: Ideally, we could even track which fields of a localobj are themselves localobjs. This would be useful for 'PyFrame.fastlocals_w': it should also be known to always be a localobj. -<< NOW: done in the basic form by translator/stm/transform.py. -Runs late (just before C databasing). Should work well enough to -remove the maximum number of write barriers, but still missing -PyFrame.fastlocals_w. >> - Local collections ----------------- -If the nursery fills up too much during a transaction, it needs to be -locally collected. This is supposed to be a generally rare occurrance. +| +| This needs to be done. +| + +If a nursery fills up too much during a transaction, it needs to be +locally collected. 
This is supposed to be a generally rare occurrance, +with the exception of long-running transactions --- including the main +thread before transaction.run(). + +Surviving local objects are moved to the global area. However, the +GC_GLOBAL flag is still not set on them, because they are still not +visible from more than one thread. For now we have to put all such +objects in a list: the list of old-but-local objects. (Some of these +objects can still have the GC_WAS_COPIED flag and so be duplicates of +other really global objects. The dict maintained by et.c must be +updated when we move these objects.) + Unlike end-of-transaction collections, we need to have the stack roots -of the current transaction. Because such collections are more rare than -in previous GCs, we could use for performance a completely different -approach: conservatively scan the stack, finding everything that looks -like a pointer to an object in the nursery; mark these objects as roots; -and do a local collection from there. We need either a non-moving GC or -at least to pin the potential roots. Pinning is better in the sense -that it should ideally pin a small number of objects, and all other -objects can move away; this would free most of the nursery again. -Afterwards we can still use a bump-pointer allocation technique, to -allocate within each area between the pinned objects. The objects are -pinned just for one local collection, which means that number of such -pinned objects should remain roughly constant as time passes. +of the current transaction. For now we just use +"gcrootfinder=shadowstack" with thread-local variables. At the end of +the local collection, we do a sweep: all objects that were previously +listed as old-but-local but don't survive the present collection are +marked as free. -The local collection is also a good time to compress the local list of -all global reads done --- "compress" in the sense of removing -duplicates. +TODO: Try to have a generational behavior here. 
Could probably be done +by (carefully) promoting part of the surviving objects to GC_GLOBAL. -<< do later; memory usage grows unboundedly during one transaction for -now. >> +If implemented like minimarkpage.py, the global area has for each size a +chained list of pages that are (at least partially) free. We make the +heads of the chained lists thread-locals; so each thread reserves one +complete page at a time, reducing cross-thread synchronizations. + +TODO: The local collection would also be a good time to compress the +local list of all global reads done --- "compress" in the sense of +removing duplicates. Global collections ------------------ +| +| This needs to be done. +| + We will sometimes need to do a "major" collection, called global collection here. The issue with it is that there might be live references to global objects in the local objects of any thread. The @@ -208,30 +228,29 @@ some system call. As an intermediate solution that should work well enough, we could try to acquire a lock for every thread, a kind of LIL (local interpreter lock). Every thread releases its LIL around -potentially-blocking system calls. At the end of a transaction and -maybe once per local collection, we also do the equivalent of a -release-and-require-the-LIL. +potentially-blocking system calls. At the end of a transaction and once +per local collection, we also do the equivalent of a +release-and-require-the-LIL. The point is that when a LIL is released, +another thread can acquire it temporarily and read the shadowstack of +that thread. -The major collection could be orchestrated by either the thread that -noticed one should start, or by its own thread. We first acquire all -the LILs, and for every LIL, we ask the corresponding thread to do a -local marking, starting from their own stacks and scanning their local -nurseries. Out of this, we obtain a list of global objects. 
+The major collection is orchestrated by whichever thread noticed one +should start; let's call this thread tg. So tg first acquires all the +LILs. (A way to force another thread to "soon" release its LIL is to +artifically mark its nursery as exhausted.) For each thread t, tg +performs a local collection for t. This empties all the nurseries and +gives tg an up-to-date point of view on the liveness of the objects: the +various lists of old-but-local objects for all the t's. tg can use +these --- plus external roots like prebuilt objects --- as the roots of +a second-level, global mark-and-sweep. -Then we can resume running the threads while at the same time doing a -mark-n-sweep collection of the global objects. There is never any -pointer from a global object to a local object, but some global objects -are duplicated in one or several local nurseries. To simplify, these -duplicates should be considered as additional roots for local marking, -and the original objects should be additional roots for global marking. -At some point we might figure out a way to allow duplicated objects to -be freed too. +For now we release the LILs only when the major collection is finished. -The global objects are read-only, at least if there is no commit. If we -don't want to block the other threads we need support for detecting -commit-time concurrent writes. Alternatively, we can ask the threads to -do all together a parallel global marking; this would have a -stop-the-world effect, but require no concurrency detection mechanism. +TODO: either release the LILs earlier, say after we processed the lists +of old-but-local objects but before we went on marking and sweeping --- +but we need support for detecting concurrent writes done by concurrent +commits; or, ask all threads currently waiting on the LIL to help with +doing the global mark-and-sweep in parallel. 
Note: standard terminology: @@ -242,12 +261,6 @@ * Parallelism: there are multiple threads all doing something GC-related, like all scanning the heap together. -<< at first the global area keeps growing unboundedly. The next step -will be to add the LIL but run the global collection by keeping all -other threads blocked. NOW: think about, at least, doing "minor -collections" on the global area *before* we even start running -transactions. >> - When not running transactively ------------------------------ @@ -255,25 +268,13 @@ The above describes the mode during which there is a main thread blocked in transaction.run(). The other mode is mostly that of "start-up", before we call transaction.run(). Of course no STM is needed in that -mode, but it's still running the same STM-enabled interpreter. We need -to figure out how to tweak the above concepts for that mode. +mode, but it's still running the same STM-enabled interpreter. -We can probably abuse the notion of nursery above, by running with one -nursery (corresponding to the only thread running, the main thread). We -would need to do collections that are some intermediate between "local -collections" and "end-of-transaction collections". Likely, a scheme -that might work would be similar to local collections (with some pinned -objects) but where surviving non-pinned objects are moved to become -global objects. - -This needs a bit more thinking: the issue is that when transaction.run() -is called, we can try to do such a collection, but what about the pinned -objects? - -<< NOW: the global area is just the "nursery" for the main thread. -stm_writebarrier of 'obj' return 'obj' in the main thread. All -allocations get us directly a global object, but allocated from -the "nursery" of the main thread, with bump-pointer allocation. >> +| In this mode, we just have one nursery and the global area. 
When +| transaction.run() is called, we do a local collection to empty it, then +| make sure to flag all surviving objects as GC_GLOBAL in preparation for +| starting actual transactions. Then we can reuse the nursery itself for +| one of the threads. Pointer equality @@ -284,18 +285,11 @@ This is all llops of the form ``ptr_eq(x, y)`` or ``ptr_ne(x, y)``. If we know statically that both copies are local copies, then we can -just compare the pointers. Otherwise we need to check their GC_GLOBAL -and GC_WAS_COPIED flag, and potentially if they both have GC_WAS_COPIED -but only one of them has GC_GLOBAL, we need to check in the local -dictionary if they map to each other. And we need to take care of the -cases of NULL pointers. - -<< NOW: done, without needing the local dictionary: -stm_normalize_global(obj) returns globalobj if obj is a local, -WAS_COPIED object. Then a pointer comparison 'x == y' becomes -stm_normalize_global(x) == stm_normalize_global(y). Moreover -the call to stm_normalize_global() can be omitted for constants. >> - +just compare the pointers. Otherwise, we compare +``stm_normalize_global(x)`` with ``stm_normalize_global(y)``, where +``stm_normalize_global(obj)`` returns ``globalobj`` if ``obj`` is a +local, GC_WAS_COPIED object. Moreover the call to +``stm_normalize_global()`` can be omitted for constants. notes From noreply at buildbot.pypy.org Sat Mar 31 14:27:44 2012 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 31 Mar 2012 14:27:44 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Add a future "JIT support" section. Message-ID: <20120331122744.AC8588236A@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r4167:30842a20ee1a Date: 2012-03-30 18:22 +0200 http://bitbucket.org/pypy/extradoc/changeset/30842a20ee1a/ Log: Add a future "JIT support" section. 
diff --git a/planning/stm.txt b/planning/stm.txt --- a/planning/stm.txt +++ b/planning/stm.txt @@ -292,6 +292,12 @@ ``stm_normalize_global()`` can be omitted for constants. +JIT support +----------- + +TODO + + notes ----- From noreply at buildbot.pypy.org Sat Mar 31 14:27:46 2012 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 31 Mar 2012 14:27:46 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: merge heads Message-ID: <20120331122746.549728236B@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r4168:9c8dcd01475e Date: 2012-03-31 14:21 +0200 http://bitbucket.org/pypy/extradoc/changeset/9c8dcd01475e/ Log: merge heads diff --git a/blog/draft/pycon-wrapup.rst b/blog/draft/pycon-wrapup.rst new file mode 100644 --- /dev/null +++ b/blog/draft/pycon-wrapup.rst @@ -0,0 +1,27 @@ +PyCon 2012 +========== + +So, PyCon happened. This was the biggest PyCon ever and probably the biggest +gathering of Python hackers ever. + +From the PyPy perspective, a lot at PyCon was about PyPy. Listing things: + +* David Beazley did an excellent keynote on trying to dive head-first into + PyPy and at least partly failing. He however did not fail to explain + bits and pieces about PyPy's architecture. `Video`_ is available. + +* We gave tons of talks, including the `tutorial`_ and `why pypy by example`_. + +* We had a giant influx of new commiters, easily doubling the amount of pull + requests ever created for PyPy. The main topics for newcomers were numpy and + py3k, disproving what David said about PyPy being too hard to dive into ;) + +* Guido argued in his keynote that Python is not too slow. In the meantime, + we're trying to `prove him correct`_ :-) + +* XXX stuff stuff + +.. _`Video`: xxx +.. _`tutorial`: xxx +.. _`why pypy by example`: xxx +.. 
_`prove him correct`: http://mrjoes.github.com/2011/12/15/sockjs-bench.html diff --git a/talk/uct2012/abstract.rst b/talk/uct2012/abstract.rst new file mode 100644 --- /dev/null +++ b/talk/uct2012/abstract.rst @@ -0,0 +1,16 @@ +Building fast enough VMs in fast enough time +============================================ + +PyPy is a fast interpreter for the Python programming language. This however, +not the whole description. It's also a framework for building efficient +Virtual Machines for dynamic languages with relatively little effort. + +In this talk I would like to walk people through how the unique +infrastructure provided by the PyPy project let's you write efficient +virtual machines with minimal effort. This talk will cover the +architecture of the PyPy project, how to use it in your own VMs as +well as how to hook up an efficient garbage collector and Just In Time +compiler with minimal effort. + +This talk assumes no prior exposure to compiler techniques and assumes +some very basic knowledge of the Python programming language. diff --git a/talk/uct2012/talk.rst b/talk/uct2012/talk.rst new file mode 100644 --- /dev/null +++ b/talk/uct2012/talk.rst @@ -0,0 +1,85 @@ +Fast enough VMs in fast enough time +=================================== + +Who am I? +--------- + +* PyPy developer since 2006 + +XXX + +What is PyPy? +------------- + +* an open source project + +* a Python interpreter + +* **a framework for writing dynamic language VMs** + +* an agile project sponsored by the EU and others + +What is a VM? +------------- + +* a program + +* input: a program + +* output: the result of executing that program + +What does a VM look like? +------------------------- + +* Lexical/analysis parsing (what are the symbols in the program) + +* AST construction (what is the structure of the program) + +* Bytecode compilation (optional) + +* Execution + +Where does PyPy come in? +------------------------ + +* Tools for writing these programs quickly, and efficiently. 
+ + * Helpers for things like parsing + + * Free JIT, and garbage collectors + +* Mostly you write a totally normal VM in python, and it becomes magically fast + +PyPy architecture +----------------- + +* snakes all the way down + +* everything is written in Python - including JIT, GC, etc. + +* to be precise, a **subset** of Python, called RPython + +* your VM has to be implemented in RPython + +RPython - the good +------------------ + +* The good - it's mostly Python + +* Just write python and fix it later + +RPython - the bad +----------------- + +* It's restricted + +* Most dynamic features don't work, but you can employ all kinds of tricks during import + +RPython - the ugly +------------------- + +* Documentation + +* Error messages + +* Global type inference From noreply at buildbot.pypy.org Sat Mar 31 15:30:25 2012 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 31 Mar 2012 15:30:25 +0200 (CEST) Subject: [pypy-commit] pypy default: debug_repr() may not be implemented in all subclasses. It's usually a Message-ID: <20120331133025.D04FF82252@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r54107:5b9f7aa356a0 Date: 2012-03-31 15:28 +0200 http://bitbucket.org/pypy/pypy/changeset/5b9f7aa356a0/ Log: debug_repr() may not be implemented in all subclasses. It's usually a minor bug because it's a useful debugging feature, but still, it shouldn't give us segfaults in the translated pypy. 
diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -107,6 +107,10 @@ arr.compute_first_step(self, f) return f + def debug_repr(self): + # should be overridden, but in case it isn't, provide a default + return str(self) + class ConcreteSignature(Signature): _immutable_fields_ = ['dtype'] From noreply at buildbot.pypy.org Sat Mar 31 16:33:07 2012 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 31 Mar 2012 16:33:07 +0200 (CEST) Subject: [pypy-commit] pypy stm-gc: Expose bool_cas() to the RPython level. Interface tentative so far. Message-ID: <20120331143307.18B7C82252@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stm-gc Changeset: r54108:b089f7f2a92e Date: 2012-03-31 16:13 +0200 http://bitbucket.org/pypy/pypy/changeset/b089f7f2a92e/ Log: Expose bool_cas() to the RPython level. Interface tentative so far. diff --git a/pypy/rlib/atomic_ops.py b/pypy/rlib/atomic_ops.py new file mode 100644 --- /dev/null +++ b/pypy/rlib/atomic_ops.py @@ -0,0 +1,23 @@ +import py +from pypy.tool.autopath import pypydir +from pypy.rpython.lltypesystem import lltype, llmemory, rffi +from pypy.translator.tool.cbuild import ExternalCompilationInfo + + +cdir = py.path.local(pypydir) / 'translator' / 'stm' +cdir2 = py.path.local(pypydir) / 'translator' / 'c' + +eci = ExternalCompilationInfo( + include_dirs = [cdir, cdir2], + post_include_bits = [''' +#include "src_stm/atomic_ops.h" +#define pypy_bool_cas(ptr, old, _new) \\ + bool_cas((volatile unsigned long*)(ptr), \\ + (unsigned long)(old), \\ + (unsigned long)(_new)) +'''], +) + + +bool_cas = rffi.llexternal('pypy_bool_cas', [llmemory.Address]*3, lltype.Bool, + compilation_info=eci, macro=True) diff --git a/pypy/rlib/test/test_atomic_ops.py b/pypy/rlib/test/test_atomic_ops.py new file mode 100644 --- /dev/null +++ b/pypy/rlib/test/test_atomic_ops.py @@ -0,0 +1,31 @@ +from pypy.rlib.atomic_ops import 
bool_cas +from pypy.rpython.lltypesystem import lltype, llmemory, rffi + + +ARRAY = rffi.CArray(llmemory.Address) + +def test_bool_cas(): + a = lltype.malloc(ARRAY, 1, flavor='raw') + a[0] = rffi.cast(llmemory.Address, 42) + # + res = bool_cas(rffi.cast(llmemory.Address, a), + rffi.cast(llmemory.Address, 42), + rffi.cast(llmemory.Address, 43)) + assert res == True + assert rffi.cast(lltype.Signed, a[0]) == 43 + # + res = bool_cas(rffi.cast(llmemory.Address, a), + rffi.cast(llmemory.Address, 42), + rffi.cast(llmemory.Address, 44)) + assert res == False + assert rffi.cast(lltype.Signed, a[0]) == 43 + # + lltype.free(a, flavor='raw') + return 0 + +def test_translate_bool_cas(): + from pypy.translator.c.test.test_genc import compile + + f = compile(test_bool_cas, []) + res = f() + assert res == 0 diff --git a/pypy/translator/c/src/g_prerequisite.h b/pypy/translator/c/src/g_prerequisite.h --- a/pypy/translator/c/src/g_prerequisite.h +++ b/pypy/translator/c/src/g_prerequisite.h @@ -17,11 +17,4 @@ #include -#ifdef __GNUC__ /* other platforms too, probably */ -typedef _Bool bool_t; -#else -typedef unsigned char bool_t; -#endif - - #include "src/align.h" diff --git a/pypy/translator/stm/src_stm/atomic_ops.h b/pypy/translator/stm/src_stm/atomic_ops.h --- a/pypy/translator/stm/src_stm/atomic_ops.h +++ b/pypy/translator/stm/src_stm/atomic_ops.h @@ -1,3 +1,5 @@ +#ifndef _SRCSTM_ATOMIC_OPS_ +#define _SRCSTM_ATOMIC_OPS_ /* "compiler fence" for preventing reordering of loads/stores to @@ -45,3 +47,6 @@ relevant data from memory after the spinloop */ asm volatile ("pause":::"memory"); } + + +#endif /* _SRCSTM_ATOMIC_OPS_ */ diff --git a/pypy/translator/tool/cbuild.py b/pypy/translator/tool/cbuild.py --- a/pypy/translator/tool/cbuild.py +++ b/pypy/translator/tool/cbuild.py @@ -329,4 +329,9 @@ typedef unsigned long Unsigned; # define SIGNED_MIN LONG_MIN #endif +#ifdef __GNUC__ /* other platforms too, probably */ +typedef _Bool bool_t; +#else +typedef unsigned char bool_t; +#endif ''' 
From noreply at buildbot.pypy.org Sat Mar 31 17:58:41 2012 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 31 Mar 2012 17:58:41 +0200 (CEST) Subject: [pypy-commit] pypy stm-gc: Add a non-passing test. Message-ID: <20120331155841.D4F5082252@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stm-gc Changeset: r54109:1396dcd47d1c Date: 2012-03-31 17:01 +0200 http://bitbucket.org/pypy/pypy/changeset/1396dcd47d1c/ Log: Add a non-passing test. diff --git a/pypy/rpython/memory/gc/stmgc.py b/pypy/rpython/memory/gc/stmgc.py --- a/pypy/rpython/memory/gc/stmgc.py +++ b/pypy/rpython/memory/gc/stmgc.py @@ -147,14 +147,13 @@ def _allocate_bump_pointer(self, tls, size): free = tls.nursery_free top = tls.nursery_top - new = free + size - tls.nursery_free = new - if new > top: - free = self.local_collection(free) + if (top - free) < llmemory.raw_malloc_usage(size): + free = self.local_collection(size) + tls.nursery_free = free + size return free @dont_inline - def local_collection(self, oldfree): + def local_collection(self, size): raise MemoryError("nursery exhausted") # XXX for now diff --git a/pypy/rpython/memory/gc/test/test_stmgc.py b/pypy/rpython/memory/gc/test/test_stmgc.py --- a/pypy/rpython/memory/gc/test/test_stmgc.py +++ b/pypy/rpython/memory/gc/test/test_stmgc.py @@ -588,3 +588,7 @@ assert a == sr1_adr a = self.gc.stm_normalize_global(tr1_adr) assert a == sr1_adr + + def test_alloc_a_lot_from_main_thread(self): + for i in range(1000): + sr1, sr1_adr = self.malloc(SR) From noreply at buildbot.pypy.org Sat Mar 31 17:58:43 2012 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 31 Mar 2012 17:58:43 +0200 (CEST) Subject: [pypy-commit] pypy stm-gc: Remove the repeated malloc/free. Message-ID: <20120331155843.1372282252@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stm-gc Changeset: r54110:ae6f36ad574b Date: 2012-03-31 17:14 +0200 http://bitbucket.org/pypy/pypy/changeset/ae6f36ad574b/ Log: Remove the repeated malloc/free. 
diff --git a/pypy/module/transaction/interp_epoll.py b/pypy/module/transaction/interp_epoll.py --- a/pypy/module/transaction/interp_epoll.py +++ b/pypy/module/transaction/interp_epoll.py @@ -25,28 +25,32 @@ class EPollPending(interp_transaction.AbstractPending): + maxevents = FD_SETSIZE - 1 # for now + evs = lltype.nullptr(rffi.CArray(epoll_event)) + def __init__(self, space, epoller, w_callback): self.space = space self.epoller = epoller self.w_callback = w_callback + self.evs = lltype.malloc(rffi.CArray(epoll_event), self.maxevents, + flavor='raw', add_memory_pressure=True, + track_allocation=False) + + def __del__(self): + evs = self.evs + if evs: + self.evs = lltype.nullptr(rffi.CArray(epoll_event)) + lltype.free(evs, flavor='raw', track_allocation=False) def run(self): # This code is run non-transactionally. Careful, no GC available. state = interp_transaction.state if state.has_exception(): return - maxevents = FD_SETSIZE - 1 # for now - evs = lltype.malloc(rffi.CArray(epoll_event), maxevents, flavor='raw') - try: - self.wait_and_process_events(evs, maxevents) - finally: - lltype.free(evs, flavor='raw') - - def wait_and_process_events(self, evs, maxevents): fd = rffi.cast(rffi.INT, self.epoller.epfd) - maxevents = rffi.cast(rffi.INT, maxevents) + maxevents = rffi.cast(rffi.INT, self.maxevents) timeout = rffi.cast(rffi.INT, 500) # for now: half a second - nfds = _epoll_wait(fd, evs, maxevents, timeout) + nfds = _epoll_wait(fd, self.evs, maxevents, timeout) nfds = rffi.cast(lltype.Signed, nfds) # if nfds < 0: @@ -63,7 +67,6 @@ # allocate anything here because we are not running transactionally. # Workaround for now: run a new tiny transaction just to create # and register these PendingCallback's. 
- self.evs = evs self.nfds = nfds rstm.perform_transaction(EPollPending._add_real_transactions, EPollPending, self) From noreply at buildbot.pypy.org Sat Mar 31 17:58:44 2012 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 31 Mar 2012 17:58:44 +0200 (CEST) Subject: [pypy-commit] pypy stm-gc: Add 'transaction.remove_epoll()'. Message-ID: <20120331155844.586EC82252@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stm-gc Changeset: r54111:22ab9f05fb91 Date: 2012-03-31 17:58 +0200 http://bitbucket.org/pypy/pypy/changeset/22ab9f05fb91/ Log: Add 'transaction.remove_epoll()'. diff --git a/lib_pypy/transaction.py b/lib_pypy/transaction.py --- a/lib_pypy/transaction.py +++ b/lib_pypy/transaction.py @@ -15,6 +15,9 @@ _pending[r] = (f, args) def add_epoll(ep, callback): + for key, (f, args) in _pending.items(): + if getattr(f, '_reads_from_epoll_', None) is ep: + raise ValueError("add_epoll(ep): ep is already registered") def poll_reader(): # assume only one epoll is added. If the _pending list is # now empty, wait. If not, then just poll non-blockingly. 
@@ -26,8 +29,17 @@ for fd, events in got: add(callback, fd, events) add(poll_reader) + poll_reader._reads_from_epoll_ = ep add(poll_reader) +def remove_epoll(ep): + for key, (f, args) in _pending.items(): + if getattr(f, '_reads_from_epoll_', None) is ep: + del _pending[key] + break + else: + raise ValueError("remove_epoll(ep): ep is not registered") + def run(): pending = _pending try: diff --git a/pypy/module/transaction/__init__.py b/pypy/module/transaction/__init__.py --- a/pypy/module/transaction/__init__.py +++ b/pypy/module/transaction/__init__.py @@ -9,7 +9,8 @@ 'set_num_threads': 'interp_transaction.set_num_threads', 'add': 'interp_transaction.add', 'run': 'interp_transaction.run', - 'add_epoll': 'interp_epoll.add_epoll', # xxx linux only + 'add_epoll': 'interp_epoll.add_epoll', # xxx linux only + 'remove_epoll': 'interp_epoll.remove_epoll', # xxx linux only } appleveldefs = { diff --git a/pypy/module/transaction/interp_epoll.py b/pypy/module/transaction/interp_epoll.py --- a/pypy/module/transaction/interp_epoll.py +++ b/pypy/module/transaction/interp_epoll.py @@ -35,6 +35,7 @@ self.evs = lltype.malloc(rffi.CArray(epoll_event), self.maxevents, flavor='raw', add_memory_pressure=True, track_allocation=False) + self.force_quit = False def __del__(self): evs = self.evs @@ -45,7 +46,7 @@ def run(self): # This code is run non-transactionally. Careful, no GC available. 
state = interp_transaction.state - if state.has_exception(): + if state.has_exception() or self.force_quit: return fd = rffi.cast(rffi.INT, self.epoller.epfd) maxevents = rffi.cast(rffi.INT, self.maxevents) @@ -107,4 +108,25 @@ @unwrap_spec(epoller=W_Epoll) def add_epoll(space, epoller, w_callback): - EPollPending(space, epoller, w_callback).register() + state = interp_transaction.state + if state.epolls is None: + state.epolls = {} + elif epoller in state.epolls: + raise OperationError(space.w_ValueError, + space.wrap("add_epoll(ep): ep is already registered")) + pending = EPollPending(space, epoller, w_callback) + state.epolls[epoller] = pending + pending.register() + + at unwrap_spec(epoller=W_Epoll) +def remove_epoll(space, epoller): + state = interp_transaction.state + if state.epolls is None: + pending = None + else: + pending = state.epolls.get(epoller, None) + if pending is None: + raise OperationError(space.w_ValueError, + space.wrap("remove_epoll(ep): ep is not registered")) + pending.force_quit = True + del state.epolls[epoller] diff --git a/pypy/module/transaction/interp_transaction.py b/pypy/module/transaction/interp_transaction.py --- a/pypy/module/transaction/interp_transaction.py +++ b/pypy/module/transaction/interp_transaction.py @@ -23,6 +23,7 @@ self.ll_no_tasks_pending_lock = threadintf.null_ll_lock self.ll_unfinished_lock = threadintf.null_ll_lock self.threadobjs = {} # empty during translation + self.epolls = None self.pending = Fifo() def _freeze_(self): @@ -305,6 +306,7 @@ assert not state.is_locked_no_tasks_pending() state.clear_all_values_apart_from_main() state.running = False + state.epolls = None # # now re-raise the exception that we got in a transaction state.close_exceptions() diff --git a/pypy/module/transaction/test/test_epoll.py b/pypy/module/transaction/test/test_epoll.py --- a/pypy/module/transaction/test/test_epoll.py +++ b/pypy/module/transaction/test/test_epoll.py @@ -50,6 +50,26 @@ raises(Done, transaction.run) assert steps 
== ['write_stuff', 'callback'] + def test_remove_closed_epoll(self): + import transaction, select, posix as os + + fd_read, fd_write = os.pipe() + + epoller = select.epoll() + epoller.register(fd_read) + + # we run it 10 times in order to get both possible orders in + # the emulator + for i in range(10): + transaction.add_epoll(epoller, lambda *args: not_actually_callable) + transaction.add(transaction.remove_epoll, epoller) + transaction.run() + # assert didn't deadlock + transaction.add(transaction.remove_epoll, epoller) + transaction.add_epoll(epoller, lambda *args: not_actually_callable) + transaction.run() + # assert didn't deadlock + class AppTestEpollEmulator(AppTestEpoll): def setup_class(cls): From noreply at buildbot.pypy.org Sat Mar 31 18:17:15 2012 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 31 Mar 2012 18:17:15 +0200 (CEST) Subject: [pypy-commit] pypy stm-gc: Document transaction.py. Synchronize the exception behavior with module/transaction/. Message-ID: <20120331161715.6D8A782252@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stm-gc Changeset: r54112:6c835b5c43e5 Date: 2012-03-31 18:16 +0200 http://bitbucket.org/pypy/pypy/changeset/6c835b5c43e5/ Log: Document transaction.py. Synchronize the exception behavior with module/transaction/. diff --git a/lib_pypy/transaction.py b/lib_pypy/transaction.py --- a/lib_pypy/transaction.py +++ b/lib_pypy/transaction.py @@ -5,19 +5,41 @@ print >> sys.stderr, "warning: using lib_pypy/transaction.py, the emulator" _pending = {} +_in_transaction = False + + +class TransactionError(Exception): + pass + def set_num_threads(num): - pass + """Set the number of threads to use. In a real implementation, + the transactions will attempt to use 'num' threads in parallel. + """ -def add(f, *args): + +def add(f, *args, **kwds): + """Register the call 'f(*args, **kwds)' as running a new + transaction. 
If we are currently running in a transaction too, the + new transaction will only start after the end of the current + transaction. Note that if the same or another transaction raises an + exception in the meantime, all pending transactions are cancelled. + """ r = random.random() assert r not in _pending # very bad luck if it is - _pending[r] = (f, args) + _pending[r] = (f, args, kwds) + def add_epoll(ep, callback): - for key, (f, args) in _pending.items(): + """Register the epoll object (from the 'select' module). For any + event (fd, events) detected by 'ep', a new transaction will be + started invoking 'callback(fd, events)'. Note that all fds should + be registered with the flag select.EPOLLONESHOT, and re-registered + from the callback if needed. + """ + for key, (f, args, kwds) in _pending.items(): if getattr(f, '_reads_from_epoll_', None) is ep: - raise ValueError("add_epoll(ep): ep is already registered") + raise TransactionError("add_epoll(ep): ep is already registered") def poll_reader(): # assume only one epoll is added. If the _pending list is # now empty, wait. If not, then just poll non-blockingly. @@ -33,18 +55,33 @@ add(poll_reader) def remove_epoll(ep): - for key, (f, args) in _pending.items(): + """Explicitly unregister the epoll object. Note that raising an + exception in a transaction also cancels any add_epoll(). + """ + for key, (f, args, kwds) in _pending.items(): if getattr(f, '_reads_from_epoll_', None) is ep: del _pending[key] break else: - raise ValueError("remove_epoll(ep): ep is not registered") + raise TransactionError("remove_epoll(ep): ep is not registered") def run(): + """Run the pending transactions, as well as all transactions started + by them, and so on. The order is random and undeterministic. Must + be called from the main program, i.e. not from within another + transaction. If at some point all transactions are done, returns. 
+ If a transaction raises an exception, it propagates here; in this + case all pending transactions are cancelled. + """ + global _pending, _in_transaction + if _in_transaction: + raise TransactionError("recursive invocation of transaction.run()") pending = _pending try: + _in_transaction = True while pending: - _, (f, args) = pending.popitem() - f(*args) + _, (f, args, kwds) = pending.popitem() + f(*args, **kwds) finally: + _in_transaction = False pending.clear() # this is the behavior we get with interp_transaction diff --git a/pypy/module/transaction/interp_epoll.py b/pypy/module/transaction/interp_epoll.py --- a/pypy/module/transaction/interp_epoll.py +++ b/pypy/module/transaction/interp_epoll.py @@ -112,7 +112,7 @@ if state.epolls is None: state.epolls = {} elif epoller in state.epolls: - raise OperationError(space.w_ValueError, + raise OperationError(state.w_error, space.wrap("add_epoll(ep): ep is already registered")) pending = EPollPending(space, epoller, w_callback) state.epolls[epoller] = pending @@ -126,7 +126,7 @@ else: pending = state.epolls.get(epoller, None) if pending is None: - raise OperationError(space.w_ValueError, + raise OperationError(state.w_error, space.wrap("remove_epoll(ep): ep is not registered")) pending.force_quit = True del state.epolls[epoller] diff --git a/pypy/module/transaction/test/test_epoll.py b/pypy/module/transaction/test/test_epoll.py --- a/pypy/module/transaction/test/test_epoll.py +++ b/pypy/module/transaction/test/test_epoll.py @@ -70,6 +70,17 @@ transaction.run() # assert didn't deadlock + def test_errors(self): + import transaction, select + epoller = select.epoll() + callback = lambda *args: not_actually_callable + transaction.add_epoll(epoller, callback) + raises(transaction.TransactionError, + transaction.add_epoll, epoller, callback) + transaction.remove_epoll(epoller) + raises(transaction.TransactionError, + transaction.remove_epoll, epoller) + class AppTestEpollEmulator(AppTestEpoll): def setup_class(cls): From 
noreply at buildbot.pypy.org Sat Mar 31 18:19:29 2012 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 31 Mar 2012 18:19:29 +0200 (CEST) Subject: [pypy-commit] pypy stm-gc: Clarify. Message-ID: <20120331161929.6A16082252@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stm-gc Changeset: r54113:8216dfd8284b Date: 2012-03-31 18:19 +0200 http://bitbucket.org/pypy/pypy/changeset/8216dfd8284b/ Log: Clarify. diff --git a/lib_pypy/transaction.py b/lib_pypy/transaction.py --- a/lib_pypy/transaction.py +++ b/lib_pypy/transaction.py @@ -56,7 +56,7 @@ def remove_epoll(ep): """Explicitly unregister the epoll object. Note that raising an - exception in a transaction also cancels any add_epoll(). + exception in a transaction to abort run() also unregisters all epolls. """ for key, (f, args, kwds) in _pending.items(): if getattr(f, '_reads_from_epoll_', None) is ep: