From noreply at buildbot.pypy.org Wed Oct 1 00:21:50 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 1 Oct 2014 00:21:50 +0200 (CEST) Subject: [pypy-commit] pypy win32-fixes5: merge default into branch Message-ID: <20140930222150.51BC81C094F@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: win32-fixes5 Changeset: r73748:0559342425bb Date: 2014-09-30 20:32 +0300 http://bitbucket.org/pypy/pypy/changeset/0559342425bb/ Log: merge default into branch diff too long, truncating to 2000 out of 5071 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -367,3 +367,43 @@ Detailed license information is contained in the NOTICE file in the directory. + +Licenses and Acknowledgements for Incorporated Software +======================================================= + +This section is an incomplete, but growing list of licenses and +acknowledgements for third-party software incorporated in the PyPy +distribution. + +License for 'Tcl/Tk' +-------------------- + +This copy of PyPy contains library code that may, when used, result in +the Tcl/Tk library to be loaded. PyPy also includes code that may be +regarded as being a copy of some parts of the Tcl/Tk header files. +You may see a copy of the License for Tcl/Tk in the file +`lib_pypy/_tkinter/license.terms` included here. + +License for 'bzip2' +------------------- + +This copy of PyPy may be linked (dynamically or statically) with the +bzip2 library. You may see a copy of the License for bzip2/libbzip2 at + + http://www.bzip.org/1.0.5/bzip2-manual-1.0.5.html + +License for 'openssl' +--------------------- + +This copy of PyPy may be linked (dynamically or statically) with the +openssl library. You may see a copy of the License for OpenSSL at + + https://www.openssl.org/source/license.html + +License for 'gdbm' +------------------ + +The gdbm module includes code from gdbm.h, which is distributed under +the terms of the GPL license version 2 or any later version. Thus the +gdbm module, provided in the file lib_pypy/gdbm.py, is redistributed +under the terms of the GPL license as well. diff --git a/lib-python/2.7/test/test_select.py b/lib-python/2.7/test/test_select.py --- a/lib-python/2.7/test/test_select.py +++ b/lib-python/2.7/test/test_select.py @@ -57,7 +57,17 @@ del a[-1] return sys.__stdout__.fileno() a[:] = [F()] * 10 - self.assertEqual(select.select([], a, []), ([], a[:5], [])) + result = select.select([], a, []) + # CPython: 'a' ends up with 5 items, because each fileno() + # removes an item and at the middle the iteration stops. + # PyPy: 'a' ends up empty, because the iteration is done on + # a copy of the original list: fileno() is called 10 times. 
+ if test_support.check_impl_detail(cpython=True): + self.assertEqual(len(result[1]), 5) + self.assertEqual(len(a), 5) + if test_support.check_impl_detail(pypy=True): + self.assertEqual(len(result[1]), 10) + self.assertEqual(len(a), 0) def test_main(): test_support.run_unittest(SelectTestCase) diff --git a/pypy/module/_winreg/interp_winreg.py b/pypy/module/_winreg/interp_winreg.py --- a/pypy/module/_winreg/interp_winreg.py +++ b/pypy/module/_winreg/interp_winreg.py @@ -266,10 +266,16 @@ buf = None if typ == rwinreg.REG_DWORD: - if space.isinstance_w(w_value, space.w_int): + if space.is_none(w_value) or ( + space.isinstance_w(w_value, space.w_int) or + space.isinstance_w(w_value, space.w_long)): + if space.is_none(w_value): + value = r_uint(0) + else: + value = space.c_uint_w(w_value) buflen = rffi.sizeof(rwin32.DWORD) buf1 = lltype.malloc(rffi.CArray(rwin32.DWORD), 1, flavor='raw') - buf1[0] = space.uint_w(w_value) + buf1[0] = value buf = rffi.cast(rffi.CCHARP, buf1) elif typ == rwinreg.REG_SZ or typ == rwinreg.REG_EXPAND_SZ: diff --git a/pypy/module/_winreg/test/test_winreg.py b/pypy/module/_winreg/test/test_winreg.py --- a/pypy/module/_winreg/test/test_winreg.py +++ b/pypy/module/_winreg/test/test_winreg.py @@ -40,7 +40,7 @@ cls.w_tmpfilename = space.wrap(str(udir.join('winreg-temp'))) test_data = [ - ("Int Value", 45, _winreg.REG_DWORD), + ("Int Value", 0xFEDCBA98, _winreg.REG_DWORD), ("Str Value", "A string Value", _winreg.REG_SZ), ("Unicode Value", u"A unicode Value", _winreg.REG_SZ), ("Str Expand", "The path is %path%", _winreg.REG_EXPAND_SZ), @@ -137,9 +137,11 @@ assert 0, "Did not raise" def test_SetValueEx(self): - from _winreg import CreateKey, SetValueEx, REG_BINARY + from _winreg import CreateKey, SetValueEx, REG_BINARY, REG_DWORD key = CreateKey(self.root_key, self.test_key_name) sub_key = CreateKey(key, "sub_key") + SetValueEx(sub_key, 'Int Value', 0, REG_DWORD, None) + SetValueEx(sub_key, 'Int Value', 0, REG_DWORD, 45) for name, value, type in self.test_data: SetValueEx(sub_key, name, 0, type, value) exc = raises(TypeError, SetValueEx, sub_key, 'test_name', None, diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -617,7 +617,7 @@ 'raw_store': 1, 'same_as': 2, 'setarrayitem_gc': 8, - 'setfield_gc': 21, + 'setfield_gc': 22, }) def define_argsort(): diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -382,12 +382,16 @@ ... p20 = force_token() p22 = new_with_vtable(...) - p24 = new_array(1, descr=) + p24 = new_array_clear(1, descr=) p26 = new_with_vtable(ConstClass(W_ListObject)) {{{ setfield_gc(p0, p20, descr=) + setfield_gc(p22, ConstPtr(null), descr=) + setfield_gc(p22, ConstPtr(null), descr=) setfield_gc(p22, 1, descr=) + setfield_gc(p22, ConstPtr(null), descr=) setfield_gc(p26, ConstPtr(ptr22), descr=) + setfield_gc(p26, ConstPtr(null), descr=) setarrayitem_gc(p24, 0, p26, descr=) setfield_gc(p22, p24, descr=) }}} diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -68,10 +68,13 @@ guard_no_exception(descr=...) i12 = call(ConstClass(ll_strhash), p10, descr=) p13 = new(descr=...) 
- p15 = new_array(8, descr=) + p15 = new_array_clear(8, descr=) setfield_gc(p13, p15, descr=) i17 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) + {{{ setfield_gc(p13, 16, descr=) + setfield_gc(p13, 0, descr=) + }}} guard_no_exception(descr=...) p20 = new_with_vtable(ConstClass(W_IntObject)) call(ConstClass(_ll_dict_setitem_lookup_done_trampoline), p13, p10, p20, i12, i17, descr=) diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -110,9 +110,12 @@ i85 = strlen(p80) p86 = new(descr=) p88 = newstr(23) - setfield_gc(..., descr=) - setfield_gc(..., descr=) - setfield_gc(..., descr=) + {{{ + setfield_gc(p86, 0, descr=) + setfield_gc(p86, p88, descr=) + setfield_gc(p86, 23, descr=) + setfield_gc(p86, 23, descr=) + }}} call(ConstClass(ll_append_res0__stringbuilderPtr_rpy_stringPtr), p86, p80, descr=) guard_no_exception(descr=...) i89 = getfield_gc(p86, descr=) diff --git a/pypy/module/select/interp_select.py b/pypy/module/select/interp_select.py --- a/pypy/module/select/interp_select.py +++ b/pypy/module/select/interp_select.py @@ -173,9 +173,9 @@ On Windows, only sockets are supported; on Unix, all file descriptors. """ - iwtd_w = space.listview(w_iwtd) - owtd_w = space.listview(w_owtd) - ewtd_w = space.listview(w_ewtd) + iwtd_w = space.unpackiterable(w_iwtd) + owtd_w = space.unpackiterable(w_owtd) + ewtd_w = space.unpackiterable(w_ewtd) if space.is_w(w_timeout, space.w_None): timeout = -1.0 diff --git a/pypy/module/select/test/test_select.py b/pypy/module/select/test/test_select.py --- a/pypy/module/select/test/test_select.py +++ b/pypy/module/select/test/test_select.py @@ -85,17 +85,18 @@ assert owtd == [writeend] total_out += writeend.send(b'x' * 512) total_in = 0 - while True: - iwtd, owtd, ewtd = select.select([readend], [], [], 0) + while total_in < total_out: + iwtd, owtd, ewtd = select.select([readend], [], [], 5) assert owtd == ewtd == [] - if iwtd == []: - break - assert iwtd == [readend] + assert iwtd == [readend] # there is more expected data = readend.recv(4096) assert len(data) > 0 assert data == b'x' * len(data) total_in += len(data) assert total_in == total_out + iwtd, owtd, ewtd = select.select([readend], [], [], 0) + assert owtd == ewtd == [] + assert iwtd == [] # there is not more expected finally: writeend.close() readend.close() @@ -304,6 +305,20 @@ for fd in rfds: os.close(fd) + def test_resize_list_in_select(self): + import select + class Foo(object): + def fileno(self): + print len(l) + if len(l) < 100: + l.append(Foo()) + return 0 + l = [Foo()] + select.select(l, (), (), 0) + assert 1 <= len(l) <= 100 + # ^^^ CPython gives 100, PyPy gives 1. I think both are OK as + # long as there is no crash. + class AppTestSelectWithSockets(_AppTestSelect): """Same tests with connected sockets. 
diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -15,6 +15,7 @@ from rpython.rlib.rbigint import rbigint from rpython.rlib import rfloat from rpython.tool.sourcetools import func_with_new_name +from rpython.rtyper.lltypesystem.module.ll_math import math_fmod from pypy.objspace.std.intobject import W_IntObject @@ -360,21 +361,17 @@ y = w_float2.floatval if y == 0.0: raise FailedToImplementArgs(space.w_ZeroDivisionError, space.wrap("float modulo")) - try: - mod = math.fmod(x, y) - except ValueError: - mod = rfloat.NAN + mod = math_fmod(x, y) + if mod: + # ensure the remainder has the same sign as the denominator + if (y < 0.0) != (mod < 0.0): + mod += y else: - if mod: - # ensure the remainder has the same sign as the denominator - if (y < 0.0) != (mod < 0.0): - mod += y - else: - # the remainder is zero, and in the presence of signed zeroes - # fmod returns different results across platforms; ensure - # it has the same sign as the denominator; we'd like to do - # "mod = y * 0.0", but that may get optimized away - mod = copysign(0.0, y) + # the remainder is zero, and in the presence of signed zeroes + # fmod returns different results across platforms; ensure + # it has the same sign as the denominator; we'd like to do + # "mod = y * 0.0", but that may get optimized away + mod = copysign(0.0, y) return W_FloatObject(mod) @@ -383,10 +380,7 @@ y = w_float2.floatval if y == 0.0: raise FailedToImplementArgs(space.w_ZeroDivisionError, space.wrap("float modulo")) - try: - mod = math.fmod(x, y) - except ValueError: - return [W_FloatObject(rfloat.NAN), W_FloatObject(rfloat.NAN)] + mod = math_fmod(x, y) # fmod is typically exact, so vx-mod is *mathematically* an # exact multiple of wx. But this is fp arithmetic, and fp # vx - mod is an approximation; the result is that div may diff --git a/pypy/objspace/std/newformat.py b/pypy/objspace/std/newformat.py --- a/pypy/objspace/std/newformat.py +++ b/pypy/objspace/std/newformat.py @@ -580,11 +580,11 @@ elif self._thousands_sep: dec = "." thousands = "," - grouping = "\3\0" + grouping = "\3" else: dec = "." 
thousands = "" - grouping = "\256" + grouping = "\xFF" # special value to mean 'stop' if self.is_unicode: self._loc_dec = dec.decode("ascii") self._loc_thousands = thousands.decode("ascii") @@ -677,14 +677,16 @@ done = False previous = 0 while True: - group = ord(grouping[grouping_state]) - if group > 0: - if group == 256: + if grouping_state >= len(grouping): + group = previous # end of string + else: + # else, get the next value from the string + group = ord(grouping[grouping_state]) + if group == 0xFF: # special value to mean 'stop' break grouping_state += 1 previous = group - else: - group = previous + # final_grouping = min(group, max(left, max(min_width, 1))) n_zeros = max(0, final_grouping - left) n_chars = max(0, min(left, final_grouping)) diff --git a/pypy/objspace/std/test/test_floatobject.py b/pypy/objspace/std/test/test_floatobject.py --- a/pypy/objspace/std/test/test_floatobject.py +++ b/pypy/objspace/std/test/test_floatobject.py @@ -794,7 +794,7 @@ raises(ValueError, float.fromhex, "0P") def test_division_edgecases(self): - import math + import math, os # inf inf = float("inf") @@ -803,6 +803,16 @@ x, y = divmod(inf, 3) assert math.isnan(x) assert math.isnan(y) + x, y = divmod(3, inf) + z = 3 % inf + if os.name == 'nt': + assert math.isnan(x) + assert math.isnan(y) + assert math.isnan(z) + else: + assert x == 0 + assert y == 3 + assert z == 3 # divide by 0 raises(ZeroDivisionError, lambda: inf % 0) diff --git a/pypy/objspace/std/test/test_newformat.py b/pypy/objspace/std/test/test_newformat.py --- a/pypy/objspace/std/test/test_newformat.py +++ b/pypy/objspace/std/test/test_newformat.py @@ -372,6 +372,7 @@ try: assert locale.format('%g', x, grouping=True) == '1,234.57' assert format(x, 'n') == '1,234.57' + assert format(12345678901234, 'n') == '12,345,678,901,234' finally: locale.setlocale(locale.LC_NUMERIC, 'C') diff --git a/pypy/tool/pypyjit_child.py b/pypy/tool/pypyjit_child.py --- a/pypy/tool/pypyjit_child.py +++ b/pypy/tool/pypyjit_child.py @@ -10,10 +10,6 @@ graph = loc['graph'] interp.malloc_check = False - def returns_null(T, *args, **kwds): - return lltype.nullptr(T) - interp.heap.malloc_nonmovable = returns_null # XXX - from rpython.jit.backend.llgraph.runner import LLGraphCPU #LLtypeCPU.supports_floats = False # for now apply_jit(interp, graph, LLGraphCPU) diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -49,6 +49,16 @@ os.system("chmod -R a+rX %s" % dirname) os.system("chmod -R g-w %s" % dirname) + +# +# Some crazy nonsense (imho) about including automatically the license +# of various libraries as they happen to be on this system. This is +# strange because most of these libraries are linked to dynamically, +# and so at runtime might end up with a different version. I (arigo) +# killed this logic and wrote some general info (which I hope is more +# sensible anyway) into our ../../../LICENSE file. 
+# +''' sep_template = "\nThis copy of PyPy includes a copy of %s, which is licensed under the following terms:\n\n" def generate_license(basedir, options): @@ -95,6 +105,7 @@ # Do something for gdbm, which is GPL txt += gdbm_bit return txt +''' def create_cffi_import_libraries(pypy_c, options): modules = ['_sqlite3'] @@ -216,19 +227,19 @@ for file in ['_testcapimodule.c', '_ctypes_test.c']: shutil.copyfile(str(basedir.join('lib_pypy', file)), str(pypydir.join('lib_pypy', file))) - try: + if 0: # disabled license = generate_license(basedir, options) with open(str(pypydir.join('LICENSE')), 'w') as LICENSE: LICENSE.write(license) - except: - # Non-fatal error, use original LICENCE file - import traceback;traceback.print_exc() + else: + # Use original LICENCE file + #import traceback;traceback.print_exc() base_file = str(basedir.join('LICENSE')) with open(base_file) as fid: license = fid.read() with open(str(pypydir.join('LICENSE')), 'w') as LICENSE: LICENSE.write(license) - retval = -1 + #retval = -1 # spdir = pypydir.ensure('site-packages', dir=True) shutil.copy(str(basedir.join('site-packages', 'README')), str(spdir)) @@ -321,7 +332,8 @@ parser.add_argument('--archive-name', dest='name', type=str, default='', help='pypy-VER-PLATFORM') parser.add_argument('--license_base', type=str, default=license_base, - help='where to start looking for third party upstream licensing info') + #help='where to start looking for third party upstream licensing info') + help='(ignored)') parser.add_argument('--builddir', type=str, default='', help='tmp dir for packaging') parser.add_argument('--targetdir', type=str, default='', @@ -356,24 +368,6 @@ return create_package(basedir, options) -third_party_header = '''\n\nLicenses and Acknowledgements for Incorporated Software -======================================================= - -This section is an incomplete, but growing list of licenses and acknowledgements -for third-party software incorporated in the PyPy distribution. - -''' - -gdbm_bit = '''gdbm ----- - -The gdbm module includes code from gdbm.h, which is distributed under the terms -of the GPL license version 2 or any later version. Thus the gdbm module, provided in -the file lib_pypy/gdbm.py, is redistributed under the terms of the GPL license as -well. 
-''' - - if __name__ == '__main__': import sys if sys.platform == 'win32': diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -931,6 +931,7 @@ guard, fcond) fcond = asm_operations_with_guard[opnum](self, op, guard, arglocs, regalloc, fcond) + assert fcond is not None regalloc.next_instruction() regalloc.possibly_free_vars_for_op(guard) regalloc.possibly_free_vars(guard.getfailargs()) @@ -941,6 +942,7 @@ if arglocs is not None: fcond = asm_operations[opnum](self, op, arglocs, regalloc, fcond) + assert fcond is not None if op.is_guard(): regalloc.possibly_free_vars(op.getfailargs()) if op.result: diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -25,7 +25,7 @@ from rpython.jit.backend.llsupport.descr import InteriorFieldDescr from rpython.jit.backend.llsupport.assembler import GuardToken, BaseAssembler from rpython.jit.backend.llsupport.regalloc import get_scale -from rpython.jit.metainterp.history import (Box, AbstractFailDescr, +from rpython.jit.metainterp.history import (Box, AbstractFailDescr, ConstInt, INT, FLOAT, REF) from rpython.jit.metainterp.history import TargetToken from rpython.jit.metainterp.resoperation import rop @@ -578,6 +578,7 @@ return fcond emit_op_setfield_raw = emit_op_setfield_gc + emit_op_zero_ptr_field = emit_op_setfield_gc def emit_op_getfield_gc(self, op, arglocs, regalloc, fcond): base_loc, ofs, res, size = arglocs @@ -1174,3 +1175,87 @@ self.mc.VMOV_cs(r.svfp_ip.value, arg.value) self.mc.VCVT_f32_f64(res.value, r.svfp_ip.value) return fcond + + #from ../x86/regalloc.py:1388 + def emit_op_zero_array(self, op, arglocs, regalloc, fcond): + from rpython.jit.backend.llsupport.descr import unpack_arraydescr + assert len(arglocs) == 0 + length_box = op.getarg(2) + if isinstance(length_box, ConstInt) and length_box.getint() == 0: + return fcond # nothing to do + itemsize, baseofs, _ = unpack_arraydescr(op.getdescr()) + args = op.getarglist() + base_loc = regalloc.rm.make_sure_var_in_reg(args[0], args) + sibox = args[1] + if isinstance(sibox, ConstInt): + startindex_loc = None + startindex = sibox.getint() + assert startindex >= 0 + else: + startindex_loc = regalloc.rm.make_sure_var_in_reg(sibox, args) + startindex = -1 + + # base_loc and startindex_loc are in two regs here (or they are + # immediates). Compute the dstaddr_loc, which is the raw + # address that we will pass as first argument to memset(). + # It can be in the same register as either one, but not in + # args[2], because we're still needing the latter. + dstaddr_box = TempBox() + dstaddr_loc = regalloc.rm.force_allocate_reg(dstaddr_box, [args[2]]) + if startindex >= 0: # a constant + ofs = baseofs + startindex * itemsize + reg = base_loc.value + else: + self.mc.gen_load_int(r.ip.value, itemsize) + self.mc.MLA(dstaddr_loc.value, r.ip.value, + startindex_loc.value, base_loc.value) + ofs = baseofs + reg = dstaddr_loc.value + if check_imm_arg(ofs): + self.mc.ADD_ri(dstaddr_loc.value, reg, imm=ofs) + else: + self.mc.gen_load_int(r.ip.value, ofs) + self.mc.ADD_rr(dstaddr_loc.value, reg, r.ip.value) + + if (isinstance(length_box, ConstInt) and + length_box.getint() <= 14 and # same limit as GCC + itemsize in (4, 2, 1)): + # Inline a series of STR operations, starting at 'dstaddr_loc'. 
+ # XXX we could optimize STRB/STRH into STR, but this needs care: + # XXX it only works if startindex_loc is a constant, otherwise + # XXX we'd be doing unaligned accesses + self.mc.gen_load_int(r.ip.value, 0) + for i in range(length_box.getint()): + if itemsize == 4: + self.mc.STR_ri(r.ip.value, dstaddr_loc.value, imm=i*4) + elif itemsize == 2: + self.mc.STRH_ri(r.ip.value, dstaddr_loc.value, imm=i*2) + else: + self.mc.STRB_ri(r.ip.value, dstaddr_loc.value, imm=i*1) + + else: + if isinstance(length_box, ConstInt): + length_loc = imm(length_box.getint() * itemsize) + else: + # load length_loc in a register different than dstaddr_loc + length_loc = regalloc.rm.make_sure_var_in_reg(length_box, + [dstaddr_box]) + if itemsize > 1: + # we need a register that is different from dstaddr_loc, + # but which can be identical to length_loc (as usual, + # only if the length_box is not used by future operations) + bytes_box = TempBox() + bytes_loc = regalloc.rm.force_allocate_reg(bytes_box, + [dstaddr_box]) + self.mc.gen_load_int(r.ip.value, itemsize) + self.mc.MUL(bytes_loc.value, r.ip.value, length_loc.value) + length_box = bytes_box + length_loc = bytes_loc + # + # call memset() + regalloc.before_call() + self.simple_call_no_collect(imm(self.memset_addr), + [dstaddr_loc, imm(0), length_loc]) + regalloc.rm.possibly_free_var(length_box) + regalloc.rm.possibly_free_var(dstaddr_box) + return fcond diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -818,8 +818,11 @@ def prepare_op_setfield_gc(self, op, fcond): boxes = op.getarglist() + ofs, size, sign = unpack_fielddescr(op.getdescr()) + return self._prepare_op_setfield(boxes, ofs, size) + + def _prepare_op_setfield(self, boxes, ofs, size): a0, a1 = boxes - ofs, size, sign = unpack_fielddescr(op.getdescr()) base_loc = self.make_sure_var_in_reg(a0, boxes) value_loc = self.make_sure_var_in_reg(a1, boxes) ofs_size = default_imm_size if size < 8 else VMEM_imm_size @@ -832,6 +835,11 @@ prepare_op_setfield_raw = prepare_op_setfield_gc + def prepare_op_zero_ptr_field(self, op, fcond): + a0 = op.getarg(0) + ofs = op.getarg(1).getint() + return self._prepare_op_setfield([a0, ConstInt(0)], ofs, WORD) + def prepare_op_getfield_gc(self, op, fcond): a0 = op.getarg(0) ofs, size, sign = unpack_fielddescr(op.getdescr()) @@ -988,6 +996,7 @@ prepare_op_copystrcontent = void prepare_op_copyunicodecontent = void + prepare_op_zero_array = void def prepare_op_unicodelen(self, op, fcond): l0 = self.make_sure_var_in_reg(op.getarg(0)) diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -225,6 +225,7 @@ 'i': 0, 'f': 0.0} + class LLGraphCPU(model.AbstractCPU): from rpython.jit.metainterp.typesystem import llhelper as ts supports_floats = True @@ -641,6 +642,11 @@ def bh_new_array(self, length, arraydescr): array = lltype.malloc(arraydescr.A, length, zero=True) + assert getkind(arraydescr.A.OF) != 'ref' # getkind crashes on structs + return lltype.cast_opaque_ptr(llmemory.GCREF, array) + + def bh_new_array_clear(self, length, arraydescr): + array = lltype.malloc(arraydescr.A, length, zero=True) return lltype.cast_opaque_ptr(llmemory.GCREF, array) def bh_classof(self, struct): diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ 
b/rpython/jit/backend/llsupport/assembler.py @@ -1,5 +1,5 @@ from rpython.jit.backend.llsupport import jitframe -from rpython.jit.backend.llsupport.memcpy import memcpy_fn +from rpython.jit.backend.llsupport.memcpy import memcpy_fn, memset_fn from rpython.jit.backend.llsupport.symbolic import WORD from rpython.jit.metainterp.history import (INT, REF, FLOAT, JitCellToken, ConstInt, BoxInt, AbstractFailDescr) @@ -63,6 +63,7 @@ def __init__(self, cpu, translate_support_code=False): self.cpu = cpu self.memcpy_addr = 0 + self.memset_addr = 0 self.rtyper = cpu.rtyper self._debug = False @@ -79,6 +80,7 @@ else: self.gc_size_of_header = WORD # for tests self.memcpy_addr = self.cpu.cast_ptr_to_int(memcpy_fn) + self.memset_addr = self.cpu.cast_ptr_to_int(memset_fn) self._build_failure_recovery(False, withfloats=False) self._build_failure_recovery(True, withfloats=False) self._build_wb_slowpath(False) diff --git a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py --- a/rpython/jit/backend/llsupport/descr.py +++ b/rpython/jit/backend/llsupport/descr.py @@ -35,9 +35,11 @@ size = 0 # help translation tid = llop.combine_ushort(lltype.Signed, 0, 0) - def __init__(self, size, count_fields_if_immut=-1): + def __init__(self, size, count_fields_if_immut=-1, + gc_fielddescrs=None): self.size = size self.count_fields_if_immut = count_fields_if_immut + self.gc_fielddescrs = gc_fielddescrs def count_fields_if_immutable(self): return self.count_fields_if_immut @@ -58,10 +60,13 @@ except KeyError: size = symbolic.get_size(STRUCT, gccache.translate_support_code) count_fields_if_immut = heaptracker.count_fields_if_immutable(STRUCT) + gc_fielddescrs = heaptracker.gc_fielddescrs(gccache, STRUCT) if heaptracker.has_gcstruct_a_vtable(STRUCT): - sizedescr = SizeDescrWithVTable(size, count_fields_if_immut) + sizedescr = SizeDescrWithVTable(size, count_fields_if_immut, + gc_fielddescrs) else: - sizedescr = SizeDescr(size, count_fields_if_immut) + sizedescr = SizeDescr(size, count_fields_if_immut, + gc_fielddescrs) gccache.init_size_descr(STRUCT, sizedescr) cache[STRUCT] = sizedescr return sizedescr @@ -95,6 +100,9 @@ self.field_size = field_size self.flag = flag + def __repr__(self): + return 'FieldDescr<%s>' % (self.name,) + def is_pointer_field(self): return self.flag == FLAG_POINTER diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -18,10 +18,12 @@ from rpython.jit.backend.llsupport.descr import get_call_descr from rpython.jit.backend.llsupport.rewrite import GcRewriterAssembler from rpython.memory.gctransform import asmgcroot +from rpython.jit.codewriter.effectinfo import EffectInfo # ____________________________________________________________ class GcLLDescription(GcCache): + malloc_zero_filled = True def __init__(self, gcdescr, translator=None, rtyper=None): GcCache.__init__(self, translator is not None, rtyper) @@ -36,6 +38,8 @@ def _setup_str(self): self.str_descr = get_array_descr(self, rstr.STR) self.unicode_descr = get_array_descr(self, rstr.UNICODE) + self.str_hash_descr = get_field_descr(self, rstr.STR, 'hash') + self.unicode_hash_descr = get_field_descr(self, rstr.UNICODE, 'hash') def generate_function(self, funcname, func, ARGS, RESULT=llmemory.GCREF): """Generates a variant of malloc with the given name and the given @@ -118,7 +122,8 @@ descrs = JitFrameDescrs() descrs.arraydescr = cpu.arraydescrof(jitframe.JITFRAME) for name in ['jf_descr', 
'jf_guard_exc', 'jf_force_descr', - 'jf_frame_info', 'jf_gcmap', 'jf_extra_stack_depth']: + 'jf_frame_info', 'jf_gcmap', 'jf_extra_stack_depth', + 'jf_savedata', 'jf_forward']: setattr(descrs, name, cpu.fielddescrof(jitframe.JITFRAME, name)) descrs.jfi_frame_size = cpu.fielddescrof(jitframe.JITFRAMEINFO, 'jfi_frame_size') @@ -377,6 +382,7 @@ from rpython.memory.gcheader import GCHeaderBuilder self.GCClass = self.layoutbuilder.GCClass self.moving_gc = self.GCClass.moving_gc + self.malloc_zero_filled = self.GCClass.malloc_zero_filled self.HDRPTR = lltype.Ptr(self.GCClass.HDR) self.gcheaderbuilder = GCHeaderBuilder(self.HDRPTR.TO) self.max_size_of_young_obj = self.GCClass.JIT_max_size_of_young_obj() @@ -410,9 +416,9 @@ if self.DEBUG: self._random_usage_of_xmm_registers() type_id = rffi.cast(llgroup.HALFWORD, 0) # missing here - return llop1.do_malloc_fixedsize_clear(llmemory.GCREF, - type_id, size, - False, False, False) + return llop1.do_malloc_fixedsize(llmemory.GCREF, + type_id, size, + False, False, False) self.generate_function('malloc_nursery', malloc_nursery_slowpath, [lltype.Signed]) @@ -455,7 +461,7 @@ def malloc_str(length): type_id = llop.extract_ushort(llgroup.HALFWORD, str_type_id) - return llop1.do_malloc_varsize_clear( + return llop1.do_malloc_varsize( llmemory.GCREF, type_id, length, str_basesize, str_itemsize, str_ofs_length) @@ -464,7 +470,7 @@ def malloc_unicode(length): type_id = llop.extract_ushort(llgroup.HALFWORD, unicode_type_id) - return llop1.do_malloc_varsize_clear( + return llop1.do_malloc_varsize( llmemory.GCREF, type_id, length, unicode_basesize, unicode_itemsize, unicode_ofs_length) diff --git a/rpython/jit/backend/llsupport/jitframe.py b/rpython/jit/backend/llsupport/jitframe.py --- a/rpython/jit/backend/llsupport/jitframe.py +++ b/rpython/jit/backend/llsupport/jitframe.py @@ -45,8 +45,9 @@ # detailed explanation how it is on your architecture def jitframe_allocate(frame_info): - frame = lltype.malloc(JITFRAME, frame_info.jfi_frame_depth, zero=True) + frame = lltype.malloc(JITFRAME, frame_info.jfi_frame_depth) frame.jf_frame_info = frame_info + frame.jf_extra_stack_depth = 0 return frame def jitframe_resolve(frame): diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -14,6 +14,7 @@ get_call_descr, get_interiorfield_descr, FieldDescr, ArrayDescr, CallDescr, InteriorFieldDescr, FLAG_POINTER, FLAG_FLOAT) +from rpython.jit.backend.llsupport.memcpy import memset_fn from rpython.jit.backend.llsupport.asmmemmgr import AsmMemoryManager from rpython.rlib.unroll import unrolling_iterable @@ -607,6 +608,7 @@ def bh_new_array(self, length, arraydescr): return self.gc_ll_descr.gc_malloc_array(length, arraydescr) + bh_new_array_clear = bh_new_array def bh_newstr(self, length): return self.gc_ll_descr.gc_malloc_str(length) diff --git a/rpython/jit/backend/llsupport/memcpy.py b/rpython/jit/backend/llsupport/memcpy.py --- a/rpython/jit/backend/llsupport/memcpy.py +++ b/rpython/jit/backend/llsupport/memcpy.py @@ -3,3 +3,6 @@ memcpy_fn = rffi.llexternal('memcpy', [llmemory.Address, llmemory.Address, rffi.SIZE_T], lltype.Void, sandboxsafe=True, _nowrapper=True) +memset_fn = rffi.llexternal('memset', [llmemory.Address, rffi.INT, + rffi.SIZE_T], lltype.Void, + sandboxsafe=True, _nowrapper=True) diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ 
b/rpython/jit/backend/llsupport/rewrite.py @@ -1,12 +1,13 @@ from rpython.rlib import rgc from rpython.rlib.rarithmetic import ovfcheck -from rpython.rtyper.lltypesystem import llmemory +from rpython.rtyper.lltypesystem import llmemory, lltype from rpython.jit.metainterp import history -from rpython.jit.metainterp.history import ConstInt, BoxPtr, ConstPtr +from rpython.jit.metainterp.history import ConstInt, BoxPtr, ConstPtr, BoxInt from rpython.jit.metainterp.resoperation import ResOperation, rop from rpython.jit.codewriter import heaptracker from rpython.jit.backend.llsupport.symbolic import WORD -from rpython.jit.backend.llsupport.descr import SizeDescr, ArrayDescr +from rpython.jit.backend.llsupport.descr import SizeDescr, ArrayDescr,\ + FLAG_POINTER from rpython.jit.metainterp.history import JitCellToken FLAG_ARRAY = 0 @@ -38,6 +39,7 @@ _op_malloc_nursery = None _v_last_malloced_nursery = None c_zero = ConstInt(0) + c_null = ConstPtr(lltype.nullptr(llmemory.GCREF.TO)) def __init__(self, gc_ll_descr, cpu): self.gc_ll_descr = gc_ll_descr @@ -45,6 +47,9 @@ self.newops = [] self.known_lengths = {} self.write_barrier_applied = {} + self.delayed_zero_setfields = {} + self.last_zero_arrays = [] + self.setarrayitems_occurred = {} # {box: {set-of-indexes}} def rewrite(self, operations): # we can only remember one malloc since the next malloc can possibly @@ -60,6 +65,8 @@ if op.is_malloc(): self.handle_malloc_operation(op) continue + if op.is_guard(): + self.emit_pending_zeros() elif op.can_malloc(): self.emitting_an_operation_that_can_collect() elif op.getopnum() == rop.LABEL: @@ -68,18 +75,30 @@ # ---------- write barriers ---------- if self.gc_ll_descr.write_barrier_descr is not None: if op.getopnum() == rop.SETFIELD_GC: + self.consider_setfield_gc(op) self.handle_write_barrier_setfield(op) continue if op.getopnum() == rop.SETINTERIORFIELD_GC: self.handle_write_barrier_setinteriorfield(op) continue if op.getopnum() == rop.SETARRAYITEM_GC: + self.consider_setarrayitem_gc(op) self.handle_write_barrier_setarrayitem(op) continue + else: + # this is dead code, but in case we have a gc that does + # not have a write barrier and does not zero memory, we would + # need to clal it + if op.getopnum() == rop.SETFIELD_GC: + self.consider_setfield_gc(op) + elif op.getopnum() == rop.SETARRAYITEM_GC: + self.consider_setarrayitem_gc(op) # ---------- call assembler ----------- if op.getopnum() == rop.CALL_ASSEMBLER: self.handle_call_assembler(op) continue + if op.getopnum() == rop.JUMP or op.getopnum() == rop.FINISH: + self.emit_pending_zeros() # self.newops.append(op) return self.newops @@ -99,7 +118,7 @@ [op.result, ConstInt(classint)], None, descr=self.gc_ll_descr.fielddescr_vtable) self.newops.append(op) - elif opnum == rop.NEW_ARRAY: + elif opnum == rop.NEW_ARRAY or opnum == rop.NEW_ARRAY_CLEAR: descr = op.getdescr() assert isinstance(descr, ArrayDescr) self.handle_new_array(descr, op) @@ -112,6 +131,54 @@ else: raise NotImplementedError(op.getopname()) + def clear_gc_fields(self, descr, result): + if self.gc_ll_descr.malloc_zero_filled: + return + try: + d = self.delayed_zero_setfields[result] + except KeyError: + d = {} + self.delayed_zero_setfields[result] = d + for fielddescr in descr.gc_fielddescrs: + ofs = self.cpu.unpack_fielddescr(fielddescr) + d[ofs] = None + + def consider_setfield_gc(self, op): + offset = self.cpu.unpack_fielddescr(op.getdescr()) + try: + del self.delayed_zero_setfields[op.getarg(0)][offset] + except KeyError: + pass + + def consider_setarrayitem_gc(self, op): + array_box = 
op.getarg(0) + index_box = op.getarg(1) + if isinstance(array_box, BoxPtr) and isinstance(index_box, ConstInt): + try: + intset = self.setarrayitems_occurred[array_box] + except KeyError: + intset = self.setarrayitems_occurred[array_box] = {} + intset[index_box.getint()] = None + + def clear_varsize_gc_fields(self, kind, descr, result, v_length, opnum): + if self.gc_ll_descr.malloc_zero_filled: + return + if kind == FLAG_ARRAY: + if descr.is_array_of_structs() or descr.is_array_of_pointers(): + assert opnum == rop.NEW_ARRAY_CLEAR + if opnum == rop.NEW_ARRAY_CLEAR: + self.handle_clear_array_contents(descr, result, v_length) + return + if kind == FLAG_STR: + hash_descr = self.gc_ll_descr.str_hash_descr + elif kind == FLAG_UNICODE: + hash_descr = self.gc_ll_descr.unicode_hash_descr + else: + return + op = ResOperation(rop.SETFIELD_GC, [result, self.c_zero], None, + descr=hash_descr) + self.newops.append(op) + def handle_new_fixedsize(self, descr, op): assert isinstance(descr, SizeDescr) size = descr.size @@ -119,6 +186,7 @@ self.gen_initialize_tid(op.result, descr.tid) else: self.gen_malloc_fixedsize(size, descr.tid, op.result) + self.clear_gc_fields(descr, op.result) def handle_new_array(self, arraydescr, op, kind=FLAG_ARRAY): v_length = op.getarg(0) @@ -140,6 +208,8 @@ # might end up being allocated by malloc_external or some # stuff that initializes GC header fields differently self.gen_initialize_len(op.result, v_length, arraydescr.lendescr) + self.clear_varsize_gc_fields(kind, op.getdescr(), op.result, + v_length, op.getopnum()) return if (total_size >= 0 and self.gen_malloc_nursery(total_size, op.result)): @@ -149,7 +219,7 @@ self.gen_boehm_malloc_array(arraydescr, v_length, op.result) else: opnum = op.getopnum() - if opnum == rop.NEW_ARRAY: + if opnum == rop.NEW_ARRAY or opnum == rop.NEW_ARRAY_CLEAR: self.gen_malloc_array(arraydescr, v_length, op.result) elif opnum == rop.NEWSTR: self.gen_malloc_str(v_length, op.result) @@ -157,6 +227,21 @@ self.gen_malloc_unicode(v_length, op.result) else: raise NotImplementedError(op.getopname()) + self.clear_varsize_gc_fields(kind, op.getdescr(), op.result, v_length, + op.getopnum()) + + def handle_clear_array_contents(self, arraydescr, v_arr, v_length): + assert v_length is not None + if isinstance(v_length, ConstInt) and v_length.getint() == 0: + return + # the ZERO_ARRAY operation will be optimized according to what + # SETARRAYITEM_GC we see before the next allocation operation. + # See emit_pending_zeros(). 
+ o = ResOperation(rop.ZERO_ARRAY, [v_arr, self.c_zero, v_length], None, + descr=arraydescr) + self.newops.append(o) + if isinstance(v_length, ConstInt): + self.last_zero_arrays.append(o) def gen_malloc_frame(self, frame_info, frame, size_box): descrs = self.gc_ll_descr.getframedescrs(self.cpu) @@ -177,10 +262,25 @@ self.gen_malloc_nursery_varsize_frame(size_box, frame) self.gen_initialize_tid(frame, descrs.arraydescr.tid) length_box = history.BoxInt() - op1 = ResOperation(rop.GETFIELD_GC, [history.ConstInt(frame_info)], - length_box, - descr=descrs.jfi_frame_depth) - self.newops.append(op1) + # we need to explicitely zero all the gc fields, because + # of the unusal malloc pattern + extra_ops = [ + ResOperation(rop.GETFIELD_GC, [history.ConstInt(frame_info)], + length_box, descr=descrs.jfi_frame_depth), + ResOperation(rop.SETFIELD_GC, [frame, self.c_zero], + None, descr=descrs.jf_extra_stack_depth), + ResOperation(rop.SETFIELD_GC, [frame, self.c_null], + None, descr=descrs.jf_savedata), + ResOperation(rop.SETFIELD_GC, [frame, self.c_null], + None, descr=descrs.jf_force_descr), + ResOperation(rop.SETFIELD_GC, [frame, self.c_null], + None, descr=descrs.jf_descr), + ResOperation(rop.SETFIELD_GC, [frame, self.c_null], + None, descr=descrs.jf_guard_exc), + ResOperation(rop.SETFIELD_GC, [frame, self.c_null], + None, descr=descrs.jf_forward), + ] + self.newops += extra_ops self.gen_initialize_len(frame, length_box, descrs.arraydescr.lendescr) @@ -225,8 +325,42 @@ # forgets the previous MALLOC_NURSERY, if any; and empty the # set 'write_barrier_applied', so that future SETFIELDs will generate # a write barrier as usual. + # it also writes down all the pending zero ptr fields self._op_malloc_nursery = None self.write_barrier_applied.clear() + self.emit_pending_zeros() + + def emit_pending_zeros(self): + # First, try to rewrite the existing ZERO_ARRAY operations from + # the 'last_zero_arrays' list. Note that these operation objects + # are also already in 'newops', which is the point. + for op in self.last_zero_arrays: + assert op.getopnum() == rop.ZERO_ARRAY + box = op.getarg(0) + try: + intset = self.setarrayitems_occurred[box] + except KeyError: + continue + assert op.getarg(1).getint() == 0 # always 'start=0' initially + start = 0 + while start in intset: + start += 1 + op.setarg(1, ConstInt(start)) + stop = op.getarg(2).getint() + assert start <= stop + while stop > start and (stop - 1) in intset: + stop -= 1 + op.setarg(2, ConstInt(stop - start)) + # ^^ may be ConstInt(0); then the operation becomes a no-op + del self.last_zero_arrays[:] + self.setarrayitems_occurred.clear() + # + # Then write the ZERO_PTR_FIELDs that are still pending + for v, d in self.delayed_zero_setfields.iteritems(): + for ofs in d.iterkeys(): + op = ResOperation(rop.ZERO_PTR_FIELD, [v, ConstInt(ofs)], None) + self.newops.append(op) + self.delayed_zero_setfields.clear() def _gen_call_malloc_gc(self, args, v_result, descr): """Generate a CALL_MALLOC_GC with the given args.""" @@ -338,7 +472,8 @@ def gen_malloc_nursery(self, size, v_result): """Try to generate or update a CALL_MALLOC_NURSERY. - If that fails, generate a plain CALL_MALLOC_GC instead. + If that succeeds, return True; you still need to write the tid. + If that fails, return False. 
""" size = self.round_up_for_allocation(size) if not self.gc_ll_descr.can_use_nursery_malloc(size): diff --git a/rpython/jit/backend/llsupport/test/test_descr.py b/rpython/jit/backend/llsupport/test/test_descr.py --- a/rpython/jit/backend/llsupport/test/test_descr.py +++ b/rpython/jit/backend/llsupport/test/test_descr.py @@ -19,6 +19,8 @@ assert descr_t.size == symbolic.get_size(T, False) assert descr_s.count_fields_if_immutable() == -1 assert descr_t.count_fields_if_immutable() == -1 + assert descr_t.gc_fielddescrs == [] + assert len(descr_s.gc_fielddescrs) == 1 assert descr_s == get_size_descr(c0, S) assert descr_s != get_size_descr(c1, S) # @@ -26,6 +28,11 @@ assert isinstance(descr_s.size, Symbolic) assert descr_s.count_fields_if_immutable() == -1 + PARENT = lltype.Struct('P', ('x', lltype.Ptr(T))) + STRUCT = lltype.GcStruct('S', ('parent', PARENT), ('y', lltype.Ptr(T))) + descr_struct = get_size_descr(c0, STRUCT) + assert len(descr_struct.gc_fielddescrs) == 2 + def test_get_size_descr_immut(): S = lltype.GcStruct('S', hints={'immutable': True}) T = lltype.GcStruct('T', ('parent', S), diff --git a/rpython/jit/backend/llsupport/test/test_gc.py b/rpython/jit/backend/llsupport/test/test_gc.py --- a/rpython/jit/backend/llsupport/test/test_gc.py +++ b/rpython/jit/backend/llsupport/test/test_gc.py @@ -59,7 +59,7 @@ x += self.gcheaderbuilder.size_gc_header return x, tid - def do_malloc_fixedsize_clear(self, RESTYPE, type_id, size, + def do_malloc_fixedsize(self, RESTYPE, type_id, size, has_finalizer, has_light_finalizer, contains_weakptr): assert not contains_weakptr @@ -70,7 +70,9 @@ self.record.append(("fixedsize", repr(size), tid, p)) return p - def do_malloc_varsize_clear(self, RESTYPE, type_id, length, size, + do_malloc_fixedsize_clear = do_malloc_fixedsize + + def do_malloc_varsize(self, RESTYPE, type_id, length, size, itemsize, offset_to_length): p, tid = self._malloc(type_id, size + itemsize * length) (p + offset_to_length).signed[0] = length @@ -80,6 +82,8 @@ repr(offset_to_length), p)) return p + do_malloc_varsize_clear = do_malloc_varsize + def _write_barrier_failing_case(self, adr_struct): self.record.append(('barrier', adr_struct)) diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -69,6 +69,8 @@ unicodedescr = self.gc_ll_descr.unicode_descr strlendescr = strdescr.lendescr unicodelendescr = unicodedescr.lendescr + strhashdescr = self.gc_ll_descr.str_hash_descr + unicodehashdescr = self.gc_ll_descr.unicode_hash_descr casmdescr = JitCellToken() clt = FakeLoopToken() @@ -82,10 +84,15 @@ jfi_frame_depth = framedescrs.jfi_frame_depth jfi_frame_size = framedescrs.jfi_frame_size jf_frame_info = framedescrs.jf_frame_info + jf_savedata = framedescrs.jf_savedata + jf_force_descr = framedescrs.jf_force_descr + jf_descr = framedescrs.jf_descr + jf_guard_exc = framedescrs.jf_guard_exc + jf_forward = framedescrs.jf_forward + jf_extra_stack_depth = framedescrs.jf_extra_stack_depth signedframedescr = self.cpu.signedframedescr floatframedescr = self.cpu.floatframedescr casmdescr.compiled_loop_token = clt - tzdescr = None # noone cares # namespace.update(locals()) # @@ -123,6 +130,9 @@ def unpack_arraydescr_size(self, d): return 0, d.itemsize, 0 + def unpack_fielddescr(self, d): + return d.offset + def arraydescrof(self, ARRAY): try: return self._cache[ARRAY] @@ -144,7 +154,7 @@ def setup_method(self, meth): class 
FakeCPU(BaseFakeCPU): def sizeof(self, STRUCT): - return SizeDescrWithVTable(102) + return SizeDescrWithVTable(102, gc_fielddescrs=[]) self.cpu = FakeCPU() self.gc_ll_descr = GcLLDescr_boehm(None, None, None) @@ -277,10 +287,11 @@ really_not_translated=True) self.gc_ll_descr.write_barrier_descr.has_write_barrier_from_array = ( lambda cpu: True) + self.gc_ll_descr.malloc_zero_filled = False # class FakeCPU(BaseFakeCPU): def sizeof(self, STRUCT): - descr = SizeDescrWithVTable(104) + descr = SizeDescrWithVTable(104, gc_fielddescrs=[]) descr.tid = 9315 return descr self.cpu = FakeCPU() @@ -313,6 +324,7 @@ setfield_gc(p1, 5678, descr=tiddescr) p2 = int_add(p1, %(tdescr.size)d) setfield_gc(p2, 1234, descr=tiddescr) + zero_ptr_field(p1, %(tdescr.gc_fielddescrs[0].offset)s) jump() """) @@ -422,6 +434,7 @@ [i0] p0 = call_malloc_nursery_varsize(1, 1, i0, descr=strdescr) setfield_gc(p0, i0, descr=strlendescr) + setfield_gc(p0, 0, descr=strhashdescr) jump(i0) """) @@ -545,15 +558,19 @@ unicodedescr.basesize + 10 * unicodedescr.itemsize)d) setfield_gc(p0, %(strdescr.tid)d, descr=tiddescr) setfield_gc(p0, 14, descr=strlendescr) + setfield_gc(p0, 0, descr=strhashdescr) p1 = int_add(p0, %(strdescr.basesize + 16 * strdescr.itemsize)d) setfield_gc(p1, %(unicodedescr.tid)d, descr=tiddescr) setfield_gc(p1, 10, descr=unicodelendescr) + setfield_gc(p1, 0, descr=unicodehashdescr) p2 = call_malloc_nursery_varsize(2, %(unicodedescr.itemsize)d, i2,\ descr=unicodedescr) setfield_gc(p2, i2, descr=unicodelendescr) + setfield_gc(p2, 0, descr=unicodehashdescr) p3 = call_malloc_nursery_varsize(1, 1, i2, \ descr=strdescr) setfield_gc(p3, i2, descr=strlendescr) + setfield_gc(p3, 0, descr=strhashdescr) jump() """) @@ -587,7 +604,7 @@ self.gc_ll_descr.max_size_of_young_obj = 2000 self.check_rewrite(""" [i2, p3] - p1 = new_array(129, descr=cdescr) + p1 = new_array_clear(129, descr=cdescr) call(123456) setarrayitem_gc(p1, i2, p3, descr=cdescr) jump() @@ -597,6 +614,7 @@ %(cdescr.basesize + 129 * cdescr.itemsize)d) setfield_gc(p1, 8111, descr=tiddescr) setfield_gc(p1, 129, descr=clendescr) + zero_array(p1, 0, 129, descr=cdescr) call(123456) cond_call_gc_wb(p1, descr=wbdescr) setarrayitem_gc(p1, i2, p3, descr=cdescr) @@ -608,7 +626,7 @@ self.gc_ll_descr.max_size_of_young_obj = 2000 self.check_rewrite(""" [i2, p3] - p1 = new_array(130, descr=cdescr) + p1 = new_array_clear(130, descr=cdescr) call(123456) setarrayitem_gc(p1, i2, p3, descr=cdescr) jump() @@ -618,6 +636,7 @@ %(cdescr.basesize + 130 * cdescr.itemsize)d) setfield_gc(p1, 8111, descr=tiddescr) setfield_gc(p1, 130, descr=clendescr) + zero_array(p1, 0, 130, descr=cdescr) call(123456) cond_call_gc_wb_array(p1, i2, descr=wbdescr) setarrayitem_gc(p1, i2, p3, descr=cdescr) @@ -639,7 +658,7 @@ def test_label_makes_size_unknown(self): self.check_rewrite(""" [i2, p3] - p1 = new_array(5, descr=cdescr) + p1 = new_array_clear(5, descr=cdescr) label(p1, i2, p3) setarrayitem_gc(p1, i2, p3, descr=cdescr) jump() @@ -649,6 +668,7 @@ %(cdescr.basesize + 5 * cdescr.itemsize)d) setfield_gc(p1, 8111, descr=tiddescr) setfield_gc(p1, 5, descr=clendescr) + zero_array(p1, 0, 5, descr=cdescr) label(p1, i2, p3) cond_call_gc_wb_array(p1, i2, descr=wbdescr) setarrayitem_gc(p1, i2, p3, descr=cdescr) @@ -709,7 +729,7 @@ def test_initialization_store_array(self): self.check_rewrite(""" [p1, i2] - p0 = new_array(5, descr=cdescr) + p0 = new_array_clear(5, descr=cdescr) setarrayitem_gc(p0, i2, p1, descr=cdescr) jump() """, """ @@ -718,10 +738,168 @@ %(cdescr.basesize + 5 * cdescr.itemsize)d) 
setfield_gc(p0, 8111, descr=tiddescr) setfield_gc(p0, 5, descr=clendescr) + zero_array(p0, 0, 5, descr=cdescr) setarrayitem_gc(p0, i2, p1, descr=cdescr) jump() """) + def test_zero_array_reduced_left(self): + self.check_rewrite(""" + [p1, p2] + p0 = new_array_clear(5, descr=cdescr) + setarrayitem_gc(p0, 1, p1, descr=cdescr) + setarrayitem_gc(p0, 0, p2, descr=cdescr) + jump() + """, """ + [p1, p2] + p0 = call_malloc_nursery( \ + %(cdescr.basesize + 5 * cdescr.itemsize)d) + setfield_gc(p0, 8111, descr=tiddescr) + setfield_gc(p0, 5, descr=clendescr) + zero_array(p0, 2, 3, descr=cdescr) + setarrayitem_gc(p0, 1, p1, descr=cdescr) + setarrayitem_gc(p0, 0, p2, descr=cdescr) + jump() + """) + + def test_zero_array_reduced_right(self): + self.check_rewrite(""" + [p1, p2] + p0 = new_array_clear(5, descr=cdescr) + setarrayitem_gc(p0, 3, p1, descr=cdescr) + setarrayitem_gc(p0, 4, p2, descr=cdescr) + jump() + """, """ + [p1, p2] + p0 = call_malloc_nursery( \ + %(cdescr.basesize + 5 * cdescr.itemsize)d) + setfield_gc(p0, 8111, descr=tiddescr) + setfield_gc(p0, 5, descr=clendescr) + zero_array(p0, 0, 3, descr=cdescr) + setarrayitem_gc(p0, 3, p1, descr=cdescr) + setarrayitem_gc(p0, 4, p2, descr=cdescr) + jump() + """) + + def test_zero_array_not_reduced_at_all(self): + self.check_rewrite(""" + [p1, p2] + p0 = new_array_clear(5, descr=cdescr) + setarrayitem_gc(p0, 3, p1, descr=cdescr) + setarrayitem_gc(p0, 2, p2, descr=cdescr) + setarrayitem_gc(p0, 1, p2, descr=cdescr) + jump() + """, """ + [p1, p2] + p0 = call_malloc_nursery( \ + %(cdescr.basesize + 5 * cdescr.itemsize)d) + setfield_gc(p0, 8111, descr=tiddescr) + setfield_gc(p0, 5, descr=clendescr) + zero_array(p0, 0, 5, descr=cdescr) + setarrayitem_gc(p0, 3, p1, descr=cdescr) + setarrayitem_gc(p0, 2, p2, descr=cdescr) + setarrayitem_gc(p0, 1, p2, descr=cdescr) + jump() + """) + + def test_zero_array_reduced_completely(self): + self.check_rewrite(""" + [p1, p2] + p0 = new_array_clear(5, descr=cdescr) + setarrayitem_gc(p0, 3, p1, descr=cdescr) + setarrayitem_gc(p0, 4, p2, descr=cdescr) + setarrayitem_gc(p0, 0, p1, descr=cdescr) + setarrayitem_gc(p0, 2, p2, descr=cdescr) + setarrayitem_gc(p0, 1, p2, descr=cdescr) + jump() + """, """ + [p1, p2] + p0 = call_malloc_nursery( \ + %(cdescr.basesize + 5 * cdescr.itemsize)d) + setfield_gc(p0, 8111, descr=tiddescr) + setfield_gc(p0, 5, descr=clendescr) + zero_array(p0, 5, 0, descr=cdescr) + setarrayitem_gc(p0, 3, p1, descr=cdescr) + setarrayitem_gc(p0, 4, p2, descr=cdescr) + setarrayitem_gc(p0, 0, p1, descr=cdescr) + setarrayitem_gc(p0, 2, p2, descr=cdescr) + setarrayitem_gc(p0, 1, p2, descr=cdescr) + jump() + """) + + def test_zero_array_reduced_left_with_call(self): + self.check_rewrite(""" + [p1, p2] + p0 = new_array_clear(5, descr=cdescr) + setarrayitem_gc(p0, 0, p1, descr=cdescr) + call(321321) + setarrayitem_gc(p0, 1, p2, descr=cdescr) + jump() + """, """ + [p1, p2] + p0 = call_malloc_nursery( \ + %(cdescr.basesize + 5 * cdescr.itemsize)d) + setfield_gc(p0, 8111, descr=tiddescr) + setfield_gc(p0, 5, descr=clendescr) + zero_array(p0, 1, 4, descr=cdescr) + setarrayitem_gc(p0, 0, p1, descr=cdescr) + call(321321) + cond_call_gc_wb(p0, descr=wbdescr) + setarrayitem_gc(p0, 1, p2, descr=cdescr) + jump() + """) + + def test_zero_array_reduced_left_with_label(self): + self.check_rewrite(""" + [p1, p2] + p0 = new_array_clear(5, descr=cdescr) + setarrayitem_gc(p0, 0, p1, descr=cdescr) + label(p0, p2) + setarrayitem_gc(p0, 1, p2, descr=cdescr) + jump() + """, """ + [p1, p2] + p0 = call_malloc_nursery( \ + 
%(cdescr.basesize + 5 * cdescr.itemsize)d) + setfield_gc(p0, 8111, descr=tiddescr) + setfield_gc(p0, 5, descr=clendescr) + zero_array(p0, 1, 4, descr=cdescr) + setarrayitem_gc(p0, 0, p1, descr=cdescr) + label(p0, p2) + cond_call_gc_wb_array(p0, 1, descr=wbdescr) + setarrayitem_gc(p0, 1, p2, descr=cdescr) + jump() + """) + + def test_zero_array_varsize(self): + self.check_rewrite(""" + [p1, p2, i3] + p0 = new_array_clear(i3, descr=bdescr) + jump() + """, """ + [p1, p2, i3] + p0 = call_malloc_nursery_varsize(0, 1, i3, descr=bdescr) + setfield_gc(p0, i3, descr=blendescr) + zero_array(p0, 0, i3, descr=bdescr) + jump() + """) + + def test_zero_array_varsize_cannot_reduce(self): + self.check_rewrite(""" + [p1, p2, i3] + p0 = new_array_clear(i3, descr=bdescr) + setarrayitem_gc(p0, 0, p1, descr=bdescr) + jump() + """, """ + [p1, p2, i3] + p0 = call_malloc_nursery_varsize(0, 1, i3, descr=bdescr) + setfield_gc(p0, i3, descr=blendescr) + zero_array(p0, 0, i3, descr=bdescr) + cond_call_gc_wb_array(p0, 0, descr=wbdescr) + setarrayitem_gc(p0, 0, p1, descr=bdescr) + jump() + """) + def test_initialization_store_potentially_large_array(self): # the write barrier cannot be omitted, because we might get # an array with cards and the GC assumes that the write @@ -751,9 +929,11 @@ [i0] p0 = call_malloc_nursery(%(tdescr.size)d) setfield_gc(p0, 5678, descr=tiddescr) + zero_ptr_field(p0, %(tdescr.gc_fielddescrs[0].offset)s) p1 = call_malloc_nursery_varsize(1, 1, i0, \ descr=strdescr) setfield_gc(p1, i0, descr=strlendescr) + setfield_gc(p1, 0, descr=strhashdescr) cond_call_gc_wb(p0, descr=wbdescr) setfield_gc(p0, p1, descr=tzdescr) jump() @@ -770,6 +950,7 @@ [p1] p0 = call_malloc_nursery(%(tdescr.size)d) setfield_gc(p0, 5678, descr=tiddescr) + zero_ptr_field(p0, %(tdescr.gc_fielddescrs[0].offset)s) label(p0, p1) cond_call_gc_wb(p0, descr=wbdescr) setfield_gc(p0, p1, descr=tzdescr) @@ -800,6 +981,12 @@ p1 = call_malloc_nursery_varsize_frame(i1) setfield_gc(p1, 0, descr=tiddescr) i2 = getfield_gc(ConstClass(frame_info), descr=jfi_frame_depth) + setfield_gc(p1, 0, descr=jf_extra_stack_depth) + setfield_gc(p1, NULL, descr=jf_savedata) + setfield_gc(p1, NULL, descr=jf_force_descr) + setfield_gc(p1, NULL, descr=jf_descr) + setfield_gc(p1, NULL, descr=jf_guard_exc) + setfield_gc(p1, NULL, descr=jf_forward) setfield_gc(p1, i2, descr=framelendescr) setfield_gc(p1, ConstClass(frame_info), descr=jf_frame_info) setarrayitem_gc(p1, 0, i0, descr=signedframedescr) diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py @@ -223,7 +223,7 @@ ## return None, f, None def define_compile_framework_1(cls): - # a moving GC. Supports malloc_varsize_nonmovable. Simple test, works + # a moving GC. Simple test, works # without write_barriers and root stack enumeration. 
def f(n, x, *args): y = X() diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -2036,6 +2036,14 @@ 'ref', descr=arraydescr) assert r1.value != r2.value a = lltype.cast_opaque_ptr(lltype.Ptr(A), r1.value) + assert len(a) == 342 + + def test_new_array_clear(self): + A = lltype.GcArray(lltype.Signed) + arraydescr = self.cpu.arraydescrof(A) + r1 = self.execute_operation(rop.NEW_ARRAY_CLEAR, [BoxInt(342)], + 'ref', descr=arraydescr) + a = lltype.cast_opaque_ptr(lltype.Ptr(A), r1.value) assert a[0] == 0 assert len(a) == 342 @@ -4272,9 +4280,6 @@ fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 23 assert self.cpu.get_int_value(deadframe, 0) == 42 - # make sure that force reads the registers from a zeroed piece of - # memory - assert values[0] == 0 def test_compile_bridge_while_running(self): def func(): @@ -4442,3 +4447,99 @@ res = self.execute_operation(rop.CAST_FLOAT_TO_SINGLEFLOAT, [boxfloat(12.5)], 'int') assert res.getint() == struct.unpack("I", struct.pack("f", 12.5))[0] + + def test_zero_ptr_field(self): + from rpython.jit.backend.llsupport.llmodel import AbstractLLCPU + + if not isinstance(self.cpu, AbstractLLCPU): + py.test.skip("llgraph can't do zero_ptr_field") + T = lltype.GcStruct('T') + S = lltype.GcStruct('S', ('x', lltype.Ptr(T))) + tdescr = self.cpu.sizeof(T) + sdescr = self.cpu.sizeof(S) + fielddescr = self.cpu.fielddescrof(S, 'x') + loop = parse(""" + [] + p0 = new(descr=tdescr) + p1 = new(descr=sdescr) + setfield_gc(p1, p0, descr=fielddescr) + zero_ptr_field(p1, %d) + finish(p1) + """ % fielddescr.offset, namespace=locals()) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + deadframe = self.cpu.execute_token(looptoken) + ref = self.cpu.get_ref_value(deadframe, 0) + s = lltype.cast_opaque_ptr(lltype.Ptr(S), ref) + assert not s.x + + def test_zero_ptr_field_2(self): + from rpython.jit.backend.llsupport.llmodel import AbstractLLCPU + + if not isinstance(self.cpu, AbstractLLCPU): + py.test.skip("llgraph does not do zero_ptr_field") + + from rpython.jit.backend.llsupport import symbolic + S = lltype.GcStruct('S', ('x', lltype.Signed), + ('p', llmemory.GCREF), + ('y', lltype.Signed)) + s = lltype.malloc(S) + s.x = -1296321 + s.y = -4398176 + s_ref = lltype.cast_opaque_ptr(llmemory.GCREF, s) + s.p = s_ref + ofs_p, _ = symbolic.get_field_token(S, 'p', False) + # + self.execute_operation(rop.ZERO_PTR_FIELD, [ + BoxPtr(s_ref), ConstInt(ofs_p)], # OK for now to assume that the + 'void') # 2nd argument is a constant + # + assert s.x == -1296321 + assert s.p == lltype.nullptr(llmemory.GCREF.TO) + assert s.y == -4398176 + + def test_zero_array(self): + from rpython.jit.backend.llsupport.llmodel import AbstractLLCPU + + if not isinstance(self.cpu, AbstractLLCPU): + py.test.skip("llgraph does not do zero_array") + + PAIR = lltype.Struct('PAIR', ('a', lltype.Signed), ('b', lltype.Signed)) + for OF in [lltype.Signed, rffi.INT, rffi.SHORT, rffi.UCHAR, PAIR]: + A = lltype.GcArray(OF) + arraydescr = self.cpu.arraydescrof(A) + a = lltype.malloc(A, 100) + addr = llmemory.cast_ptr_to_adr(a) + a_int = heaptracker.adr2int(addr) + a_ref = lltype.cast_opaque_ptr(llmemory.GCREF, a) + for (start, length) in [(0, 100), (49, 49), (1, 98), + (15, 9), (10, 10), (47, 0), + (0, 4)]: + for cls1 in [ConstInt, BoxInt]: + for cls2 in [ConstInt, BoxInt]: + print 'a_int:', a_int + print 'of:', OF + print 
'start:', cls1.__name__, start + print 'length:', cls2.__name__, length + for i in range(100): + if OF == PAIR: + a[i].a = a[i].b = -123456789 + else: + a[i] = rffi.cast(OF, -123456789) + startbox = cls1(start) + lengthbox = cls2(length) + if cls1 == cls2 and start == length: + lengthbox = startbox # same box! + self.execute_operation(rop.ZERO_ARRAY, + [BoxPtr(a_ref), + startbox, + lengthbox], + 'void', descr=arraydescr) + assert len(a) == 100 + for i in range(100): + val = (0 if start <= i < start + length + else -123456789) + if OF == PAIR: + assert a[i].a == a[i].b == val + else: + assert a[i] == rffi.cast(OF, val) diff --git a/rpython/jit/backend/test/test_ll_random.py b/rpython/jit/backend/test/test_ll_random.py --- a/rpython/jit/backend/test/test_ll_random.py +++ b/rpython/jit/backend/test/test_ll_random.py @@ -95,7 +95,10 @@ fields.append(('parent', rclass.OBJECT)) kwds['hints'] = {'vtable': with_vtable._obj} for i in range(r.randrange(1, 5)): - TYPE = self.get_random_primitive_type(r) + if r.random() < 0.1: + TYPE = llmemory.GCREF + else: + TYPE = self.get_random_primitive_type(r) fields.append(('f%d' % i, TYPE)) S = type('S%d' % self.counter, *fields, **kwds) self.counter += 1 @@ -246,13 +249,43 @@ op = ResOperation(self.opnum, [v, c_vtable2], None) return op, False +class ZeroPtrFieldOperation(test_random.AbstractOperation): + def field_descr(self, builder, r): + if getattr(builder.cpu, 'is_llgraph', False): + raise test_random.CannotProduceOperation + v, S = builder.get_structptr_var(r, ) + names = S._names + if names[0] == 'parent': + names = names[1:] + choice = [] + for name in names: + FIELD = getattr(S, name) + if isinstance(FIELD, lltype.Ptr) and FIELD._needsgc(): + choice.append(name) + if not choice: + raise test_random.CannotProduceOperation + name = r.choice(choice) + descr = builder.cpu.fielddescrof(S, name) + return v, descr.offset + + def produce_into(self, builder, r): + v, offset = self.field_descr(builder, r) + builder.do(self.opnum, [v, ConstInt(offset)], None) + class GetFieldOperation(test_random.AbstractOperation): def field_descr(self, builder, r): v, S = builder.get_structptr_var(r, ) names = S._names if names[0] == 'parent': names = names[1:] - name = r.choice(names) + choice = [] + for name in names: + FIELD = getattr(S, name) + if not isinstance(FIELD, lltype.Ptr): + choice.append(name) + if not choice: + raise test_random.CannotProduceOperation + name = r.choice(choice) descr = builder.cpu.fielddescrof(S, name) descr._random_info = 'cpu.fielddescrof(..., %r)' % (name,) descr._random_type = S @@ -274,7 +307,14 @@ array_of_structs=True) array = v.getref(lltype.Ptr(A)) v_index = builder.get_index(len(array), r) - name = r.choice(A.OF._names) + choice = [] + for name in A.OF._names: + FIELD = getattr(A.OF, name) + if not isinstance(FIELD, lltype.Ptr): + choice.append(name) + if not choice: + raise test_random.CannotProduceOperation + name = r.choice(choice) descr = builder.cpu.interiorfielddescrof(A, name) descr._random_info = 'cpu.interiorfielddescrof(..., %r)' % (name,) descr._random_type = A @@ -682,6 +722,7 @@ OPERATIONS.append(GetFieldOperation(rop.GETFIELD_GC)) OPERATIONS.append(GetInteriorFieldOperation(rop.GETINTERIORFIELD_GC)) OPERATIONS.append(SetFieldOperation(rop.SETFIELD_GC)) + OPERATIONS.append(ZeroPtrFieldOperation(rop.ZERO_PTR_FIELD)) OPERATIONS.append(SetInteriorFieldOperation(rop.SETINTERIORFIELD_GC)) OPERATIONS.append(NewOperation(rop.NEW)) OPERATIONS.append(NewOperation(rop.NEW_WITH_VTABLE)) @@ -689,7 +730,7 @@ 
OPERATIONS.append(GetArrayItemOperation(rop.GETARRAYITEM_GC)) OPERATIONS.append(GetArrayItemOperation(rop.GETARRAYITEM_GC)) OPERATIONS.append(SetArrayItemOperation(rop.SETARRAYITEM_GC)) - OPERATIONS.append(NewArrayOperation(rop.NEW_ARRAY)) + OPERATIONS.append(NewArrayOperation(rop.NEW_ARRAY_CLEAR)) OPERATIONS.append(ArrayLenOperation(rop.ARRAYLEN_GC)) OPERATIONS.append(NewStrOperation(rop.NEWSTR)) OPERATIONS.append(NewUnicodeOperation(rop.NEWUNICODE)) diff --git a/rpython/jit/backend/test/test_random.py b/rpython/jit/backend/test/test_random.py --- a/rpython/jit/backend/test/test_random.py +++ b/rpython/jit/backend/test/test_random.py @@ -52,10 +52,13 @@ def do(self, opnum, argboxes, descr=None): self.fakemetainterp._got_exc = None - v_result = execute_nonspec(self.cpu, self.fakemetainterp, - opnum, argboxes, descr) - if isinstance(v_result, Const): - v_result = v_result.clonebox() + if opnum == rop.ZERO_PTR_FIELD: + v_result = None + else: + v_result = execute_nonspec(self.cpu, self.fakemetainterp, + opnum, argboxes, descr) + if isinstance(v_result, Const): + v_result = v_result.clonebox() self.loop.operations.append(ResOperation(opnum, argboxes, v_result, descr)) return v_result diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1486,6 +1486,8 @@ dest_addr = AddressLoc(base_loc, ofs_loc) self.save_into_mem(dest_addr, value_loc, size_loc) + genop_discard_zero_ptr_field = genop_discard_setfield_gc + def genop_discard_setinteriorfield_gc(self, op, arglocs): (base_loc, ofs_loc, itemsize_loc, fieldsize_loc, index_loc, temp_loc, value_loc) = arglocs @@ -2361,6 +2363,43 @@ elif IS_X86_64: mc.MOVSX32_rj(loc.value, addr) # memory read, sign-extend + def genop_discard_zero_array(self, op, arglocs): + (base_loc, startindex_loc, bytes_loc, + itemsize_loc, baseofs_loc, null_loc) = arglocs + assert isinstance(bytes_loc, ImmedLoc) + assert isinstance(itemsize_loc, ImmedLoc) + assert isinstance(baseofs_loc, ImmedLoc) + assert isinstance(null_loc, RegLoc) and null_loc.is_xmm + baseofs = baseofs_loc.value + nbytes = bytes_loc.value + if valid_addressing_size(itemsize_loc.value): + scale = get_scale(itemsize_loc.value) + else: + assert isinstance(startindex_loc, ImmedLoc) + baseofs += startindex_loc.value * itemsize_loc.value + startindex_loc = imm0 + scale = 0 + null_reg_cleared = False + i = 0 + while i < nbytes: + addr = addr_add(base_loc, startindex_loc, baseofs + i, scale) + current = nbytes - i + if current >= 16: + current = 16 + if not null_reg_cleared: + self.mc.XORPS_xx(null_loc.value, null_loc.value) + null_reg_cleared = True + self.mc.MOVUPS(addr, null_loc) + else: + if current >= WORD: + current = WORD + elif current >= 4: + current = 4 + elif current >= 2: + current = 2 + self.save_into_mem(addr, imm0, imm(current)) + i += current + genop_discard_list = [Assembler386.not_implemented_op_discard] * rop._LAST genop_list = [Assembler386.not_implemented_op] * rop._LAST diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -8,7 +8,8 @@ unpack_arraydescr, unpack_fielddescr, unpack_interiorfielddescr) from rpython.jit.backend.llsupport.gcmap import allocate_gcmap from rpython.jit.backend.llsupport.regalloc import (FrameManager, BaseRegalloc, - RegisterManager, TempBox, compute_vars_longevity, is_comparison_or_ovf_op) + RegisterManager, TempBox, 
compute_vars_longevity, is_comparison_or_ovf_op, + valid_addressing_size) from rpython.jit.backend.x86 import rx86 from rpython.jit.backend.x86.arch import (WORD, JITFRAME_FIXED_SIZE, IS_X86_32, IS_X86_64) @@ -958,6 +959,13 @@ need_lower_byte=need_lower_byte) self.perform_discard(op, [base_loc, ofs_loc, size_loc, value_loc]) + def consider_zero_ptr_field(self, op): + ofs_loc = imm(op.getarg(1).getint()) + size_loc = imm(WORD) + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), []) + value_loc = imm(0) + self.perform_discard(op, [base_loc, ofs_loc, size_loc, value_loc]) + consider_setfield_raw = consider_setfield_gc def consider_setinteriorfield_gc(self, op): @@ -1376,6 +1384,72 @@ def consider_keepalive(self, op): pass + def consider_zero_array(self, op): + itemsize, baseofs, _ = unpack_arraydescr(op.getdescr()) + length_box = op.getarg(2) + if isinstance(length_box, ConstInt): + constbytes = length_box.getint() * itemsize + if constbytes == 0: + return # nothing to do + else: + constbytes = -1 + args = op.getarglist() + base_loc = self.rm.make_sure_var_in_reg(args[0], args) + startindex_loc = self.rm.make_sure_var_in_reg(args[1], args) + if 0 <= constbytes <= 16 * 8 and ( + valid_addressing_size(itemsize) or +- isinstance(startindex_loc, ImmedLoc)): + if IS_X86_64: + null_loc = X86_64_XMM_SCRATCH_REG + else: + null_box = TempBox() + null_loc = self.xrm.force_allocate_reg(null_box) + self.xrm.possibly_free_var(null_box) + self.perform_discard(op, [base_loc, startindex_loc, + imm(constbytes), imm(itemsize), + imm(baseofs), null_loc]) + else: + # base_loc and startindex_loc are in two regs here (or they are + # immediates). Compute the dstaddr_loc, which is the raw + # address that we will pass as first argument to memset(). + # It can be in the same register as either one, but not in + # args[2], because we're still needing the latter. 
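In plain Python, the fallback path that the next few lines set up boils down to computing one raw destination address and handing it to memset. A minimal sketch with illustrative names only, since the real code works on register locations and ends in a call to the C-level memset:

    def zero_array_fallback(memory, base, baseofs, startindex, length, itemsize):
        dstaddr = base + baseofs + startindex * itemsize   # first LEA
        nbytes = length * itemsize                         # second LEA, if itemsize > 1
        for i in range(nbytes):                            # memset(dstaddr, 0, nbytes)
            memory[dstaddr + i] = 0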
+ dstaddr_box = TempBox() + dstaddr_loc = self.rm.force_allocate_reg(dstaddr_box, [args[2]]) + itemsize_loc = imm(itemsize) + dst_addr = self.assembler._get_interiorfield_addr( + dstaddr_loc, startindex_loc, itemsize_loc, + base_loc, imm(baseofs)) + self.assembler.mc.LEA(dstaddr_loc, dst_addr) + # + if constbytes >= 0: + length_loc = imm(constbytes) + else: + # load length_loc in a register different than dstaddr_loc + length_loc = self.rm.make_sure_var_in_reg(length_box, + [dstaddr_box]) + if itemsize > 1: + # we need a register that is different from dstaddr_loc, + # but which can be identical to length_loc (as usual, + # only if the length_box is not used by future operations) + bytes_box = TempBox() + bytes_loc = self.rm.force_allocate_reg(bytes_box, + [dstaddr_box]) + b_adr = self.assembler._get_interiorfield_addr( + bytes_loc, length_loc, itemsize_loc, imm0, imm0) + self.assembler.mc.LEA(bytes_loc, b_adr) + length_box = bytes_box + length_loc = bytes_loc + # + # call memset() + self.rm.before_call() + self.xrm.before_call() + self.assembler.simple_call_no_collect( + imm(self.assembler.memset_addr), + [dstaddr_loc, imm0, length_loc]) + self.rm.possibly_free_var(length_box) + self.rm.possibly_free_var(dstaddr_box) + def not_implemented_op(self, op): not_implemented("not implemented operation: %s" % op.getopname()) diff --git a/rpython/jit/backend/x86/regloc.py b/rpython/jit/backend/x86/regloc.py --- a/rpython/jit/backend/x86/regloc.py +++ b/rpython/jit/backend/x86/regloc.py @@ -664,6 +664,7 @@ MOVDQ = _binaryop('MOVDQ') MOVD32 = _binaryop('MOVD32') + MOVUPS = _binaryop('MOVUPS') CALL = _relative_unaryop('CALL') JMP = _relative_unaryop('JMP') diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py --- a/rpython/jit/backend/x86/rx86.py +++ b/rpython/jit/backend/x86/rx86.py @@ -634,6 +634,9 @@ MOVD32_xs = xmminsn('\x66', rex_nw, '\x0F\x6E', register(1, 8), stack_sp(2)) PSRAD_xi = xmminsn('\x66', rex_nw, '\x0F\x72', register(1), '\xE0', immediate(2, 'b')) + MOVUPS_mx = xmminsn(rex_nw, '\x0F\x11', register(2, 8), mem_reg_plus_const(1)) + MOVUPS_jx = xmminsn(rex_nw, '\x0F\x11', register(2, 8), abs_(1)) + MOVUPS_ax = xmminsn(rex_nw, '\x0F\x11', register(2, 8), mem_reg_plus_scaled_reg_plus_const(1)) # ------------------------------------------------------------ @@ -764,6 +767,7 @@ define_modrm_modes('DIVSD_x*', ['\xF2', rex_nw, '\x0F\x5E', register(1, 8)], regtype='XMM') define_modrm_modes('UCOMISD_x*', ['\x66', rex_nw, '\x0F\x2E', register(1, 8)], regtype='XMM') define_modrm_modes('XORPD_x*', ['\x66', rex_nw, '\x0F\x57', register(1, 8)], regtype='XMM') +define_modrm_modes('XORPS_x*', [rex_nw, '\x0F\x57', register(1, 8)], regtype='XMM') define_modrm_modes('ANDPD_x*', ['\x66', rex_nw, '\x0F\x54', register(1, 8)], regtype='XMM') def define_pxmm_insn(insnname_template, insn_char): diff --git a/rpython/jit/codewriter/assembler.py b/rpython/jit/codewriter/assembler.py --- a/rpython/jit/codewriter/assembler.py +++ b/rpython/jit/codewriter/assembler.py @@ -291,6 +291,7 @@ 'int_sub', 'jit_merge_point', 'new_array', + 'new_array_clear', 'newstr', 'setarrayitem_gc_i', 'setarrayitem_gc_r', diff --git a/rpython/jit/codewriter/codewriter.py b/rpython/jit/codewriter/codewriter.py --- a/rpython/jit/codewriter/codewriter.py From noreply at buildbot.pypy.org Wed Oct 1 00:21:51 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 1 Oct 2014 00:21:51 +0200 (CEST) Subject: [pypy-commit] pypy win32-fixes5: off-by-one error for avoiding empty { } output Message-ID: 
<20140930222151.7B2B41C094F@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: win32-fixes5 Changeset: r73749:a96c108bcec4 Date: 2014-09-30 20:36 +0300 http://bitbucket.org/pypy/pypy/changeset/a96c108bcec4/ Log: off-by-one error for avoiding empty { } output diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -596,7 +596,7 @@ else: padding_drop = [] type, name = self.get_declaration() - if name != self.name and self.getvarlength() < 1 and len(data) < 1: + if name != self.name and self.getvarlength() < 1 and len(data) < 2: # an empty union yield '' return From noreply at buildbot.pypy.org Wed Oct 1 00:21:52 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 1 Oct 2014 00:21:52 +0200 (CEST) Subject: [pypy-commit] pypy ufuncapi: fix translation Message-ID: <20140930222152.A97B91C094F@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufuncapi Changeset: r73750:6235f6533054 Date: 2014-09-30 21:11 +0300 http://bitbucket.org/pypy/pypy/changeset/6235f6533054/ Log: fix translation diff --git a/pypy/module/micronumpy/support.py b/pypy/module/micronumpy/support.py --- a/pypy/module/micronumpy/support.py +++ b/pypy/module/micronumpy/support.py @@ -76,7 +76,7 @@ if signature[i:i+2] != '->': raise oefmt(space.w_ValueError, '%s at %d in "%s"', "expect '->'", i, signature) - i = _next_non_white_space(signature, i+2) + i = _next_non_white_space(signature, i+2) # parse core dimensions of one argument, # e.g. "()", "(i)", or "(i,j)" if signature[i] != '(': @@ -91,7 +91,7 @@ # no named arg, skip the next loop next_comma = -1 i += 1 - else: + else: next_comma = signature.find(',', i, end_of_arg) if next_comma < 0: next_comma = end_of_arg @@ -100,10 +100,14 @@ name_end = next_comma - 1 while signature[name_end] == ' ' or signature[name_end] == '\t': name_end -= 1 - var_name = signature[i:name_end + 1] - if not all([_is_alnum_underscore(s) for s in var_name]): + if name_end < i: raise oefmt(space.w_ValueError, '%s at %d in "%s"', "expect dimension name", i, signature) + var_name = signature[i:name_end + 1] + for s in var_name: + if not _is_alnum_underscore(s): + raise oefmt(space.w_ValueError, '%s at %d in "%s"', + "expect dimension name", i, signature) if var_name not in var_names: var_names[var_name] = ufunc.core_num_dim_ix ufunc.core_num_dim_ix += 1 @@ -119,7 +123,7 @@ if end_of_arg <= i: next_comma = -1 i = end_of_arg + 1 - else: + else: next_comma = signature.find(',', i, end_of_arg) if next_comma < 0: next_comma = end_of_arg @@ -139,7 +143,7 @@ if cur_arg != ufunc.nargs: raise oefmt(space.w_ValueError, '%s at %d in "%s"', "incomplete signature: not all arguments found", i, signature) - ufunc.core_dim_ixs = ufunc.core_dim_ixs[:cur_core_dim] + ufunc.core_dim_ixs = ufunc.core_dim_ixs[:cur_core_dim] if cur_core_dim == 0: ufunc.core_enabled = 0 - return 0 # for historical reasons, any failures will raise + return 0 # for historical reasons, any failures will raise From noreply at buildbot.pypy.org Wed Oct 1 18:12:41 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 1 Oct 2014 18:12:41 +0200 (CEST) Subject: [pypy-commit] pypy win32-fixes5: document branch Message-ID: <20141001161241.7324E1D2489@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: win32-fixes5 Changeset: r73751:af04bd14990f Date: 2014-10-01 19:04 +0300 http://bitbucket.org/pypy/pypy/changeset/af04bd14990f/ Log: document branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ 
b/pypy/doc/whatsnew-head.rst @@ -6,3 +6,9 @@ .. this is a revision shortly after release-2.4.x .. startrev: 7026746cbb1b +.. branch: win32-fixes5 +Fix c code generation for msvc so empty "{ }" are avoided in unions, +Avoid re-opening files created with NamedTemporaryFile, +Allocate by 4-byte chunks in rffi_platform, +Skip testing objdump if it does not exist, +and other small adjustments in own tests From noreply at buildbot.pypy.org Wed Oct 1 18:12:42 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 1 Oct 2014 18:12:42 +0200 (CEST) Subject: [pypy-commit] pypy win32-fixes5: close branch to be merged Message-ID: <20141001161242.A8DB41D2489@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: win32-fixes5 Changeset: r73752:0a9b8849b6a1 Date: 2014-10-01 19:04 +0300 http://bitbucket.org/pypy/pypy/changeset/0a9b8849b6a1/ Log: close branch to be merged From noreply at buildbot.pypy.org Wed Oct 1 18:12:44 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 1 Oct 2014 18:12:44 +0200 (CEST) Subject: [pypy-commit] pypy default: merge win32-fixes5, which fixes own test failures in rpython/translator Message-ID: <20141001161244.1A5431D2489@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r73753:7c925baed944 Date: 2014-10-01 19:08 +0300 http://bitbucket.org/pypy/pypy/changeset/7c925baed944/ Log: merge win32-fixes5, which fixes own test failures in rpython/translator and win32 now rounds to 4 on rffi memory size calculation diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -6,3 +6,9 @@ .. this is a revision shortly after release-2.4.x .. startrev: 7026746cbb1b +.. branch: win32-fixes5 +Fix c code generation for msvc so empty "{ }" are avoided in unions, +Avoid re-opening files created with NamedTemporaryFile, +Allocate by 4-byte chunks in rffi_platform, +Skip testing objdump if it does not exist, +and other small adjustments in own tests diff --git a/pypy/tool/gdb_pypy.py b/pypy/tool/gdb_pypy.py --- a/pypy/tool/gdb_pypy.py +++ b/pypy/tool/gdb_pypy.py @@ -123,11 +123,15 @@ vname = 'pypy_g_rpython_memory_gctypelayout_GCData.gcd_inst_typeids_z' length = int(self.gdb.parse_and_eval('*(long*)%s' % vname)) vstart = '(char*)(((long*)%s)+1)' % vname - with tempfile.NamedTemporaryFile('rb') as fobj: + fname = tempfile.mktemp() + try: self.gdb.execute('dump binary memory %s %s %s+%d' % - (fobj.name, vstart, vstart, length)) - data = fobj.read() - return TypeIdsMap(zlib.decompress(data).splitlines(True), self.gdb) + (fname, vstart, vstart, length)) + with open(fname, 'rt') as fobj: + data = fobj.read() + return TypeIdsMap(zlib.decompress(data).splitlines(True), self.gdb) + finally: + os.remove(fname) class TypeIdsMap(object): diff --git a/rpython/rtyper/module/test/test_ll_os_stat.py b/rpython/rtyper/module/test/test_ll_os_stat.py --- a/rpython/rtyper/module/test/test_ll_os_stat.py +++ b/rpython/rtyper/module/test/test_ll_os_stat.py @@ -22,10 +22,10 @@ stat = ll_os_stat.make_win32_stat_impl('stat', ll_os.StringTraits()) wstat = ll_os_stat.make_win32_stat_impl('stat', ll_os.UnicodeTraits()) def check(f): - # msec resolution + # msec resolution, +- rounding error expected = int(os.stat(f).st_mtime*1000) - assert int(stat(f).st_mtime*1000) == expected - assert int(wstat(unicode(f)).st_mtime*1000) == expected + assert abs(int(stat(f).st_mtime*1000) - expected) < 2 + assert abs(int(wstat(unicode(f)).st_mtime*1000) - expected) < 2 check('c:/') check(os.environ['TEMP']) diff --git 
a/rpython/rtyper/tool/rffi_platform.py b/rpython/rtyper/tool/rffi_platform.py --- a/rpython/rtyper/tool/rffi_platform.py +++ b/rpython/rtyper/tool/rffi_platform.py @@ -107,15 +107,18 @@ fields is properly aligned.""" global _memory_alignment if _memory_alignment is None: - S = getstruct('struct memory_alignment_test', """ - struct memory_alignment_test { - double d; - void* p; - }; - """, []) - result = S._hints['align'] - assert result & (result-1) == 0, "not a power of two??" - _memory_alignment = result + if sys.platform == 'win32': + _memory_alignment = 4 + else: + S = getstruct('struct memory_alignment_test', """ + struct memory_alignment_test { + double d; + void* p; + }; + """, []) + result = S._hints['align'] + assert result & (result-1) == 0, "not a power of two??" + _memory_alignment = result return _memory_alignment _memory_alignment = None diff --git a/rpython/tool/jitlogparser/test/test_parser.py b/rpython/tool/jitlogparser/test/test_parser.py --- a/rpython/tool/jitlogparser/test/test_parser.py +++ b/rpython/tool/jitlogparser/test/test_parser.py @@ -5,6 +5,7 @@ from rpython.tool.jitlogparser.storage import LoopStorage import py, sys from rpython.jit.backend.detect_cpu import autodetect +from rpython.jit.backend.tool.viewcode import ObjdumpNotFound def parse(input, **kwds): return SimpleParser.parse_from_input(input, **kwds) @@ -193,7 +194,8 @@ py.test.skip('x86 only test') backend_dump = "554889E5534154415541564157488DA500000000488B042590C5540148C7042590C554010000000048898570FFFFFF488B042598C5540148C7042598C554010000000048898568FFFFFF488B0425A0C5540148C70425A0C554010000000048898560FFFFFF488B0425A8C5540148C70425A8C554010000000048898558FFFFFF4C8B3C2550525B0149BB30E06C96FC7F00004D8B334983C60149BB30E06C96FC7F00004D89334981FF102700000F8D000000004983C7014C8B342580F76A024983EE014C89342580F76A024983FE000F8C00000000E9AEFFFFFF488B042588F76A024829E0483B042580EC3C01760D49BB05F30894FC7F000041FFD3554889E5534154415541564157488DA550FFFFFF4889BD70FFFFFF4889B568FFFFFF48899560FFFFFF48898D58FFFFFF4D89C7E954FFFFFF49BB00F00894FC7F000041FFD34440484C3D030300000049BB00F00894FC7F000041FFD34440484C3D070304000000" dump_start = 0x7f3b0b2e63d5 - loop = parse(""" + try: + loop = parse(""" # Loop 0 : loop with 19 ops [p0, p1, p2, p3, i4] debug_merge_point(0, 0, ' #15 COMPARE_OP') @@ -212,6 +214,8 @@ +218: --end of the loop--""", backend_dump=backend_dump, dump_start=dump_start, backend_tp='x86_64') + except ObjdumpNotFound: + py.test.skip('no objdump found on path') cmp = loop.operations[1] assert 'jge' in cmp.asm assert '0x2710' in cmp.asm @@ -276,8 +280,11 @@ py.test.skip('x86 only test') _, loops = import_log(str(py.path.local(__file__).join('..', 'logtest.log'))) - for loop in loops: - loop.force_asm() + try: + for loop in loops: + loop.force_asm() + except ObjdumpNotFound: + py.test.skip('no objdump found on path') assert 'jge' in loops[0].operations[3].asm def test_import_log_2(): @@ -285,8 +292,11 @@ py.test.skip('x86 only test') _, loops = import_log(str(py.path.local(__file__).join('..', 'logtest2.log'))) - for loop in loops: - loop.force_asm() + try: + for loop in loops: + loop.force_asm() + except ObjdumpNotFound: + py.test.skip('no objdump found on path') assert 'cmp' in loops[1].operations[2].asm def test_Op_repr_is_pure(): diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -302,8 +302,11 @@ def has_profopt(self): profbased = self.getprofbased() - return (profbased and isinstance(profbased, tuple) + 
retval = (profbased and isinstance(profbased, tuple) and profbased[0] is ProfOpt) + if retval and self.translator.platform.name == 'msvc': + raise ValueError('Cannot do profile based optimization on MSVC,' + 'it is not supported in free compiler version') def getentrypointptr(self): # XXX check that the entrypoint has the correct diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -521,12 +521,16 @@ return [] lines = list(self.initializationexpr()) type, name = self.get_declaration() - if name != self.name: - lines[0] = '{ ' + lines[0] # extra braces around the 'a' part - lines[-1] += ' }' # of the union - lines[0] = '%s = %s' % ( - cdecl(type, name, self.is_thread_local()), - lines[0]) + if name != self.name and len(lines) < 2: + # a union with length 0 + lines[0] = cdecl(type, name, self.is_thread_local()) + else: + if name != self.name: + lines[0] = '{ ' + lines[0] # extra braces around the 'a' part + lines[-1] += ' }' # of the union + lines[0] = '%s = %s' % ( + cdecl(type, name, self.is_thread_local()), + lines[0]) lines[-1] += ';' return lines @@ -563,7 +567,6 @@ def initializationexpr(self, decoration=''): T = self.getTYPE() is_empty = True - yield '{' defnode = self.db.gettypedefnode(T) data = [] @@ -592,7 +595,13 @@ padding_drop = T._hints['get_padding_drop'](d) else: padding_drop = [] + type, name = self.get_declaration() + if name != self.name and self.getvarlength() < 1 and len(data) < 2: + # an empty union + yield '' + return + yield '{' for name, value in data: if name in padding_drop: continue diff --git a/rpython/translator/c/test/test_standalone.py b/rpython/translator/c/test/test_standalone.py --- a/rpython/translator/c/test/test_standalone.py +++ b/rpython/translator/c/test/test_standalone.py @@ -187,6 +187,8 @@ assert map(float, data.split()) == [0.0, 0.0] def test_profopt(self): + if sys.platform == 'win32': + py.test.skip("no profopt on win32") def add(a,b): return a + b - b + b - b + b - b + b - b + b - b + b - b + b def entry_point(argv): diff --git a/rpython/translator/tool/stdoutcapture.py b/rpython/translator/tool/stdoutcapture.py --- a/rpython/translator/tool/stdoutcapture.py +++ b/rpython/translator/tool/stdoutcapture.py @@ -9,7 +9,8 @@ def __init__(self, mixed_out_err = False): "Start capture of the Unix-level stdout and stderr." 
- if (not hasattr(os, 'tmpfile') or + if (sys.platform == 'win32' or # os.tmpfile fails, cpython issue #2232 + not hasattr(os, 'tmpfile') or not hasattr(os, 'dup') or not hasattr(os, 'dup2') or not hasattr(os, 'fdopen')): From noreply at buildbot.pypy.org Wed Oct 1 18:12:45 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 1 Oct 2014 18:12:45 +0200 (CEST) Subject: [pypy-commit] pypy default: skip test of disabled license generation Message-ID: <20141001161245.38FEB1D2489@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r73754:111c8c9b6897 Date: 2014-10-01 19:10 +0300 http://bitbucket.org/pypy/pypy/changeset/111c8c9b6897/ Log: skip test of disabled license generation diff --git a/pypy/tool/release/test/test_package.py b/pypy/tool/release/test/test_package.py --- a/pypy/tool/release/test/test_package.py +++ b/pypy/tool/release/test/test_package.py @@ -115,6 +115,7 @@ check(pypy, 0755) def test_generate_license(): + py.test.skip('generation of license from platform documentation is disabled') from os.path import dirname, abspath, join, exists class Options(object): pass From noreply at buildbot.pypy.org Wed Oct 1 18:12:46 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 1 Oct 2014 18:12:46 +0200 (CEST) Subject: [pypy-commit] pypy default: pep-8 Message-ID: <20141001161246.5310E1D2489@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r73755:2c8357513799 Date: 2014-10-01 19:11 +0300 http://bitbucket.org/pypy/pypy/changeset/2c8357513799/ Log: pep-8 diff --git a/pypy/tool/release/test/test_package.py b/pypy/tool/release/test/test_package.py --- a/pypy/tool/release/test/test_package.py +++ b/pypy/tool/release/test/test_package.py @@ -25,7 +25,7 @@ break else: assert False, 'could not find cmd.exe' - else: + else: pypy_c.write("#!/bin/sh") pypy_c.chmod(0755) fake_pypy_c = True @@ -125,7 +125,7 @@ if sys.platform == 'win32': for p in [join(basedir, r'..\..\..\local'), #buildbot join(basedir, r'..\local')]: # pypy/doc/windows.rst - if exists(p): + if exists(p): license_base = p break else: From noreply at buildbot.pypy.org Wed Oct 1 20:57:09 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 1 Oct 2014 20:57:09 +0200 (CEST) Subject: [pypy-commit] pypy default: random trailing whitespace clean up Message-ID: <20141001185709.5BF071C3342@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r73756:9a4cb1502f11 Date: 2014-10-01 11:56 -0700 http://bitbucket.org/pypy/pypy/changeset/9a4cb1502f11/ Log: random trailing whitespace clean up diff --git a/rpython/rtyper/tool/rffi_platform.py b/rpython/rtyper/tool/rffi_platform.py --- a/rpython/rtyper/tool/rffi_platform.py +++ b/rpython/rtyper/tool/rffi_platform.py @@ -88,11 +88,11 @@ _compilation_info_ = eci WORKS = Works() configure(CConfig) - + def checkcompiles(expression, c_header_source, include_dirs=None): """Check if expression compiles. 
If not, returns False""" return has(expression, c_header_source, include_dirs) - + def sizeof(name, eci, **kwds): class CConfig: _compilation_info_ = eci @@ -109,7 +109,7 @@ if _memory_alignment is None: if sys.platform == 'win32': _memory_alignment = 4 - else: + else: S = getstruct('struct memory_alignment_test', """ struct memory_alignment_test { double d; @@ -132,7 +132,7 @@ self.result = {} self.info = info self.entries = entries - + def get_entry_result(self, entry): try: return self.result[entry] @@ -216,7 +216,6 @@ for key, entry in entries: writer.write_entry(key, entry) - f = writer.f writer.start_main() for key, entry in entries: writer.write_entry_main(key) @@ -361,7 +360,7 @@ self.name = name self.ctype_hint = ctype_hint self.ifdef = ifdef - + def prepare_code(self): if self.ifdef is not None: yield '#ifdef %s' % (self.ifdef,) @@ -540,7 +539,7 @@ class Has(CConfigSingleEntry): def __init__(self, name): self.name = name - + def question(self, ask_gcc): try: ask_gcc(self.name + ';') @@ -772,7 +771,7 @@ On Windows, various configurations may be tried to compile the given eci object. These configurations are a list of dicts, containing: - + - prefix: if an absolute path, will prefix each include and library directories. If a relative path, the external directory is searched for directories which names start @@ -780,13 +779,13 @@ chosen, and becomes the prefix. - include_dir: prefix + include_dir is added to the include directories - + - library_dir: prefix + library_dir is added to the library directories """ if sys.platform != 'win32': configurations = [] - + key = (name, eci) try: return _cache[key] @@ -852,7 +851,7 @@ # since config_external_library does not use a platform kwarg, # somehow using a platform kw arg make the merge fail in # config_external_library - platform = None + platform = None else: library_dir = '' libraries = ['gc', 'dl'] @@ -869,7 +868,7 @@ if __name__ == '__main__': doc = """Example: - + rffi_platform.py -h sys/types.h -h netinet/in.h 'struct sockaddr_in' sin_port INT From noreply at buildbot.pypy.org Thu Oct 2 11:44:08 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 2 Oct 2014 11:44:08 +0200 (CEST) Subject: [pypy-commit] pypy default: On non-Intel platforms we get the extra logic to check for alignment. Message-ID: <20141002094408.B2FAB1C072F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73757:4efc4e33a16b Date: 2014-10-02 11:43 +0200 http://bitbucket.org/pypy/pypy/changeset/4efc4e33a16b/ Log: On non-Intel platforms we get the extra logic to check for alignment. diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -1,4 +1,5 @@ from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC +from rpython.rlib.rawstorage import misaligned_is_fine class TestMicroNumPy(BaseTestPyPyC): @@ -15,6 +16,14 @@ log = self.run(main, []) assert log.result == 0 loop, = log.loops_by_filename(self.filepath) + if misaligned_is_fine: + alignment_check = "" + else: + alignment_check = """ + i93 = int_and(i79, 7) + i94 = int_is_zero(i93) + guard_true(i94, descr=...) + """ assert loop.match(""" i76 = int_lt(i71, 300) guard_true(i76, descr=...) @@ -22,6 +31,7 @@ guard_false(i77, descr=...) i78 = int_mul(i71, i61) i79 = int_add(i55, i78) + """ + alignment_check + """ f80 = raw_load(i67, i79, descr=) i81 = int_add(i71, 1) guard_not_invalidated(descr=...) 
@@ -44,6 +54,14 @@ log = self.run(main, []) assert log.result == 0 loop, = log.loops_by_filename(self.filepath) + if misaligned_is_fine: + alignment_check = "" + else: + alignment_check = """ + i97 = int_and(i84, 7) + i98 = int_is_zero(i97) + guard_true(i98, descr=...) + """ assert loop.match(""" i81 = int_lt(i76, 300) guard_true(i81, descr=...) @@ -51,6 +69,7 @@ guard_false(i82, descr=...) i83 = int_mul(i76, i64) i84 = int_add(i58, i83) + """ + alignment_check + """ f85 = raw_load(i70, i84, descr=) guard_not_invalidated(descr=...) f86 = float_add(f74, f85) From noreply at buildbot.pypy.org Thu Oct 2 14:03:07 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 2 Oct 2014 14:03:07 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: Add a line Message-ID: <20141002120307.8C98C1D28AB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r544:715f67776563 Date: 2014-10-02 14:03 +0200 http://bitbucket.org/pypy/pypy.org/changeset/715f67776563/ Log: Add a line diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -101,6 +101,7 @@ installer vcredist_x86.exe.)
  • All our downloads, including previous versions. We also have a mirror, but please use only if you have troubles accessing the links above
  • + See below for the sources.
  • diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -85,6 +85,7 @@ installer vcredist_x86.exe`_.) * `All our downloads,`__ including previous versions. We also have a mirror_, but please use only if you have troubles accessing the links above +* See below for the sources. .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-2.4.0-linux.tar.bz2 .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-2.4.0-linux64.tar.bz2 From noreply at buildbot.pypy.org Thu Oct 2 14:37:31 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 2 Oct 2014 14:37:31 +0200 (CEST) Subject: [pypy-commit] stmgc default: Managed to get an almost reliable way to check if two operations do or don't conflict. Will use it Message-ID: <20141002123731.07DCA1D2D9D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1439:5eb7fdfb8edf Date: 2014-10-02 14:37 +0200 http://bitbucket.org/pypy/stmgc/changeset/5eb7fdfb8edf/ Log: Managed to get an almost reliable way to check if two operations do or don't conflict. Will use it to check more complex demos. diff --git a/hashtable/stmcheck.py b/hashtable/stmcheck.py new file mode 100644 --- /dev/null +++ b/hashtable/stmcheck.py @@ -0,0 +1,47 @@ +import py +import thread, time, sys +from __pypy__.thread import * + +try: + from pypyjit import set_param +except ImportError: + def set_param(value): + pass + + +class Conflict(Exception): + pass + + +def check_no_conflict(function_list, repeat=10000): + set_param("off") + # + def fn(index): + function = function_list[index] + sys.stdout.write("*** start %d ***\n" % index) + reset_longest_abort_info() + hint_commit_soon() + i = 0 + while i < repeat: + function() + i += 1 + hint_commit_soon() + abort_info = longest_abort_info() + with atomic: + abort_infos.append(abort_info) + if len(abort_infos) == count: + finished.release() + # + abort_infos = [] + finished = thread.allocate_lock() + finished.acquire() + count = len(function_list) + tlist = [thread.start_new_thread(fn, (i,)) for i in range(count)] + finished.acquire() + for i in range(count): + print 'thread %d: %r' % (i, abort_infos[i]) + if abort_infos != [None] * count: + raise Conflict + +def check_conflict(*args, **kwds): + py.test.raises(Conflict, check_no_conflict, *args, **kwds) diff --git a/hashtable/test_stmcheck.py b/hashtable/test_stmcheck.py new file mode 100644 --- /dev/null +++ b/hashtable/test_stmcheck.py @@ -0,0 +1,31 @@ +import stmcheck + + +def test_no_conflict(): + def t1(): + pass + def t2(): + pass + stmcheck.check_no_conflict([t1, t2]) + +def test_obvious_conflict(): + lst = [0] + def t1(): + lst[0] += 1 + stmcheck.check_conflict([t1, t1]) + +def test_no_conflict_if_writing_to_different_lists(): + lst = [[0], [0]] + def t1(): + lst[0][0] += 1 + def t2(): + lst[1][0] += 1 + stmcheck.check_no_conflict([t1, t2]) + +def test_conflict_even_if_writing_to_different_offsets(): + lst = [0, 0] + def t1(): + lst[0] += 1 + def t2(): + lst[1] += 1 + stmcheck.check_conflict([t1, t2]) From noreply at buildbot.pypy.org Thu Oct 2 14:49:31 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 2 Oct 2014 14:49:31 +0200 (CEST) Subject: [pypy-commit] stmgc default: update Message-ID: <20141002124931.CC8801C072F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1440:e42feaaec732 Date: 2014-10-02 14:49 +0200 http://bitbucket.org/pypy/stmgc/changeset/e42feaaec732/ Log: update diff --git a/hashtable/design.txt b/hashtable/design.txt --- a/hashtable/design.txt +++ b/hashtable/design.txt @@ 
-70,5 +70,10 @@ special-cased in the implementation). More precisely, len(), keys(), clear(), etc., set all the lines' read markers; clear() additionally sets all the non-empty lines' write markers (so that it doesn't conflict - with another transaction checking that some key is really not in the + with another transaction checking that some different key is not in the dict). + +* We have an additional pair of markers (read and write) for the 'empty' + flag. It is read whenever we check 'bool(dict)'. It is written only + when we are about to commit and the emptiness state changed in this + transaction. From noreply at buildbot.pypy.org Thu Oct 2 16:07:15 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 2 Oct 2014 16:07:15 +0200 (CEST) Subject: [pypy-commit] pypy default: remove some unnecessary magic Message-ID: <20141002140715.8AB201C14ED@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r73758:632b7f08e536 Date: 2014-10-02 04:36 +0100 http://bitbucket.org/pypy/pypy/changeset/632b7f08e536/ Log: remove some unnecessary magic diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -11,7 +11,7 @@ from rpython.translator.c.primitive import PrimitiveType, name_signed from rpython.rlib import exports from rpython.rlib.rfloat import isfinite, isinf -from rpython.translator.c import extfunc + def needs_gcheader(T): if not isinstance(T, ContainerType): @@ -23,22 +23,15 @@ return False # gcheader already in the first field return True -class defaultproperty(object): - def __init__(self, fget): - self.fget = fget - def __get__(self, obj, cls=None): - if obj is None: - return self - else: - return self.fget(obj) - class Node(object): __slots__ = ("db", ) + def __init__(self, db): self.db = db class NodeWithDependencies(Node): __slots__ = ("dependencies", ) + def __init__(self, db): Node.__init__(self, db) self.dependencies = set() @@ -108,9 +101,9 @@ else: typename = db.gettype(T, who_asks=self) self.fields.append((self.c_struct_field_name(name), typename)) - self.gcinfo # force it to be computed + self.computegcinfo(self.db) - def computegcinfo(self): + def computegcinfo(self, db): # let the gcpolicy do its own setup self.gcinfo = None # unless overwritten below rtti = None @@ -121,9 +114,8 @@ except ValueError: pass if self.varlength is None: - self.db.gcpolicy.struct_setup(self, rtti) + db.gcpolicy.struct_setup(self, rtti) return self.gcinfo - gcinfo = defaultproperty(computegcinfo) def gettype(self): return self.fulltypename @@ -219,7 +211,7 @@ return # setup() was already called, likely by __init__ db = self.db ARRAY = self.ARRAY - self.gcinfo # force it to be computed + self.computegcinfo(db) if self.varlength is not None: self.normalizedtypename = db.gettype(ARRAY, who_asks=self) if needs_gcheader(ARRAY): @@ -229,13 +221,12 @@ self.gcfields.append(gc_field) self.itemtypename = db.gettype(ARRAY.OF, who_asks=self) - def computegcinfo(self): + def computegcinfo(self, db): # let the gcpolicy do its own setup self.gcinfo = None # unless overwritten below if self.varlength is None: self.db.gcpolicy.array_setup(self) return self.gcinfo - gcinfo = defaultproperty(computegcinfo) def gettype(self): return self.fulltypename From noreply at buildbot.pypy.org Thu Oct 2 16:39:09 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 2 Oct 2014 16:39:09 +0200 (CEST) Subject: [pypy-commit] pypy default: clarify dependencies of Node.computegcinfo() Message-ID: 
<20141002143909.C18A81C072F@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r73759:fefbbb1439a7 Date: 2014-10-02 15:36 +0100 http://bitbucket.org/pypy/pypy/changeset/fefbbb1439a7/ Log: clarify dependencies of Node.computegcinfo() diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -101,9 +101,9 @@ else: typename = db.gettype(T, who_asks=self) self.fields.append((self.c_struct_field_name(name), typename)) - self.computegcinfo(self.db) + self.computegcinfo(self.db.gcpolicy) - def computegcinfo(self, db): + def computegcinfo(self, gcpolicy): # let the gcpolicy do its own setup self.gcinfo = None # unless overwritten below rtti = None @@ -114,7 +114,7 @@ except ValueError: pass if self.varlength is None: - db.gcpolicy.struct_setup(self, rtti) + gcpolicy.struct_setup(self, rtti) return self.gcinfo def gettype(self): @@ -211,7 +211,7 @@ return # setup() was already called, likely by __init__ db = self.db ARRAY = self.ARRAY - self.computegcinfo(db) + self.computegcinfo(db.gcpolicy) if self.varlength is not None: self.normalizedtypename = db.gettype(ARRAY, who_asks=self) if needs_gcheader(ARRAY): @@ -221,11 +221,11 @@ self.gcfields.append(gc_field) self.itemtypename = db.gettype(ARRAY.OF, who_asks=self) - def computegcinfo(self, db): + def computegcinfo(self, gcpolicy): # let the gcpolicy do its own setup self.gcinfo = None # unless overwritten below if self.varlength is None: - self.db.gcpolicy.array_setup(self) + gcpolicy.array_setup(self) return self.gcinfo def gettype(self): From noreply at buildbot.pypy.org Thu Oct 2 17:29:29 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Thu, 2 Oct 2014 17:29:29 +0200 (CEST) Subject: [pypy-commit] pypy default: implement list(dct) by just calling the .keys() method Message-ID: <20141002152929.26EC11D276E@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r73760:d6577a460c28 Date: 2014-10-02 16:49 +0200 http://bitbucket.org/pypy/pypy/changeset/d6577a460c28/ Log: implement list(dct) by just calling the .keys() method diff --git a/rpython/rtyper/lltypesystem/rdict.py b/rpython/rtyper/lltypesystem/rdict.py --- a/rpython/rtyper/lltypesystem/rdict.py +++ b/rpython/rtyper/lltypesystem/rdict.py @@ -308,6 +308,9 @@ def rtype_method_items(self, hop): return self._rtype_method_kvi(hop, ll_dict_items) + def rtype_bltn_list(self, hop): + return self._rtype_method_kvi(hop, ll_dict_keys) + def rtype_method_iterkeys(self, hop): hop.exception_cannot_occur() return DictIteratorRepr(self, "keys").newiter(hop) diff --git a/rpython/rtyper/test/test_rdict.py b/rpython/rtyper/test/test_rdict.py --- a/rpython/rtyper/test/test_rdict.py +++ b/rpython/rtyper/test/test_rdict.py @@ -242,6 +242,16 @@ res = self.interpret(func, ())#, view=True) assert res == 14 + def test_list_dict(self): + def func(): + dic = self.newdict() + dic[' 4'] = 1000 + dic[' 8'] = 200 + keys = list(dic) + return ord(keys[0][1]) + ord(keys[1][1]) - 2*ord('0') + len(keys) + res = self.interpret(func, ())#, view=True) + assert res == 14 + def test_dict_inst_keys(self): class Empty: pass From noreply at buildbot.pypy.org Fri Oct 3 00:42:10 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 3 Oct 2014 00:42:10 +0200 (CEST) Subject: [pypy-commit] pypy default: implement list(ordereddict) as well Message-ID: <20141002224210.072FA1C3342@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73761:9ea8570f1460 Date: 2014-10-02 18:41 -0400 
http://bitbucket.org/pypy/pypy/changeset/9ea8570f1460/ Log: implement list(ordereddict) as well diff --git a/rpython/rtyper/lltypesystem/rordereddict.py b/rpython/rtyper/lltypesystem/rordereddict.py --- a/rpython/rtyper/lltypesystem/rordereddict.py +++ b/rpython/rtyper/lltypesystem/rordereddict.py @@ -305,6 +305,9 @@ def rtype_method_items(self, hop): return self._rtype_method_kvi(hop, ll_dict_items) + def rtype_bltn_list(self, hop): + return self._rtype_method_kvi(hop, ll_dict_keys) + def rtype_method_iterkeys(self, hop): hop.exception_cannot_occur() return DictIteratorRepr(self, "keys").newiter(hop) From noreply at buildbot.pypy.org Fri Oct 3 07:52:58 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 3 Oct 2014 07:52:58 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <20141003055258.6909E1C023E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r545:670e0f995b99 Date: 2014-10-03 07:53 +0200 http://bitbucket.org/pypy/pypy.org/changeset/670e0f995b99/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -9,13 +9,13 @@ - $57384 of $105000 (54.7%) + $57748 of $105000 (55.0%)
    diff --git a/don3.html b/don3.html --- a/don3.html +++ b/don3.html @@ -9,13 +9,13 @@ - $49869 of $60000 (83.1%) + $50205 of $60000 (83.7%)
    diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -9,7 +9,7 @@ @@ -17,7 +17,7 @@ 2nd call: - $16253 of $80000 (20.3%) + $19037 of $80000 (23.8%)
    From noreply at buildbot.pypy.org Fri Oct 3 16:01:26 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 3 Oct 2014 16:01:26 +0200 (CEST) Subject: [pypy-commit] pypy default: Test and fix: give a sane error message Message-ID: <20141003140126.235E71C314E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73762:c9685aebe8e0 Date: 2014-10-03 16:00 +0200 http://bitbucket.org/pypy/pypy/changeset/c9685aebe8e0/ Log: Test and fix: give a sane error message diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -798,6 +798,7 @@ if self.is_typeptr_getset(op): # ignore the operation completely -- instead, it's done by 'new' return + self._check_no_vable_array(op.args) # turn the flow graph 'setfield' operation into our own version [v_inst, c_fieldname, v_value] = op.args RESULT = v_value.concretetype diff --git a/rpython/jit/codewriter/test/test_flatten.py b/rpython/jit/codewriter/test/test_flatten.py --- a/rpython/jit/codewriter/test/test_flatten.py +++ b/rpython/jit/codewriter/test/test_flatten.py @@ -1036,6 +1036,21 @@ transform=True) assert str(e.value).startswith("A virtualizable array is passed aroun") + def test_vable_attribute_list_copied_around(self): + class F: + _virtualizable_ = ['vlist[*]'] + vlist = None + def __init__(self, x): + self.vlist = [x] + def g(): + return F(42) + def f(): + f = g() + f.extrastuff = f.vlist + e = py.test.raises(AssertionError, self.encoding_test, f, [], "!", + transform=True) + assert str(e.value).startswith("A virtualizable array is passed aroun") + def check_force_cast(FROM, TO, operations, value): """Check that the test is correctly written...""" From noreply at buildbot.pypy.org Fri Oct 3 16:53:01 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 3 Oct 2014 16:53:01 +0200 (CEST) Subject: [pypy-commit] stmgc c7-full-profiling: Goal: change the collection of markers for profiling. At the moment Message-ID: <20141003145301.D07C21D2360@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-full-profiling Changeset: r1441:f844b45baef9 Date: 2014-10-01 09:29 +0200 http://bitbucket.org/pypy/stmgc/changeset/f844b45baef9/ Log: Goal: change the collection of markers for profiling. At the moment it always collects at most one marker per thread. The goal is to change it so that it collects either nothing or everything (controlled by an external condition like the PYPYLOG env var). 
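A minimal Python model of the all-or-nothing collection described in the log message above. The flag name is an assumption (the message only says an external condition like the PYPYLOG env var); the actual change is in C and, as the following changesets show, is driven by a callback pointer that is either left NULL or set once:

    import os

    _collect_events = bool(os.environ.get('STM_PROFILE'))   # hypothetical variable name

    _events = []

    def timing_event(tl, event, marker1=None, marker2=None):
        if not _collect_events:     # collect nothing: one cheap test per event
            return
        _events.append((tl, event, marker1, marker2))   # collect everything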
From noreply at buildbot.pypy.org Fri Oct 3 16:53:02 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 3 Oct 2014 16:53:02 +0200 (CEST) Subject: [pypy-commit] stmgc c7-full-profiling: in-progress Message-ID: <20141003145302.EE2B31D2360@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-full-profiling Changeset: r1442:0014465e03aa Date: 2014-10-03 16:53 +0200 http://bitbucket.org/pypy/stmgc/changeset/0014465e03aa/ Log: in-progress diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -343,7 +343,7 @@ assert(STM_PSEGMENT->safe_point == SP_NO_TRANSACTION); assert(STM_PSEGMENT->transaction_state == TS_NONE); - change_timing_state(STM_TIME_RUN_CURRENT); + timing_event(tl, STM_TRANSACTION_START, NULL, NULL); STM_PSEGMENT->start_time = tl->_timing_cur_start; STM_PSEGMENT->signalled_to_commit_soon = false; STM_PSEGMENT->safe_point = SP_RUNNING; @@ -783,7 +783,7 @@ list_clear(STM_PSEGMENT->modified_old_objects_markers); } -static void _finish_transaction(int attribute_to) +static void _finish_transaction(enum stm_event_e final_event) { STM_PSEGMENT->safe_point = SP_NO_TRANSACTION; STM_PSEGMENT->transaction_state = TS_NONE; @@ -812,9 +812,6 @@ minor_collection(/*commit=*/ true); - /* the call to minor_collection() above leaves us with - STM_TIME_BOOKKEEPING */ - /* synchronize overflow objects living in privatized pages */ push_overflow_objects_from_privatized_pages(); @@ -838,9 +835,9 @@ /* if a major collection is required, do it here */ if (is_major_collection_requested()) { - int oldstate = change_timing_state(STM_TIME_MAJOR_GC); + timing_event(NULL, STM_GC_MAJOR_START, NULL, NULL); major_collection_now_at_safe_point(); - change_timing_state(oldstate); + timing_event(NULL, STM_GC_MAJOR_STOP, NULL, NULL); } /* synchronize modified old objects to other threads */ @@ -867,7 +864,7 @@ } /* done */ - _finish_transaction(STM_TIME_RUN_COMMITTED); + _finish_transaction(STM_TR_COMMIT); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */ s_mutex_unlock(); @@ -1052,16 +1049,16 @@ /* invoke the callbacks */ invoke_and_clear_user_callbacks(1); /* for abort */ - int attribute_to = STM_TIME_RUN_ABORTED_OTHER; + enum stm_event_e final_event = STM_TR_ABORT_OTHER; if (is_abort(STM_SEGMENT->nursery_end)) { /* done aborting */ - attribute_to = STM_SEGMENT->nursery_end; + final_event = STM_SEGMENT->nursery_end; STM_SEGMENT->nursery_end = pause_signalled ? NSE_SIGPAUSE : NURSERY_END; } - _finish_transaction(attribute_to); + _finish_transaction(final_event); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! 
*/ /* Broadcast C_ABORTED to wake up contention.c */ diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -141,7 +141,7 @@ if (is_major_collection_requested()) { /* if still true */ - int oldstate = change_timing_state(STM_TIME_MAJOR_GC); + timing_event(NULL, STM_GC_MAJOR_START, NULL, NULL); synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK); @@ -149,7 +149,7 @@ major_collection_now_at_safe_point(); } - change_timing_state(oldstate); + timing_event(NULL, STM_GC_MAJOR_STOP, NULL, NULL); } s_mutex_unlock(); diff --git a/c7/stm/timing.c b/c7/stm/timing.c deleted file mode 100644 --- a/c7/stm/timing.c +++ /dev/null @@ -1,91 +0,0 @@ -#ifndef _STM_CORE_H_ -# error "must be compiled via stmgc.c" -#endif - - -static inline void add_timing(stm_thread_local_t *tl, enum stm_time_e category, - double elapsed) -{ - tl->timing[category] += elapsed; - tl->events[category] += 1; -} - -#define TIMING_CHANGE(tl, newstate) \ - double curtime = get_stm_time(); \ - double elasped = curtime - tl->_timing_cur_start; \ - enum stm_time_e oldstate = tl->_timing_cur_state; \ - add_timing(tl, oldstate, elasped); \ - tl->_timing_cur_state = newstate; \ - tl->_timing_cur_start = curtime - -static enum stm_time_e change_timing_state(enum stm_time_e newstate) -{ - stm_thread_local_t *tl = STM_SEGMENT->running_thread; - TIMING_CHANGE(tl, newstate); - return oldstate; -} - -static double change_timing_state_tl(stm_thread_local_t *tl, - enum stm_time_e newstate) -{ - TIMING_CHANGE(tl, newstate); - return elasped; -} - -static void timing_end_transaction(enum stm_time_e attribute_to) -{ - stm_thread_local_t *tl = STM_SEGMENT->running_thread; - TIMING_CHANGE(tl, STM_TIME_OUTSIDE_TRANSACTION); - double time_this_transaction = tl->timing[STM_TIME_RUN_CURRENT]; - add_timing(tl, attribute_to, time_this_transaction); - tl->timing[STM_TIME_RUN_CURRENT] = 0.0f; - - if (attribute_to != STM_TIME_RUN_COMMITTED) { - struct stm_priv_segment_info_s *pseg = - get_priv_segment(STM_SEGMENT->segment_num); - marker_copy(tl, pseg, attribute_to, time_this_transaction); - } -} - -static const char *timer_names[] = { - "outside transaction", - "run current", - "run committed", - "run aborted write write", - "run aborted write read", - "run aborted inevitable", - "run aborted other", - "wait free segment", - "wait write read", - "wait inevitable", - "wait other", - "sync commit soon", - "bookkeeping", - "minor gc", - "major gc", - "sync pause", -}; - -void stm_flush_timing(stm_thread_local_t *tl, int verbose) -{ - enum stm_time_e category = tl->_timing_cur_state; - uint64_t oldevents = tl->events[category]; - TIMING_CHANGE(tl, category); - tl->events[category] = oldevents; - - assert((sizeof(timer_names) / sizeof(timer_names[0])) == _STM_TIME_N); - if (verbose > 0) { - int i; - s_mutex_lock(); - fprintf(stderr, "thread %p:\n", tl); - for (i = 0; i < _STM_TIME_N; i++) { - fprintf(stderr, " %-24s %9u %8.3f s\n", - timer_names[i], tl->events[i], (double)tl->timing[i]); - } - fprintf(stderr, " %-24s %6s %11.6f s\n", - "longest recorded marker", "", tl->longest_marker_time); - fprintf(stderr, " \"%.*s\"\n", - (int)_STM_MARKER_LEN, tl->longest_marker_self); - s_mutex_unlock(); - } -} diff --git a/c7/stm/timing.h b/c7/stm/timing.h --- a/c7/stm/timing.h +++ b/c7/stm/timing.h @@ -1,14 +1,11 @@ -#include +void (*stmcb_timing_event)(stm_thread_local_t *, enum stm_event_e, + const char *, const char *); -static inline double get_stm_time(void) +static inline void timing_event(stm_thread_local_t *tl, + enum 
stm_event_e event, + const char *marker1, + const char *marker2) { - struct timespec tp; - clock_gettime(CLOCK_MONOTONIC, &tp); - return tp.tv_sec + tp.tv_nsec * 0.000000001; + if (stmcb_timing_event != NULL) + stmcb_timing_event(tl, event, marker1, marker2); } - -static enum stm_time_e change_timing_state(enum stm_time_e newstate); -static double change_timing_state_tl(stm_thread_local_t *tl, - enum stm_time_e newstate); - -static void timing_end_transaction(enum stm_time_e attribute_to); diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -34,6 +34,5 @@ #include "stm/extra.c" #include "stm/fprintcolor.c" #include "stm/weakref.c" -#include "stm/timing.c" #include "stm/marker.c" #include "stm/rewind_setjmp.c" diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -54,27 +54,32 @@ object_t *ss; }; -enum stm_time_e { - STM_TIME_OUTSIDE_TRANSACTION, - STM_TIME_RUN_CURRENT, - STM_TIME_RUN_COMMITTED, - STM_TIME_RUN_ABORTED_WRITE_WRITE, - STM_TIME_RUN_ABORTED_WRITE_READ, - STM_TIME_RUN_ABORTED_INEVITABLE, - STM_TIME_RUN_ABORTED_OTHER, - STM_TIME_WAIT_FREE_SEGMENT, - STM_TIME_WAIT_WRITE_READ, - STM_TIME_WAIT_INEVITABLE, - STM_TIME_WAIT_OTHER, - STM_TIME_SYNC_COMMIT_SOON, - STM_TIME_BOOKKEEPING, - STM_TIME_MINOR_GC, - STM_TIME_MAJOR_GC, - STM_TIME_SYNC_PAUSE, +/* Profiling events (in the comments: value for marker1, value for marker2) */ +enum stm_event_e { + /* always STM_TRANSACTION_START followed later by one of the STM_TR_xxx */ + STM_TRANSACTION_START, + STM_TR_COMMIT, + STM_TR_ABORT_WRITE_WRITE, /* self write loc, other write loc */ + STM_TR_ABORT_WRITE_READ, /* self write loc, other = null; or opposite */ + STM_TR_ABORT_INEVITABLE, /* self cur loc, other turned-inev loc */ + STM_TR_ABORT_OTHER, /* ?, ? */ + + /* always one STM_WT_xxx followed later by STM_WAIT_DONE */ + STM_WT_FREE_SEGMENT, + STM_WT_SYNC_PAUSE, + STM_WT_WRITE_READ, /* self write loc, other = null; or opposite */ + STM_WT_INEVITABLE, /* self cur loc, other turned-inev loc */ + STM_WAIT_DONE, + + /* start and end of GC cycles */ + STM_GC_MINOR_START, + STM_GC_MINOR_STOP, + STM_GC_MAJOR_START, + STM_GC_MAJOR_STOP, + _STM_TIME_N }; - -#define _STM_MARKER_LEN 80 +#define _STM_MARKER_LEN 128 typedef struct stm_thread_local_s { /* every thread should handle the shadow stack itself */ @@ -90,16 +95,6 @@ /* after an abort, some details about the abort are stored there. 
(these fields are not modified on a successful commit) */ long last_abort__bytes_in_nursery; - /* timing information, accumulated */ - uint32_t events[_STM_TIME_N]; - float timing[_STM_TIME_N]; - double _timing_cur_start; - enum stm_time_e _timing_cur_state; - /* the marker with the longest associated time so far */ - enum stm_time_e longest_marker_state; - double longest_marker_time; - char longest_marker_self[_STM_MARKER_LEN]; - char longest_marker_other[_STM_MARKER_LEN]; /* the next fields are handled internally by the library */ int associated_segment_num; struct stm_thread_local_s *prev, *next; @@ -448,8 +443,10 @@ extern void (*stmcb_expand_marker)(char *segment_base, uintptr_t odd_number, object_t *following_object, char *outputbuf, size_t outputbufsize); -extern void (*stmcb_debug_print)(const char *cause, double time, - const char *marker); +extern void (*stmcb_timing_event)(stm_thread_local_t *tl, + enum stm_event_e event, + const char *marker1, + const char *marker2); /* Conventience macros to push the markers into the shadowstack */ #define STM_PUSH_MARKER(tl, odd_num, p) do { \ diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -24,12 +24,6 @@ size_t mem_bytes_to_clear_on_abort; long last_abort__bytes_in_nursery; int associated_segment_num; - uint32_t events[]; - float timing[]; - int longest_marker_state; - double longest_marker_time; - char longest_marker_self[]; - char longest_marker_other[]; ...; } stm_thread_local_t; @@ -113,29 +107,39 @@ long stm_call_on_abort(stm_thread_local_t *, void *key, void callback(void *)); long stm_call_on_commit(stm_thread_local_t *, void *key, void callback(void *)); -#define STM_TIME_OUTSIDE_TRANSACTION ... -#define STM_TIME_RUN_CURRENT ... -#define STM_TIME_RUN_COMMITTED ... -#define STM_TIME_RUN_ABORTED_WRITE_WRITE ... -#define STM_TIME_RUN_ABORTED_WRITE_READ ... -#define STM_TIME_RUN_ABORTED_INEVITABLE ... -#define STM_TIME_RUN_ABORTED_OTHER ... -#define STM_TIME_WAIT_FREE_SEGMENT ... -#define STM_TIME_WAIT_WRITE_READ ... -#define STM_TIME_WAIT_INEVITABLE ... -#define STM_TIME_WAIT_OTHER ... -#define STM_TIME_BOOKKEEPING ... -#define STM_TIME_MINOR_GC ... -#define STM_TIME_MAJOR_GC ... -#define STM_TIME_SYNC_PAUSE ... +enum stm_event_e { + /* always STM_TRANSACTION_START followed later by one of the STM_TR_xxx */ + STM_TRANSACTION_START, + STM_TR_COMMIT, + STM_TR_ABORT_WRITE_WRITE, /* self write loc, other write loc */ + STM_TR_ABORT_WRITE_READ, /* self write loc, other = null; or opposite */ + STM_TR_ABORT_INEVITABLE, /* self cur loc, other turned-inev loc */ + STM_TR_ABORT_OTHER, /* ?, ? */ + + /* always one STM_WT_xxx followed later by STM_WAIT_DONE */ + STM_WT_FREE_SEGMENT, + STM_WT_SYNC_PAUSE, + STM_WT_WRITE_READ, /* self write loc, other = null; or opposite */ + STM_WT_INEVITABLE, /* self cur loc, other turned-inev loc */ + STM_WAIT_DONE, + + /* start and end of GC cycles */ + STM_GC_MINOR_START, + STM_GC_MINOR_STOP, + STM_GC_MAJOR_START, + STM_GC_MAJOR_STOP, + ... 
+}; void stm_flush_timing(stm_thread_local_t *, int); void (*stmcb_expand_marker)(char *segment_base, uintptr_t odd_number, object_t *following_object, char *outputbuf, size_t outputbufsize); -void (*stmcb_debug_print)(const char *cause, double time, - const char *marker); +void (*stmcb_timing_event)(stm_thread_local_t *tl, + enum stm_event_e event, + const char *marker1, + const char *marker2); void stm_push_marker(stm_thread_local_t *, uintptr_t, object_t *); void stm_update_marker_num(stm_thread_local_t *, uintptr_t); From noreply at buildbot.pypy.org Fri Oct 3 17:56:54 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 3 Oct 2014 17:56:54 +0200 (CEST) Subject: [pypy-commit] stmgc c7-full-profiling: in-progress Message-ID: <20141003155654.23F9D1C02ED@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-full-profiling Changeset: r1443:0e74d9b2380c Date: 2014-10-03 17:57 +0200 http://bitbucket.org/pypy/stmgc/changeset/0e74d9b2380c/ Log: in-progress diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -181,6 +181,7 @@ if (must_abort()) abort_with_mutex(); + abort(); /* XXX struct stm_priv_segment_info_s *pseg = get_priv_segment(STM_SEGMENT->segment_num); double elapsed = @@ -188,6 +189,7 @@ STM_TIME_RUN_CURRENT); marker_copy(pseg->pub.running_thread, pseg, wait_category, elapsed); + */ } else if (!contmgr.abort_other) { diff --git a/c7/stm/marker.c b/c7/stm/marker.c --- a/c7/stm/marker.c +++ b/c7/stm/marker.c @@ -79,6 +79,7 @@ return _result; } +#if 0 // XXX static void marker_copy(stm_thread_local_t *tl, struct stm_priv_segment_info_s *pseg, enum stm_time_e attribute_to, double time) @@ -103,6 +104,7 @@ pseg->marker_self[0] = 0; pseg->marker_other[0] = 0; } +#endif static void marker_fetch_obj_write(uint8_t in_segment_num, object_t *obj, uintptr_t marker[2]) diff --git a/c7/stm/marker.h b/c7/stm/marker.h --- a/c7/stm/marker.h +++ b/c7/stm/marker.h @@ -4,9 +4,6 @@ static void marker_expand(uintptr_t marker[2], char *segment_base, char *outmarker); static void marker_default_for_abort(struct stm_priv_segment_info_s *pseg); -static void marker_copy(stm_thread_local_t *tl, - struct stm_priv_segment_info_s *pseg, - enum stm_time_e attribute_to, double time); static void marker_contention(int kind, bool abort_other, uint8_t other_segment_num, object_t *obj); diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -572,11 +572,11 @@ stm_safe_point(); - change_timing_state(STM_TIME_MINOR_GC); + timing_event(NULL, STM_GC_MINOR_START, NULL, NULL); _do_minor_collection(commit); - change_timing_state(commit ? STM_TIME_BOOKKEEPING : STM_TIME_RUN_CURRENT); + timing_event(NULL, STM_GC_MINOR_STOP, NULL, NULL); } void stm_collect(long level) diff --git a/c7/stm/nursery.h b/c7/stm/nursery.h --- a/c7/stm/nursery.h +++ b/c7/stm/nursery.h @@ -1,7 +1,11 @@ -/* '_stm_nursery_section_end' is either NURSERY_END or NSE_SIGxxx */ -#define NSE_SIGPAUSE STM_TIME_WAIT_OTHER -#define NSE_SIGCOMMITSOON STM_TIME_SYNC_COMMIT_SOON +/* 'nursery_end' is either NURSERY_END, NSE_SIGxxx, or STM_TR_ABORT_xxx. 
*/ +#define NSE_SIGPAUSE (_STM_NSE_SIGNAL_MAX - 1) +#define NSE_SIGCOMMITSOON (_STM_NSE_SIGNAL_MAX - 2) + +#if !(STM_TR_ABORT_OTHER < NSE_SIGCOMMITSOON) +# error "STM_TR_ABORT_xxx is too large; increase _STM_NSE_SIGNAL_MAX" +#endif static uint32_t highest_overflow_number; diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -142,12 +142,11 @@ else { /* wait for stm_commit_transaction() to finish this inevitable transaction */ + stm_thread_local_t *tl = tl_or_null_if_can_abort; signal_other_to_commit_soon(other_pseg); - change_timing_state_tl(tl_or_null_if_can_abort, - STM_TIME_WAIT_INEVITABLE); + timing_event_wt_inevitable(tl, other_pseg); cond_wait(C_INEVITABLE); - /* don't bother changing the timing state again: the caller - will very soon go to STM_TIME_RUN_CURRENT */ + timing_event(tl, STM_WAIT_DONE, NULL, NULL); } goto restart; } @@ -188,8 +187,9 @@ } /* No segment available. Wait until release_thread_segment() signals that one segment has been freed. */ - change_timing_state_tl(tl, STM_TIME_WAIT_FREE_SEGMENT); + timing_event(tl, STM_WT_FREE_SEGMENT, NULL, NULL); cond_wait(C_SEGMENT_FREE); + timing_event(tl, STM_WAIT_DONE, NULL, NULL); /* Return false to the caller, which will call us again */ return false; diff --git a/c7/stm/timing.h b/c7/stm/timing.h --- a/c7/stm/timing.h +++ b/c7/stm/timing.h @@ -9,3 +9,20 @@ if (stmcb_timing_event != NULL) stmcb_timing_event(tl, event, marker1, marker2); } + +static inline void timing_event_wt_inevitable(stm_thread_local_t *tl, + struct stm_priv_segment_info_s *other_pseg) +{ + /* We are not running a transaction yet; can't get the 'self loc' */ + assert(_has_mutex()); + if (stmcb_timing_event != NULL) { + + char outmarker[_STM_MARKER_LEN]; + acquire_marker_lock(other_pseg->pub.segment_base); + marker_expand(other_pseg->marker_inev, other_pseg->pub.segment_base, + outmarker); + release_marker_lock(other_pseg->pub.segment_base); + + stmcb_timing_event(tl, STM_WT_INEVITABLE, NULL, outmarker); + } +} diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -14,8 +14,8 @@ #include "stm/extra.h" #include "stm/fprintcolor.h" #include "stm/weakref.h" +#include "stm/marker.h" #include "stm/timing.h" -#include "stm/marker.h" #include "stm/misc.c" #include "stm/list.c" diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -54,33 +54,6 @@ object_t *ss; }; -/* Profiling events (in the comments: value for marker1, value for marker2) */ -enum stm_event_e { - /* always STM_TRANSACTION_START followed later by one of the STM_TR_xxx */ - STM_TRANSACTION_START, - STM_TR_COMMIT, - STM_TR_ABORT_WRITE_WRITE, /* self write loc, other write loc */ - STM_TR_ABORT_WRITE_READ, /* self write loc, other = null; or opposite */ - STM_TR_ABORT_INEVITABLE, /* self cur loc, other turned-inev loc */ - STM_TR_ABORT_OTHER, /* ?, ? 
*/ - - /* always one STM_WT_xxx followed later by STM_WAIT_DONE */ - STM_WT_FREE_SEGMENT, - STM_WT_SYNC_PAUSE, - STM_WT_WRITE_READ, /* self write loc, other = null; or opposite */ - STM_WT_INEVITABLE, /* self cur loc, other turned-inev loc */ - STM_WAIT_DONE, - - /* start and end of GC cycles */ - STM_GC_MINOR_START, - STM_GC_MINOR_STOP, - STM_GC_MAJOR_START, - STM_GC_MAJOR_STOP, - - _STM_TIME_N -}; -#define _STM_MARKER_LEN 128 - typedef struct stm_thread_local_s { /* every thread should handle the shadow stack itself */ struct stm_shadowentry_s *shadowstack, *shadowstack_base; @@ -150,7 +123,7 @@ #define _STM_CARD_SIZE 32 /* must be >= 32 */ #define _STM_MIN_CARD_COUNT 17 #define _STM_MIN_CARD_OBJ_SIZE (_STM_CARD_SIZE * _STM_MIN_CARD_COUNT) -#define _STM_NSE_SIGNAL_MAX _STM_TIME_N +#define _STM_NSE_SIGNAL_MAX 63 #define _STM_FAST_ALLOC (66*1024) @@ -437,6 +410,33 @@ void stm_flush_timing(stm_thread_local_t *tl, int verbose); +/* Profiling events (in the comments: value for marker1, value for marker2) */ +enum stm_event_e { + /* always STM_TRANSACTION_START followed later by one of the STM_TR_xxx */ + STM_TRANSACTION_START, + STM_TR_COMMIT, + STM_TR_ABORT_WRITE_WRITE, /* self write loc, other write loc */ + STM_TR_ABORT_WRITE_READ, /* self write loc, other = null; or opposite */ + STM_TR_ABORT_INEVITABLE, /* self cur loc?, other turned-inev loc */ + STM_TR_ABORT_OTHER, /* ?, ? */ + + /* always one STM_WT_xxx followed later by STM_WAIT_DONE */ + STM_WT_FREE_SEGMENT, + STM_WT_SYNC_PAUSE, + STM_WT_WRITE_READ, /* self write loc, other = null; or opposite */ + STM_WT_INEVITABLE, /* self cur loc?, other turned-inev loc */ + STM_WAIT_DONE, + + /* start and end of GC cycles */ + STM_GC_MINOR_START, + STM_GC_MINOR_STOP, + STM_GC_MAJOR_START, + STM_GC_MAJOR_STOP, + + _STM_EVENT_N +}; +#define _STM_MARKER_LEN 128 + /* The markers pushed in the shadowstack are an odd number followed by a regular pointer. When needed, this library invokes this callback to turn this pair into a human-readable explanation. */ From noreply at buildbot.pypy.org Fri Oct 3 18:35:09 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 3 Oct 2014 18:35:09 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: add myself Message-ID: <20141003163509.944541C314E@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: extradoc Changeset: r5420:523d967e3386 Date: 2014-10-03 18:35 +0200 http://bitbucket.org/pypy/extradoc/changeset/523d967e3386/ Log: add myself diff --git a/sprintinfo/warsaw-2014/people.txt b/sprintinfo/warsaw-2014/people.txt --- a/sprintinfo/warsaw-2014/people.txt +++ b/sprintinfo/warsaw-2014/people.txt @@ -14,4 +14,5 @@ Romain Guillebert 19/10-26-10 ??? Manuel Jacob 20/10-26/10 ? (shared hotel room?) 
Kostia Lopuhin +Antonio Cuni 20/10-26/10 ibis Reduta http://www.ibis.com/gb/hotel-7125-ibis-warszawa-reduta/index.shtml ==================== ============== ======================= From noreply at buildbot.pypy.org Fri Oct 3 20:11:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 3 Oct 2014 20:11:14 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Leaving the 28th Message-ID: <20141003181114.1EB281C02ED@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5421:2497b51783f8 Date: 2014-10-03 20:11 +0200 http://bitbucket.org/pypy/extradoc/changeset/2497b51783f8/ Log: Leaving the 28th diff --git a/sprintinfo/warsaw-2014/people.txt b/sprintinfo/warsaw-2014/people.txt --- a/sprintinfo/warsaw-2014/people.txt +++ b/sprintinfo/warsaw-2014/people.txt @@ -9,7 +9,7 @@ ==================== ============== ======================= Name Arrive/Depart Accomodation ==================== ============== ======================= -Armin Rigo 20/10-2X/10 with fijal +Armin Rigo 20/10-28/10 with fijal Maciej Fijalkowski 20/10-30/10 private Romain Guillebert 19/10-26-10 ??? Manuel Jacob 20/10-26/10 ? (shared hotel room?) From noreply at buildbot.pypy.org Sat Oct 4 00:44:39 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 4 Oct 2014 00:44:39 +0200 (CEST) Subject: [pypy-commit] pypy rtyper-stuff: Create LowLevelType._contains_value() Message-ID: <20141003224439.AA79B1C314E@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: rtyper-stuff Changeset: r73763:098a8b6598ba Date: 2014-05-14 19:22 +0100 http://bitbucket.org/pypy/pypy/changeset/098a8b6598ba/ Log: Create LowLevelType._contains_value() diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py --- a/rpython/rtyper/lltypesystem/lltype.py +++ b/rpython/rtyper/lltypesystem/lltype.py @@ -191,6 +191,11 @@ def _is_varsize(self): return False + def _contains_value(self, value): + if self is Void: + return True + return isCompatibleType(typeOf(value), self) + NFOUND = object() class ContainerType(LowLevelType): diff --git a/rpython/rtyper/rmodel.py b/rpython/rtyper/rmodel.py --- a/rpython/rtyper/rmodel.py +++ b/rpython/rtyper/rmodel.py @@ -2,8 +2,7 @@ from rpython.flowspace.model import Constant from rpython.rtyper.error import TyperError, MissingRTypeOperation from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.lltypesystem.lltype import (Void, Bool, typeOf, - LowLevelType, isCompatibleType) +from rpython.rtyper.lltypesystem.lltype import Void, Bool, LowLevelType from rpython.tool.pairtype import pairtype, extendabletype, pair @@ -120,14 +119,9 @@ def convert_const(self, value): "Convert the given constant value to the low-level repr of 'self'." - if self.lowleveltype is not Void: - try: - realtype = typeOf(value) - except (AssertionError, AttributeError, TypeError): - realtype = '???' - if realtype != self.lowleveltype: - raise TyperError("convert_const(self = %r, value = %r)" % ( - self, value)) + if not self.lowleveltype._contains_value(value): + raise TyperError("convert_const(self = %r, value = %r)" % ( + self, value)) return value def get_ll_eq_function(self): @@ -356,18 +350,9 @@ lltype = reqtype else: raise TypeError(repr(reqtype)) - # Void Constants can hold any value; - # non-Void Constants must hold a correctly ll-typed value - if lltype is not Void: - try: - realtype = typeOf(value) - except (AssertionError, AttributeError): - realtype = '???' 
- if not isCompatibleType(realtype, lltype): - raise TyperError("inputconst(reqtype = %s, value = %s):\n" - "expected a %r,\n" - " got a %r" % (reqtype, value, - lltype, realtype)) + if not lltype._contains_value(value): + raise TyperError("inputconst(): expected a %r, got %r" % + (lltype, value)) c = Constant(value) c.concretetype = lltype return c From noreply at buildbot.pypy.org Sat Oct 4 00:44:40 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 4 Oct 2014 00:44:40 +0200 (CEST) Subject: [pypy-commit] pypy rtyper-stuff: code cleanup Message-ID: <20141003224440.DEDF71C314E@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: rtyper-stuff Changeset: r73764:c95ae034e136 Date: 2014-08-14 18:16 +0100 http://bitbucket.org/pypy/pypy/changeset/c95ae034e136/ Log: code cleanup diff --git a/rpython/rtyper/rmodel.py b/rpython/rtyper/rmodel.py --- a/rpython/rtyper/rmodel.py +++ b/rpython/rtyper/rmodel.py @@ -407,7 +407,8 @@ def __ne__(self, other): return not (self == other) - def build_ll_dummy_value(self): + @property + def ll_dummy_value(self): TYPE = self.TYPE try: return self.rtyper.cache_dummy_values[TYPE] @@ -420,8 +421,6 @@ self.rtyper.cache_dummy_values[TYPE] = p return p - ll_dummy_value = property(build_ll_dummy_value) - # logging/warning From noreply at buildbot.pypy.org Sat Oct 4 01:50:58 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 4 Oct 2014 01:50:58 +0200 (CEST) Subject: [pypy-commit] pypy rtyper-stuff: kill rpython.tool.staticmethods Message-ID: <20141003235058.70B761C02ED@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: rtyper-stuff Changeset: r73765:7486044bea6c Date: 2014-10-04 00:50 +0100 http://bitbucket.org/pypy/pypy/changeset/7486044bea6c/ Log: kill rpython.tool.staticmethods diff --git a/rpython/rlib/_stacklet_n_a.py b/rpython/rlib/_stacklet_n_a.py --- a/rpython/rlib/_stacklet_n_a.py +++ b/rpython/rlib/_stacklet_n_a.py @@ -1,12 +1,10 @@ from rpython.rlib import _rffi_stacklet as _c from rpython.rlib import objectmodel, debug from rpython.rtyper.annlowlevel import llhelper -from rpython.tool.staticmethods import StaticMethods -class StackletGcRootFinder: - __metaclass__ = StaticMethods - +class StackletGcRootFinder(object): + @staticmethod def new(thrd, callback, arg): h = _c.new(thrd._thrd, llhelper(_c.run_fn, callback), arg) if not h: @@ -14,20 +12,23 @@ return h new._annspecialcase_ = 'specialize:arg(1)' + @staticmethod def switch(h): h = _c.switch(h) if not h: raise MemoryError return h + @staticmethod def destroy(thrd, h): _c.destroy(thrd._thrd, h) if objectmodel.we_are_translated(): debug.debug_print("not using a framework GC: " "stacklet_destroy() may leak") - is_empty_handle = _c.is_empty_handle + is_empty_handle = staticmethod(_c.is_empty_handle) + @staticmethod def get_null_handle(): return _c.null_handle diff --git a/rpython/rlib/_stacklet_shadowstack.py b/rpython/rlib/_stacklet_shadowstack.py --- a/rpython/rlib/_stacklet_shadowstack.py +++ b/rpython/rlib/_stacklet_shadowstack.py @@ -3,7 +3,6 @@ from rpython.rtyper.annlowlevel import llhelper from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.rtyper.lltypesystem.lloperation import llop -from rpython.tool.staticmethods import StaticMethods NULL_SUSPSTACK = lltype.nullptr(llmemory.GCREF.TO) @@ -68,9 +67,8 @@ return oldsuspstack -class StackletGcRootFinder: - __metaclass__ = StaticMethods - +class StackletGcRootFinder(object): + @staticmethod def new(thrd, callback, arg): gcrootfinder.callback = callback thread_handle = thrd._thrd @@ -79,6 +77,7 @@ return 
get_result_suspstack(h) new._dont_inline_ = True + @staticmethod def switch(suspstack): # suspstack has a handle to target, i.e. where to switch to ll_assert(suspstack != gcrootfinder.oldsuspstack, @@ -91,9 +90,11 @@ return get_result_suspstack(h) switch._dont_inline_ = True + @staticmethod def is_empty_handle(suspstack): return not suspstack + @staticmethod def get_null_handle(): return NULL_SUSPSTACK diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -9,7 +9,6 @@ from rpython.rtyper.rfloat import FloatRepr from rpython.tool.pairtype import pairtype, pair from rpython.tool.sourcetools import func_with_new_name -from rpython.tool.staticmethods import StaticMethods from rpython.rlib.rstring import UnicodeBuilder @@ -800,10 +799,8 @@ # get flowed and annotated, mostly with SomePtr. # -# this class contains low level helpers used both by lltypesystem -class AbstractLLHelpers: - __metaclass__ = StaticMethods - +class AbstractLLHelpers(object): + @staticmethod def ll_isdigit(s): from rpython.rtyper.annlowlevel import hlstr @@ -815,6 +812,7 @@ return False return True + @staticmethod def ll_isalpha(s): from rpython.rtyper.annlowlevel import hlstr @@ -826,6 +824,7 @@ return False return True + @staticmethod def ll_isalnum(s): from rpython.rtyper.annlowlevel import hlstr @@ -837,14 +836,17 @@ return False return True + @staticmethod def ll_char_isspace(ch): c = ord(ch) return c == 32 or (9 <= c <= 13) # c in (9, 10, 11, 12, 13, 32) + @staticmethod def ll_char_isdigit(ch): c = ord(ch) return c <= 57 and c >= 48 + @staticmethod def ll_char_isalpha(ch): c = ord(ch) if c >= 97: @@ -852,6 +854,7 @@ else: return 65 <= c <= 90 + @staticmethod def ll_char_isalnum(ch): c = ord(ch) if c >= 65: @@ -862,47 +865,54 @@ else: return 48 <= c <= 57 + @staticmethod def ll_char_isupper(ch): c = ord(ch) return 65 <= c <= 90 + @staticmethod def ll_char_islower(ch): c = ord(ch) return 97 <= c <= 122 + @staticmethod def ll_upper_char(ch): if 'a' <= ch <= 'z': ch = chr(ord(ch) - 32) return ch + @staticmethod def ll_lower_char(ch): if 'A' <= ch <= 'Z': ch = chr(ord(ch) + 32) return ch + @staticmethod def ll_char_hash(ch): return ord(ch) + @staticmethod def ll_unichar_hash(ch): return ord(ch) + @classmethod def ll_str_is_true(cls, s): # check if a string is True, allowing for None return bool(s) and cls.ll_strlen(s) != 0 - ll_str_is_true = classmethod(ll_str_is_true) + @classmethod def ll_stritem_nonneg_checked(cls, s, i): if i >= cls.ll_strlen(s): raise IndexError return cls.ll_stritem_nonneg(s, i) - ll_stritem_nonneg_checked = classmethod(ll_stritem_nonneg_checked) + @classmethod def ll_stritem(cls, s, i): if i < 0: i += cls.ll_strlen(s) return cls.ll_stritem_nonneg(s, i) - ll_stritem = classmethod(ll_stritem) + @classmethod def ll_stritem_checked(cls, s, i): length = cls.ll_strlen(s) if i < 0: @@ -910,8 +920,8 @@ if i >= length or i < 0: raise IndexError return cls.ll_stritem_nonneg(s, i) - ll_stritem_checked = classmethod(ll_stritem_checked) + @staticmethod def parse_fmt_string(fmt): # we support x, d, s, f, [r] it = iter(fmt) @@ -937,6 +947,7 @@ r.append(curstr) return r + @staticmethod def ll_float(ll_str): from rpython.rtyper.annlowlevel import hlstr from rpython.rlib.rfloat import rstring_to_float @@ -961,6 +972,7 @@ assert end >= 0 return rstring_to_float(s[beg:end + 1]) + @classmethod def ll_splitlines(cls, LIST, ll_str, keep_newlines): from rpython.rtyper.annlowlevel import hlstr s = hlstr(ll_str) @@ -991,4 +1003,3 @@ item = 
cls.ll_stringslice_startstop(ll_str, j, strlen) res.ll_setitem_fast(list_length, item) return res - ll_splitlines = classmethod(ll_splitlines) diff --git a/rpython/tool/staticmethods.py b/rpython/tool/staticmethods.py deleted file mode 100644 --- a/rpython/tool/staticmethods.py +++ /dev/null @@ -1,14 +0,0 @@ -import types -class AbstractMethods(type): - def __new__(cls, cls_name, bases, cls_dict): - for key, value in cls_dict.iteritems(): - if isinstance(value, types.FunctionType): - cls_dict[key] = cls.decorator(value) - return type.__new__(cls, cls_name, bases, cls_dict) - - -class StaticMethods(AbstractMethods): - """ - Metaclass that turns plain methods into staticmethods. - """ - decorator = staticmethod From noreply at buildbot.pypy.org Sat Oct 4 04:17:01 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 4 Oct 2014 04:17:01 +0200 (CEST) Subject: [pypy-commit] pypy rtyper-stuff: add missing decorators Message-ID: <20141004021701.38E841C02ED@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: rtyper-stuff Changeset: r73766:66422d374461 Date: 2014-10-04 02:25 +0100 http://bitbucket.org/pypy/pypy/changeset/66422d374461/ Log: add missing decorators diff --git a/rpython/rtyper/lltypesystem/rbytearray.py b/rpython/rtyper/lltypesystem/rbytearray.py --- a/rpython/rtyper/lltypesystem/rbytearray.py +++ b/rpython/rtyper/lltypesystem/rbytearray.py @@ -38,12 +38,14 @@ i += s.length() cls.ll_strsetitem_nonneg(s, i, item) + @staticmethod def ll_strsetitem_nonneg(s, i, item): chars = s.chars ll_assert(i >= 0, "negative str getitem index") ll_assert(i < len(chars), "str getitem index out of bound") chars[i] = chr(item) + @staticmethod def ll_stritem_nonneg(s, i): return ord(rstr.LLHelpers.ll_stritem_nonneg(s, i)) diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -270,6 +270,7 @@ class LLHelpers(AbstractLLHelpers): from rpython.rtyper.annlowlevel import llstr, llunicode + @staticmethod @jit.elidable def ll_str_mul(s, times): if times < 0: @@ -292,6 +293,7 @@ i += j return newstr + @staticmethod @jit.elidable def ll_char_mul(ch, times): if typeOf(ch) is Char: @@ -308,9 +310,11 @@ j += 1 return newstr + @staticmethod def ll_strlen(s): return len(s.chars) + @staticmethod @signature(types.any(), types.int(), returns=types.any()) def ll_stritem_nonneg(s, i): chars = s.chars @@ -318,6 +322,7 @@ ll_assert(i < len(chars), "str getitem index out of bound") return chars[i] + @staticmethod def ll_chr2str(ch): if typeOf(ch) is Char: malloc = mallocstr @@ -328,6 +333,7 @@ return s # @jit.look_inside_iff(lambda str: jit.isconstant(len(str.chars)) and len(str.chars) == 1) + @staticmethod @jit.oopspec("str.str2unicode(str)") def ll_str2unicode(str): lgt = len(str.chars) @@ -338,6 +344,7 @@ s.chars[i] = cast_primitive(UniChar, str.chars[i]) return s + @staticmethod def ll_str2bytearray(str): from rpython.rtyper.lltypesystem.rbytearray import BYTEARRAY @@ -347,6 +354,7 @@ b.chars[i] = str.chars[i] return b + @staticmethod @jit.elidable def ll_strhash(s): # unlike CPython, there is no reason to avoid to return -1 @@ -362,13 +370,17 @@ s.hash = x return x + @staticmethod def ll_length(s): return len(s.chars) + @staticmethod def ll_strfasthash(s): return s.hash # assumes that the hash is already computed + @staticmethod @jit.elidable + @jit.oopspec('stroruni.concat(s1, s2)') def ll_strconcat(s1, s2): len1 = s1.length() len2 = s2.length() @@ -386,8 +398,8 @@ else: newstr.copy_contents(s2, 
newstr, 0, len1, len2) return newstr - ll_strconcat.oopspec = 'stroruni.concat(s1, s2)' + @staticmethod @jit.elidable def ll_strip(s, ch, left, right): s_len = len(s.chars) @@ -408,6 +420,7 @@ s.copy_contents(s, result, lpos, 0, r_len) return result + @staticmethod @jit.elidable def ll_strip_default(s, left, right): s_len = len(s.chars) @@ -428,6 +441,7 @@ s.copy_contents(s, result, lpos, 0, r_len) return result + @staticmethod @jit.elidable def ll_strip_multiple(s, s2, left, right): s_len = len(s.chars) @@ -448,6 +462,7 @@ s.copy_contents(s, result, lpos, 0, r_len) return result + @staticmethod @jit.elidable def ll_upper(s): s_chars = s.chars @@ -462,6 +477,7 @@ i += 1 return result + @staticmethod @jit.elidable def ll_lower(s): s_chars = s.chars @@ -476,6 +492,7 @@ i += 1 return result + @staticmethod def ll_join(s, length, items): s_chars = s.chars s_len = len(s_chars) @@ -509,7 +526,9 @@ i += 1 return result + @staticmethod @jit.elidable + @jit.oopspec('stroruni.cmp(s1, s2)') def ll_strcmp(s1, s2): if not s1 and not s2: return True @@ -531,9 +550,10 @@ return diff i += 1 return len1 - len2 - ll_strcmp.oopspec = 'stroruni.cmp(s1, s2)' + @staticmethod @jit.elidable + @jit.oopspec('stroruni.equal(s1, s2)') def ll_streq(s1, s2): if s1 == s2: # also if both are NULLs return True @@ -551,8 +571,8 @@ return False j += 1 return True - ll_streq.oopspec = 'stroruni.equal(s1, s2)' + @staticmethod @jit.elidable def ll_startswith(s1, s2): len1 = len(s1.chars) @@ -569,11 +589,13 @@ return True + @staticmethod def ll_startswith_char(s, ch): if not len(s.chars): return False return s.chars[0] == ch + @staticmethod @jit.elidable def ll_endswith(s1, s2): len1 = len(s1.chars) @@ -591,11 +613,13 @@ return True + @staticmethod def ll_endswith_char(s, ch): if not len(s.chars): return False return s.chars[len(s.chars) - 1] == ch + @staticmethod @jit.elidable @signature(types.any(), types.any(), types.int(), types.int(), returns=types.int()) def ll_find_char(s, ch, start, end): @@ -608,6 +632,7 @@ i += 1 return -1 + @staticmethod @jit.elidable def ll_rfind_char(s, ch, start, end): if end > len(s.chars): @@ -619,6 +644,7 @@ return i return -1 + @staticmethod @jit.elidable def ll_count_char(s, ch, start, end): count = 0 @@ -631,6 +657,7 @@ i += 1 return count + @staticmethod @signature(types.any(), types.any(), types.int(), types.int(), returns=types.int()) def ll_find(s1, s2, start, end): if start < 0: @@ -646,6 +673,7 @@ return LLHelpers.ll_search(s1, s2, start, end, FAST_FIND) + @staticmethod @signature(types.any(), types.any(), types.int(), types.int(), returns=types.int()) def ll_rfind(s1, s2, start, end): if start < 0: @@ -681,6 +709,7 @@ res = 0 return res + @staticmethod @jit.elidable def ll_search(s1, s2, start, end, mode): count = 0 @@ -768,6 +797,7 @@ return -1 return count + @staticmethod @signature(types.int(), types.any(), returns=types.any()) @jit.look_inside_iff(lambda length, items: jit.loop_unrolling_heuristic( items, length)) @@ -802,6 +832,7 @@ i += 1 return result + @staticmethod @jit.look_inside_iff(lambda length, chars, RES: jit.isconstant(length) and jit.isvirtual(chars)) def ll_join_chars(length, chars, RES): # no need to optimize this, will be replaced by string builder @@ -821,6 +852,7 @@ i += 1 return result + @staticmethod @jit.oopspec('stroruni.slice(s1, start, stop)') @signature(types.any(), types.int(), types.int(), returns=types.any()) @jit.elidable @@ -836,9 +868,11 @@ s1.copy_contents(s1, newstr, start, 0, lgt) return newstr + @staticmethod def ll_stringslice_startonly(s1, 
start): return LLHelpers._ll_stringslice(s1, start, len(s1.chars)) + @staticmethod @signature(types.any(), types.int(), types.int(), returns=types.any()) def ll_stringslice_startstop(s1, start, stop): if jit.we_are_jitted(): @@ -851,10 +885,12 @@ stop = len(s1.chars) return LLHelpers._ll_stringslice(s1, start, stop) + @staticmethod def ll_stringslice_minusone(s1): newlen = len(s1.chars) - 1 return LLHelpers._ll_stringslice(s1, 0, newlen) + @staticmethod def ll_split_chr(LIST, s, c, max): chars = s.chars strlen = len(chars) @@ -889,6 +925,7 @@ item.copy_contents(s, item, i, 0, j - i) return res + @staticmethod def ll_split(LIST, s, c, max): count = 1 if max == -1: @@ -920,6 +957,7 @@ item.copy_contents(s, item, prev_pos, 0, last - prev_pos) return res + @staticmethod def ll_rsplit_chr(LIST, s, c, max): chars = s.chars strlen = len(chars) @@ -955,6 +993,7 @@ item.copy_contents(s, item, j, 0, i - j) return res + @staticmethod def ll_rsplit(LIST, s, c, max): count = 1 if max == -1: @@ -986,6 +1025,7 @@ item.copy_contents(s, item, 0, 0, prev_pos) return res + @staticmethod @jit.elidable def ll_replace_chr_chr(s, c1, c2): length = len(s.chars) @@ -1001,6 +1041,7 @@ j += 1 return newstr + @staticmethod @jit.elidable def ll_contains(s, c): chars = s.chars @@ -1012,6 +1053,7 @@ i += 1 return False + @staticmethod @jit.elidable def ll_int(s, base): if not 2 <= base <= 36: @@ -1068,23 +1110,29 @@ # ll_build_push(x, next_string, n-1) # s = ll_build_finish(x) + @staticmethod def ll_build_start(parts_count): return malloc(TEMP, parts_count) + @staticmethod def ll_build_push(builder, next_string, index): builder[index] = next_string + @staticmethod def ll_build_finish(builder): return LLHelpers.ll_join_strs(len(builder), builder) + @staticmethod @specialize.memo() def ll_constant(s): return string_repr.convert_const(s) + @staticmethod @specialize.memo() def ll_constant_unicode(s): return unicode_repr.convert_const(s) + @classmethod def do_stringformat(cls, hop, sourcevarsrepr): s_str = hop.args_s[0] assert s_str.is_constant() @@ -1150,8 +1198,8 @@ hop.exception_cannot_occur() # to ignore the ZeroDivisionError of '%' return hop.gendirectcall(cls.ll_join_strs, size, vtemp) - do_stringformat = classmethod(do_stringformat) + @staticmethod @jit.dont_look_inside def ll_string2list(RESLIST, src): length = len(src.chars) From noreply at buildbot.pypy.org Sat Oct 4 14:06:13 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 4 Oct 2014 14:06:13 +0200 (CEST) Subject: [pypy-commit] stmgc c7-full-profiling: in-progress Message-ID: <20141004120613.5688A1C3334@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-full-profiling Changeset: r1444:233e7fec61ee Date: 2014-10-04 14:06 +0200 http://bitbucket.org/pypy/stmgc/changeset/233e7fec61ee/ Log: in-progress diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -3,34 +3,50 @@ #endif -enum contention_kind_e { +/* Here are the possible kinds of contention: - /* A write-write contention occurs when we running our transaction - and detect that we are about to write to an object that another - thread is also writing to. This kind of contention must be - resolved before continuing. This *must* abort one of the two - threads: the caller's thread is not at a safe-point, so cannot - wait! 
*/ - WRITE_WRITE_CONTENTION, + STM_CONTENTION_WRITE_WRITE - /* A write-read contention occurs when we are trying to commit: it + A write-write contention occurs when we are running our + transaction and detect that we are about to write to an object + that another thread is also writing to. This kind of + contention must be resolved before continuing. This *must* + abort one of the two threads: the caller's thread is not at a + safe-point, so cannot wait! + + It is reported as a timing event with the following two markers: + the current thread (i.e. where the second-in-time write occurs); + and the other thread (from its 'modified_old_objects_markers', + where the first-in-time write occurred). + + STM_CONTENTION_WRITE_READ + + A write-read contention occurs when we are trying to commit: it means that an object we wrote to was also read by another transaction. Even though it would seem obvious that we should just abort the other thread and proceed in our commit, a more subtle answer would be in some cases to wait for the other thread to commit first. It would commit having read the old value, and - then we can commit our change to it. */ - WRITE_READ_CONTENTION, + then we can commit our change to it. - /* An inevitable contention occurs when we're trying to become + It is reported as a timing event with only one marker: the + older location of the write that was done by the current thread. + + STM_CONTENTION_INEVITABLE + + An inevitable contention occurs when we're trying to become inevitable but another thread already is. We can never abort the other thread in this case, but we still have the choice to abort - ourselves or pause until the other thread commits. */ - INEVITABLE_CONTENTION, -}; + ourselves or pause until the other thread commits. + + It is reported with two markers, one for the current thread and + one for the other thread. Each marker gives the location that + attempts to make the transaction inevitable. +*/ + struct contmgr_s { - enum contention_kind_e kind; + enum stm_event_e kind; struct stm_priv_segment_info_s *other_pseg; bool abort_other; bool try_sleep; // XXX add a way to timeout, but should handle repeated @@ -99,7 +115,7 @@ static bool contention_management(uint8_t other_segment_num, - enum contention_kind_e kind, + enum stm_event_e kind, object_t *obj) { assert(_has_mutex()); @@ -109,6 +125,9 @@ if (must_abort()) abort_with_mutex(); + /* Report the contention */ + timing_contention(kind, other_segment_num, obj); + /* Who should abort here: this thread, or the other thread? */ struct contmgr_s contmgr; contmgr.kind = kind; @@ -138,19 +157,8 @@ contmgr.abort_other = false; } - - int wait_category = - kind == WRITE_READ_CONTENTION ? STM_TIME_WAIT_WRITE_READ : - kind == INEVITABLE_CONTENTION ? STM_TIME_WAIT_INEVITABLE : - STM_TIME_WAIT_OTHER; - - int abort_category = - kind == WRITE_WRITE_CONTENTION ? STM_TIME_RUN_ABORTED_WRITE_WRITE : - kind == WRITE_READ_CONTENTION ? STM_TIME_RUN_ABORTED_WRITE_READ : - kind == INEVITABLE_CONTENTION ? STM_TIME_RUN_ABORTED_INEVITABLE : - STM_TIME_RUN_ABORTED_OTHER; - - + /* Do one of three things here... 
+ */ if (contmgr.try_sleep && kind != WRITE_WRITE_CONTENTION && contmgr.other_pseg->safe_point != SP_WAIT_FOR_C_TRANSACTION_DONE) { others_may_have_run = true; @@ -166,12 +174,26 @@ contmgr.other_pseg->signal_when_done = true; marker_contention(kind, false, other_segment_num, obj); - change_timing_state(wait_category); - /* tell the other to commit ASAP */ signal_other_to_commit_soon(contmgr.other_pseg); + enum stm_event_e wait_category; + switch (kind) { + case WRITE_READ_CONTENTION: + wait_category = STM_WT_WRITE_READ; + break; + case INEVITABLE_CONTENTION: + wait_category = STM_WT_INEVITABLE; + break; + default: + stm_fatalerror("unexpected wait kind: %d", kind); + } + dprintf(("pausing...\n")); + + timing_event(STM_SEGMENT->running_thread, wait_category, .., ..); + ...; + cond_signal(C_AT_SAFE_POINT); STM_PSEGMENT->safe_point = SP_WAIT_FOR_C_TRANSACTION_DONE; cond_wait(C_TRANSACTION_DONE); diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -124,17 +124,13 @@ dprintf_test(("write_slowpath %p -> mod_old\n", obj)); - /* First change to this old object from this transaction. + /* Add the current marker, recording where we wrote to this object */ + timing_record_write(); + + /* Change to this old object from this transaction. Add it to the list 'modified_old_objects'. */ LIST_APPEND(STM_PSEGMENT->modified_old_objects, obj); - /* Add the current marker, recording where we wrote to this object */ - uintptr_t marker[2]; - marker_fetch(STM_SEGMENT->running_thread, marker); - STM_PSEGMENT->modified_old_objects_markers = - list_append2(STM_PSEGMENT->modified_old_objects_markers, - marker[0], marker[1]); - release_marker_lock(STM_SEGMENT->segment_base); /* We need to privatize the pages containing the object, if they @@ -328,29 +324,24 @@ STM_SEGMENT->transaction_read_version = 1; } -static void _stm_start_transaction(stm_thread_local_t *tl, bool inevitable) +static uint64_t _global_start_time = 0; + +static void _stm_start_transaction(stm_thread_local_t *tl) { assert(!_stm_in_transaction(tl)); - retry: - if (inevitable) { - wait_for_end_of_inevitable_transaction(tl); - } - - if (!acquire_thread_segment(tl)) - goto retry; + while (!acquire_thread_segment(tl)) + ; /* GS invalid before this point! */ assert(STM_PSEGMENT->safe_point == SP_NO_TRANSACTION); assert(STM_PSEGMENT->transaction_state == TS_NONE); - timing_event(tl, STM_TRANSACTION_START, NULL, NULL); - STM_PSEGMENT->start_time = tl->_timing_cur_start; + timing_event(tl, STM_TRANSACTION_START); + STM_PSEGMENT->start_time = _global_start_time++; STM_PSEGMENT->signalled_to_commit_soon = false; STM_PSEGMENT->safe_point = SP_RUNNING; - STM_PSEGMENT->marker_inev[1] = 0; - if (inevitable) - marker_fetch_inev(); - STM_PSEGMENT->transaction_state = (inevitable ? 
TS_INEVITABLE : TS_REGULAR); + STM_PSEGMENT->marker_inev.object = NULL; + STM_PSEGMENT->transaction_state = TS_REGULAR; #ifndef NDEBUG STM_PSEGMENT->running_pthread = pthread_self(); #endif @@ -399,14 +390,17 @@ #else long repeat_count = stm_rewind_jmp_setjmp(tl); #endif - _stm_start_transaction(tl, false); + _stm_start_transaction(tl); return repeat_count; } void stm_start_inevitable_transaction(stm_thread_local_t *tl) { + /* used to be more efficient, starting directly an inevitable transaction, + but there is no real point any more, I believe */ s_mutex_lock(); - _stm_start_transaction(tl, true); + _stm_start_transaction(tl); + _stm_become_inevitable("start_inevitable_transaction"); } @@ -449,7 +443,10 @@ return true; } /* we aborted the other transaction without waiting, so - we can just continue */ + we can just break out of this loop on + modified_old_objects and continue with the next + segment */ + break; } })); } @@ -783,13 +780,13 @@ list_clear(STM_PSEGMENT->modified_old_objects_markers); } -static void _finish_transaction(enum stm_event_e final_event) +static void _finish_transaction(enum stm_event_e event) { STM_PSEGMENT->safe_point = SP_NO_TRANSACTION; STM_PSEGMENT->transaction_state = TS_NONE; /* marker_inev is not needed anymore */ - STM_PSEGMENT->marker_inev[1] = 0; + STM_PSEGMENT->marker_inev.object = NULL; /* reset these lists to NULL for the next transaction */ _verify_cards_cleared_in_all_lists(get_priv_segment(STM_SEGMENT->segment_num)); @@ -797,9 +794,9 @@ list_clear(STM_PSEGMENT->old_objects_with_cards); LIST_FREE(STM_PSEGMENT->large_overflow_objects); - timing_end_transaction(attribute_to); + stm_thread_local_t *tl = STM_SEGMENT->running_thread; + timing_event(tl, event); - stm_thread_local_t *tl = STM_SEGMENT->running_thread; release_thread_segment(tl); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */ } @@ -835,9 +832,9 @@ /* if a major collection is required, do it here */ if (is_major_collection_requested()) { - timing_event(NULL, STM_GC_MAJOR_START, NULL, NULL); + timing_event(STM_SEGMENT->running_thread, STM_GC_MAJOR_START); major_collection_now_at_safe_point(); - timing_event(NULL, STM_GC_MAJOR_STOP, NULL, NULL); + timing_event(STM_SEGMENT->running_thread, STM_GC_MAJOR_DONE); } /* synchronize modified old objects to other threads */ @@ -864,7 +861,7 @@ } /* done */ - _finish_transaction(STM_TR_COMMIT); + _finish_transaction(STM_TRANSACTION_COMMIT); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */ s_mutex_unlock(); @@ -957,10 +954,6 @@ (int)pseg->transaction_state); } - /* if we don't have marker information already, look up and preserve - the marker information from the shadowstack as a string */ - marker_default_for_abort(pseg); - /* throw away the content of the nursery */ long bytes_in_nursery = throw_away_nursery(pseg); @@ -1049,16 +1042,13 @@ /* invoke the callbacks */ invoke_and_clear_user_callbacks(1); /* for abort */ - enum stm_event_e final_event = STM_TR_ABORT_OTHER; - if (is_abort(STM_SEGMENT->nursery_end)) { /* done aborting */ - final_event = STM_SEGMENT->nursery_end; STM_SEGMENT->nursery_end = pause_signalled ? NSE_SIGPAUSE : NURSERY_END; } - _finish_transaction(final_event); + _finish_transaction(STM_TRANSACTION_ABORT); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! 
*/ /* Broadcast C_ABORTED to wake up contention.c */ @@ -1100,8 +1090,8 @@ if (STM_PSEGMENT->transaction_state == TS_REGULAR) { dprintf(("become_inevitable: %s\n", msg)); - marker_fetch_inev(); - wait_for_end_of_inevitable_transaction(NULL); + timing_fetch_inev(); + wait_for_end_of_inevitable_transaction(); STM_PSEGMENT->transaction_state = TS_INEVITABLE; stm_rewind_jmp_forget(STM_SEGMENT->running_thread); invoke_and_clear_user_callbacks(0); /* for commit */ diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -138,7 +138,7 @@ /* Start time: to know approximately for how long a transaction has been running, in contention management */ - double start_time; + uint64_t start_time; /* This is the number stored in the overflowed objects (a multiple of GCFLAG_OVERFLOW_NUMBER_bit0). It is incremented when the @@ -196,10 +196,8 @@ pthread_t running_pthread; #endif - /* Temporarily stores the marker information */ - char marker_self[_STM_MARKER_LEN]; - char marker_other[_STM_MARKER_LEN]; - uintptr_t marker_inev[2]; /* marker where this thread became inevitable */ + /* marker where this thread became inevitable */ + stm_loc_marker_t marker_inev; }; enum /* safe_point */ { diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c --- a/c7/stm/forksupport.c +++ b/c7/stm/forksupport.c @@ -187,7 +187,6 @@ #ifndef NDEBUG pr->running_pthread = pthread_self(); #endif - strcpy(pr->marker_self, "fork"); tl->shadowstack = NULL; pr->shadowstack_at_start_of_transaction = NULL; stm_rewind_jmp_forget(tl); diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -141,7 +141,7 @@ if (is_major_collection_requested()) { /* if still true */ - timing_event(NULL, STM_GC_MAJOR_START, NULL, NULL); + timing_event(STM_SEGMENT->running_thread, STM_GC_MAJOR_START); synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK); @@ -149,7 +149,7 @@ major_collection_now_at_safe_point(); } - timing_event(NULL, STM_GC_MAJOR_STOP, NULL, NULL); + timing_event(STM_SEGMENT->running_thread, STM_GC_MAJOR_DONE); } s_mutex_unlock(); @@ -446,9 +446,9 @@ for (i = list_count(lst); i > 0; i -= 2) { mark_visit_object((object_t *)list_item(lst, i - 1), base); } - if (get_priv_segment(j)->marker_inev[1]) { - uintptr_t marker_inev_obj = get_priv_segment(j)->marker_inev[1]; - mark_visit_object((object_t *)marker_inev_obj, base); + if (get_priv_segment(j)->marker_inev.segment_base) { + object_t *marker_inev_obj = get_priv_segment(j)->marker_inev.object; + mark_visit_object(marker_inev_obj, base); } } } diff --git a/c7/stm/marker.c b/c7/stm/marker.c --- a/c7/stm/marker.c +++ b/c7/stm/marker.c @@ -3,18 +3,11 @@ #endif -void (*stmcb_expand_marker)(char *segment_base, uintptr_t odd_number, - object_t *following_object, - char *outputbuf, size_t outputbufsize); - -void (*stmcb_debug_print)(const char *cause, double time, - const char *marker); - - -static void marker_fetch(stm_thread_local_t *tl, uintptr_t marker[2]) +static void marker_fetch(stm_loc_marker_t *out_marker) { - /* fetch the current marker from the tl's shadow stack, - and return it in 'marker[2]'. */ + /* Fetch the current marker from the 'out_marker->tl's shadow stack, + and return it in 'out_marker->odd_number' and 'out_marker->object'. 
*/ + stm_thread_local_t *tl = out_marker->tl; struct stm_shadowentry_s *current = tl->shadowstack - 1; struct stm_shadowentry_s *base = tl->shadowstack_base; @@ -28,87 +21,32 @@ } if (current != base) { /* found the odd marker */ - marker[0] = (uintptr_t)current[0].ss; - marker[1] = (uintptr_t)current[1].ss; + out_marker->odd_number = (uintptr_t)current[0].ss; + out_marker->object = current[1].ss; } else { /* no marker found */ - marker[0] = 0; - marker[1] = 0; + out_marker->odd_number = 0; + out_marker->object = NULL; } } -static void marker_expand(uintptr_t marker[2], char *segment_base, - char *outmarker) +static void _timing_fetch_inev(void) { - /* Expand the marker given by 'marker[2]' into a full string. This - works assuming that the marker was produced inside the segment - given by 'segment_base'. If that's from a different thread, you - must first acquire the corresponding 'marker_lock'. */ - assert(_has_mutex()); - outmarker[0] = 0; - if (marker[0] == 0) - return; /* no marker entry found */ - if (stmcb_expand_marker != NULL) { - stmcb_expand_marker(segment_base, marker[0], (object_t *)marker[1], - outmarker, _STM_MARKER_LEN); - } + stm_loc_marker_t marker; + marker.tl = STM_SEGMENT->running_thread; + marker_fetch(&marker); + STM_PSEGMENT->marker_inev.odd_number = marker.odd_number; + STM_PSEGMENT->marker_inev.object = marker.object; } -static void marker_default_for_abort(struct stm_priv_segment_info_s *pseg) +static void marker_fetch_obj_write(object_t *obj, + struct stm_loc_marker_t *out_marker) { - if (pseg->marker_self[0] != 0) - return; /* already collected an entry */ - - uintptr_t marker[2]; - marker_fetch(pseg->pub.running_thread, marker); - marker_expand(marker, pseg->pub.segment_base, pseg->marker_self); - pseg->marker_other[0] = 0; -} - -char *_stm_expand_marker(void) -{ - /* for tests only! */ - static char _result[_STM_MARKER_LEN]; - uintptr_t marker[2]; - _result[0] = 0; - s_mutex_lock(); - marker_fetch(STM_SEGMENT->running_thread, marker); - marker_expand(marker, STM_SEGMENT->segment_base, _result); - s_mutex_unlock(); - return _result; -} - -#if 0 // XXX -static void marker_copy(stm_thread_local_t *tl, - struct stm_priv_segment_info_s *pseg, - enum stm_time_e attribute_to, double time) -{ - /* Copies the marker information from pseg to tl. This is called - indirectly from abort_with_mutex(), but only if the lost time is - greater than that of the previous recorded marker. By contrast, - pseg->marker_self has been filled already in all cases. The - reason for the two steps is that we must fill pseg->marker_self - earlier than now (some objects may be GCed), but we only know - here the total time it gets attributed. + /* From 'out_marker->tl', fill in 'out_marker->segment_base' and + 'out_marker->odd_number' and 'out_marker->object' from the + marker associated with writing the 'obj'. 
*/ - if (stmcb_debug_print) { - stmcb_debug_print(timer_names[attribute_to], time, pseg->marker_self); - } - if (time * 0.99 > tl->longest_marker_time) { - tl->longest_marker_state = attribute_to; - tl->longest_marker_time = time; - memcpy(tl->longest_marker_self, pseg->marker_self, _STM_MARKER_LEN); - memcpy(tl->longest_marker_other, pseg->marker_other, _STM_MARKER_LEN); - } - pseg->marker_self[0] = 0; - pseg->marker_other[0] = 0; -} -#endif - -static void marker_fetch_obj_write(uint8_t in_segment_num, object_t *obj, - uintptr_t marker[2]) -{ assert(_has_mutex()); /* here, we acquired the other thread's marker_lock, which means that: @@ -120,80 +58,83 @@ the global mutex_lock at this point too). */ long i; + int in_segment_num = out_marker->tl->associated_segment_num; struct stm_priv_segment_info_s *pseg = get_priv_segment(in_segment_num); struct list_s *mlst = pseg->modified_old_objects; struct list_s *mlstm = pseg->modified_old_objects_markers; - for (i = list_count(mlst); --i >= 0; ) { + assert(list_count(mlstm) <= 2 * list_count(mlst)); + for (i = list_count(mlstm) / 2; --i >= 0; ) { if (list_item(mlst, i) == (uintptr_t)obj) { - assert(list_count(mlstm) == 2 * list_count(mlst)); - marker[0] = list_item(mlstm, i * 2 + 0); - marker[1] = list_item(mlstm, i * 2 + 1); + out_marker->odd_number = list_item(mlstm, i * 2 + 0); + out_marker->object = (object_t *)list_item(mlstm, i * 2 + 1); return; } } - marker[0] = 0; - marker[1] = 0; + out_marker->odd_number = 0; + out_marker->object = NULL; } -static void marker_contention(int kind, bool abort_other, - uint8_t other_segment_num, object_t *obj) +static void _timing_record_write(void) { - uintptr_t self_marker[2]; - uintptr_t other_marker[2]; - struct stm_priv_segment_info_s *my_pseg, *other_pseg; + stm_loc_marker_t marker; + marker.tl = STM_SEGMENT->running_thread; + marker_fetch(&marker); - my_pseg = get_priv_segment(STM_SEGMENT->segment_num); + long base_count = list_count(STM_PSEGMENT->modified_old_objects); + struct list_s *mlstm = STM_PSEGMENT->modified_old_objects_markers; + while (list_count(mlstm) < 2 * base_count) { + mlstm = list_append2(mlstm, 0, 0); + } + mlstm = list_append2(mlstm, marker.odd_number, (uintptr_t)marker.object); + STM_PSEGMENT->modified_old_objects_markers = mlstm; +} + +static void _timing_contention(enum stm_event_e kind, + uint8_t other_segment_num, object_t *obj) +{ + struct stm_priv_segment_info_s *other_pseg; other_pseg = get_priv_segment(other_segment_num); - char *my_segment_base = STM_SEGMENT->segment_base; - char *other_segment_base = get_segment_base(other_segment_num); + char *other_segment_base = other_pseg->pub.segment_base; + acquire_marker_lock(other_segment_base); - acquire_marker_lock(other_segment_base); + stm_loc_marker_t markers[2]; /* Collect the location for myself. It's usually the current location, except in a write-read abort, in which case it's the older location of the write. */ - if (kind == WRITE_READ_CONTENTION) - marker_fetch_obj_write(my_pseg->pub.segment_num, obj, self_marker); + markers[0].tl = STM_SEGMENT->running_thread; + markers[0].segment_base = STM_SEGMENT->segment_base; + + if (kind == STM_CONTENTION_WRITE_READ) + marker_fetch_obj_write(obj, &markers[0]); else - marker_fetch(my_pseg->pub.running_thread, self_marker); - - /* Expand this location into either my_pseg->marker_self or - other_pseg->marker_other, depending on who aborts. */ - marker_expand(self_marker, my_segment_base, - abort_other ? 
other_pseg->marker_other - : my_pseg->marker_self); + marker_fetch(&markers[0]); /* For some categories, we can also collect the relevant information for the other segment. */ - char *outmarker = abort_other ? other_pseg->marker_self - : my_pseg->marker_other; + markers[1].tl = other_pseg->pub.running_thread; + markers[1].segment_base = other_pseg->pub.segment_base; + switch (kind) { case WRITE_WRITE_CONTENTION: - marker_fetch_obj_write(other_segment_num, obj, other_marker); - marker_expand(other_marker, other_segment_base, outmarker); + marker_fetch_obj_write(obj, &markers[1]); break; case INEVITABLE_CONTENTION: - assert(abort_other == false); - other_marker[0] = other_pseg->marker_inev[0]; - other_marker[1] = other_pseg->marker_inev[1]; - marker_expand(other_marker, other_segment_base, outmarker); - break; - case WRITE_READ_CONTENTION: - strcpy(outmarker, ""); + markers[1].odd_number = other_pseg->marker_inev[0]; + markers[1].object = (object_t *)other_pseg->marker_inev[1]; break; default: - outmarker[0] = 0; + markers[1].odd_number = 0; + markers[1].object = NULL; break; } release_marker_lock(other_segment_base); + + stmcb_timing_event(markers[0].tl, kind, markers); } -static void marker_fetch_inev(void) -{ - uintptr_t marker[2]; - marker_fetch(STM_SEGMENT->running_thread, marker); - STM_PSEGMENT->marker_inev[0] = marker[0]; - STM_PSEGMENT->marker_inev[1] = marker[1]; -} + +void (*stmcb_timing_event)(enum stm_event_e event, + stm_loc_marker_t *markers[2]); diff --git a/c7/stm/marker.h b/c7/stm/marker.h --- a/c7/stm/marker.h +++ b/c7/stm/marker.h @@ -1,9 +1,19 @@ -static void marker_fetch(stm_thread_local_t *tl, uintptr_t marker[2]); -static void marker_fetch_inev(void); -static void marker_expand(uintptr_t marker[2], char *segment_base, - char *outmarker); -static void marker_default_for_abort(struct stm_priv_segment_info_s *pseg); +static void _timing_record_write(void); +static void _timing_fetch_inev(void); +static void _timing_contention(enum stm_event_e kind, + uint8_t other_segment_num, object_t *obj); -static void marker_contention(int kind, bool abort_other, - uint8_t other_segment_num, object_t *obj); + +#define timing_event(tl, event) \ + (stmcb_timing_event != NULL ? stmcb_timing_event(tl, event, NULL) : (void)0) + +#define timing_record_write() \ + (stmcb_timing_event != NULL ? _timing_record_write() : (void)0) + +#define timing_fetch_inev() \ + (stmcb_timing_event != NULL ? _timing_fetch_inev() : (void)0) + +#define timing_contention(kind, other_segnum, obj) \ + (stmcb_timing_event != NULL ? 
\ + _timing_contention(kind, other_segnum, obj) : (void)0) diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -425,11 +425,13 @@ for (i = num_old + 1; i < total; i += 2) { minor_trace_if_young((object_t **)list_ptr_to_item(mlst, i)); } - if (STM_PSEGMENT->marker_inev[1]) { - uintptr_t *pmarker_inev_obj = (uintptr_t *) + if (STM_PSEGMENT->marker_inev.segment_base) { + assert(STM_PSEGMENT->marker_inev.segment_base == + STM_SEGMENT->segment_base); + object_t **pmarker_inev_obj = (object_t **) REAL_ADDRESS(STM_SEGMENT->segment_base, - &STM_PSEGMENT->marker_inev[1]); - minor_trace_if_young((object_t **)pmarker_inev_obj); + &STM_PSEGMENT->marker_inev.object); + minor_trace_if_young(pmarker_inev_obj); } } @@ -572,11 +574,11 @@ stm_safe_point(); - timing_event(NULL, STM_GC_MINOR_START, NULL, NULL); + timing_event(STM_SEGMENT->running_thread, STM_GC_MINOR_START); _do_minor_collection(commit); - timing_event(NULL, STM_GC_MINOR_STOP, NULL, NULL); + timing_event(STM_SEGMENT->running_thread, STM_GC_MINOR_DONE); } void stm_collect(long level) diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -22,8 +22,8 @@ static char *setup_mmap(char *reason, int *map_fd) { char name[128]; - sprintf(name, "/stmgc-c7-bigmem-%ld-%.18e", - (long)getpid(), get_stm_time()); + sprintf(name, "/stmgc-c7-bigmem-%ld", + (long)getpid()); /* Create the big shared memory object, and immediately unlink it. There is a small window where if this process is killed the @@ -241,8 +241,6 @@ num = tl->prev->associated_segment_num; } tl->thread_local_obj = NULL; - tl->_timing_cur_state = STM_TIME_OUTSIDE_TRANSACTION; - tl->_timing_cur_start = get_stm_time(); /* assign numbers consecutively, but that's for tests; we could also assign the same number to all of them and they would get their own diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -123,31 +123,19 @@ /************************************************************/ -static void wait_for_end_of_inevitable_transaction( - stm_thread_local_t *tl_or_null_if_can_abort) +static void wait_for_end_of_inevitable_transaction(void) { long i; restart: for (i = 1; i <= NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *other_pseg = get_priv_segment(i); if (other_pseg->transaction_state == TS_INEVITABLE) { - if (tl_or_null_if_can_abort == NULL) { - /* handle this case like a contention: it will either - abort us (not the other thread, which is inevitable), - or wait for a while. If we go past this call, then we - waited; in this case we have to re-check if no other - thread is inevitable. */ - inevitable_contention_management(i); - } - else { - /* wait for stm_commit_transaction() to finish this - inevitable transaction */ - stm_thread_local_t *tl = tl_or_null_if_can_abort; - signal_other_to_commit_soon(other_pseg); - timing_event_wt_inevitable(tl, other_pseg); - cond_wait(C_INEVITABLE); - timing_event(tl, STM_WAIT_DONE, NULL, NULL); - } + /* handle this case like a contention: it will either + abort us (not the other thread, which is inevitable), + or wait for a while. If we go past this call, then we + waited; in this case we have to re-check if no other + thread is inevitable. */ + inevitable_contention_management(i); goto restart; } } @@ -187,9 +175,9 @@ } /* No segment available. Wait until release_thread_segment() signals that one segment has been freed. 
*/ - timing_event(tl, STM_WT_FREE_SEGMENT, NULL, NULL); + timing_event(tl, STM_WAIT_FREE_SEGMENT); cond_wait(C_SEGMENT_FREE); - timing_event(tl, STM_WAIT_DONE, NULL, NULL); + timing_event(tl, STM_WAIT_DONE); /* Return false to the caller, which will call us again */ return false; @@ -331,7 +319,6 @@ if (STM_SEGMENT->nursery_end == NURSERY_END) return; /* fast path: no safe point requested */ - int previous_state = -1; assert(_seems_to_be_running_transaction()); assert(_has_mutex()); while (1) { @@ -342,10 +329,6 @@ break; /* no safe point requested */ if (STM_SEGMENT->nursery_end == NSE_SIGCOMMITSOON) { - if (previous_state == -1) { - previous_state = change_timing_state(STM_TIME_SYNC_COMMIT_SOON); - } - STM_PSEGMENT->signalled_to_commit_soon = true; stmcb_commit_soon(); if (!pause_signalled) { @@ -362,17 +345,12 @@ #ifdef STM_TESTS abort_with_mutex(); #endif - if (previous_state == -1) { - previous_state = change_timing_state(STM_TIME_SYNC_PAUSE); - } + timing_event(STM_SEGMENT->running_thread, STM_WAIT_SYNC_PAUSE); cond_signal(C_AT_SAFE_POINT); STM_PSEGMENT->safe_point = SP_WAIT_FOR_C_REQUEST_REMOVED; cond_wait(C_REQUEST_REMOVED); STM_PSEGMENT->safe_point = SP_RUNNING; - } - - if (previous_state != -1) { - change_timing_state(previous_state); + timing_event(STM_SEGMENT->running_thread, STM_WAIT_DONE); } } diff --git a/c7/stm/sync.h b/c7/stm/sync.h --- a/c7/stm/sync.h +++ b/c7/stm/sync.h @@ -28,7 +28,7 @@ static bool acquire_thread_segment(stm_thread_local_t *tl); static void release_thread_segment(stm_thread_local_t *tl); -static void wait_for_end_of_inevitable_transaction(stm_thread_local_t *); +static void wait_for_end_of_inevitable_transaction(void); enum sync_type_e { STOP_OTHERS_UNTIL_MUTEX_UNLOCK, diff --git a/c7/stm/timing.h b/c7/stm/timing.h deleted file mode 100644 --- a/c7/stm/timing.h +++ /dev/null @@ -1,28 +0,0 @@ -void (*stmcb_timing_event)(stm_thread_local_t *, enum stm_event_e, - const char *, const char *); - -static inline void timing_event(stm_thread_local_t *tl, - enum stm_event_e event, - const char *marker1, - const char *marker2) -{ - if (stmcb_timing_event != NULL) - stmcb_timing_event(tl, event, marker1, marker2); -} - -static inline void timing_event_wt_inevitable(stm_thread_local_t *tl, - struct stm_priv_segment_info_s *other_pseg) -{ - /* We are not running a transaction yet; can't get the 'self loc' */ - assert(_has_mutex()); - if (stmcb_timing_event != NULL) { - - char outmarker[_STM_MARKER_LEN]; - acquire_marker_lock(other_pseg->pub.segment_base); - marker_expand(other_pseg->marker_inev, other_pseg->pub.segment_base, - outmarker); - release_marker_lock(other_pseg->pub.segment_base); - - stmcb_timing_event(tl, STM_WT_INEVITABLE, NULL, outmarker); - } -} diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -15,7 +15,6 @@ #include "stm/fprintcolor.h" #include "stm/weakref.h" #include "stm/marker.h" -#include "stm/timing.h" #include "stm/misc.c" #include "stm/list.c" diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -410,45 +410,51 @@ void stm_flush_timing(stm_thread_local_t *tl, int verbose); -/* Profiling events (in the comments: value for marker1, value for marker2) */ +/* Profiling events. 
In the comments: content of the markers, if any */ enum stm_event_e { - /* always STM_TRANSACTION_START followed later by one of the STM_TR_xxx */ + /* always STM_TRANSACTION_START followed later by one of COMMIT or ABORT */ STM_TRANSACTION_START, - STM_TR_COMMIT, - STM_TR_ABORT_WRITE_WRITE, /* self write loc, other write loc */ - STM_TR_ABORT_WRITE_READ, /* self write loc, other = null; or opposite */ - STM_TR_ABORT_INEVITABLE, /* self cur loc?, other turned-inev loc */ - STM_TR_ABORT_OTHER, /* ?, ? */ + STM_TRANSACTION_COMMIT, + STM_TRANSACTION_ABORT, - /* always one STM_WT_xxx followed later by STM_WAIT_DONE */ - STM_WT_FREE_SEGMENT, - STM_WT_SYNC_PAUSE, - STM_WT_WRITE_READ, /* self write loc, other = null; or opposite */ - STM_WT_INEVITABLE, /* self cur loc?, other turned-inev loc */ + /* contention; see details at the start of contention.c */ + STM_CONTENTION_WRITE_WRITE, /* markers: self loc / other written loc */ + STM_CONTENTION_WRITE_READ, /* markers: self written loc / other missing */ + STM_CONTENTION_INEVITABLE, /* markers: self loc / other inev loc */ + + /* following a contention, we get from the same thread one of: + STM_ABORTING_OTHER_CONTENTION, STM_TRANSACTION_ABORT (self-abort), + or STM_WAIT_CONTENTION (self-wait). */ + STM_ABORTING_OTHER_CONTENTION, + + /* always one STM_WAIT_xxx followed later by STM_WAIT_DONE */ + STM_WAIT_FREE_SEGMENT, + STM_WAIT_SYNC_PAUSE, + STM_WAIT_CONTENTION, STM_WAIT_DONE, /* start and end of GC cycles */ STM_GC_MINOR_START, - STM_GC_MINOR_STOP, + STM_GC_MINOR_DONE, STM_GC_MAJOR_START, - STM_GC_MAJOR_STOP, + STM_GC_MAJOR_DONE, _STM_EVENT_N }; -#define _STM_MARKER_LEN 128 /* The markers pushed in the shadowstack are an odd number followed by a - regular pointer. When needed, this library invokes this callback to - turn this pair into a human-readable explanation. */ -extern void (*stmcb_expand_marker)(char *segment_base, uintptr_t odd_number, - object_t *following_object, - char *outputbuf, size_t outputbufsize); -extern void (*stmcb_timing_event)(stm_thread_local_t *tl, + regular pointer. */ +typedef struct { + stm_thread_local_t *tl; + char *segment_base; /* base to interpret the 'object' below */ + uintptr_t odd_number; /* marker odd number, or 0 if marker is missing */ + object_t *object; /* marker object, or NULL if marker is missing */ +} stm_loc_marker_t; +extern void (*stmcb_timing_event)(stm_thread_local_t *tl, /* the local thread */ enum stm_event_e event, - const char *marker1, - const char *marker2); + stm_loc_marker_t *markers); -/* Conventience macros to push the markers into the shadowstack */ +/* Convenience macros to push the markers into the shadowstack */ #define STM_PUSH_MARKER(tl, odd_num, p) do { \ uintptr_t _odd_num = (odd_num); \ assert(_odd_num & 1); \ diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -107,39 +107,50 @@ long stm_call_on_abort(stm_thread_local_t *, void *key, void callback(void *)); long stm_call_on_commit(stm_thread_local_t *, void *key, void callback(void *)); +/* Profiling events. 
In the comments: content of the markers, if any */ enum stm_event_e { - /* always STM_TRANSACTION_START followed later by one of the STM_TR_xxx */ + /* always STM_TRANSACTION_START followed later by one of COMMIT or ABORT */ STM_TRANSACTION_START, - STM_TR_COMMIT, - STM_TR_ABORT_WRITE_WRITE, /* self write loc, other write loc */ - STM_TR_ABORT_WRITE_READ, /* self write loc, other = null; or opposite */ - STM_TR_ABORT_INEVITABLE, /* self cur loc, other turned-inev loc */ - STM_TR_ABORT_OTHER, /* ?, ? */ + STM_TRANSACTION_COMMIT, + STM_TRANSACTION_ABORT, - /* always one STM_WT_xxx followed later by STM_WAIT_DONE */ - STM_WT_FREE_SEGMENT, - STM_WT_SYNC_PAUSE, - STM_WT_WRITE_READ, /* self write loc, other = null; or opposite */ - STM_WT_INEVITABLE, /* self cur loc, other turned-inev loc */ + /* contention; see details at the start of contention.c */ + STM_CONTENTION_WRITE_WRITE, /* markers: self loc / other written loc */ + STM_CONTENTION_WRITE_READ, /* markers: self written loc / other missing */ + STM_CONTENTION_INEVITABLE, /* markers: self inev loc / other inev loc */ + + /* following a contention, we get from the same thread one of: + STM_ABORTING_OTHER_CONTENTION, STM_TRANSACTION_ABORT (self-abort), + or STM_WAIT_CONTENTION (self-wait). */ + STM_ABORTING_OTHER_CONTENTION, + + /* always one STM_WAIT_xxx followed later by STM_WAIT_DONE */ + STM_WAIT_FREE_SEGMENT, + STM_WAIT_SYNC_PAUSE, + STM_WAIT_CONTENTION, STM_WAIT_DONE, /* start and end of GC cycles */ STM_GC_MINOR_START, - STM_GC_MINOR_STOP, + STM_GC_MINOR_DONE, STM_GC_MAJOR_START, - STM_GC_MAJOR_STOP, + STM_GC_MAJOR_DONE, ... }; void stm_flush_timing(stm_thread_local_t *, int); -void (*stmcb_expand_marker)(char *segment_base, uintptr_t odd_number, - object_t *following_object, - char *outputbuf, size_t outputbufsize); -void (*stmcb_timing_event)(stm_thread_local_t *tl, +typedef struct { + stm_thread_local_t *tl; + /* If segment_base==NULL, the remaining fields are undefined. If non-NULL, + the rest is a marker to interpret from this segment_base addr. */ + char *segment_base; + uintptr_t odd_number; + object_t *object; +} stm_loc_marker_t; +void (*stmcb_timing_event)(stm_thread_local_t *tl, /* the local thread */ enum stm_event_e event, - const char *marker1, - const char *marker2); + stm_loc_marker_t *markers); void stm_push_marker(stm_thread_local_t *, uintptr_t, object_t *); void stm_update_marker_num(stm_thread_local_t *, uintptr_t); From noreply at buildbot.pypy.org Sat Oct 4 14:15:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 4 Oct 2014 14:15:14 +0200 (CEST) Subject: [pypy-commit] stmgc c7-full-profiling: Tests start to pass again Message-ID: <20141004121514.98FF01C3334@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-full-profiling Changeset: r1445:4efd6b7175db Date: 2014-10-04 14:15 +0200 http://bitbucket.org/pypy/stmgc/changeset/4efd6b7175db/ Log: Tests start to pass again diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -159,7 +159,7 @@ /* Do one of three things here... */ - if (contmgr.try_sleep && kind != WRITE_WRITE_CONTENTION && + if (contmgr.try_sleep && kind != STM_CONTENTION_WRITE_WRITE && contmgr.other_pseg->safe_point != SP_WAIT_FOR_C_TRANSACTION_DONE) { others_may_have_run = true; /* Sleep. @@ -172,28 +172,12 @@ itself already paused here. 
*/ contmgr.other_pseg->signal_when_done = true; - marker_contention(kind, false, other_segment_num, obj); /* tell the other to commit ASAP */ signal_other_to_commit_soon(contmgr.other_pseg); - enum stm_event_e wait_category; - switch (kind) { - case WRITE_READ_CONTENTION: - wait_category = STM_WT_WRITE_READ; - break; - case INEVITABLE_CONTENTION: - wait_category = STM_WT_INEVITABLE; - break; - default: - stm_fatalerror("unexpected wait kind: %d", kind); - } - dprintf(("pausing...\n")); - timing_event(STM_SEGMENT->running_thread, wait_category, .., ..); - ...; - cond_signal(C_AT_SAFE_POINT); STM_PSEGMENT->safe_point = SP_WAIT_FOR_C_TRANSACTION_DONE; cond_wait(C_TRANSACTION_DONE); @@ -202,16 +186,6 @@ if (must_abort()) abort_with_mutex(); - - abort(); /* XXX - struct stm_priv_segment_info_s *pseg = - get_priv_segment(STM_SEGMENT->segment_num); - double elapsed = - change_timing_state_tl(pseg->pub.running_thread, - STM_TIME_RUN_CURRENT); - marker_copy(pseg->pub.running_thread, pseg, - wait_category, elapsed); - */ } else if (!contmgr.abort_other) { @@ -219,16 +193,13 @@ signal_other_to_commit_soon(contmgr.other_pseg); dprintf(("abort in contention: kind %d\n", kind)); - STM_SEGMENT->nursery_end = abort_category; - marker_contention(kind, false, other_segment_num, obj); abort_with_mutex(); } else { /* We have to signal the other thread to abort, and wait until it does. */ - contmgr.other_pseg->pub.nursery_end = abort_category; - marker_contention(kind, true, other_segment_num, obj); + contmgr.other_pseg->pub.nursery_end = NSE_SIGABORT; int sp = contmgr.other_pseg->safe_point; switch (sp) { @@ -320,7 +291,8 @@ assert(get_priv_segment(other_segment_num)->write_lock_num == prev_owner); - contention_management(other_segment_num, WRITE_WRITE_CONTENTION, obj); + contention_management(other_segment_num, + STM_CONTENTION_WRITE_WRITE, obj); /* now we return into _stm_write_slowpath() and will try again to acquire the write lock on our object. 
*/ @@ -332,10 +304,12 @@ static bool write_read_contention_management(uint8_t other_segment_num, object_t *obj) { - return contention_management(other_segment_num, WRITE_READ_CONTENTION, obj); + return contention_management(other_segment_num, + STM_CONTENTION_WRITE_READ, obj); } static void inevitable_contention_management(uint8_t other_segment_num) { - contention_management(other_segment_num, INEVITABLE_CONTENTION, NULL); + contention_management(other_segment_num, + STM_CONTENTION_INEVITABLE, NULL); } diff --git a/c7/stm/marker.c b/c7/stm/marker.c --- a/c7/stm/marker.c +++ b/c7/stm/marker.c @@ -40,8 +40,7 @@ STM_PSEGMENT->marker_inev.object = marker.object; } -static void marker_fetch_obj_write(object_t *obj, - struct stm_loc_marker_t *out_marker) +static void marker_fetch_obj_write(object_t *obj, stm_loc_marker_t *out_marker) { /* From 'out_marker->tl', fill in 'out_marker->segment_base' and 'out_marker->odd_number' and 'out_marker->object' from the @@ -117,12 +116,12 @@ markers[1].segment_base = other_pseg->pub.segment_base; switch (kind) { - case WRITE_WRITE_CONTENTION: + case STM_CONTENTION_WRITE_WRITE: marker_fetch_obj_write(obj, &markers[1]); break; - case INEVITABLE_CONTENTION: - markers[1].odd_number = other_pseg->marker_inev[0]; - markers[1].object = (object_t *)other_pseg->marker_inev[1]; + case STM_CONTENTION_INEVITABLE: + markers[1].odd_number = other_pseg->marker_inev.odd_number; + markers[1].object = other_pseg->marker_inev.object; break; default: markers[1].odd_number = 0; @@ -130,11 +129,14 @@ break; } + stmcb_timing_event(markers[0].tl, kind, markers); + + /* only release the lock after stmcb_timing_event(), otherwise it could + run into race conditions trying to interpret 'markers[1].object' */ release_marker_lock(other_segment_base); - - stmcb_timing_event(markers[0].tl, kind, markers); } -void (*stmcb_timing_event)(enum stm_event_e event, - stm_loc_marker_t *markers[2]); +void (*stmcb_timing_event)(stm_thread_local_t *tl, /* the local thread */ + enum stm_event_e event, + stm_loc_marker_t *markers); diff --git a/c7/stm/nursery.h b/c7/stm/nursery.h --- a/c7/stm/nursery.h +++ b/c7/stm/nursery.h @@ -1,10 +1,12 @@ -/* 'nursery_end' is either NURSERY_END, NSE_SIGxxx, or STM_TR_ABORT_xxx. */ -#define NSE_SIGPAUSE (_STM_NSE_SIGNAL_MAX - 1) -#define NSE_SIGCOMMITSOON (_STM_NSE_SIGNAL_MAX - 2) +/* 'nursery_end' is either NURSERY_END or one of NSE_SIGxxx */ +#define NSE_SIGABORT 1 +#define NSE_SIGPAUSE 2 +#define NSE_SIGCOMMITSOON 3 +#define _NSE_NUM_SIGNALS 4 -#if !(STM_TR_ABORT_OTHER < NSE_SIGCOMMITSOON) -# error "STM_TR_ABORT_xxx is too large; increase _STM_NSE_SIGNAL_MAX" +#if _NSE_NUM_SIGNALS >= _STM_NSE_SIGNAL_MAX +# error "increase _STM_NSE_SIGNAL_MAX" #endif diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -123,7 +123,7 @@ #define _STM_CARD_SIZE 32 /* must be >= 32 */ #define _STM_MIN_CARD_COUNT 17 #define _STM_MIN_CARD_OBJ_SIZE (_STM_CARD_SIZE * _STM_MIN_CARD_COUNT) -#define _STM_NSE_SIGNAL_MAX 63 +#define _STM_NSE_SIGNAL_MAX 7 #define _STM_FAST_ALLOC (66*1024) @@ -406,10 +406,6 @@ const char *msg); -/* Temporary? */ -void stm_flush_timing(stm_thread_local_t *tl, int verbose); - - /* Profiling events. In the comments: content of the markers, if any */ enum stm_event_e { /* always STM_TRANSACTION_START followed later by one of COMMIT or ABORT */ diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -138,8 +138,6 @@ ... 
}; -void stm_flush_timing(stm_thread_local_t *, int); - typedef struct { stm_thread_local_t *tl; /* If segment_base==NULL, the remaining fields are undefined. If non-NULL, @@ -155,7 +153,6 @@ void stm_push_marker(stm_thread_local_t *, uintptr_t, object_t *); void stm_update_marker_num(stm_thread_local_t *, uintptr_t); void stm_pop_marker(stm_thread_local_t *); -char *_stm_expand_marker(void); """) From noreply at buildbot.pypy.org Sat Oct 4 14:47:07 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 4 Oct 2014 14:47:07 +0200 (CEST) Subject: [pypy-commit] stmgc c7-full-profiling: Fix demo2.c Message-ID: <20141004124707.812141C02ED@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-full-profiling Changeset: r1446:40660c139f6a Date: 2014-10-04 14:47 +0200 http://bitbucket.org/pypy/stmgc/changeset/40660c139f6a/ Log: Fix demo2.c diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c --- a/c7/demo/demo2.c +++ b/c7/demo/demo2.c @@ -3,6 +3,7 @@ #include #include #include +#include #ifdef USE_HTM # include "../../htm-c7/stmgc.h" @@ -59,12 +60,25 @@ } void stmcb_commit_soon() {} -static void expand_marker(char *base, uintptr_t odd_number, - object_t *following_object, - char *outputbuf, size_t outputbufsize) +static void timing_event(stm_thread_local_t *tl, /* the local thread */ + enum stm_event_e event, + stm_loc_marker_t *markers) { - assert(following_object == NULL); - snprintf(outputbuf, outputbufsize, "<%p %lu>", base, odd_number); + static char *event_names[] = { STM_EVENT_NAMES }; + + char buf[1024], *p; + struct timespec tp; + clock_gettime(CLOCK_MONOTONIC, &tp); + + p = buf; + p += sprintf(p, "{%.9f} %p %s", tp.tv_sec + 0.000000001 * tp.tv_nsec, + tl, event_names[event]); + if (markers != NULL) { + p += sprintf(p, ", markers: %lu, %lu", + markers[0].odd_number, markers[1].odd_number); + } + sprintf(p, "\n"); + fputs(buf, stderr); } @@ -108,18 +122,6 @@ stm_start_transaction(&stm_thread_local); - if (stm_thread_local.longest_marker_state != 0) { - fprintf(stderr, "[%p] marker %d for %.6f seconds:\n", - &stm_thread_local, - stm_thread_local.longest_marker_state, - stm_thread_local.longest_marker_time); - fprintf(stderr, "\tself:\t\"%s\"\n\tother:\t\"%s\"\n", - stm_thread_local.longest_marker_self, - stm_thread_local.longest_marker_other); - stm_thread_local.longest_marker_state = 0; - stm_thread_local.longest_marker_time = 0.0; - } - nodeptr_t prev = initial; stm_read((objptr_t)prev); @@ -223,7 +225,6 @@ void unregister_thread_local(void) { - stm_flush_timing(&stm_thread_local, 1); stm_unregister_thread_local(&stm_thread_local); } @@ -296,7 +297,7 @@ stm_setup(); stm_register_thread_local(&stm_thread_local); stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf); - stmcb_expand_marker = expand_marker; + stmcb_timing_event = timing_event; setup_list(); diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -438,6 +438,23 @@ _STM_EVENT_N }; +#define STM_EVENT_NAMES \ + "transaction start", \ + "transaction commit", \ + "transaction abort", \ + "contention write write", \ + "contention write read", \ + "contention inevitable", \ + "aborting other contention", \ + "wait free segment", \ + "wait sync pause", \ + "wait contention", \ + "wait done", \ + "gc minor start", \ + "gc minor done", \ + "gc major start", \ + "gc major done" + /* The markers pushed in the shadowstack are an odd number followed by a regular pointer. 
*/ typedef struct { From noreply at buildbot.pypy.org Sat Oct 4 14:58:30 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 4 Oct 2014 14:58:30 +0200 (CEST) Subject: [pypy-commit] stmgc c7-full-profiling: fix: was missing the stm_rewind_jmp_setjmp() call Message-ID: <20141004125830.DE17D1C02ED@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-full-profiling Changeset: r1447:bcba608bf644 Date: 2014-10-04 14:58 +0200 http://bitbucket.org/pypy/stmgc/changeset/bcba608bf644/ Log: fix: was missing the stm_rewind_jmp_setjmp() call diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -398,9 +398,8 @@ { /* used to be more efficient, starting directly an inevitable transaction, but there is no real point any more, I believe */ - s_mutex_lock(); - _stm_start_transaction(tl); - _stm_become_inevitable("start_inevitable_transaction"); + stm_start_transaction(tl); + stm_become_inevitable(tl, "start_inevitable_transaction"); } From noreply at buildbot.pypy.org Sat Oct 4 15:33:32 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 4 Oct 2014 15:33:32 +0200 (CEST) Subject: [pypy-commit] stmgc c7-full-profiling: Fix marker tests Message-ID: <20141004133332.B98461C10E6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-full-profiling Changeset: r1448:3bb5d6edf0ca Date: 2014-10-04 15:33 +0200 http://bitbucket.org/pypy/stmgc/changeset/3bb5d6edf0ca/ Log: Fix marker tests diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -146,9 +146,11 @@ uintptr_t odd_number; object_t *object; } stm_loc_marker_t; -void (*stmcb_timing_event)(stm_thread_local_t *tl, /* the local thread */ - enum stm_event_e event, - stm_loc_marker_t *markers); + +typedef void (*stmcb_timing_event_fn)(stm_thread_local_t *tl, + enum stm_event_e event, + stm_loc_marker_t *markers); +stmcb_timing_event_fn stmcb_timing_event; void stm_push_marker(stm_thread_local_t *, uintptr_t, object_t *); void stm_update_marker_num(stm_thread_local_t *, uintptr_t); @@ -552,8 +554,7 @@ self.current_thread = 0 def teardown_method(self, meth): - lib.stmcb_expand_marker = ffi.NULL - lib.stmcb_debug_print = ffi.NULL + lib.stmcb_timing_event = ffi.NULL tl = self.tls[self.current_thread] if lib._stm_in_transaction(tl) and lib.stm_is_inevitable(): self.commit_transaction() # must succeed! 
@@ -639,7 +640,7 @@ self.push_root(ffi.cast("object_t *", 8)) def check_char_everywhere(self, obj, expected_content, offset=HDR): - for i in range(len(self.tls)): + for i in range(len(self.tls) + 1): addr = lib._stm_get_segment_base(i) content = addr[int(ffi.cast("uintptr_t", obj)) + offset] assert content == expected_content diff --git a/c7/test/test_marker.py b/c7/test/test_marker.py --- a/c7/test/test_marker.py +++ b/c7/test/test_marker.py @@ -1,8 +1,35 @@ from support import * import py, time + class TestMarker(BaseTest): + def recording(self, kind): + seen = [] + @ffi.callback("stmcb_timing_event_fn") + def timing_event(tl, event, markers): + if event == kind: + seen.append(tl) + seen.append(markers[0].tl) + seen.append(markers[0].segment_base) + seen.append(markers[0].odd_number) + seen.append(markers[0].object) + seen.append(markers[1].tl) + seen.append(markers[1].segment_base) + seen.append(markers[1].odd_number) + seen.append(markers[1].object) + lib.stmcb_timing_event = timing_event + self.timing_event_keepalive = timing_event + self.seen = seen + + def check_recording(self, i1, o1, i2, o2): + seen = self.seen + assert seen[0] == self.tls[1] + segbase = lib._stm_get_segment_base + assert seen[1:5] == [self.tls[1], segbase(2), i1, o1] + assert seen[5:9] == [self.tls[0], segbase(1), i2, o2] + assert len(seen) == 9 + def test_marker_odd_simple(self): self.start_transaction() self.push_root(ffi.cast("object_t *", 29)) @@ -13,74 +40,17 @@ assert int(ffi.cast("uintptr_t", x)) == 29 def test_abort_marker_no_shadowstack(self): - tl = self.get_stm_thread_local() - assert tl.longest_marker_state == lib.STM_TIME_OUTSIDE_TRANSACTION - assert tl.longest_marker_time == 0.0 + self.recording(lib.STM_CONTENTION_WRITE_WRITE) + p = stm_allocate_old(16) # self.start_transaction() - start = time.time() - while abs(time.time() - start) <= 0.1: - pass - self.abort_transaction() + stm_set_char(p, 'A') # - tl = self.get_stm_thread_local() - assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_OTHER - assert 0.099 <= tl.longest_marker_time <= 0.9 - assert tl.longest_marker_self[0] == '\x00' - assert tl.longest_marker_other[0] == '\x00' - - def test_abort_marker_shadowstack(self): + self.switch(1) self.start_transaction() - p = stm_allocate(16) - self.push_root(ffi.cast("object_t *", 29)) - self.push_root(p) - start = time.time() - while abs(time.time() - start) <= 0.1: - pass - self.abort_transaction() + py.test.raises(Conflict, stm_set_char, p, 'B') # - tl = self.get_stm_thread_local() - assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_OTHER - assert 0.099 <= tl.longest_marker_time <= 0.9 - assert tl.longest_marker_self[0] == '\x00' - assert tl.longest_marker_other[0] == '\x00' - - def test_abort_marker_no_shadowstack_cb(self): - @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") - def expand_marker(base, number, ptr, outbuf, outbufsize): - seen.append(1) - lib.stmcb_expand_marker = expand_marker - seen = [] - # - self.start_transaction() - self.abort_transaction() - # - tl = self.get_stm_thread_local() - assert tl.longest_marker_self[0] == '\x00' - assert not seen - - def test_abort_marker_shadowstack_cb(self): - @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") - def expand_marker(base, number, ptr, outbuf, outbufsize): - s = '%d %r\x00' % (number, ptr) - assert len(s) <= outbufsize - outbuf[0:len(s)] = s - lib.stmcb_expand_marker = expand_marker - # - self.start_transaction() - p = stm_allocate(16) - self.push_root(ffi.cast("object_t *", 29)) - 
self.push_root(p) - start = time.time() - while abs(time.time() - start) <= 0.1: - pass - self.abort_transaction() - # - tl = self.get_stm_thread_local() - assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_OTHER - assert 0.099 <= tl.longest_marker_time <= 0.9 - assert ffi.string(tl.longest_marker_self) == '29 %r' % (p,) - assert ffi.string(tl.longest_marker_other) == '' + self.check_recording(0, ffi.NULL, 0, ffi.NULL) def test_macros(self): self.start_transaction() @@ -116,72 +86,8 @@ lib.stm_pop_marker(tl) py.test.raises(EmptyStack, self.pop_root) - def test_stm_expand_marker(self): - @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") - def expand_marker(base, number, ptr, outbuf, outbufsize): - s = '%d %r\x00' % (number, ptr) - assert len(s) <= outbufsize - outbuf[0:len(s)] = s - lib.stmcb_expand_marker = expand_marker - self.start_transaction() - p = stm_allocate(16) - self.push_root(ffi.cast("object_t *", 29)) - self.push_root(p) - self.push_root(stm_allocate(32)) - self.push_root(stm_allocate(16)) - raw = lib._stm_expand_marker() - assert ffi.string(raw) == '29 %r' % (p,) - - def test_stmcb_debug_print(self): - @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") - def expand_marker(base, number, ptr, outbuf, outbufsize): - s = '<<<%d>>>\x00' % (number,) - assert len(s) <= outbufsize - outbuf[0:len(s)] = s - @ffi.callback("void(char *, double, char *)") - def debug_print(cause, time, marker): - if 0.0 < time < 1.0: - time = "time_ok" - seen.append((ffi.string(cause), time, ffi.string(marker))) - seen = [] - lib.stmcb_expand_marker = expand_marker - lib.stmcb_debug_print = debug_print - # - self.start_transaction() - p = stm_allocate(16) - self.push_root(ffi.cast("object_t *", 29)) - self.push_root(p) - self.abort_transaction() - # - assert seen == [("run aborted other", "time_ok", "<<<29>>>")] - - def test_multiple_markers(self): - @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") - def expand_marker(base, number, ptr, outbuf, outbufsize): - seen.append(number) - s = '%d %r\x00' % (number, ptr == ffi.NULL) - assert len(s) <= outbufsize - outbuf[0:len(s)] = s - seen = [] - lib.stmcb_expand_marker = expand_marker - # - self.start_transaction() - p = stm_allocate(16) - self.push_root(ffi.cast("object_t *", 27)) - self.push_root(p) - self.push_root(ffi.cast("object_t *", 29)) - self.push_root(ffi.cast("object_t *", ffi.NULL)) - raw = lib._stm_expand_marker() - assert ffi.string(raw) == '29 True' - assert seen == [29] - def test_double_abort_markers_cb_write_write(self): - @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") - def expand_marker(base, number, ptr, outbuf, outbufsize): - s = '%d\x00' % (number,) - assert len(s) <= outbufsize - outbuf[0:len(s)] = s - lib.stmcb_expand_marker = expand_marker + self.recording(lib.STM_CONTENTION_WRITE_WRITE) p = stm_allocate_old(16) # self.start_transaction() @@ -200,19 +106,10 @@ self.push_root(ffi.cast("object_t *", ffi.NULL)) py.test.raises(Conflict, stm_set_char, p, 'B') # - tl = self.get_stm_thread_local() - assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_WRITE_WRITE - assert ffi.string(tl.longest_marker_self) == '21' - assert ffi.string(tl.longest_marker_other) == '19' + self.check_recording(21, ffi.NULL, 19, ffi.NULL) def test_double_abort_markers_cb_inevitable(self): - @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") - def expand_marker(base, number, ptr, outbuf, outbufsize): - c = (base + int(ffi.cast("uintptr_t", ptr)))[8] - s = 
'%d %r\x00' % (number, c) - assert len(s) <= outbufsize - outbuf[0:len(s)] = s - lib.stmcb_expand_marker = expand_marker + self.recording(lib.STM_CONTENTION_INEVITABLE) # self.start_transaction() p = stm_allocate(16) @@ -234,18 +131,10 @@ self.push_root(ffi.cast("object_t *", p)) py.test.raises(Conflict, self.become_inevitable) # - tl = self.get_stm_thread_local() - assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_INEVITABLE - assert ffi.string(tl.longest_marker_self) == "21 'B'" - assert ffi.string(tl.longest_marker_other) == "19 'A'" + self.check_recording(21, p, 19, p) def test_read_write_contention(self): - @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") - def expand_marker(base, number, ptr, outbuf, outbufsize): - s = '%d\x00' % (number,) - assert len(s) <= outbufsize - outbuf[0:len(s)] = s - lib.stmcb_expand_marker = expand_marker + self.recording(lib.STM_CONTENTION_WRITE_READ) p = stm_allocate_old(16) # self.start_transaction() @@ -262,19 +151,10 @@ self.push_root(ffi.cast("object_t *", ffi.NULL)) py.test.raises(Conflict, self.commit_transaction) # - tl = self.get_stm_thread_local() - assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_WRITE_READ - assert ffi.string(tl.longest_marker_self) == '19' - assert ffi.string(tl.longest_marker_other) == ( - '') + self.check_recording(19, ffi.NULL, 0, ffi.NULL) def test_double_remote_markers_cb_write_write(self): - @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") - def expand_marker(base, number, ptr, outbuf, outbufsize): - s = '%d\x00' % (number,) - assert len(s) <= outbufsize - outbuf[0:len(s)] = s - lib.stmcb_expand_marker = expand_marker + self.recording(lib.STM_CONTENTION_WRITE_WRITE) p = stm_allocate_old(16) # self.start_transaction() @@ -300,19 +180,10 @@ # py.test.raises(Conflict, self.switch, 0) # - tl = self.get_stm_thread_local() - assert tl is tl0 - assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_WRITE_WRITE - assert ffi.string(tl.longest_marker_self) == '19' - assert ffi.string(tl.longest_marker_other) == '21' + self.check_recording(21, ffi.NULL, 19, ffi.NULL) def test_double_remote_markers_cb_write_read(self): - @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") - def expand_marker(base, number, ptr, outbuf, outbufsize): - s = '%d\x00' % (number,) - assert len(s) <= outbufsize - outbuf[0:len(s)] = s - lib.stmcb_expand_marker = expand_marker + self.recording(lib.STM_CONTENTION_WRITE_READ) p = stm_allocate_old(16) # self.start_transaction() @@ -333,8 +204,4 @@ # py.test.raises(Conflict, self.switch, 0) # - tl = self.get_stm_thread_local() - assert tl is tl0 - assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_WRITE_READ - assert ffi.string(tl.longest_marker_self)=='' - assert ffi.string(tl.longest_marker_other) == '21' + self.check_recording(21, ffi.NULL, 0, ffi.NULL) From noreply at buildbot.pypy.org Sat Oct 4 15:46:18 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 4 Oct 2014 15:46:18 +0200 (CEST) Subject: [pypy-commit] stmgc c7-full-profiling: Kill test_timing now Message-ID: <20141004134618.894461C3342@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-full-profiling Changeset: r1449:e51fea16a737 Date: 2014-10-04 15:46 +0200 http://bitbucket.org/pypy/stmgc/changeset/e51fea16a737/ Log: Kill test_timing now diff --git a/c7/test/test_marker.py b/c7/test/test_marker.py --- a/c7/test/test_marker.py +++ b/c7/test/test_marker.py @@ -8,8 +8,12 @@ seen = [] @ffi.callback("stmcb_timing_event_fn") def timing_event(tl, event, 
markers): - if event == kind: - seen.append(tl) + if kind == "ALL": + seen.append(event) + elif event != kind: + return + seen.append(tl) + if markers: seen.append(markers[0].tl) seen.append(markers[0].segment_base) seen.append(markers[0].odd_number) @@ -18,6 +22,8 @@ seen.append(markers[1].segment_base) seen.append(markers[1].odd_number) seen.append(markers[1].object) + else: + seen.append(None) lib.stmcb_timing_event = timing_event self.timing_event_keepalive = timing_event self.seen = seen @@ -205,3 +211,23 @@ py.test.raises(Conflict, self.switch, 0) # self.check_recording(21, ffi.NULL, 0, ffi.NULL) + + def test_all(self): + self.recording("ALL") + self.start_transaction() + self.commit_transaction() + self.start_transaction() + stm_major_collect() + self.abort_transaction() + assert self.seen == [ + lib.STM_TRANSACTION_START, self.tls[0], None, + lib.STM_GC_MINOR_START, self.tls[0], None, + lib.STM_GC_MINOR_DONE, self.tls[0], None, + lib.STM_TRANSACTION_COMMIT, self.tls[0], None, + lib.STM_TRANSACTION_START, self.tls[0], None, + lib.STM_GC_MINOR_START, self.tls[0], None, + lib.STM_GC_MINOR_DONE, self.tls[0], None, + lib.STM_GC_MAJOR_START, self.tls[0], None, + lib.STM_GC_MAJOR_DONE, self.tls[0], None, + lib.STM_TRANSACTION_ABORT, self.tls[0], None, + ] diff --git a/c7/test/test_timing.py b/c7/test/test_timing.py deleted file mode 100644 --- a/c7/test/test_timing.py +++ /dev/null @@ -1,97 +0,0 @@ -from support import * -import py, time - - -class TestTiming(BaseTest): - - def gettimer(self, n): - tl = self.tls[self.current_thread] - lib.stm_flush_timing(tl, 1) - return tl.events[n], tl.timing[n] - - def expect_timer(self, n, expected_time, expected_count='?'): - count, real = self.gettimer(n) - print 'timer %d is %d;%s, expecting %s;%s' % (n, count, real, - expected_count, expected_time) - if expected_time == 0.0: - assert real == 0.0 - elif expected_time == "nonzero": - assert real > 0.0 - else: - assert abs(real - expected_time) < 0.09 - if expected_count != '?': - assert count == expected_count - - def test_time_outside_transaction(self): - time.sleep(0.2) - self.start_transaction() - self.commit_transaction() - self.expect_timer(lib.STM_TIME_OUTSIDE_TRANSACTION, 0.2) - - def test_time_run_current(self): - self.start_transaction() - time.sleep(0.1) - self.expect_timer(lib.STM_TIME_RUN_CURRENT, 0.1, 0) - time.sleep(0.1) - self.expect_timer(lib.STM_TIME_RUN_CURRENT, 0.2, 0) - self.commit_transaction() - self.expect_timer(lib.STM_TIME_RUN_CURRENT, 0.0, 1) - - def test_time_run_committed(self): - self.start_transaction() - time.sleep(0.2) - self.expect_timer(lib.STM_TIME_RUN_COMMITTED, 0.0, 0) - self.commit_transaction() - self.expect_timer(lib.STM_TIME_RUN_COMMITTED, 0.2, 1) - - def test_time_run_aborted_write_write(self): - o = stm_allocate_old(16) - self.start_transaction() - stm_write(o) - # - self.switch(1) - self.start_transaction() - time.sleep(0.2) - py.test.raises(Conflict, stm_write, o) - self.expect_timer(lib.STM_TIME_RUN_ABORTED_WRITE_WRITE, 0.2, 1) - - def test_time_run_aborted_write_read(self): - o = stm_allocate_old(16) - self.start_transaction() - stm_read(o) - # - self.switch(1) - self.start_transaction() - time.sleep(0.2) - stm_write(o) - py.test.raises(Conflict, self.commit_transaction) - self.expect_timer(lib.STM_TIME_RUN_ABORTED_WRITE_READ, 0.2, 1) - - def test_time_run_aborted_inevitable(self): - self.start_transaction() - self.become_inevitable() - # - self.switch(1) - self.start_transaction() - time.sleep(0.2) - py.test.raises(Conflict, self.become_inevitable) - 
self.expect_timer(lib.STM_TIME_RUN_ABORTED_INEVITABLE, 0.2, 1) - - def test_time_run_aborted_other(self): - self.start_transaction() - time.sleep(0.2) - self.abort_transaction() - self.expect_timer(lib.STM_TIME_RUN_ABORTED_OTHER, 0.2, 1) - - def test_time_minor_gc(self): - self.start_transaction() - self.expect_timer(lib.STM_TIME_MINOR_GC, 0.0, 0) - stm_minor_collect() - self.expect_timer(lib.STM_TIME_MINOR_GC, "nonzero", 1) - self.expect_timer(lib.STM_TIME_MAJOR_GC, 0.0, 0) - - def test_time_major_gc(self): - self.start_transaction() - self.expect_timer(lib.STM_TIME_MAJOR_GC, 0.0, 0) - stm_major_collect() - self.expect_timer(lib.STM_TIME_MAJOR_GC, "nonzero", 1) From noreply at buildbot.pypy.org Sat Oct 4 17:23:48 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 4 Oct 2014 17:23:48 +0200 (CEST) Subject: [pypy-commit] stmgc c7-full-profiling: Write the full to-disk profiling support here too Message-ID: <20141004152348.F37871C023E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-full-profiling Changeset: r1450:8f88cdb1d916 Date: 2014-10-04 17:24 +0200 http://bitbucket.org/pypy/stmgc/changeset/8f88cdb1d916/ Log: Write the full to-disk profiling support here too diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c --- a/c7/stm/forksupport.c +++ b/c7/stm/forksupport.c @@ -55,14 +55,12 @@ s_mutex_unlock(); bool was_in_transaction = _stm_in_transaction(this_tl); - if (was_in_transaction) { - stm_become_inevitable(this_tl, "fork"); - /* Note that the line above can still fail and abort, which should - be fine */ - } - else { - stm_start_inevitable_transaction(this_tl); - } + if (!was_in_transaction) + stm_start_transaction(this_tl); + + stm_become_inevitable(this_tl, "fork"); + /* Note that the line above can still fail and abort, which should + be fine */ s_mutex_lock(); synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK); @@ -203,6 +201,9 @@ just release these locks early */ s_mutex_unlock(); + /* Open a new profiling file, if any */ + forksupport_open_new_profiling_file(); + /* Move the copy of the mmap over the old one, overwriting it and thus freeing the old mapping in this process */ diff --git a/c7/stm/prof.c b/c7/stm/prof.c new file mode 100644 --- /dev/null +++ b/c7/stm/prof.c @@ -0,0 +1,102 @@ +#include + + +static FILE *profiling_file; +static char *profiling_basefn = NULL; +static int (*profiling_expand_marker)(stm_loc_marker_t *, char *, int); + + +static void _stm_profiling_event(stm_thread_local_t *tl, + enum stm_event_e event, + stm_loc_marker_t *markers) +{ + struct buf_s { + uint32_t tv_sec; + uint32_t tv_nsec; + uint32_t thread_num; + uint8_t event; + uint8_t marker_length[2]; + char extra[256]; + } __attribute__((packed)); + + struct buf_s buf; + struct timespec t; + clock_gettime(CLOCK_MONOTONIC, &t); + buf.tv_sec = t.tv_sec; + buf.tv_nsec = t.tv_nsec; + buf.thread_num = tl->thread_local_counter; + buf.event = event; + + int len0 = 0; + int len1 = 0; + if (markers != NULL) { + if (markers[0].odd_number != 0) + len0 = profiling_expand_marker(&markers[0], buf.extra, 128); + if (markers[1].odd_number != 0) + len1 = profiling_expand_marker(&markers[1], buf.extra + len0, 128); + } + buf.marker_length[0] = len0; + buf.marker_length[1] = len1; + + fwrite(&buf, offsetof(struct buf_s, extra) + len0 + len1, + 1, profiling_file); +} + +static int default_expand_marker(stm_loc_marker_t *m, char *p, int s) +{ + *(uintptr_t *)p = m->odd_number; + return sizeof(uintptr_t); +} + +static bool open_timing_log(const char *filename) +{ + profiling_file = 
fopen(filename, "w"); + if (profiling_file == NULL) + return false; + + fwrite("STMGC-C7-PROF01\n", 16, 1, profiling_file); + stmcb_timing_event = _stm_profiling_event; + return true; +} + +static bool close_timing_log(void) +{ + if (stmcb_timing_event == &_stm_profiling_event) { + stmcb_timing_event = NULL; + fclose(profiling_file); + profiling_file = NULL; + return true; + } + return false; +} + +static void forksupport_open_new_profiling_file(void) +{ + if (close_timing_log() && profiling_basefn != NULL) { + char filename[1024]; + snprintf(filename, sizeof(filename), + "%s.fork%ld", profiling_basefn, (long)getpid()); + open_timing_log(filename); + } +} + +int stm_set_timing_log(const char *profiling_file_name, + int expand_marker(stm_loc_marker_t *, char *, int)) +{ + close_timing_log(); + free(profiling_basefn); + profiling_basefn = NULL; + + if (profiling_file_name == NULL) + return 0; + + if (!expand_marker) + expand_marker = default_expand_marker; + profiling_expand_marker = expand_marker; + + if (!open_timing_log(profiling_file_name)) + return -1; + + profiling_basefn = strdup(profiling_file_name); + return 0; +} diff --git a/c7/stm/prof.h b/c7/stm/prof.h new file mode 100644 --- /dev/null +++ b/c7/stm/prof.h @@ -0,0 +1,2 @@ + +static void forksupport_open_new_profiling_file(void); diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -225,6 +225,8 @@ return (pthread_t *)(tl->creating_pthread); } +static int thread_local_counters = 0; + void stm_register_thread_local(stm_thread_local_t *tl) { int num; @@ -247,6 +249,7 @@ numbers automatically. */ num = (num % NB_SEGMENTS) + 1; tl->associated_segment_num = num; + tl->thread_local_counter = ++thread_local_counters; *_get_cpth(tl) = pthread_self(); _init_shadow_stack(tl); set_gs_register(get_segment_base(num)); diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -15,6 +15,7 @@ #include "stm/fprintcolor.h" #include "stm/weakref.h" #include "stm/marker.h" +#include "stm/prof.h" #include "stm/misc.c" #include "stm/list.c" @@ -34,4 +35,5 @@ #include "stm/fprintcolor.c" #include "stm/weakref.c" #include "stm/marker.c" +#include "stm/prof.c" #include "stm/rewind_setjmp.c" diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -66,10 +66,11 @@ char *mem_clear_on_abort; size_t mem_bytes_to_clear_on_abort; /* after an abort, some details about the abort are stored there. - (these fields are not modified on a successful commit) */ + (this field is not modified on a successful commit) */ long last_abort__bytes_in_nursery; /* the next fields are handled internally by the library */ int associated_segment_num; + int thread_local_counter; struct stm_thread_local_s *prev, *next; void *creating_pthread[2]; } stm_thread_local_t; @@ -467,6 +468,17 @@ enum stm_event_e event, stm_loc_marker_t *markers); +/* Calling this sets up a stmcb_timing_event callback that will produce + a binary file calling 'profiling_file_name'. After a fork(), it is + written to 'profiling_file_name.fork'. Call it with NULL to + stop profiling. Returns -1 in case of error (see errno then). + The optional 'expand_marker' function pointer is called to expand + the marker's odd_number and object into data, starting at the given + position and with the given maximum length. 
*/ +int stm_set_timing_log(const char *profiling_file_name, + int expand_marker(stm_loc_marker_t *, char *, int)); + + /* Convenience macros to push the markers into the shadowstack */ #define STM_PUSH_MARKER(tl, odd_num, p) do { \ uintptr_t _odd_num = (odd_num); \ @@ -492,8 +504,6 @@ _ss->ss = (object_t *)_odd_num; \ } while (0) -char *_stm_expand_marker(void); - /* ==================== END ==================== */ diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -152,6 +152,9 @@ stm_loc_marker_t *markers); stmcb_timing_event_fn stmcb_timing_event; +int stm_set_timing_log(const char *profiling_file_name, + int expand_marker(stm_loc_marker_t *, char *, int)); + void stm_push_marker(stm_thread_local_t *, uintptr_t, object_t *); void stm_update_marker_num(stm_thread_local_t *, uintptr_t); void stm_pop_marker(stm_thread_local_t *); diff --git a/c7/test/test_prof.py b/c7/test/test_prof.py new file mode 100644 --- /dev/null +++ b/c7/test/test_prof.py @@ -0,0 +1,77 @@ +from support import * +import py, os, struct + +udir = py.path.local.make_numbered_dir(prefix = 'stmgc-') + + +def read_log(filename): + f = open(filename, 'rb') + header = f.read(16) + assert header == "STMGC-C7-PROF01\n" + result = [] + while True: + packet = f.read(15) + if not packet: break + sec, nsec, threadnum, event, len0, len1 = \ + struct.unpack("IIIBBB", packet) + result.append((sec + 0.000000001 * nsec, + threadnum, + event, + f.read(len0), + f.read(len1))) + f.close() + return result + + +class TestProf(BaseTest): + + def test_simple(self): + filename = os.path.join(str(udir), 'simple.prof') + r = lib.stm_set_timing_log(filename, ffi.NULL) + assert r == 0 + try: + self.start_transaction() + self.commit_transaction() + finally: + lib.stm_set_timing_log(ffi.NULL, ffi.NULL) + + result = read_log(filename) + assert result[0][2] == lib.STM_TRANSACTION_START + assert result[1][2] == lib.STM_GC_MINOR_START + assert result[2][2] == lib.STM_GC_MINOR_DONE + assert result[3][2] == lib.STM_TRANSACTION_COMMIT + assert len(result) == 4 + + def test_contention(self): + @ffi.callback("int(stm_loc_marker_t *, char *, int)") + def expand_marker(marker, p, s): + p[0] = chr(100 + marker.odd_number) + return 1 + filename = os.path.join(str(udir), 'contention.prof') + r = lib.stm_set_timing_log(filename, expand_marker) + assert r == 0 + try: + p = stm_allocate_old(16) + self.start_transaction() + assert stm_get_char(p) == '\x00' # read + # + self.switch(1) + self.start_transaction() + self.push_root(ffi.cast("object_t *", 19)) + self.push_root(ffi.cast("object_t *", ffi.NULL)) + stm_set_char(p, 'B') # write + py.test.raises(Conflict, self.commit_transaction) + finally: + lib.stm_set_timing_log(ffi.NULL, ffi.NULL) + + result = read_log(filename) + id0 = result[0][1] + id1 = result[1][1] + assert result[0][1:5] == (id0, lib.STM_TRANSACTION_START, '', '') + assert result[1][1:5] == (id1, lib.STM_TRANSACTION_START, '', '') + assert result[2][1:5] == (id1, lib.STM_GC_MINOR_START, '', '') + assert result[3][1:5] == (id1, lib.STM_GC_MINOR_DONE, '', '') + assert result[4][1:5] == (id1, lib.STM_CONTENTION_WRITE_READ, + chr(119), '') + assert result[5][1:5] == (id1, lib.STM_TRANSACTION_ABORT, '', '') + assert len(result) == 6 From noreply at buildbot.pypy.org Sat Oct 4 18:16:52 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 4 Oct 2014 18:16:52 +0200 (CEST) Subject: [pypy-commit] stmgc default: check that we can use stm_start_inevitable_transaction() without Message-ID: 
<20141004161652.981041C3342@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1451:4210e49aad58 Date: 2014-10-04 18:10 +0200 http://bitbucket.org/pypy/stmgc/changeset/4210e49aad58/ Log: check that we can use stm_start_inevitable_transaction() without any rjbuf on the stack diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c --- a/c7/demo/demo2.c +++ b/c7/demo/demo2.c @@ -295,10 +295,16 @@ stm_setup(); stm_register_thread_local(&stm_thread_local); + + /* check that we can use stm_start_inevitable_transaction() without + any rjbuf on the stack */ + stm_start_inevitable_transaction(&stm_thread_local); + stm_commit_transaction(); + + stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf); stmcb_expand_marker = expand_marker; - setup_list(); From noreply at buildbot.pypy.org Sat Oct 4 18:16:53 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 4 Oct 2014 18:16:53 +0200 (CEST) Subject: [pypy-commit] stmgc c7-full-profiling: hg merge default Message-ID: <20141004161653.B2C6F1C3342@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-full-profiling Changeset: r1452:ddf145dd0a56 Date: 2014-10-04 18:10 +0200 http://bitbucket.org/pypy/stmgc/changeset/ddf145dd0a56/ Log: hg merge default diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c --- a/c7/demo/demo2.c +++ b/c7/demo/demo2.c @@ -296,10 +296,16 @@ stm_setup(); stm_register_thread_local(&stm_thread_local); + + /* check that we can use stm_start_inevitable_transaction() without + any rjbuf on the stack */ + stm_start_inevitable_transaction(&stm_thread_local); + stm_commit_transaction(); + + stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf); stmcb_timing_event = timing_event; - setup_list(); diff --git a/hashtable/design.txt b/hashtable/design.txt --- a/hashtable/design.txt +++ b/hashtable/design.txt @@ -70,5 +70,10 @@ special-cased in the implementation). More precisely, len(), keys(), clear(), etc., set all the lines' read markers; clear() additionally sets all the non-empty lines' write markers (so that it doesn't conflict - with another transaction checking that some key is really not in the + with another transaction checking that some different key is not in the dict). + +* We have an additional pair of markers (read and write) for the 'empty' + flag. It is read whenever we check 'bool(dict)'. It is written only + when we are about to commit and the emptiness state changed in this + transaction. 
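As an illustrative aside (not part of the patch above): the 'empty'-flag pair of markers described in hashtable/design.txt could be modelled roughly as below. The stm_hashtable_t layout and the helper names are assumptions made up for this sketch; only stm_read() and stm_write() are the actual stmgc API.

    /* Rough sketch under the assumptions stated above. */
    #include <stdbool.h>
    #include "stmgc.h"

    typedef struct {
        object_t *empty_flag;   /* dummy object carrying the 'empty' markers */
        uintptr_t nitems;       /* current number of live entries */
    } stm_hashtable_t;

    /* 'bool(dict)' only reads the 'empty' flag, so it conflicts only with
       a transaction that really changes the emptiness state. */
    static bool ht_is_nonempty(stm_hashtable_t *ht)
    {
        stm_read(ht->empty_flag);
        return ht->nitems != 0;
    }

    /* Called when about to commit: write the 'empty' flag only if the
       emptiness state changed during this transaction, as design.txt says. */
    static void ht_about_to_commit(stm_hashtable_t *ht, bool was_empty_at_start)
    {
        bool empty_now = (ht->nitems == 0);
        if (empty_now != was_empty_at_start)
            stm_write(ht->empty_flag);
    }

Because bool(dict) touches only the flag object, it does not conflict with insertions or deletions that leave the dict non-empty; only a transaction that actually flips the emptiness state writes the flag at commit time.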
diff --git a/hashtable/stmcheck.py b/hashtable/stmcheck.py new file mode 100644 --- /dev/null +++ b/hashtable/stmcheck.py @@ -0,0 +1,47 @@ +import py +import thread, time, sys +from __pypy__.thread import * + +try: + from pypyjit import set_param +except ImportError: + def set_param(value): + pass + + +class Conflict(Exception): + pass + + +def check_no_conflict(function_list, repeat=10000): + set_param("off") + # + def fn(index): + function = function_list[index] + sys.stdout.write("*** start %d ***\n" % index) + reset_longest_abort_info() + hint_commit_soon() + i = 0 + while i < repeat: + function() + i += 1 + hint_commit_soon() + abort_info = longest_abort_info() + with atomic: + abort_infos.append(abort_info) + if len(abort_infos) == count: + finished.release() + # + abort_infos = [] + finished = thread.allocate_lock() + finished.acquire() + count = len(function_list) + tlist = [thread.start_new_thread(fn, (i,)) for i in range(count)] + finished.acquire() + for i in range(count): + print 'thread %d: %r' % (i, abort_infos[i]) + if abort_infos != [None] * count: + raise Conflict + +def check_conflict(*args, **kwds): + py.test.raises(Conflict, check_no_conflict, *args, **kwds) diff --git a/hashtable/test_stmcheck.py b/hashtable/test_stmcheck.py new file mode 100644 --- /dev/null +++ b/hashtable/test_stmcheck.py @@ -0,0 +1,31 @@ +import stmcheck + + +def test_no_conflict(): + def t1(): + pass + def t2(): + pass + stmcheck.check_no_conflict([t1, t2]) + +def test_obvious_conflict(): + lst = [0] + def t1(): + lst[0] += 1 + stmcheck.check_conflict([t1, t1]) + +def test_no_conflict_if_writing_to_different_lists(): + lst = [[0], [0]] + def t1(): + lst[0][0] += 1 + def t2(): + lst[1][0] += 1 + stmcheck.check_no_conflict([t1, t2]) + +def test_conflict_even_if_writing_to_different_offsets(): + lst = [0, 0] + def t1(): + lst[0] += 1 + def t2(): + lst[1] += 1 + stmcheck.check_conflict([t1, t2]) From noreply at buildbot.pypy.org Sat Oct 4 18:16:54 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 4 Oct 2014 18:16:54 +0200 (CEST) Subject: [pypy-commit] stmgc c7-full-profiling: Fix the failure shown by demo2.c Message-ID: <20141004161654.C2C981C3342@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-full-profiling Changeset: r1453:57b388129192 Date: 2014-10-04 18:17 +0200 http://bitbucket.org/pypy/stmgc/changeset/57b388129192/ Log: Fix the failure shown by demo2.c diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -398,8 +398,13 @@ { /* used to be more efficient, starting directly an inevitable transaction, but there is no real point any more, I believe */ + rewind_jmp_buf rjbuf; + stm_rewind_jmp_enterframe(tl, &rjbuf); + stm_start_transaction(tl); stm_become_inevitable(tl, "start_inevitable_transaction"); + + stm_rewind_jmp_leaveframe(tl, &rjbuf); } From noreply at buildbot.pypy.org Sat Oct 4 18:19:05 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 4 Oct 2014 18:19:05 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/8f88cdb1d916 Message-ID: <20141004161905.32D391C3342@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r73767:d0263e2a9370 Date: 2014-10-04 17:42 +0200 http://bitbucket.org/pypy/pypy/changeset/d0263e2a9370/ Log: import stmgc/8f88cdb1d916 diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -83e4c655d31b +8f88cdb1d916 diff --git 
a/rpython/translator/stm/src_stm/stm/contention.c b/rpython/translator/stm/src_stm/stm/contention.c --- a/rpython/translator/stm/src_stm/stm/contention.c +++ b/rpython/translator/stm/src_stm/stm/contention.c @@ -4,34 +4,50 @@ #endif -enum contention_kind_e { +/* Here are the possible kinds of contention: - /* A write-write contention occurs when we running our transaction - and detect that we are about to write to an object that another - thread is also writing to. This kind of contention must be - resolved before continuing. This *must* abort one of the two - threads: the caller's thread is not at a safe-point, so cannot - wait! */ - WRITE_WRITE_CONTENTION, + STM_CONTENTION_WRITE_WRITE - /* A write-read contention occurs when we are trying to commit: it + A write-write contention occurs when we are running our + transaction and detect that we are about to write to an object + that another thread is also writing to. This kind of + contention must be resolved before continuing. This *must* + abort one of the two threads: the caller's thread is not at a + safe-point, so cannot wait! + + It is reported as a timing event with the following two markers: + the current thread (i.e. where the second-in-time write occurs); + and the other thread (from its 'modified_old_objects_markers', + where the first-in-time write occurred). + + STM_CONTENTION_WRITE_READ + + A write-read contention occurs when we are trying to commit: it means that an object we wrote to was also read by another transaction. Even though it would seem obvious that we should just abort the other thread and proceed in our commit, a more subtle answer would be in some cases to wait for the other thread to commit first. It would commit having read the old value, and - then we can commit our change to it. */ - WRITE_READ_CONTENTION, + then we can commit our change to it. - /* An inevitable contention occurs when we're trying to become + It is reported as a timing event with only one marker: the + older location of the write that was done by the current thread. + + STM_CONTENTION_INEVITABLE + + An inevitable contention occurs when we're trying to become inevitable but another thread already is. We can never abort the other thread in this case, but we still have the choice to abort - ourselves or pause until the other thread commits. */ - INEVITABLE_CONTENTION, -}; + ourselves or pause until the other thread commits. + + It is reported with two markers, one for the current thread and + one for the other thread. Each marker gives the location that + attempts to make the transaction inevitable. +*/ + struct contmgr_s { - enum contention_kind_e kind; + enum stm_event_e kind; struct stm_priv_segment_info_s *other_pseg; bool abort_other; bool try_sleep; // XXX add a way to timeout, but should handle repeated @@ -100,7 +116,7 @@ static bool contention_management(uint8_t other_segment_num, - enum contention_kind_e kind, + enum stm_event_e kind, object_t *obj) { assert(_has_mutex()); @@ -110,6 +126,9 @@ if (must_abort()) abort_with_mutex(); + /* Report the contention */ + timing_contention(kind, other_segment_num, obj); + /* Who should abort here: this thread, or the other thread? */ struct contmgr_s contmgr; contmgr.kind = kind; @@ -139,20 +158,9 @@ contmgr.abort_other = false; } - - int wait_category = - kind == WRITE_READ_CONTENTION ? STM_TIME_WAIT_WRITE_READ : - kind == INEVITABLE_CONTENTION ? STM_TIME_WAIT_INEVITABLE : - STM_TIME_WAIT_OTHER; - - int abort_category = - kind == WRITE_WRITE_CONTENTION ? 
STM_TIME_RUN_ABORTED_WRITE_WRITE : - kind == WRITE_READ_CONTENTION ? STM_TIME_RUN_ABORTED_WRITE_READ : - kind == INEVITABLE_CONTENTION ? STM_TIME_RUN_ABORTED_INEVITABLE : - STM_TIME_RUN_ABORTED_OTHER; - - - if (contmgr.try_sleep && kind != WRITE_WRITE_CONTENTION && + /* Do one of three things here... + */ + if (contmgr.try_sleep && kind != STM_CONTENTION_WRITE_WRITE && contmgr.other_pseg->safe_point != SP_WAIT_FOR_C_TRANSACTION_DONE) { others_may_have_run = true; /* Sleep. @@ -165,14 +173,12 @@ itself already paused here. */ contmgr.other_pseg->signal_when_done = true; - marker_contention(kind, false, other_segment_num, obj); - - change_timing_state(wait_category); /* tell the other to commit ASAP */ signal_other_to_commit_soon(contmgr.other_pseg); dprintf(("pausing...\n")); + cond_signal(C_AT_SAFE_POINT); STM_PSEGMENT->safe_point = SP_WAIT_FOR_C_TRANSACTION_DONE; cond_wait(C_TRANSACTION_DONE); @@ -181,14 +187,6 @@ if (must_abort()) abort_with_mutex(); - - struct stm_priv_segment_info_s *pseg = - get_priv_segment(STM_SEGMENT->segment_num); - double elapsed = - change_timing_state_tl(pseg->pub.running_thread, - STM_TIME_RUN_CURRENT); - marker_copy(pseg->pub.running_thread, pseg, - wait_category, elapsed); } else if (!contmgr.abort_other) { @@ -196,16 +194,13 @@ signal_other_to_commit_soon(contmgr.other_pseg); dprintf(("abort in contention: kind %d\n", kind)); - STM_SEGMENT->nursery_end = abort_category; - marker_contention(kind, false, other_segment_num, obj); abort_with_mutex(); } else { /* We have to signal the other thread to abort, and wait until it does. */ - contmgr.other_pseg->pub.nursery_end = abort_category; - marker_contention(kind, true, other_segment_num, obj); + contmgr.other_pseg->pub.nursery_end = NSE_SIGABORT; int sp = contmgr.other_pseg->safe_point; switch (sp) { @@ -297,7 +292,8 @@ assert(get_priv_segment(other_segment_num)->write_lock_num == prev_owner); - contention_management(other_segment_num, WRITE_WRITE_CONTENTION, obj); + contention_management(other_segment_num, + STM_CONTENTION_WRITE_WRITE, obj); /* now we return into _stm_write_slowpath() and will try again to acquire the write lock on our object. */ @@ -309,10 +305,12 @@ static bool write_read_contention_management(uint8_t other_segment_num, object_t *obj) { - return contention_management(other_segment_num, WRITE_READ_CONTENTION, obj); + return contention_management(other_segment_num, + STM_CONTENTION_WRITE_READ, obj); } static void inevitable_contention_management(uint8_t other_segment_num) { - contention_management(other_segment_num, INEVITABLE_CONTENTION, NULL); + contention_management(other_segment_num, + STM_CONTENTION_INEVITABLE, NULL); } diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c --- a/rpython/translator/stm/src_stm/stm/core.c +++ b/rpython/translator/stm/src_stm/stm/core.c @@ -125,17 +125,13 @@ dprintf_test(("write_slowpath %p -> mod_old\n", obj)); - /* First change to this old object from this transaction. + /* Add the current marker, recording where we wrote to this object */ + timing_record_write(); + + /* Change to this old object from this transaction. Add it to the list 'modified_old_objects'. 
*/ LIST_APPEND(STM_PSEGMENT->modified_old_objects, obj); - /* Add the current marker, recording where we wrote to this object */ - uintptr_t marker[2]; - marker_fetch(STM_SEGMENT->running_thread, marker); - STM_PSEGMENT->modified_old_objects_markers = - list_append2(STM_PSEGMENT->modified_old_objects_markers, - marker[0], marker[1]); - release_marker_lock(STM_SEGMENT->segment_base); /* We need to privatize the pages containing the object, if they @@ -329,29 +325,24 @@ STM_SEGMENT->transaction_read_version = 1; } -static void _stm_start_transaction(stm_thread_local_t *tl, bool inevitable) +static uint64_t _global_start_time = 0; + +static void _stm_start_transaction(stm_thread_local_t *tl) { assert(!_stm_in_transaction(tl)); - retry: - if (inevitable) { - wait_for_end_of_inevitable_transaction(tl); - } - - if (!acquire_thread_segment(tl)) - goto retry; + while (!acquire_thread_segment(tl)) + ; /* GS invalid before this point! */ assert(STM_PSEGMENT->safe_point == SP_NO_TRANSACTION); assert(STM_PSEGMENT->transaction_state == TS_NONE); - change_timing_state(STM_TIME_RUN_CURRENT); - STM_PSEGMENT->start_time = tl->_timing_cur_start; + timing_event(tl, STM_TRANSACTION_START); + STM_PSEGMENT->start_time = _global_start_time++; STM_PSEGMENT->signalled_to_commit_soon = false; STM_PSEGMENT->safe_point = SP_RUNNING; - STM_PSEGMENT->marker_inev[1] = 0; - if (inevitable) - marker_fetch_inev(); - STM_PSEGMENT->transaction_state = (inevitable ? TS_INEVITABLE : TS_REGULAR); + STM_PSEGMENT->marker_inev.object = NULL; + STM_PSEGMENT->transaction_state = TS_REGULAR; #ifndef NDEBUG STM_PSEGMENT->running_pthread = pthread_self(); #endif @@ -400,14 +391,16 @@ #else long repeat_count = stm_rewind_jmp_setjmp(tl); #endif - _stm_start_transaction(tl, false); + _stm_start_transaction(tl); return repeat_count; } void stm_start_inevitable_transaction(stm_thread_local_t *tl) { - s_mutex_lock(); - _stm_start_transaction(tl, true); + /* used to be more efficient, starting directly an inevitable transaction, + but there is no real point any more, I believe */ + stm_start_transaction(tl); + stm_become_inevitable(tl, "start_inevitable_transaction"); } @@ -450,7 +443,10 @@ return true; } /* we aborted the other transaction without waiting, so - we can just continue */ + we can just break out of this loop on + modified_old_objects and continue with the next + segment */ + break; } })); } @@ -784,13 +780,13 @@ list_clear(STM_PSEGMENT->modified_old_objects_markers); } -static void _finish_transaction(int attribute_to) +static void _finish_transaction(enum stm_event_e event) { STM_PSEGMENT->safe_point = SP_NO_TRANSACTION; STM_PSEGMENT->transaction_state = TS_NONE; /* marker_inev is not needed anymore */ - STM_PSEGMENT->marker_inev[1] = 0; + STM_PSEGMENT->marker_inev.object = NULL; /* reset these lists to NULL for the next transaction */ _verify_cards_cleared_in_all_lists(get_priv_segment(STM_SEGMENT->segment_num)); @@ -798,9 +794,9 @@ list_clear(STM_PSEGMENT->old_objects_with_cards); LIST_FREE(STM_PSEGMENT->large_overflow_objects); - timing_end_transaction(attribute_to); + stm_thread_local_t *tl = STM_SEGMENT->running_thread; + timing_event(tl, event); - stm_thread_local_t *tl = STM_SEGMENT->running_thread; release_thread_segment(tl); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! 
*/ } @@ -813,9 +809,6 @@ minor_collection(/*commit=*/ true); - /* the call to minor_collection() above leaves us with - STM_TIME_BOOKKEEPING */ - /* synchronize overflow objects living in privatized pages */ push_overflow_objects_from_privatized_pages(); @@ -839,9 +832,9 @@ /* if a major collection is required, do it here */ if (is_major_collection_requested()) { - int oldstate = change_timing_state(STM_TIME_MAJOR_GC); + timing_event(STM_SEGMENT->running_thread, STM_GC_MAJOR_START); major_collection_now_at_safe_point(); - change_timing_state(oldstate); + timing_event(STM_SEGMENT->running_thread, STM_GC_MAJOR_DONE); } /* synchronize modified old objects to other threads */ @@ -868,7 +861,7 @@ } /* done */ - _finish_transaction(STM_TIME_RUN_COMMITTED); + _finish_transaction(STM_TRANSACTION_COMMIT); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */ s_mutex_unlock(); @@ -961,10 +954,6 @@ (int)pseg->transaction_state); } - /* if we don't have marker information already, look up and preserve - the marker information from the shadowstack as a string */ - marker_default_for_abort(pseg); - /* throw away the content of the nursery */ long bytes_in_nursery = throw_away_nursery(pseg); @@ -1053,16 +1042,13 @@ /* invoke the callbacks */ invoke_and_clear_user_callbacks(1); /* for abort */ - int attribute_to = STM_TIME_RUN_ABORTED_OTHER; - if (is_abort(STM_SEGMENT->nursery_end)) { /* done aborting */ - attribute_to = STM_SEGMENT->nursery_end; STM_SEGMENT->nursery_end = pause_signalled ? NSE_SIGPAUSE : NURSERY_END; } - _finish_transaction(attribute_to); + _finish_transaction(STM_TRANSACTION_ABORT); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */ /* Broadcast C_ABORTED to wake up contention.c */ @@ -1104,8 +1090,8 @@ if (STM_PSEGMENT->transaction_state == TS_REGULAR) { dprintf(("become_inevitable: %s\n", msg)); - marker_fetch_inev(); - wait_for_end_of_inevitable_transaction(NULL); + timing_fetch_inev(); + wait_for_end_of_inevitable_transaction(); STM_PSEGMENT->transaction_state = TS_INEVITABLE; stm_rewind_jmp_forget(STM_SEGMENT->running_thread); invoke_and_clear_user_callbacks(0); /* for commit */ diff --git a/rpython/translator/stm/src_stm/stm/core.h b/rpython/translator/stm/src_stm/stm/core.h --- a/rpython/translator/stm/src_stm/stm/core.h +++ b/rpython/translator/stm/src_stm/stm/core.h @@ -139,7 +139,7 @@ /* Start time: to know approximately for how long a transaction has been running, in contention management */ - double start_time; + uint64_t start_time; /* This is the number stored in the overflowed objects (a multiple of GCFLAG_OVERFLOW_NUMBER_bit0). 
It is incremented when the @@ -197,10 +197,8 @@ pthread_t running_pthread; #endif - /* Temporarily stores the marker information */ - char marker_self[_STM_MARKER_LEN]; - char marker_other[_STM_MARKER_LEN]; - uintptr_t marker_inev[2]; /* marker where this thread became inevitable */ + /* marker where this thread became inevitable */ + stm_loc_marker_t marker_inev; }; enum /* safe_point */ { diff --git a/rpython/translator/stm/src_stm/stm/forksupport.c b/rpython/translator/stm/src_stm/stm/forksupport.c --- a/rpython/translator/stm/src_stm/stm/forksupport.c +++ b/rpython/translator/stm/src_stm/stm/forksupport.c @@ -56,14 +56,12 @@ s_mutex_unlock(); bool was_in_transaction = _stm_in_transaction(this_tl); - if (was_in_transaction) { - stm_become_inevitable(this_tl, "fork"); - /* Note that the line above can still fail and abort, which should - be fine */ - } - else { - stm_start_inevitable_transaction(this_tl); - } + if (!was_in_transaction) + stm_start_transaction(this_tl); + + stm_become_inevitable(this_tl, "fork"); + /* Note that the line above can still fail and abort, which should + be fine */ s_mutex_lock(); synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK); @@ -188,7 +186,6 @@ #ifndef NDEBUG pr->running_pthread = pthread_self(); #endif - strcpy(pr->marker_self, "fork"); tl->shadowstack = NULL; pr->shadowstack_at_start_of_transaction = NULL; stm_rewind_jmp_forget(tl); @@ -205,6 +202,9 @@ just release these locks early */ s_mutex_unlock(); + /* Open a new profiling file, if any */ + forksupport_open_new_profiling_file(); + /* Move the copy of the mmap over the old one, overwriting it and thus freeing the old mapping in this process */ diff --git a/rpython/translator/stm/src_stm/stm/gcpage.c b/rpython/translator/stm/src_stm/stm/gcpage.c --- a/rpython/translator/stm/src_stm/stm/gcpage.c +++ b/rpython/translator/stm/src_stm/stm/gcpage.c @@ -142,7 +142,7 @@ if (is_major_collection_requested()) { /* if still true */ - int oldstate = change_timing_state(STM_TIME_MAJOR_GC); + timing_event(STM_SEGMENT->running_thread, STM_GC_MAJOR_START); synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK); @@ -150,7 +150,7 @@ major_collection_now_at_safe_point(); } - change_timing_state(oldstate); + timing_event(STM_SEGMENT->running_thread, STM_GC_MAJOR_DONE); } s_mutex_unlock(); @@ -447,9 +447,9 @@ for (i = list_count(lst); i > 0; i -= 2) { mark_visit_object((object_t *)list_item(lst, i - 1), base); } - if (get_priv_segment(j)->marker_inev[1]) { - uintptr_t marker_inev_obj = get_priv_segment(j)->marker_inev[1]; - mark_visit_object((object_t *)marker_inev_obj, base); + if (get_priv_segment(j)->marker_inev.segment_base) { + object_t *marker_inev_obj = get_priv_segment(j)->marker_inev.object; + mark_visit_object(marker_inev_obj, base); } } } diff --git a/rpython/translator/stm/src_stm/stm/marker.c b/rpython/translator/stm/src_stm/stm/marker.c --- a/rpython/translator/stm/src_stm/stm/marker.c +++ b/rpython/translator/stm/src_stm/stm/marker.c @@ -4,18 +4,11 @@ #endif -void (*stmcb_expand_marker)(char *segment_base, uintptr_t odd_number, - object_t *following_object, - char *outputbuf, size_t outputbufsize); - -void (*stmcb_debug_print)(const char *cause, double time, - const char *marker); - - -static void marker_fetch(stm_thread_local_t *tl, uintptr_t marker[2]) +static void marker_fetch(stm_loc_marker_t *out_marker) { - /* fetch the current marker from the tl's shadow stack, - and return it in 'marker[2]'. 
*/ + /* Fetch the current marker from the 'out_marker->tl's shadow stack, + and return it in 'out_marker->odd_number' and 'out_marker->object'. */ + stm_thread_local_t *tl = out_marker->tl; struct stm_shadowentry_s *current = tl->shadowstack - 1; struct stm_shadowentry_s *base = tl->shadowstack_base; @@ -29,85 +22,31 @@ } if (current != base) { /* found the odd marker */ - marker[0] = (uintptr_t)current[0].ss; - marker[1] = (uintptr_t)current[1].ss; + out_marker->odd_number = (uintptr_t)current[0].ss; + out_marker->object = current[1].ss; } else { /* no marker found */ - marker[0] = 0; - marker[1] = 0; + out_marker->odd_number = 0; + out_marker->object = NULL; } } -static void marker_expand(uintptr_t marker[2], char *segment_base, - char *outmarker) +static void _timing_fetch_inev(void) { - /* Expand the marker given by 'marker[2]' into a full string. This - works assuming that the marker was produced inside the segment - given by 'segment_base'. If that's from a different thread, you - must first acquire the corresponding 'marker_lock'. */ - assert(_has_mutex()); - outmarker[0] = 0; - if (marker[0] == 0) - return; /* no marker entry found */ - if (stmcb_expand_marker != NULL) { - stmcb_expand_marker(segment_base, marker[0], (object_t *)marker[1], - outmarker, _STM_MARKER_LEN); - } + stm_loc_marker_t marker; + marker.tl = STM_SEGMENT->running_thread; + marker_fetch(&marker); + STM_PSEGMENT->marker_inev.odd_number = marker.odd_number; + STM_PSEGMENT->marker_inev.object = marker.object; } -static void marker_default_for_abort(struct stm_priv_segment_info_s *pseg) +static void marker_fetch_obj_write(object_t *obj, stm_loc_marker_t *out_marker) { - if (pseg->marker_self[0] != 0) - return; /* already collected an entry */ - - uintptr_t marker[2]; - marker_fetch(pseg->pub.running_thread, marker); - marker_expand(marker, pseg->pub.segment_base, pseg->marker_self); - pseg->marker_other[0] = 0; -} - -char *_stm_expand_marker(void) -{ - /* for tests only! */ - static char _result[_STM_MARKER_LEN]; - uintptr_t marker[2]; - _result[0] = 0; - s_mutex_lock(); - marker_fetch(STM_SEGMENT->running_thread, marker); - marker_expand(marker, STM_SEGMENT->segment_base, _result); - s_mutex_unlock(); - return _result; -} - -static void marker_copy(stm_thread_local_t *tl, - struct stm_priv_segment_info_s *pseg, - enum stm_time_e attribute_to, double time) -{ - /* Copies the marker information from pseg to tl. This is called - indirectly from abort_with_mutex(), but only if the lost time is - greater than that of the previous recorded marker. By contrast, - pseg->marker_self has been filled already in all cases. The - reason for the two steps is that we must fill pseg->marker_self - earlier than now (some objects may be GCed), but we only know - here the total time it gets attributed. + /* From 'out_marker->tl', fill in 'out_marker->segment_base' and + 'out_marker->odd_number' and 'out_marker->object' from the + marker associated with writing the 'obj'. 
*/ - if (stmcb_debug_print) { - stmcb_debug_print(timer_names[attribute_to], time, pseg->marker_self); - } - if (time * 0.99 > tl->longest_marker_time) { - tl->longest_marker_state = attribute_to; - tl->longest_marker_time = time; - memcpy(tl->longest_marker_self, pseg->marker_self, _STM_MARKER_LEN); - memcpy(tl->longest_marker_other, pseg->marker_other, _STM_MARKER_LEN); - } - pseg->marker_self[0] = 0; - pseg->marker_other[0] = 0; -} - -static void marker_fetch_obj_write(uint8_t in_segment_num, object_t *obj, - uintptr_t marker[2]) -{ assert(_has_mutex()); /* here, we acquired the other thread's marker_lock, which means that: @@ -119,80 +58,86 @@ the global mutex_lock at this point too). */ long i; + int in_segment_num = out_marker->tl->associated_segment_num; struct stm_priv_segment_info_s *pseg = get_priv_segment(in_segment_num); struct list_s *mlst = pseg->modified_old_objects; struct list_s *mlstm = pseg->modified_old_objects_markers; - for (i = list_count(mlst); --i >= 0; ) { + assert(list_count(mlstm) <= 2 * list_count(mlst)); + for (i = list_count(mlstm) / 2; --i >= 0; ) { if (list_item(mlst, i) == (uintptr_t)obj) { - assert(list_count(mlstm) == 2 * list_count(mlst)); - marker[0] = list_item(mlstm, i * 2 + 0); - marker[1] = list_item(mlstm, i * 2 + 1); + out_marker->odd_number = list_item(mlstm, i * 2 + 0); + out_marker->object = (object_t *)list_item(mlstm, i * 2 + 1); return; } } - marker[0] = 0; - marker[1] = 0; + out_marker->odd_number = 0; + out_marker->object = NULL; } -static void marker_contention(int kind, bool abort_other, - uint8_t other_segment_num, object_t *obj) +static void _timing_record_write(void) { - uintptr_t self_marker[2]; - uintptr_t other_marker[2]; - struct stm_priv_segment_info_s *my_pseg, *other_pseg; + stm_loc_marker_t marker; + marker.tl = STM_SEGMENT->running_thread; + marker_fetch(&marker); - my_pseg = get_priv_segment(STM_SEGMENT->segment_num); + long base_count = list_count(STM_PSEGMENT->modified_old_objects); + struct list_s *mlstm = STM_PSEGMENT->modified_old_objects_markers; + while (list_count(mlstm) < 2 * base_count) { + mlstm = list_append2(mlstm, 0, 0); + } + mlstm = list_append2(mlstm, marker.odd_number, (uintptr_t)marker.object); + STM_PSEGMENT->modified_old_objects_markers = mlstm; +} + +static void _timing_contention(enum stm_event_e kind, + uint8_t other_segment_num, object_t *obj) +{ + struct stm_priv_segment_info_s *other_pseg; other_pseg = get_priv_segment(other_segment_num); - char *my_segment_base = STM_SEGMENT->segment_base; - char *other_segment_base = get_segment_base(other_segment_num); + char *other_segment_base = other_pseg->pub.segment_base; + acquire_marker_lock(other_segment_base); - acquire_marker_lock(other_segment_base); + stm_loc_marker_t markers[2]; /* Collect the location for myself. It's usually the current location, except in a write-read abort, in which case it's the older location of the write. */ - if (kind == WRITE_READ_CONTENTION) - marker_fetch_obj_write(my_pseg->pub.segment_num, obj, self_marker); + markers[0].tl = STM_SEGMENT->running_thread; + markers[0].segment_base = STM_SEGMENT->segment_base; + + if (kind == STM_CONTENTION_WRITE_READ) + marker_fetch_obj_write(obj, &markers[0]); else - marker_fetch(my_pseg->pub.running_thread, self_marker); - - /* Expand this location into either my_pseg->marker_self or - other_pseg->marker_other, depending on who aborts. */ - marker_expand(self_marker, my_segment_base, - abort_other ? 
other_pseg->marker_other - : my_pseg->marker_self); + marker_fetch(&markers[0]); /* For some categories, we can also collect the relevant information for the other segment. */ - char *outmarker = abort_other ? other_pseg->marker_self - : my_pseg->marker_other; + markers[1].tl = other_pseg->pub.running_thread; + markers[1].segment_base = other_pseg->pub.segment_base; + switch (kind) { - case WRITE_WRITE_CONTENTION: - marker_fetch_obj_write(other_segment_num, obj, other_marker); - marker_expand(other_marker, other_segment_base, outmarker); + case STM_CONTENTION_WRITE_WRITE: + marker_fetch_obj_write(obj, &markers[1]); break; - case INEVITABLE_CONTENTION: - assert(abort_other == false); - other_marker[0] = other_pseg->marker_inev[0]; - other_marker[1] = other_pseg->marker_inev[1]; - marker_expand(other_marker, other_segment_base, outmarker); - break; - case WRITE_READ_CONTENTION: - strcpy(outmarker, ""); + case STM_CONTENTION_INEVITABLE: + markers[1].odd_number = other_pseg->marker_inev.odd_number; + markers[1].object = other_pseg->marker_inev.object; break; default: - outmarker[0] = 0; + markers[1].odd_number = 0; + markers[1].object = NULL; break; } + stmcb_timing_event(markers[0].tl, kind, markers); + + /* only release the lock after stmcb_timing_event(), otherwise it could + run into race conditions trying to interpret 'markers[1].object' */ release_marker_lock(other_segment_base); } -static void marker_fetch_inev(void) -{ - uintptr_t marker[2]; - marker_fetch(STM_SEGMENT->running_thread, marker); - STM_PSEGMENT->marker_inev[0] = marker[0]; - STM_PSEGMENT->marker_inev[1] = marker[1]; -} + +void (*stmcb_timing_event)(stm_thread_local_t *tl, /* the local thread */ + enum stm_event_e event, + stm_loc_marker_t *markers); diff --git a/rpython/translator/stm/src_stm/stm/marker.h b/rpython/translator/stm/src_stm/stm/marker.h --- a/rpython/translator/stm/src_stm/stm/marker.h +++ b/rpython/translator/stm/src_stm/stm/marker.h @@ -1,13 +1,20 @@ /* Imported by rpython/translator/stm/import_stmgc.py */ -static void marker_fetch(stm_thread_local_t *tl, uintptr_t marker[2]); -static void marker_fetch_inev(void); -static void marker_expand(uintptr_t marker[2], char *segment_base, - char *outmarker); -static void marker_default_for_abort(struct stm_priv_segment_info_s *pseg); -static void marker_copy(stm_thread_local_t *tl, - struct stm_priv_segment_info_s *pseg, - enum stm_time_e attribute_to, double time); +static void _timing_record_write(void); +static void _timing_fetch_inev(void); +static void _timing_contention(enum stm_event_e kind, + uint8_t other_segment_num, object_t *obj); -static void marker_contention(int kind, bool abort_other, - uint8_t other_segment_num, object_t *obj); + +#define timing_event(tl, event) \ + (stmcb_timing_event != NULL ? stmcb_timing_event(tl, event, NULL) : (void)0) + +#define timing_record_write() \ + (stmcb_timing_event != NULL ? _timing_record_write() : (void)0) + +#define timing_fetch_inev() \ + (stmcb_timing_event != NULL ? _timing_fetch_inev() : (void)0) + +#define timing_contention(kind, other_segnum, obj) \ + (stmcb_timing_event != NULL ? 
\ + _timing_contention(kind, other_segnum, obj) : (void)0) diff --git a/rpython/translator/stm/src_stm/stm/nursery.c b/rpython/translator/stm/src_stm/stm/nursery.c --- a/rpython/translator/stm/src_stm/stm/nursery.c +++ b/rpython/translator/stm/src_stm/stm/nursery.c @@ -426,11 +426,13 @@ for (i = num_old + 1; i < total; i += 2) { minor_trace_if_young((object_t **)list_ptr_to_item(mlst, i)); } - if (STM_PSEGMENT->marker_inev[1]) { - uintptr_t *pmarker_inev_obj = (uintptr_t *) + if (STM_PSEGMENT->marker_inev.segment_base) { + assert(STM_PSEGMENT->marker_inev.segment_base == + STM_SEGMENT->segment_base); + object_t **pmarker_inev_obj = (object_t **) REAL_ADDRESS(STM_SEGMENT->segment_base, - &STM_PSEGMENT->marker_inev[1]); - minor_trace_if_young((object_t **)pmarker_inev_obj); + &STM_PSEGMENT->marker_inev.object); + minor_trace_if_young(pmarker_inev_obj); } } @@ -573,11 +575,11 @@ stm_safe_point(); - change_timing_state(STM_TIME_MINOR_GC); + timing_event(STM_SEGMENT->running_thread, STM_GC_MINOR_START); _do_minor_collection(commit); - change_timing_state(commit ? STM_TIME_BOOKKEEPING : STM_TIME_RUN_CURRENT); + timing_event(STM_SEGMENT->running_thread, STM_GC_MINOR_DONE); } void stm_collect(long level) diff --git a/rpython/translator/stm/src_stm/stm/nursery.h b/rpython/translator/stm/src_stm/stm/nursery.h --- a/rpython/translator/stm/src_stm/stm/nursery.h +++ b/rpython/translator/stm/src_stm/stm/nursery.h @@ -1,8 +1,14 @@ /* Imported by rpython/translator/stm/import_stmgc.py */ -/* '_stm_nursery_section_end' is either NURSERY_END or NSE_SIGxxx */ -#define NSE_SIGPAUSE STM_TIME_WAIT_OTHER -#define NSE_SIGCOMMITSOON STM_TIME_SYNC_COMMIT_SOON +/* 'nursery_end' is either NURSERY_END or one of NSE_SIGxxx */ +#define NSE_SIGABORT 1 +#define NSE_SIGPAUSE 2 +#define NSE_SIGCOMMITSOON 3 +#define _NSE_NUM_SIGNALS 4 + +#if _NSE_NUM_SIGNALS >= _STM_NSE_SIGNAL_MAX +# error "increase _STM_NSE_SIGNAL_MAX" +#endif static uint32_t highest_overflow_number; diff --git a/rpython/translator/stm/src_stm/stm/setup.c b/rpython/translator/stm/src_stm/stm/setup.c --- a/rpython/translator/stm/src_stm/stm/setup.c +++ b/rpython/translator/stm/src_stm/stm/setup.c @@ -23,8 +23,8 @@ static char *setup_mmap(char *reason, int *map_fd) { char name[128]; - sprintf(name, "/stmgc-c7-bigmem-%ld-%.18e", - (long)getpid(), get_stm_time()); + sprintf(name, "/stmgc-c7-bigmem-%ld", + (long)getpid()); /* Create the big shared memory object, and immediately unlink it. There is a small window where if this process is killed the @@ -226,6 +226,8 @@ return (pthread_t *)(tl->creating_pthread); } +static int thread_local_counters = 0; + void stm_register_thread_local(stm_thread_local_t *tl) { int num; @@ -242,14 +244,13 @@ num = tl->prev->associated_segment_num; } tl->thread_local_obj = NULL; - tl->_timing_cur_state = STM_TIME_OUTSIDE_TRANSACTION; - tl->_timing_cur_start = get_stm_time(); /* assign numbers consecutively, but that's for tests; we could also assign the same number to all of them and they would get their own numbers automatically. 
*/ num = (num % NB_SEGMENTS) + 1; tl->associated_segment_num = num; + tl->thread_local_counter = ++thread_local_counters; *_get_cpth(tl) = pthread_self(); _init_shadow_stack(tl); set_gs_register(get_segment_base(num)); diff --git a/rpython/translator/stm/src_stm/stm/sync.c b/rpython/translator/stm/src_stm/stm/sync.c --- a/rpython/translator/stm/src_stm/stm/sync.c +++ b/rpython/translator/stm/src_stm/stm/sync.c @@ -124,32 +124,19 @@ /************************************************************/ -static void wait_for_end_of_inevitable_transaction( - stm_thread_local_t *tl_or_null_if_can_abort) +static void wait_for_end_of_inevitable_transaction(void) { long i; restart: for (i = 1; i <= NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *other_pseg = get_priv_segment(i); if (other_pseg->transaction_state == TS_INEVITABLE) { - if (tl_or_null_if_can_abort == NULL) { - /* handle this case like a contention: it will either - abort us (not the other thread, which is inevitable), - or wait for a while. If we go past this call, then we - waited; in this case we have to re-check if no other - thread is inevitable. */ - inevitable_contention_management(i); - } - else { - /* wait for stm_commit_transaction() to finish this - inevitable transaction */ - signal_other_to_commit_soon(other_pseg); - change_timing_state_tl(tl_or_null_if_can_abort, - STM_TIME_WAIT_INEVITABLE); - cond_wait(C_INEVITABLE); - /* don't bother changing the timing state again: the caller - will very soon go to STM_TIME_RUN_CURRENT */ - } + /* handle this case like a contention: it will either + abort us (not the other thread, which is inevitable), + or wait for a while. If we go past this call, then we + waited; in this case we have to re-check if no other + thread is inevitable. */ + inevitable_contention_management(i); goto restart; } } @@ -189,8 +176,9 @@ } /* No segment available. Wait until release_thread_segment() signals that one segment has been freed. 
*/ - change_timing_state_tl(tl, STM_TIME_WAIT_FREE_SEGMENT); + timing_event(tl, STM_WAIT_FREE_SEGMENT); cond_wait(C_SEGMENT_FREE); + timing_event(tl, STM_WAIT_DONE); /* Return false to the caller, which will call us again */ return false; @@ -332,7 +320,6 @@ if (STM_SEGMENT->nursery_end == NURSERY_END) return; /* fast path: no safe point requested */ - int previous_state = -1; assert(_seems_to_be_running_transaction()); assert(_has_mutex()); while (1) { @@ -343,10 +330,6 @@ break; /* no safe point requested */ if (STM_SEGMENT->nursery_end == NSE_SIGCOMMITSOON) { - if (previous_state == -1) { - previous_state = change_timing_state(STM_TIME_SYNC_COMMIT_SOON); - } - STM_PSEGMENT->signalled_to_commit_soon = true; stmcb_commit_soon(); if (!pause_signalled) { @@ -363,17 +346,12 @@ #ifdef STM_TESTS abort_with_mutex(); #endif - if (previous_state == -1) { - previous_state = change_timing_state(STM_TIME_SYNC_PAUSE); - } + timing_event(STM_SEGMENT->running_thread, STM_WAIT_SYNC_PAUSE); cond_signal(C_AT_SAFE_POINT); STM_PSEGMENT->safe_point = SP_WAIT_FOR_C_REQUEST_REMOVED; cond_wait(C_REQUEST_REMOVED); STM_PSEGMENT->safe_point = SP_RUNNING; - } - - if (previous_state != -1) { - change_timing_state(previous_state); + timing_event(STM_SEGMENT->running_thread, STM_WAIT_DONE); } } diff --git a/rpython/translator/stm/src_stm/stm/sync.h b/rpython/translator/stm/src_stm/stm/sync.h --- a/rpython/translator/stm/src_stm/stm/sync.h +++ b/rpython/translator/stm/src_stm/stm/sync.h @@ -29,7 +29,7 @@ static bool acquire_thread_segment(stm_thread_local_t *tl); static void release_thread_segment(stm_thread_local_t *tl); -static void wait_for_end_of_inevitable_transaction(stm_thread_local_t *); +static void wait_for_end_of_inevitable_transaction(void); enum sync_type_e { STOP_OTHERS_UNTIL_MUTEX_UNLOCK, diff --git a/rpython/translator/stm/src_stm/stmgc.c b/rpython/translator/stm/src_stm/stmgc.c --- a/rpython/translator/stm/src_stm/stmgc.c +++ b/rpython/translator/stm/src_stm/stmgc.c @@ -15,8 +15,8 @@ #include "stm/extra.h" #include "stm/fprintcolor.h" #include "stm/weakref.h" -#include "stm/timing.h" #include "stm/marker.h" +#include "stm/prof.h" #include "stm/misc.c" #include "stm/list.c" @@ -35,6 +35,6 @@ #include "stm/extra.c" #include "stm/fprintcolor.c" #include "stm/weakref.c" -#include "stm/timing.c" #include "stm/marker.c" +#include "stm/prof.c" #include "stm/rewind_setjmp.c" diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -55,28 +55,6 @@ object_t *ss; }; -enum stm_time_e { - STM_TIME_OUTSIDE_TRANSACTION, - STM_TIME_RUN_CURRENT, - STM_TIME_RUN_COMMITTED, - STM_TIME_RUN_ABORTED_WRITE_WRITE, - STM_TIME_RUN_ABORTED_WRITE_READ, - STM_TIME_RUN_ABORTED_INEVITABLE, - STM_TIME_RUN_ABORTED_OTHER, - STM_TIME_WAIT_FREE_SEGMENT, - STM_TIME_WAIT_WRITE_READ, - STM_TIME_WAIT_INEVITABLE, - STM_TIME_WAIT_OTHER, - STM_TIME_SYNC_COMMIT_SOON, - STM_TIME_BOOKKEEPING, - STM_TIME_MINOR_GC, - STM_TIME_MAJOR_GC, - STM_TIME_SYNC_PAUSE, - _STM_TIME_N -}; - -#define _STM_MARKER_LEN 80 - typedef struct stm_thread_local_s { /* every thread should handle the shadow stack itself */ struct stm_shadowentry_s *shadowstack, *shadowstack_base; @@ -89,20 +67,11 @@ char *mem_clear_on_abort; size_t mem_bytes_to_clear_on_abort; /* after an abort, some details about the abort are stored there. 
- (these fields are not modified on a successful commit) */ + (this field is not modified on a successful commit) */ long last_abort__bytes_in_nursery; - /* timing information, accumulated */ - uint32_t events[_STM_TIME_N]; - float timing[_STM_TIME_N]; - double _timing_cur_start; - enum stm_time_e _timing_cur_state; - /* the marker with the longest associated time so far */ - enum stm_time_e longest_marker_state; - double longest_marker_time; - char longest_marker_self[_STM_MARKER_LEN]; - char longest_marker_other[_STM_MARKER_LEN]; /* the next fields are handled internally by the library */ int associated_segment_num; + int thread_local_counter; struct stm_thread_local_s *prev, *next; void *creating_pthread[2]; } stm_thread_local_t; @@ -156,7 +125,7 @@ #define _STM_CARD_SIZE 32 /* must be >= 32 */ #define _STM_MIN_CARD_COUNT 17 #define _STM_MIN_CARD_OBJ_SIZE (_STM_CARD_SIZE * _STM_MIN_CARD_COUNT) -#define _STM_NSE_SIGNAL_MAX _STM_TIME_N +#define _STM_NSE_SIGNAL_MAX 7 #define _STM_FAST_ALLOC (66*1024) @@ -439,20 +408,79 @@ const char *msg); -/* Temporary? */ -void stm_flush_timing(stm_thread_local_t *tl, int verbose); +/* Profiling events. In the comments: content of the markers, if any */ +enum stm_event_e { + /* always STM_TRANSACTION_START followed later by one of COMMIT or ABORT */ + STM_TRANSACTION_START, + STM_TRANSACTION_COMMIT, + STM_TRANSACTION_ABORT, + /* contention; see details at the start of contention.c */ + STM_CONTENTION_WRITE_WRITE, /* markers: self loc / other written loc */ + STM_CONTENTION_WRITE_READ, /* markers: self written loc / other missing */ + STM_CONTENTION_INEVITABLE, /* markers: self loc / other inev loc */ + + /* following a contention, we get from the same thread one of: + STM_ABORTING_OTHER_CONTENTION, STM_TRANSACTION_ABORT (self-abort), + or STM_WAIT_CONTENTION (self-wait). */ + STM_ABORTING_OTHER_CONTENTION, + + /* always one STM_WAIT_xxx followed later by STM_WAIT_DONE */ + STM_WAIT_FREE_SEGMENT, + STM_WAIT_SYNC_PAUSE, + STM_WAIT_CONTENTION, + STM_WAIT_DONE, + + /* start and end of GC cycles */ + STM_GC_MINOR_START, + STM_GC_MINOR_DONE, + STM_GC_MAJOR_START, + STM_GC_MAJOR_DONE, + + _STM_EVENT_N +}; + +#define STM_EVENT_NAMES \ + "transaction start", \ + "transaction commit", \ + "transaction abort", \ + "contention write write", \ + "contention write read", \ + "contention inevitable", \ + "aborting other contention", \ + "wait free segment", \ + "wait sync pause", \ + "wait contention", \ + "wait done", \ + "gc minor start", \ + "gc minor done", \ + "gc major start", \ + "gc major done" /* The markers pushed in the shadowstack are an odd number followed by a - regular pointer. When needed, this library invokes this callback to - turn this pair into a human-readable explanation. */ -extern void (*stmcb_expand_marker)(char *segment_base, uintptr_t odd_number, - object_t *following_object, - char *outputbuf, size_t outputbufsize); -extern void (*stmcb_debug_print)(const char *cause, double time, - const char *marker); + regular pointer. 
*/ +typedef struct { + stm_thread_local_t *tl; + char *segment_base; /* base to interpret the 'object' below */ + uintptr_t odd_number; /* marker odd number, or 0 if marker is missing */ + object_t *object; /* marker object, or NULL if marker is missing */ +} stm_loc_marker_t; +extern void (*stmcb_timing_event)(stm_thread_local_t *tl, /* the local thread */ + enum stm_event_e event, + stm_loc_marker_t *markers); -/* Conventience macros to push the markers into the shadowstack */ +/* Calling this sets up a stmcb_timing_event callback that will produce + a binary file calling 'profiling_file_name'. After a fork(), it is + written to 'profiling_file_name.fork'. Call it with NULL to + stop profiling. Returns -1 in case of error (see errno then). + The optional 'expand_marker' function pointer is called to expand + the marker's odd_number and object into data, starting at the given + position and with the given maximum length. */ +int stm_set_timing_log(const char *profiling_file_name, + int expand_marker(stm_loc_marker_t *, char *, int)); + + +/* Convenience macros to push the markers into the shadowstack */ #define STM_PUSH_MARKER(tl, odd_num, p) do { \ uintptr_t _odd_num = (odd_num); \ assert(_odd_num & 1); \ @@ -477,8 +505,6 @@ _ss->ss = (object_t *)_odd_num; \ } while (0) -char *_stm_expand_marker(void); - /* ==================== END ==================== */ From noreply at buildbot.pypy.org Sat Oct 4 18:19:06 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 4 Oct 2014 18:19:06 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Adapt the code to the new profiling API Message-ID: <20141004161906.841F61C3342@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r73768:e4dcb6d27b82 Date: 2014-10-04 18:18 +0200 http://bitbucket.org/pypy/pypy/changeset/e4dcb6d27b82/ Log: Adapt the code to the new profiling API diff --git a/rpython/translator/stm/src_stm/extracode.h b/rpython/translator/stm/src_stm/extracode.h --- a/rpython/translator/stm/src_stm/extracode.h +++ b/rpython/translator/stm/src_stm/extracode.h @@ -34,7 +34,6 @@ void pypy_stm_unregister_thread_local(void) { - stm_flush_timing(&stm_thread_local, 1); // XXX temporary stm_unregister_thread_local(&stm_thread_local); } @@ -43,6 +42,8 @@ /*** HACK: hard-coded logic to expand the marker into ***/ /*** a string, suitable for running in PyPy ***/ +#include /* for getenv() */ + typedef struct pypy_rpy_string0 RPyStringSpace0; static long g_co_filename_ofs; @@ -65,10 +66,12 @@ return (RPyStringSpace0 *)str0; } -static void _stm_expand_marker_for_pypy( - char *segment_base, uintptr_t odd_number, object_t *o, - char *outputbuf, size_t outputbufsize) +static int _stm_expand_marker_for_pypy(stm_loc_marker_t *marker, + char *outputbuf, int outputbufsize) { + if (marker->object == NULL) + return 0; + long co_firstlineno; RPyStringSpace0 *co_filename; RPyStringSpace0 *co_name; @@ -77,58 +80,46 @@ long fnlen = 1, nlen = 1, line = 0; char *fn = "?", *name = "?"; - if (o) { - co_filename =_fetch_rpsspace0(segment_base, o, g_co_filename_ofs); - co_name =_fetch_rpsspace0(segment_base, o, g_co_name_ofs); - co_firstlineno=_fetch_lngspace0(segment_base, o, g_co_firstlineno_ofs); - co_lnotab =_fetch_rpsspace0(segment_base, o, g_co_lnotab_ofs); + char *segment_base = marker->segment_base; + object_t * o = marker->object; - long remaining = outputbufsize - 32; - nlen = RPyString_Size(co_name); - name = _RPyString_AsString(co_name); - if (nlen > remaining / 2) { - nlen = remaining / 2; - ntrunc = ">"; - } - remaining -= nlen; + 
co_filename = _fetch_rpsspace0(segment_base, o, g_co_filename_ofs); + co_name = _fetch_rpsspace0(segment_base, o, g_co_name_ofs); + co_firstlineno = _fetch_lngspace0(segment_base, o, g_co_firstlineno_ofs); + co_lnotab = _fetch_rpsspace0(segment_base, o, g_co_lnotab_ofs); - fnlen = RPyString_Size(co_filename); - fn = _RPyString_AsString(co_filename); - if (fnlen > remaining) { - fn += (fnlen - remaining); - fnlen = remaining; - fntrunc = "<"; - } + long remaining = outputbufsize - 32; + nlen = RPyString_Size(co_name); + name = _RPyString_AsString(co_name); + if (nlen > remaining / 2) { + nlen = remaining / 2; + ntrunc = ">"; + } + remaining -= nlen; - long lnotablen = RPyString_Size(co_lnotab); - char *lnotab = _RPyString_AsString(co_lnotab); - uintptr_t next_instr = odd_number >> 1; - line = co_firstlineno; - uintptr_t i, addr = 0; - for (i = 0; i < lnotablen; i += 2) { - addr += ((unsigned char *)lnotab)[i]; - if (addr > next_instr) - break; - line += ((unsigned char *)lnotab)[i + 1]; - } + fnlen = RPyString_Size(co_filename); + fn = _RPyString_AsString(co_filename); + if (fnlen > remaining) { + fn += (fnlen - remaining); + fnlen = remaining; + fntrunc = "<"; } - snprintf(outputbuf, outputbufsize, "File \"%s%.*s\", line %ld, in %.*s%s", - fntrunc, (int)fnlen, fn, line, (int)nlen, name, ntrunc); -} + long lnotablen = RPyString_Size(co_lnotab); + char *lnotab = _RPyString_AsString(co_lnotab); + uintptr_t next_instr = marker->odd_number >> 1; + line = co_firstlineno; + uintptr_t i, addr = 0; + for (i = 0; i < lnotablen; i += 2) { + addr += ((unsigned char *)lnotab)[i]; + if (addr > next_instr) + break; + line += ((unsigned char *)lnotab)[i + 1]; + } -#define REPORT_MINIMUM_TIME 0.0001 /* 0.1 millisecond; xxx tweak */ - -static void _stm_cb_debug_print(const char *cause, double time, - const char *marker) -{ - if (time >= REPORT_MINIMUM_TIME) { - PYPY_DEBUG_START("stm-report"); - fprintf(PYPY_DEBUG_FILE, "%s %s\n%s %.6fs: %s\n", - pypy_debug_threadid, marker, - pypy_debug_threadid, time, cause); - PYPY_DEBUG_STOP("stm-report"); - } + return snprintf(outputbuf, outputbufsize, + "File \"%s%.*s\", line %ld, in %.*s%s", + fntrunc, (int)fnlen, fn, line, (int)nlen, name, ntrunc); } void pypy_stm_setup_expand_marker(long co_filename_ofs, @@ -140,11 +131,8 @@ g_co_name_ofs = co_name_ofs; g_co_firstlineno_ofs = co_firstlineno_ofs; g_co_lnotab_ofs = co_lnotab_ofs; - stmcb_expand_marker = _stm_expand_marker_for_pypy; - PYPY_DEBUG_START("stm-report"); - if (PYPY_HAVE_DEBUG_PRINTS) { - stmcb_debug_print = _stm_cb_debug_print; - } - PYPY_DEBUG_STOP("stm-report"); + char *filename = getenv("PYPYSTM"); + if (filename && filename[0]) + stm_set_timing_log(filename, &_stm_expand_marker_for_pypy); } From noreply at buildbot.pypy.org Sat Oct 4 18:19:07 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 4 Oct 2014 18:19:07 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/57b388129192 Message-ID: <20141004161907.B05DB1C3342@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r73769:b6a8998e5276 Date: 2014-10-04 18:18 +0200 http://bitbucket.org/pypy/pypy/changeset/b6a8998e5276/ Log: import stmgc/57b388129192 diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -8f88cdb1d916 +57b388129192 diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c --- 
a/rpython/translator/stm/src_stm/stm/core.c +++ b/rpython/translator/stm/src_stm/stm/core.c @@ -399,8 +399,13 @@ { /* used to be more efficient, starting directly an inevitable transaction, but there is no real point any more, I believe */ + rewind_jmp_buf rjbuf; + stm_rewind_jmp_enterframe(tl, &rjbuf); + stm_start_transaction(tl); stm_become_inevitable(tl, "start_inevitable_transaction"); + + stm_rewind_jmp_leaveframe(tl, &rjbuf); } From noreply at buildbot.pypy.org Sat Oct 4 19:35:50 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 4 Oct 2014 19:35:50 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Kill the now-deprecated __pypy__.thread.longest_xxx() functions Message-ID: <20141004173550.B6B291C3334@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r73770:9e29983c1261 Date: 2014-10-04 19:32 +0200 http://bitbucket.org/pypy/pypy/changeset/9e29983c1261/ Log: Kill the now-deprecated __pypy__.thread.longest_xxx() functions diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -38,8 +38,6 @@ '_atomic_enter': 'interp_atomic.atomic_enter', '_exclusive_atomic_enter': 'interp_atomic.exclusive_atomic_enter', '_atomic_exit': 'interp_atomic.atomic_exit', - 'longest_abort_info': 'interp_atomic.longest_abort_info', - 'reset_longest_abort_info':'interp_atomic.reset_longest_abort_info', 'getsegmentlimit': 'interp_atomic.getsegmentlimit', 'hint_commit_soon': 'interp_atomic.hint_commit_soon', 'is_atomic': 'interp_atomic.is_atomic', diff --git a/pypy/module/__pypy__/interp_atomic.py b/pypy/module/__pypy__/interp_atomic.py --- a/pypy/module/__pypy__/interp_atomic.py +++ b/pypy/module/__pypy__/interp_atomic.py @@ -69,24 +69,6 @@ return space.wrap(giltl.is_atomic) - at unwrap_spec(mintime=float) -def longest_abort_info(space, mintime=0.0): - if space.config.translation.stm: - from rpython.rlib import rstm - if rstm.longest_marker_time() <= mintime: - return space.w_None - a, b, c, d = rstm.longest_abort_info() - return space.newtuple([space.wrap(a), space.wrap(b), - space.wrap(c), space.wrap(d)]) - else: - return space.w_None - -def reset_longest_abort_info(space): - if space.config.translation.stm: - from rpython.rlib.rstm import reset_longest_abort_info - reset_longest_abort_info() - - def hint_commit_soon(space): if space.config.translation.stm: from rpython.rlib.rstm import hint_commit_soon diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -154,22 +154,6 @@ def pop_marker(): llop.stm_pop_marker(lltype.Void) - at dont_look_inside # XXX allow looking inside this function -def longest_marker_time(): - return llop.stm_longest_marker_time(lltype.Float) - - at dont_look_inside -def longest_abort_info(): - state = llop.stm_longest_marker_state(lltype.Signed) - time = llop.stm_longest_marker_time(lltype.Float) - cself = llop.stm_longest_marker_self(rffi.CCHARP) - cother = llop.stm_longest_marker_other(rffi.CCHARP) - return (state, time, rffi.charp2str(cself), rffi.charp2str(cother)) - - at dont_look_inside -def reset_longest_abort_info(): - llop.stm_reset_longest_marker_state(lltype.Void) - # ____________________________________________________________ class _Entry(ExtRegistryEntry): diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -456,12 +456,6 @@ 
'stm_expand_marker': LLOp(), 'stm_setup_expand_marker_for_pypy': LLOp(), - 'stm_longest_marker_state': LLOp(), - 'stm_longest_marker_time': LLOp(), - 'stm_longest_marker_self': LLOp(), - 'stm_longest_marker_other': LLOp(), - 'stm_reset_longest_marker_state': LLOp(), - # __________ address operations __________ 'boehm_malloc': LLOp(), diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -241,28 +241,6 @@ return 'pypy_stm_setup_expand_marker(%s, %s, %s, %s);' % ( offsets[0], offsets[1], offsets[2], offsets[3]) -def stm_longest_marker_state(funcgen, op): - result = funcgen.expr(op.result) - return '%s = (Signed)stm_thread_local.longest_marker_state;' % (result,) - -def stm_longest_marker_time(funcgen, op): - result = funcgen.expr(op.result) - return '%s = stm_thread_local.longest_marker_time;' % (result,) - -def stm_longest_marker_self(funcgen, op): - result = funcgen.expr(op.result) - return '%s = stm_thread_local.longest_marker_self;' % (result,) - -def stm_longest_marker_other(funcgen, op): - result = funcgen.expr(op.result) - return '%s = stm_thread_local.longest_marker_other;' % (result,) - -def stm_reset_longest_marker_state(funcgen, op): - return ('stm_thread_local.longest_marker_state = 0;\n' - 'stm_thread_local.longest_marker_time = 0.0;\n' - 'stm_thread_local.longest_marker_self[0] = 0;\n' - 'stm_thread_local.longest_marker_other[0] = 0;') - def stm_rewind_jmp_frame(funcgen, op): if len(op.args) == 0: assert op.result.concretetype is lltype.Void diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -255,48 +255,6 @@ data = cbuilder.cmdexec('') assert 'ok\n' in data - def test_abort_info(self): - class Parent(object): - pass - class Foobar(Parent): - pass - globf = Foobar() - - def setxy(globf, retry_counter): - if retry_counter > 1: - globf.xy = 100 + retry_counter - - def check(_, retry_counter): - setxy(globf, retry_counter) - if retry_counter < 3: - rstm.abort_and_retry() - print rstm.longest_marker_time() - print rstm.longest_abort_info() - rstm.reset_longest_abort_info() - print rstm.longest_abort_info() - return 0 - - PS = lltype.Ptr(lltype.GcStruct('S', ('got_exception', OBJECTPTR))) - perform_transaction = rstm.make_perform_transaction(check, PS) - - def main(argv): - Parent().xy = 0 - globf.xy = -2 - globf.yx = 'hi there %d' % len(argv) - - # make sure perform_transaction breaks the transaction: - rstm.hint_commit_soon() - assert rstm.should_break_transaction() - - perform_transaction(lltype.nullptr(PS.TO)) - return 0 - t, cbuilder = self.compile(main) - data = cbuilder.cmdexec('a b') - # - # 6 == STM_TIME_RUN_ABORTED_OTHER - import re; r = re.compile(r'0.00\d+\n\(6, 0.00\d+, , \)\n\(0, 0.00+, , \)\n$') - assert r.match(data) - def test_weakref(self): import weakref class Foo(object): From noreply at buildbot.pypy.org Sat Oct 4 19:38:09 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 4 Oct 2014 19:38:09 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Kill the logic here too. Message-ID: <20141004173809.8ED4A1C3334@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r73771:0d85ba448840 Date: 2014-10-04 19:37 +0200 http://bitbucket.org/pypy/pypy/changeset/0d85ba448840/ Log: Kill the logic here too. 
diff --git a/lib_pypy/atomic.py b/lib_pypy/atomic.py --- a/lib_pypy/atomic.py +++ b/lib_pypy/atomic.py @@ -17,86 +17,8 @@ def getsegmentlimit(): return 1 - def print_abort_info(mintime=0.0): - pass - def hint_commit_soon(): pass def is_atomic(): return atomic.locked() - - -else: - import re, sys, linecache - - _timing_reasons = [ - "'outside transaction'", - "'run current'", - "'run committed'", - "'run aborted write write'", - "'run aborted write read'", - "'run aborted inevitable'", - "'run aborted other'", - "'wait free segment'", - "'wait write read'", - "'wait inevitable'", - "'wait other'", - "'sync commit soon'", - "'bookkeeping'", - "'minor gc'", - "'major gc'", - "'sync pause'", - ] - _r_line = re.compile(r'File "(.*?)[co]?", line (\d+), in ') - _fullfilenames = {} - - def print_abort_info(mintime=0.0): - info = _thread.longest_abort_info(mintime) - if info is None: - return - - output = [] - with atomic: - output.append("Conflict ") - a, b, c, d = info - try: - reason = _timing_reasons[a] - except IndexError: - reason = "'%s'" % (a,) - output.append(reason) - def show(line): - output.append(" %s\n" % line) - match = _r_line.match(line) - if match and match.group(1) != '?': - filename = match.group(1) - lineno = int(match.group(2)) - if filename.startswith('<') and not filename.endswith('>'): - if filename not in _fullfilenames: - partial = filename[1:] - found = set() - for module in sys.modules.values(): - try: - modfile = object.__getattribute__(module, '__file__') - except Exception: - modfile = None - if type(modfile) is str and modfile.endswith(partial): - found.add(modfile) - if len(found) == 1: - _fullfilenames[filename], = found - else: - _fullfilenames[filename] = None - filename = _fullfilenames[filename] - line = linecache.getline(filename, lineno) - if line: - output.append(" %s\n" % line.strip()) - if d: - output.append(" between two threads:\n") - show(c) - show(d) - else: - output.append(" in this thread:\n") - show(c) - output.append('Lost %.6f seconds.\n\n' % (b,)) - _thread.reset_longest_abort_info() - print >> sys.stderr, "".join(output), From noreply at buildbot.pypy.org Sat Oct 4 21:42:55 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 4 Oct 2014 21:42:55 +0200 (CEST) Subject: [pypy-commit] pypy var-in-Some: fix test_harmonic() Message-ID: <20141004194255.70C931C023E@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: var-in-Some Changeset: r73775:f06b2d847309 Date: 2014-10-04 20:42 +0100 http://bitbucket.org/pypy/pypy/changeset/f06b2d847309/ Log: fix test_harmonic() diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -857,8 +857,11 @@ s = a.build_types(snippet.harmonic, [int]) assert s.knowntype == float # check that the list produced by range() is not mutated or resized - for value in a.bindings.values(): - s_value = value.ann + graph = graphof(a, snippet.harmonic) + all_vars = set().union(*[block.getvariables() for block in graph.iterblocks()]) + print all_vars + for var in all_vars: + s_value = var.binding.ann if isinstance(s_value, annmodel.SomeList): assert not s_value.listdef.listitem.resized assert not s_value.listdef.listitem.mutated From noreply at buildbot.pypy.org Sun Oct 5 03:39:04 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 5 Oct 2014 03:39:04 +0200 (CEST) Subject: [pypy-commit] pypy default: remove 9-year old 'temporary hack' Message-ID: 
<20141005013904.C531A1C02ED@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r73776:b28fcdbb3a97 Date: 2014-10-05 02:35 +0100 http://bitbucket.org/pypy/pypy/changeset/b28fcdbb3a97/ Log: remove 9-year old 'temporary hack' diff --git a/rpython/tool/uid.py b/rpython/tool/uid.py --- a/rpython/tool/uid.py +++ b/rpython/tool/uid.py @@ -1,18 +1,6 @@ import struct, sys -# This is temporary hack to run PyPy on PyPy -# until PyPy's struct module handle P format character. -try: - HUGEVAL_FMT = 'P' - HUGEVAL_BYTES = struct.calcsize('P') -except struct.error: - if sys.maxint <= 2147483647: - HUGEVAL_FMT = 'l' - HUGEVAL_BYTES = 4 - else: - HUGEVAL_FMT = 'q' - HUGEVAL_BYTES = 8 - +HUGEVAL_BYTES = struct.calcsize('P') HUGEVAL = 256 ** HUGEVAL_BYTES @@ -39,7 +27,7 @@ real hash/compare for immutable ones. """ __slots__ = ["key", "value"] - + def __init__(self, value): self.value = value # a concrete value # try to be smart about constant mutable or immutable values @@ -74,7 +62,7 @@ # try to limit the size of the repr to make it more readable r = repr(self.value) if (r.startswith('<') and r.endswith('>') and - hasattr(self.value, '__name__')): + hasattr(self.value, '__name__')): r = '%s %s' % (type(self.value).__name__, self.value.__name__) elif len(r) > 60 or (len(r) > 30 and type(self.value) is not str): r = r[:22] + '...' + r[-7:] From noreply at buildbot.pypy.org Sun Oct 5 03:51:01 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 5 Oct 2014 03:51:01 +0200 (CEST) Subject: [pypy-commit] pypy default: We don't need to support Python 2.4 any more Message-ID: <20141005015101.1DB611C02ED@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r73777:55efd716d985 Date: 2014-10-05 02:50 +0100 http://bitbucket.org/pypy/pypy/changeset/55efd716d985/ Log: We don't need to support Python 2.4 any more diff --git a/rpython/tool/uid.py b/rpython/tool/uid.py --- a/rpython/tool/uid.py +++ b/rpython/tool/uid.py @@ -1,4 +1,4 @@ -import struct, sys +import struct HUGEVAL_BYTES = struct.calcsize('P') HUGEVAL = 256 ** HUGEVAL_BYTES @@ -9,15 +9,7 @@ result += HUGEVAL return result -if sys.version_info < (2, 5): - def uid(obj): - """ - Return the id of an object as an unsigned number so that its hex - representation makes sense - """ - return fixid(id(obj)) -else: - uid = id # guaranteed to be positive from CPython 2.5 onwards +uid = id # guaranteed to be positive from CPython 2.5 onwards class Hashable(object): From noreply at buildbot.pypy.org Sun Oct 5 11:09:25 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 5 Oct 2014 11:09:25 +0200 (CEST) Subject: [pypy-commit] stmgc c7-full-profiling: Works, merging Message-ID: <20141005090925.7F07F1C01D1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-full-profiling Changeset: r1454:a6344eb3dbdd Date: 2014-10-05 11:07 +0200 http://bitbucket.org/pypy/stmgc/changeset/a6344eb3dbdd/ Log: Works, merging From noreply at buildbot.pypy.org Sun Oct 5 11:09:27 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 5 Oct 2014 11:09:27 +0200 (CEST) Subject: [pypy-commit] stmgc default: hg merge c7-full-profiling Message-ID: <20141005090927.5DD551C01D1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1455:cc49f150515a Date: 2014-10-05 11:09 +0200 http://bitbucket.org/pypy/stmgc/changeset/cc49f150515a/ Log: hg merge c7-full-profiling Change the way we get profiling information: now we get all events invoking a callback (transaction start/commit, conflicts, etc.), and a default implementation of this callback writes to 
a log file. Found out that it's not too voluminous anyway. diff too long, truncating to 2000 out of 2085 lines diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c --- a/c7/demo/demo2.c +++ b/c7/demo/demo2.c @@ -3,6 +3,7 @@ #include #include #include +#include #ifdef USE_HTM # include "../../htm-c7/stmgc.h" @@ -59,12 +60,25 @@ } void stmcb_commit_soon() {} -static void expand_marker(char *base, uintptr_t odd_number, - object_t *following_object, - char *outputbuf, size_t outputbufsize) +static void timing_event(stm_thread_local_t *tl, /* the local thread */ + enum stm_event_e event, + stm_loc_marker_t *markers) { - assert(following_object == NULL); - snprintf(outputbuf, outputbufsize, "<%p %lu>", base, odd_number); + static char *event_names[] = { STM_EVENT_NAMES }; + + char buf[1024], *p; + struct timespec tp; + clock_gettime(CLOCK_MONOTONIC, &tp); + + p = buf; + p += sprintf(p, "{%.9f} %p %s", tp.tv_sec + 0.000000001 * tp.tv_nsec, + tl, event_names[event]); + if (markers != NULL) { + p += sprintf(p, ", markers: %lu, %lu", + markers[0].odd_number, markers[1].odd_number); + } + sprintf(p, "\n"); + fputs(buf, stderr); } @@ -108,18 +122,6 @@ stm_start_transaction(&stm_thread_local); - if (stm_thread_local.longest_marker_state != 0) { - fprintf(stderr, "[%p] marker %d for %.6f seconds:\n", - &stm_thread_local, - stm_thread_local.longest_marker_state, - stm_thread_local.longest_marker_time); - fprintf(stderr, "\tself:\t\"%s\"\n\tother:\t\"%s\"\n", - stm_thread_local.longest_marker_self, - stm_thread_local.longest_marker_other); - stm_thread_local.longest_marker_state = 0; - stm_thread_local.longest_marker_time = 0.0; - } - nodeptr_t prev = initial; stm_read((objptr_t)prev); @@ -223,7 +225,6 @@ void unregister_thread_local(void) { - stm_flush_timing(&stm_thread_local, 1); stm_unregister_thread_local(&stm_thread_local); } @@ -303,7 +304,7 @@ stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf); - stmcb_expand_marker = expand_marker; + stmcb_timing_event = timing_event; setup_list(); diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -3,34 +3,50 @@ #endif -enum contention_kind_e { +/* Here are the possible kinds of contention: - /* A write-write contention occurs when we running our transaction - and detect that we are about to write to an object that another - thread is also writing to. This kind of contention must be - resolved before continuing. This *must* abort one of the two - threads: the caller's thread is not at a safe-point, so cannot - wait! */ - WRITE_WRITE_CONTENTION, + STM_CONTENTION_WRITE_WRITE - /* A write-read contention occurs when we are trying to commit: it + A write-write contention occurs when we are running our + transaction and detect that we are about to write to an object + that another thread is also writing to. This kind of + contention must be resolved before continuing. This *must* + abort one of the two threads: the caller's thread is not at a + safe-point, so cannot wait! + + It is reported as a timing event with the following two markers: + the current thread (i.e. where the second-in-time write occurs); + and the other thread (from its 'modified_old_objects_markers', + where the first-in-time write occurred). + + STM_CONTENTION_WRITE_READ + + A write-read contention occurs when we are trying to commit: it means that an object we wrote to was also read by another transaction. 
Even though it would seem obvious that we should just abort the other thread and proceed in our commit, a more subtle answer would be in some cases to wait for the other thread to commit first. It would commit having read the old value, and - then we can commit our change to it. */ - WRITE_READ_CONTENTION, + then we can commit our change to it. - /* An inevitable contention occurs when we're trying to become + It is reported as a timing event with only one marker: the + older location of the write that was done by the current thread. + + STM_CONTENTION_INEVITABLE + + An inevitable contention occurs when we're trying to become inevitable but another thread already is. We can never abort the other thread in this case, but we still have the choice to abort - ourselves or pause until the other thread commits. */ - INEVITABLE_CONTENTION, -}; + ourselves or pause until the other thread commits. + + It is reported with two markers, one for the current thread and + one for the other thread. Each marker gives the location that + attempts to make the transaction inevitable. +*/ + struct contmgr_s { - enum contention_kind_e kind; + enum stm_event_e kind; struct stm_priv_segment_info_s *other_pseg; bool abort_other; bool try_sleep; // XXX add a way to timeout, but should handle repeated @@ -99,7 +115,7 @@ static bool contention_management(uint8_t other_segment_num, - enum contention_kind_e kind, + enum stm_event_e kind, object_t *obj) { assert(_has_mutex()); @@ -109,6 +125,9 @@ if (must_abort()) abort_with_mutex(); + /* Report the contention */ + timing_contention(kind, other_segment_num, obj); + /* Who should abort here: this thread, or the other thread? */ struct contmgr_s contmgr; contmgr.kind = kind; @@ -138,20 +157,9 @@ contmgr.abort_other = false; } - - int wait_category = - kind == WRITE_READ_CONTENTION ? STM_TIME_WAIT_WRITE_READ : - kind == INEVITABLE_CONTENTION ? STM_TIME_WAIT_INEVITABLE : - STM_TIME_WAIT_OTHER; - - int abort_category = - kind == WRITE_WRITE_CONTENTION ? STM_TIME_RUN_ABORTED_WRITE_WRITE : - kind == WRITE_READ_CONTENTION ? STM_TIME_RUN_ABORTED_WRITE_READ : - kind == INEVITABLE_CONTENTION ? STM_TIME_RUN_ABORTED_INEVITABLE : - STM_TIME_RUN_ABORTED_OTHER; - - - if (contmgr.try_sleep && kind != WRITE_WRITE_CONTENTION && + /* Do one of three things here... + */ + if (contmgr.try_sleep && kind != STM_CONTENTION_WRITE_WRITE && contmgr.other_pseg->safe_point != SP_WAIT_FOR_C_TRANSACTION_DONE) { others_may_have_run = true; /* Sleep. @@ -164,14 +172,12 @@ itself already paused here. 
*/ contmgr.other_pseg->signal_when_done = true; - marker_contention(kind, false, other_segment_num, obj); - - change_timing_state(wait_category); /* tell the other to commit ASAP */ signal_other_to_commit_soon(contmgr.other_pseg); dprintf(("pausing...\n")); + cond_signal(C_AT_SAFE_POINT); STM_PSEGMENT->safe_point = SP_WAIT_FOR_C_TRANSACTION_DONE; cond_wait(C_TRANSACTION_DONE); @@ -180,14 +186,6 @@ if (must_abort()) abort_with_mutex(); - - struct stm_priv_segment_info_s *pseg = - get_priv_segment(STM_SEGMENT->segment_num); - double elapsed = - change_timing_state_tl(pseg->pub.running_thread, - STM_TIME_RUN_CURRENT); - marker_copy(pseg->pub.running_thread, pseg, - wait_category, elapsed); } else if (!contmgr.abort_other) { @@ -195,16 +193,13 @@ signal_other_to_commit_soon(contmgr.other_pseg); dprintf(("abort in contention: kind %d\n", kind)); - STM_SEGMENT->nursery_end = abort_category; - marker_contention(kind, false, other_segment_num, obj); abort_with_mutex(); } else { /* We have to signal the other thread to abort, and wait until it does. */ - contmgr.other_pseg->pub.nursery_end = abort_category; - marker_contention(kind, true, other_segment_num, obj); + contmgr.other_pseg->pub.nursery_end = NSE_SIGABORT; int sp = contmgr.other_pseg->safe_point; switch (sp) { @@ -296,7 +291,8 @@ assert(get_priv_segment(other_segment_num)->write_lock_num == prev_owner); - contention_management(other_segment_num, WRITE_WRITE_CONTENTION, obj); + contention_management(other_segment_num, + STM_CONTENTION_WRITE_WRITE, obj); /* now we return into _stm_write_slowpath() and will try again to acquire the write lock on our object. */ @@ -308,10 +304,12 @@ static bool write_read_contention_management(uint8_t other_segment_num, object_t *obj) { - return contention_management(other_segment_num, WRITE_READ_CONTENTION, obj); + return contention_management(other_segment_num, + STM_CONTENTION_WRITE_READ, obj); } static void inevitable_contention_management(uint8_t other_segment_num) { - contention_management(other_segment_num, INEVITABLE_CONTENTION, NULL); + contention_management(other_segment_num, + STM_CONTENTION_INEVITABLE, NULL); } diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -124,17 +124,13 @@ dprintf_test(("write_slowpath %p -> mod_old\n", obj)); - /* First change to this old object from this transaction. + /* Add the current marker, recording where we wrote to this object */ + timing_record_write(); + + /* Change to this old object from this transaction. Add it to the list 'modified_old_objects'. */ LIST_APPEND(STM_PSEGMENT->modified_old_objects, obj); - /* Add the current marker, recording where we wrote to this object */ - uintptr_t marker[2]; - marker_fetch(STM_SEGMENT->running_thread, marker); - STM_PSEGMENT->modified_old_objects_markers = - list_append2(STM_PSEGMENT->modified_old_objects_markers, - marker[0], marker[1]); - release_marker_lock(STM_SEGMENT->segment_base); /* We need to privatize the pages containing the object, if they @@ -328,29 +324,24 @@ STM_SEGMENT->transaction_read_version = 1; } -static void _stm_start_transaction(stm_thread_local_t *tl, bool inevitable) +static uint64_t _global_start_time = 0; + +static void _stm_start_transaction(stm_thread_local_t *tl) { assert(!_stm_in_transaction(tl)); - retry: - if (inevitable) { - wait_for_end_of_inevitable_transaction(tl); - } - - if (!acquire_thread_segment(tl)) - goto retry; + while (!acquire_thread_segment(tl)) + ; /* GS invalid before this point! 
*/ assert(STM_PSEGMENT->safe_point == SP_NO_TRANSACTION); assert(STM_PSEGMENT->transaction_state == TS_NONE); - change_timing_state(STM_TIME_RUN_CURRENT); - STM_PSEGMENT->start_time = tl->_timing_cur_start; + timing_event(tl, STM_TRANSACTION_START); + STM_PSEGMENT->start_time = _global_start_time++; STM_PSEGMENT->signalled_to_commit_soon = false; STM_PSEGMENT->safe_point = SP_RUNNING; - STM_PSEGMENT->marker_inev[1] = 0; - if (inevitable) - marker_fetch_inev(); - STM_PSEGMENT->transaction_state = (inevitable ? TS_INEVITABLE : TS_REGULAR); + STM_PSEGMENT->marker_inev.object = NULL; + STM_PSEGMENT->transaction_state = TS_REGULAR; #ifndef NDEBUG STM_PSEGMENT->running_pthread = pthread_self(); #endif @@ -399,14 +390,21 @@ #else long repeat_count = stm_rewind_jmp_setjmp(tl); #endif - _stm_start_transaction(tl, false); + _stm_start_transaction(tl); return repeat_count; } void stm_start_inevitable_transaction(stm_thread_local_t *tl) { - s_mutex_lock(); - _stm_start_transaction(tl, true); + /* used to be more efficient, starting directly an inevitable transaction, + but there is no real point any more, I believe */ + rewind_jmp_buf rjbuf; + stm_rewind_jmp_enterframe(tl, &rjbuf); + + stm_start_transaction(tl); + stm_become_inevitable(tl, "start_inevitable_transaction"); + + stm_rewind_jmp_leaveframe(tl, &rjbuf); } @@ -449,7 +447,10 @@ return true; } /* we aborted the other transaction without waiting, so - we can just continue */ + we can just break out of this loop on + modified_old_objects and continue with the next + segment */ + break; } })); } @@ -783,13 +784,13 @@ list_clear(STM_PSEGMENT->modified_old_objects_markers); } -static void _finish_transaction(int attribute_to) +static void _finish_transaction(enum stm_event_e event) { STM_PSEGMENT->safe_point = SP_NO_TRANSACTION; STM_PSEGMENT->transaction_state = TS_NONE; /* marker_inev is not needed anymore */ - STM_PSEGMENT->marker_inev[1] = 0; + STM_PSEGMENT->marker_inev.object = NULL; /* reset these lists to NULL for the next transaction */ _verify_cards_cleared_in_all_lists(get_priv_segment(STM_SEGMENT->segment_num)); @@ -797,9 +798,9 @@ list_clear(STM_PSEGMENT->old_objects_with_cards); LIST_FREE(STM_PSEGMENT->large_overflow_objects); - timing_end_transaction(attribute_to); + stm_thread_local_t *tl = STM_SEGMENT->running_thread; + timing_event(tl, event); - stm_thread_local_t *tl = STM_SEGMENT->running_thread; release_thread_segment(tl); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */ } @@ -812,9 +813,6 @@ minor_collection(/*commit=*/ true); - /* the call to minor_collection() above leaves us with - STM_TIME_BOOKKEEPING */ - /* synchronize overflow objects living in privatized pages */ push_overflow_objects_from_privatized_pages(); @@ -838,9 +836,9 @@ /* if a major collection is required, do it here */ if (is_major_collection_requested()) { - int oldstate = change_timing_state(STM_TIME_MAJOR_GC); + timing_event(STM_SEGMENT->running_thread, STM_GC_MAJOR_START); major_collection_now_at_safe_point(); - change_timing_state(oldstate); + timing_event(STM_SEGMENT->running_thread, STM_GC_MAJOR_DONE); } /* synchronize modified old objects to other threads */ @@ -867,7 +865,7 @@ } /* done */ - _finish_transaction(STM_TIME_RUN_COMMITTED); + _finish_transaction(STM_TRANSACTION_COMMIT); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! 
*/ s_mutex_unlock(); @@ -960,10 +958,6 @@ (int)pseg->transaction_state); } - /* if we don't have marker information already, look up and preserve - the marker information from the shadowstack as a string */ - marker_default_for_abort(pseg); - /* throw away the content of the nursery */ long bytes_in_nursery = throw_away_nursery(pseg); @@ -1052,16 +1046,13 @@ /* invoke the callbacks */ invoke_and_clear_user_callbacks(1); /* for abort */ - int attribute_to = STM_TIME_RUN_ABORTED_OTHER; - if (is_abort(STM_SEGMENT->nursery_end)) { /* done aborting */ - attribute_to = STM_SEGMENT->nursery_end; STM_SEGMENT->nursery_end = pause_signalled ? NSE_SIGPAUSE : NURSERY_END; } - _finish_transaction(attribute_to); + _finish_transaction(STM_TRANSACTION_ABORT); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */ /* Broadcast C_ABORTED to wake up contention.c */ @@ -1103,8 +1094,8 @@ if (STM_PSEGMENT->transaction_state == TS_REGULAR) { dprintf(("become_inevitable: %s\n", msg)); - marker_fetch_inev(); - wait_for_end_of_inevitable_transaction(NULL); + timing_fetch_inev(); + wait_for_end_of_inevitable_transaction(); STM_PSEGMENT->transaction_state = TS_INEVITABLE; stm_rewind_jmp_forget(STM_SEGMENT->running_thread); invoke_and_clear_user_callbacks(0); /* for commit */ diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -138,7 +138,7 @@ /* Start time: to know approximately for how long a transaction has been running, in contention management */ - double start_time; + uint64_t start_time; /* This is the number stored in the overflowed objects (a multiple of GCFLAG_OVERFLOW_NUMBER_bit0). It is incremented when the @@ -196,10 +196,8 @@ pthread_t running_pthread; #endif - /* Temporarily stores the marker information */ - char marker_self[_STM_MARKER_LEN]; - char marker_other[_STM_MARKER_LEN]; - uintptr_t marker_inev[2]; /* marker where this thread became inevitable */ + /* marker where this thread became inevitable */ + stm_loc_marker_t marker_inev; }; enum /* safe_point */ { diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c --- a/c7/stm/forksupport.c +++ b/c7/stm/forksupport.c @@ -55,14 +55,12 @@ s_mutex_unlock(); bool was_in_transaction = _stm_in_transaction(this_tl); - if (was_in_transaction) { - stm_become_inevitable(this_tl, "fork"); - /* Note that the line above can still fail and abort, which should - be fine */ - } - else { - stm_start_inevitable_transaction(this_tl); - } + if (!was_in_transaction) + stm_start_transaction(this_tl); + + stm_become_inevitable(this_tl, "fork"); + /* Note that the line above can still fail and abort, which should + be fine */ s_mutex_lock(); synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK); @@ -187,7 +185,6 @@ #ifndef NDEBUG pr->running_pthread = pthread_self(); #endif - strcpy(pr->marker_self, "fork"); tl->shadowstack = NULL; pr->shadowstack_at_start_of_transaction = NULL; stm_rewind_jmp_forget(tl); @@ -204,6 +201,9 @@ just release these locks early */ s_mutex_unlock(); + /* Open a new profiling file, if any */ + forksupport_open_new_profiling_file(); + /* Move the copy of the mmap over the old one, overwriting it and thus freeing the old mapping in this process */ diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -141,7 +141,7 @@ if (is_major_collection_requested()) { /* if still true */ - int oldstate = change_timing_state(STM_TIME_MAJOR_GC); + timing_event(STM_SEGMENT->running_thread, STM_GC_MAJOR_START); synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK); @@ -149,7 +149,7 
@@ major_collection_now_at_safe_point(); } - change_timing_state(oldstate); + timing_event(STM_SEGMENT->running_thread, STM_GC_MAJOR_DONE); } s_mutex_unlock(); @@ -446,9 +446,9 @@ for (i = list_count(lst); i > 0; i -= 2) { mark_visit_object((object_t *)list_item(lst, i - 1), base); } - if (get_priv_segment(j)->marker_inev[1]) { - uintptr_t marker_inev_obj = get_priv_segment(j)->marker_inev[1]; - mark_visit_object((object_t *)marker_inev_obj, base); + if (get_priv_segment(j)->marker_inev.segment_base) { + object_t *marker_inev_obj = get_priv_segment(j)->marker_inev.object; + mark_visit_object(marker_inev_obj, base); } } } diff --git a/c7/stm/marker.c b/c7/stm/marker.c --- a/c7/stm/marker.c +++ b/c7/stm/marker.c @@ -3,18 +3,11 @@ #endif -void (*stmcb_expand_marker)(char *segment_base, uintptr_t odd_number, - object_t *following_object, - char *outputbuf, size_t outputbufsize); - -void (*stmcb_debug_print)(const char *cause, double time, - const char *marker); - - -static void marker_fetch(stm_thread_local_t *tl, uintptr_t marker[2]) +static void marker_fetch(stm_loc_marker_t *out_marker) { - /* fetch the current marker from the tl's shadow stack, - and return it in 'marker[2]'. */ + /* Fetch the current marker from the 'out_marker->tl's shadow stack, + and return it in 'out_marker->odd_number' and 'out_marker->object'. */ + stm_thread_local_t *tl = out_marker->tl; struct stm_shadowentry_s *current = tl->shadowstack - 1; struct stm_shadowentry_s *base = tl->shadowstack_base; @@ -28,85 +21,31 @@ } if (current != base) { /* found the odd marker */ - marker[0] = (uintptr_t)current[0].ss; - marker[1] = (uintptr_t)current[1].ss; + out_marker->odd_number = (uintptr_t)current[0].ss; + out_marker->object = current[1].ss; } else { /* no marker found */ - marker[0] = 0; - marker[1] = 0; + out_marker->odd_number = 0; + out_marker->object = NULL; } } -static void marker_expand(uintptr_t marker[2], char *segment_base, - char *outmarker) +static void _timing_fetch_inev(void) { - /* Expand the marker given by 'marker[2]' into a full string. This - works assuming that the marker was produced inside the segment - given by 'segment_base'. If that's from a different thread, you - must first acquire the corresponding 'marker_lock'. */ - assert(_has_mutex()); - outmarker[0] = 0; - if (marker[0] == 0) - return; /* no marker entry found */ - if (stmcb_expand_marker != NULL) { - stmcb_expand_marker(segment_base, marker[0], (object_t *)marker[1], - outmarker, _STM_MARKER_LEN); - } + stm_loc_marker_t marker; + marker.tl = STM_SEGMENT->running_thread; + marker_fetch(&marker); + STM_PSEGMENT->marker_inev.odd_number = marker.odd_number; + STM_PSEGMENT->marker_inev.object = marker.object; } -static void marker_default_for_abort(struct stm_priv_segment_info_s *pseg) +static void marker_fetch_obj_write(object_t *obj, stm_loc_marker_t *out_marker) { - if (pseg->marker_self[0] != 0) - return; /* already collected an entry */ - - uintptr_t marker[2]; - marker_fetch(pseg->pub.running_thread, marker); - marker_expand(marker, pseg->pub.segment_base, pseg->marker_self); - pseg->marker_other[0] = 0; -} - -char *_stm_expand_marker(void) -{ - /* for tests only! 
*/ - static char _result[_STM_MARKER_LEN]; - uintptr_t marker[2]; - _result[0] = 0; - s_mutex_lock(); - marker_fetch(STM_SEGMENT->running_thread, marker); - marker_expand(marker, STM_SEGMENT->segment_base, _result); - s_mutex_unlock(); - return _result; -} - -static void marker_copy(stm_thread_local_t *tl, - struct stm_priv_segment_info_s *pseg, - enum stm_time_e attribute_to, double time) -{ - /* Copies the marker information from pseg to tl. This is called - indirectly from abort_with_mutex(), but only if the lost time is - greater than that of the previous recorded marker. By contrast, - pseg->marker_self has been filled already in all cases. The - reason for the two steps is that we must fill pseg->marker_self - earlier than now (some objects may be GCed), but we only know - here the total time it gets attributed. + /* From 'out_marker->tl', fill in 'out_marker->segment_base' and + 'out_marker->odd_number' and 'out_marker->object' from the + marker associated with writing the 'obj'. */ - if (stmcb_debug_print) { - stmcb_debug_print(timer_names[attribute_to], time, pseg->marker_self); - } - if (time * 0.99 > tl->longest_marker_time) { - tl->longest_marker_state = attribute_to; - tl->longest_marker_time = time; - memcpy(tl->longest_marker_self, pseg->marker_self, _STM_MARKER_LEN); - memcpy(tl->longest_marker_other, pseg->marker_other, _STM_MARKER_LEN); - } - pseg->marker_self[0] = 0; - pseg->marker_other[0] = 0; -} - -static void marker_fetch_obj_write(uint8_t in_segment_num, object_t *obj, - uintptr_t marker[2]) -{ assert(_has_mutex()); /* here, we acquired the other thread's marker_lock, which means that: @@ -118,80 +57,86 @@ the global mutex_lock at this point too). */ long i; + int in_segment_num = out_marker->tl->associated_segment_num; struct stm_priv_segment_info_s *pseg = get_priv_segment(in_segment_num); struct list_s *mlst = pseg->modified_old_objects; struct list_s *mlstm = pseg->modified_old_objects_markers; - for (i = list_count(mlst); --i >= 0; ) { + assert(list_count(mlstm) <= 2 * list_count(mlst)); + for (i = list_count(mlstm) / 2; --i >= 0; ) { if (list_item(mlst, i) == (uintptr_t)obj) { - assert(list_count(mlstm) == 2 * list_count(mlst)); - marker[0] = list_item(mlstm, i * 2 + 0); - marker[1] = list_item(mlstm, i * 2 + 1); + out_marker->odd_number = list_item(mlstm, i * 2 + 0); + out_marker->object = (object_t *)list_item(mlstm, i * 2 + 1); return; } } - marker[0] = 0; - marker[1] = 0; + out_marker->odd_number = 0; + out_marker->object = NULL; } -static void marker_contention(int kind, bool abort_other, - uint8_t other_segment_num, object_t *obj) +static void _timing_record_write(void) { - uintptr_t self_marker[2]; - uintptr_t other_marker[2]; - struct stm_priv_segment_info_s *my_pseg, *other_pseg; + stm_loc_marker_t marker; + marker.tl = STM_SEGMENT->running_thread; + marker_fetch(&marker); - my_pseg = get_priv_segment(STM_SEGMENT->segment_num); + long base_count = list_count(STM_PSEGMENT->modified_old_objects); + struct list_s *mlstm = STM_PSEGMENT->modified_old_objects_markers; + while (list_count(mlstm) < 2 * base_count) { + mlstm = list_append2(mlstm, 0, 0); + } + mlstm = list_append2(mlstm, marker.odd_number, (uintptr_t)marker.object); + STM_PSEGMENT->modified_old_objects_markers = mlstm; +} + +static void _timing_contention(enum stm_event_e kind, + uint8_t other_segment_num, object_t *obj) +{ + struct stm_priv_segment_info_s *other_pseg; other_pseg = get_priv_segment(other_segment_num); - char *my_segment_base = STM_SEGMENT->segment_base; - char 
*other_segment_base = get_segment_base(other_segment_num); + char *other_segment_base = other_pseg->pub.segment_base; + acquire_marker_lock(other_segment_base); - acquire_marker_lock(other_segment_base); + stm_loc_marker_t markers[2]; /* Collect the location for myself. It's usually the current location, except in a write-read abort, in which case it's the older location of the write. */ - if (kind == WRITE_READ_CONTENTION) - marker_fetch_obj_write(my_pseg->pub.segment_num, obj, self_marker); + markers[0].tl = STM_SEGMENT->running_thread; + markers[0].segment_base = STM_SEGMENT->segment_base; + + if (kind == STM_CONTENTION_WRITE_READ) + marker_fetch_obj_write(obj, &markers[0]); else - marker_fetch(my_pseg->pub.running_thread, self_marker); - - /* Expand this location into either my_pseg->marker_self or - other_pseg->marker_other, depending on who aborts. */ - marker_expand(self_marker, my_segment_base, - abort_other ? other_pseg->marker_other - : my_pseg->marker_self); + marker_fetch(&markers[0]); /* For some categories, we can also collect the relevant information for the other segment. */ - char *outmarker = abort_other ? other_pseg->marker_self - : my_pseg->marker_other; + markers[1].tl = other_pseg->pub.running_thread; + markers[1].segment_base = other_pseg->pub.segment_base; + switch (kind) { - case WRITE_WRITE_CONTENTION: - marker_fetch_obj_write(other_segment_num, obj, other_marker); - marker_expand(other_marker, other_segment_base, outmarker); + case STM_CONTENTION_WRITE_WRITE: + marker_fetch_obj_write(obj, &markers[1]); break; - case INEVITABLE_CONTENTION: - assert(abort_other == false); - other_marker[0] = other_pseg->marker_inev[0]; - other_marker[1] = other_pseg->marker_inev[1]; - marker_expand(other_marker, other_segment_base, outmarker); - break; - case WRITE_READ_CONTENTION: - strcpy(outmarker, ""); + case STM_CONTENTION_INEVITABLE: + markers[1].odd_number = other_pseg->marker_inev.odd_number; + markers[1].object = other_pseg->marker_inev.object; break; default: - outmarker[0] = 0; + markers[1].odd_number = 0; + markers[1].object = NULL; break; } + stmcb_timing_event(markers[0].tl, kind, markers); + + /* only release the lock after stmcb_timing_event(), otherwise it could + run into race conditions trying to interpret 'markers[1].object' */ release_marker_lock(other_segment_base); } -static void marker_fetch_inev(void) -{ - uintptr_t marker[2]; - marker_fetch(STM_SEGMENT->running_thread, marker); - STM_PSEGMENT->marker_inev[0] = marker[0]; - STM_PSEGMENT->marker_inev[1] = marker[1]; -} + +void (*stmcb_timing_event)(stm_thread_local_t *tl, /* the local thread */ + enum stm_event_e event, + stm_loc_marker_t *markers); diff --git a/c7/stm/marker.h b/c7/stm/marker.h --- a/c7/stm/marker.h +++ b/c7/stm/marker.h @@ -1,12 +1,19 @@ -static void marker_fetch(stm_thread_local_t *tl, uintptr_t marker[2]); -static void marker_fetch_inev(void); -static void marker_expand(uintptr_t marker[2], char *segment_base, - char *outmarker); -static void marker_default_for_abort(struct stm_priv_segment_info_s *pseg); -static void marker_copy(stm_thread_local_t *tl, - struct stm_priv_segment_info_s *pseg, - enum stm_time_e attribute_to, double time); +static void _timing_record_write(void); +static void _timing_fetch_inev(void); +static void _timing_contention(enum stm_event_e kind, + uint8_t other_segment_num, object_t *obj); -static void marker_contention(int kind, bool abort_other, - uint8_t other_segment_num, object_t *obj); + +#define timing_event(tl, event) \ + (stmcb_timing_event != NULL ? 
stmcb_timing_event(tl, event, NULL) : (void)0) + +#define timing_record_write() \ + (stmcb_timing_event != NULL ? _timing_record_write() : (void)0) + +#define timing_fetch_inev() \ + (stmcb_timing_event != NULL ? _timing_fetch_inev() : (void)0) + +#define timing_contention(kind, other_segnum, obj) \ + (stmcb_timing_event != NULL ? \ + _timing_contention(kind, other_segnum, obj) : (void)0) diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -425,11 +425,13 @@ for (i = num_old + 1; i < total; i += 2) { minor_trace_if_young((object_t **)list_ptr_to_item(mlst, i)); } - if (STM_PSEGMENT->marker_inev[1]) { - uintptr_t *pmarker_inev_obj = (uintptr_t *) + if (STM_PSEGMENT->marker_inev.segment_base) { + assert(STM_PSEGMENT->marker_inev.segment_base == + STM_SEGMENT->segment_base); + object_t **pmarker_inev_obj = (object_t **) REAL_ADDRESS(STM_SEGMENT->segment_base, - &STM_PSEGMENT->marker_inev[1]); - minor_trace_if_young((object_t **)pmarker_inev_obj); + &STM_PSEGMENT->marker_inev.object); + minor_trace_if_young(pmarker_inev_obj); } } @@ -572,11 +574,11 @@ stm_safe_point(); - change_timing_state(STM_TIME_MINOR_GC); + timing_event(STM_SEGMENT->running_thread, STM_GC_MINOR_START); _do_minor_collection(commit); - change_timing_state(commit ? STM_TIME_BOOKKEEPING : STM_TIME_RUN_CURRENT); + timing_event(STM_SEGMENT->running_thread, STM_GC_MINOR_DONE); } void stm_collect(long level) diff --git a/c7/stm/nursery.h b/c7/stm/nursery.h --- a/c7/stm/nursery.h +++ b/c7/stm/nursery.h @@ -1,7 +1,13 @@ -/* '_stm_nursery_section_end' is either NURSERY_END or NSE_SIGxxx */ -#define NSE_SIGPAUSE STM_TIME_WAIT_OTHER -#define NSE_SIGCOMMITSOON STM_TIME_SYNC_COMMIT_SOON +/* 'nursery_end' is either NURSERY_END or one of NSE_SIGxxx */ +#define NSE_SIGABORT 1 +#define NSE_SIGPAUSE 2 +#define NSE_SIGCOMMITSOON 3 +#define _NSE_NUM_SIGNALS 4 + +#if _NSE_NUM_SIGNALS >= _STM_NSE_SIGNAL_MAX +# error "increase _STM_NSE_SIGNAL_MAX" +#endif static uint32_t highest_overflow_number; diff --git a/c7/stm/prof.c b/c7/stm/prof.c new file mode 100644 --- /dev/null +++ b/c7/stm/prof.c @@ -0,0 +1,102 @@ +#include + + +static FILE *profiling_file; +static char *profiling_basefn = NULL; +static int (*profiling_expand_marker)(stm_loc_marker_t *, char *, int); + + +static void _stm_profiling_event(stm_thread_local_t *tl, + enum stm_event_e event, + stm_loc_marker_t *markers) +{ + struct buf_s { + uint32_t tv_sec; + uint32_t tv_nsec; + uint32_t thread_num; + uint8_t event; + uint8_t marker_length[2]; + char extra[256]; + } __attribute__((packed)); + + struct buf_s buf; + struct timespec t; + clock_gettime(CLOCK_MONOTONIC, &t); + buf.tv_sec = t.tv_sec; + buf.tv_nsec = t.tv_nsec; + buf.thread_num = tl->thread_local_counter; + buf.event = event; + + int len0 = 0; + int len1 = 0; + if (markers != NULL) { + if (markers[0].odd_number != 0) + len0 = profiling_expand_marker(&markers[0], buf.extra, 128); + if (markers[1].odd_number != 0) + len1 = profiling_expand_marker(&markers[1], buf.extra + len0, 128); + } + buf.marker_length[0] = len0; + buf.marker_length[1] = len1; + + fwrite(&buf, offsetof(struct buf_s, extra) + len0 + len1, + 1, profiling_file); +} + +static int default_expand_marker(stm_loc_marker_t *m, char *p, int s) +{ + *(uintptr_t *)p = m->odd_number; + return sizeof(uintptr_t); +} + +static bool open_timing_log(const char *filename) +{ + profiling_file = fopen(filename, "w"); + if (profiling_file == NULL) + return false; + + fwrite("STMGC-C7-PROF01\n", 16, 1, profiling_file); + 
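Each record emitted by _stm_profiling_event() above is the packed 15-byte fixed part of 'struct buf_s' followed by 0-256 bytes of expanded marker data, written after an initial 16-byte "STMGC-C7-PROF01\n" file header. A sketch of a matching reader, assuming only that layout (note that a later commit in this same thread adds an other_thread_num field, growing the fixed part to 19 bytes):

#include <stdio.h>
#include <stdint.h>

struct prof_head_s {                  /* must mirror 'struct buf_s' above */
    uint32_t tv_sec, tv_nsec, thread_num;
    uint8_t event;
    uint8_t marker_length[2];
} __attribute__((packed));            /* 15 bytes in the file */

static int read_one_event(FILE *f)    /* call after skipping the 16-byte header */
{
    struct prof_head_s h;
    char extra[256];
    if (fread(&h, sizeof(h), 1, f) != 1)
        return 0;                                          /* end of log */
    size_t n = (size_t)h.marker_length[0] + h.marker_length[1];
    if (fread(extra, 1, n, f) != n)
        return 0;
    printf("%u.%09u thread %u event %u\n",
           h.tv_sec, h.tv_nsec, h.thread_num, (unsigned)h.event);
    return 1;
}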
stmcb_timing_event = _stm_profiling_event; + return true; +} + +static bool close_timing_log(void) +{ + if (stmcb_timing_event == &_stm_profiling_event) { + stmcb_timing_event = NULL; + fclose(profiling_file); + profiling_file = NULL; + return true; + } + return false; +} + +static void forksupport_open_new_profiling_file(void) +{ + if (close_timing_log() && profiling_basefn != NULL) { + char filename[1024]; + snprintf(filename, sizeof(filename), + "%s.fork%ld", profiling_basefn, (long)getpid()); + open_timing_log(filename); + } +} + +int stm_set_timing_log(const char *profiling_file_name, + int expand_marker(stm_loc_marker_t *, char *, int)) +{ + close_timing_log(); + free(profiling_basefn); + profiling_basefn = NULL; + + if (profiling_file_name == NULL) + return 0; + + if (!expand_marker) + expand_marker = default_expand_marker; + profiling_expand_marker = expand_marker; + + if (!open_timing_log(profiling_file_name)) + return -1; + + profiling_basefn = strdup(profiling_file_name); + return 0; +} diff --git a/c7/stm/prof.h b/c7/stm/prof.h new file mode 100644 --- /dev/null +++ b/c7/stm/prof.h @@ -0,0 +1,2 @@ + +static void forksupport_open_new_profiling_file(void); diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -22,8 +22,8 @@ static char *setup_mmap(char *reason, int *map_fd) { char name[128]; - sprintf(name, "/stmgc-c7-bigmem-%ld-%.18e", - (long)getpid(), get_stm_time()); + sprintf(name, "/stmgc-c7-bigmem-%ld", + (long)getpid()); /* Create the big shared memory object, and immediately unlink it. There is a small window where if this process is killed the @@ -225,6 +225,8 @@ return (pthread_t *)(tl->creating_pthread); } +static int thread_local_counters = 0; + void stm_register_thread_local(stm_thread_local_t *tl) { int num; @@ -241,14 +243,13 @@ num = tl->prev->associated_segment_num; } tl->thread_local_obj = NULL; - tl->_timing_cur_state = STM_TIME_OUTSIDE_TRANSACTION; - tl->_timing_cur_start = get_stm_time(); /* assign numbers consecutively, but that's for tests; we could also assign the same number to all of them and they would get their own numbers automatically. */ num = (num % NB_SEGMENTS) + 1; tl->associated_segment_num = num; + tl->thread_local_counter = ++thread_local_counters; *_get_cpth(tl) = pthread_self(); _init_shadow_stack(tl); set_gs_register(get_segment_base(num)); diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -123,32 +123,19 @@ /************************************************************/ -static void wait_for_end_of_inevitable_transaction( - stm_thread_local_t *tl_or_null_if_can_abort) +static void wait_for_end_of_inevitable_transaction(void) { long i; restart: for (i = 1; i <= NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *other_pseg = get_priv_segment(i); if (other_pseg->transaction_state == TS_INEVITABLE) { - if (tl_or_null_if_can_abort == NULL) { - /* handle this case like a contention: it will either - abort us (not the other thread, which is inevitable), - or wait for a while. If we go past this call, then we - waited; in this case we have to re-check if no other - thread is inevitable. 
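stm_set_timing_log() above is the whole public interface of prof.c: a file name plus an optional expand_marker installs the logging callback, and a NULL file name removes it again. A usage sketch, in which the file name and the text-based expand_marker are assumptions rather than anything prescribed by the patch:

#include <stdio.h>
#include "stmgc.h"

/* render a marker as its decimal odd number, at most 'length' bytes */
static int text_expand_marker(stm_loc_marker_t *m, char *p, int length)
{
    int n = snprintf(p, length, "%lu", (unsigned long)m->odd_number);
    return (n < 0) ? 0 : (n >= length ? length : n);
}

void start_profiling(void)
{
    if (stm_set_timing_log("stm.prof", text_expand_marker) < 0)
        perror("stm_set_timing_log");        /* -1 means: see errno */
}

void stop_profiling(void)
{
    stm_set_timing_log(NULL, NULL);   /* closes the file, uninstalls the hook */
}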
*/ - inevitable_contention_management(i); - } - else { - /* wait for stm_commit_transaction() to finish this - inevitable transaction */ - signal_other_to_commit_soon(other_pseg); - change_timing_state_tl(tl_or_null_if_can_abort, - STM_TIME_WAIT_INEVITABLE); - cond_wait(C_INEVITABLE); - /* don't bother changing the timing state again: the caller - will very soon go to STM_TIME_RUN_CURRENT */ - } + /* handle this case like a contention: it will either + abort us (not the other thread, which is inevitable), + or wait for a while. If we go past this call, then we + waited; in this case we have to re-check if no other + thread is inevitable. */ + inevitable_contention_management(i); goto restart; } } @@ -188,8 +175,9 @@ } /* No segment available. Wait until release_thread_segment() signals that one segment has been freed. */ - change_timing_state_tl(tl, STM_TIME_WAIT_FREE_SEGMENT); + timing_event(tl, STM_WAIT_FREE_SEGMENT); cond_wait(C_SEGMENT_FREE); + timing_event(tl, STM_WAIT_DONE); /* Return false to the caller, which will call us again */ return false; @@ -331,7 +319,6 @@ if (STM_SEGMENT->nursery_end == NURSERY_END) return; /* fast path: no safe point requested */ - int previous_state = -1; assert(_seems_to_be_running_transaction()); assert(_has_mutex()); while (1) { @@ -342,10 +329,6 @@ break; /* no safe point requested */ if (STM_SEGMENT->nursery_end == NSE_SIGCOMMITSOON) { - if (previous_state == -1) { - previous_state = change_timing_state(STM_TIME_SYNC_COMMIT_SOON); - } - STM_PSEGMENT->signalled_to_commit_soon = true; stmcb_commit_soon(); if (!pause_signalled) { @@ -362,17 +345,12 @@ #ifdef STM_TESTS abort_with_mutex(); #endif - if (previous_state == -1) { - previous_state = change_timing_state(STM_TIME_SYNC_PAUSE); - } + timing_event(STM_SEGMENT->running_thread, STM_WAIT_SYNC_PAUSE); cond_signal(C_AT_SAFE_POINT); STM_PSEGMENT->safe_point = SP_WAIT_FOR_C_REQUEST_REMOVED; cond_wait(C_REQUEST_REMOVED); STM_PSEGMENT->safe_point = SP_RUNNING; - } - - if (previous_state != -1) { - change_timing_state(previous_state); + timing_event(STM_SEGMENT->running_thread, STM_WAIT_DONE); } } diff --git a/c7/stm/sync.h b/c7/stm/sync.h --- a/c7/stm/sync.h +++ b/c7/stm/sync.h @@ -28,7 +28,7 @@ static bool acquire_thread_segment(stm_thread_local_t *tl); static void release_thread_segment(stm_thread_local_t *tl); -static void wait_for_end_of_inevitable_transaction(stm_thread_local_t *); +static void wait_for_end_of_inevitable_transaction(void); enum sync_type_e { STOP_OTHERS_UNTIL_MUTEX_UNLOCK, diff --git a/c7/stm/timing.c b/c7/stm/timing.c deleted file mode 100644 --- a/c7/stm/timing.c +++ /dev/null @@ -1,91 +0,0 @@ -#ifndef _STM_CORE_H_ -# error "must be compiled via stmgc.c" -#endif - - -static inline void add_timing(stm_thread_local_t *tl, enum stm_time_e category, - double elapsed) -{ - tl->timing[category] += elapsed; - tl->events[category] += 1; -} - -#define TIMING_CHANGE(tl, newstate) \ - double curtime = get_stm_time(); \ - double elasped = curtime - tl->_timing_cur_start; \ - enum stm_time_e oldstate = tl->_timing_cur_state; \ - add_timing(tl, oldstate, elasped); \ - tl->_timing_cur_state = newstate; \ - tl->_timing_cur_start = curtime - -static enum stm_time_e change_timing_state(enum stm_time_e newstate) -{ - stm_thread_local_t *tl = STM_SEGMENT->running_thread; - TIMING_CHANGE(tl, newstate); - return oldstate; -} - -static double change_timing_state_tl(stm_thread_local_t *tl, - enum stm_time_e newstate) -{ - TIMING_CHANGE(tl, newstate); - return elasped; -} - -static void 
timing_end_transaction(enum stm_time_e attribute_to) -{ - stm_thread_local_t *tl = STM_SEGMENT->running_thread; - TIMING_CHANGE(tl, STM_TIME_OUTSIDE_TRANSACTION); - double time_this_transaction = tl->timing[STM_TIME_RUN_CURRENT]; - add_timing(tl, attribute_to, time_this_transaction); - tl->timing[STM_TIME_RUN_CURRENT] = 0.0f; - - if (attribute_to != STM_TIME_RUN_COMMITTED) { - struct stm_priv_segment_info_s *pseg = - get_priv_segment(STM_SEGMENT->segment_num); - marker_copy(tl, pseg, attribute_to, time_this_transaction); - } -} - -static const char *timer_names[] = { - "outside transaction", - "run current", - "run committed", - "run aborted write write", - "run aborted write read", - "run aborted inevitable", - "run aborted other", - "wait free segment", - "wait write read", - "wait inevitable", - "wait other", - "sync commit soon", - "bookkeeping", - "minor gc", - "major gc", - "sync pause", -}; - -void stm_flush_timing(stm_thread_local_t *tl, int verbose) -{ - enum stm_time_e category = tl->_timing_cur_state; - uint64_t oldevents = tl->events[category]; - TIMING_CHANGE(tl, category); - tl->events[category] = oldevents; - - assert((sizeof(timer_names) / sizeof(timer_names[0])) == _STM_TIME_N); - if (verbose > 0) { - int i; - s_mutex_lock(); - fprintf(stderr, "thread %p:\n", tl); - for (i = 0; i < _STM_TIME_N; i++) { - fprintf(stderr, " %-24s %9u %8.3f s\n", - timer_names[i], tl->events[i], (double)tl->timing[i]); - } - fprintf(stderr, " %-24s %6s %11.6f s\n", - "longest recorded marker", "", tl->longest_marker_time); - fprintf(stderr, " \"%.*s\"\n", - (int)_STM_MARKER_LEN, tl->longest_marker_self); - s_mutex_unlock(); - } -} diff --git a/c7/stm/timing.h b/c7/stm/timing.h deleted file mode 100644 --- a/c7/stm/timing.h +++ /dev/null @@ -1,14 +0,0 @@ -#include - -static inline double get_stm_time(void) -{ - struct timespec tp; - clock_gettime(CLOCK_MONOTONIC, &tp); - return tp.tv_sec + tp.tv_nsec * 0.000000001; -} - -static enum stm_time_e change_timing_state(enum stm_time_e newstate); -static double change_timing_state_tl(stm_thread_local_t *tl, - enum stm_time_e newstate); - -static void timing_end_transaction(enum stm_time_e attribute_to); diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -14,8 +14,8 @@ #include "stm/extra.h" #include "stm/fprintcolor.h" #include "stm/weakref.h" -#include "stm/timing.h" #include "stm/marker.h" +#include "stm/prof.h" #include "stm/misc.c" #include "stm/list.c" @@ -34,6 +34,6 @@ #include "stm/extra.c" #include "stm/fprintcolor.c" #include "stm/weakref.c" -#include "stm/timing.c" #include "stm/marker.c" +#include "stm/prof.c" #include "stm/rewind_setjmp.c" diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -54,28 +54,6 @@ object_t *ss; }; -enum stm_time_e { - STM_TIME_OUTSIDE_TRANSACTION, - STM_TIME_RUN_CURRENT, - STM_TIME_RUN_COMMITTED, - STM_TIME_RUN_ABORTED_WRITE_WRITE, - STM_TIME_RUN_ABORTED_WRITE_READ, - STM_TIME_RUN_ABORTED_INEVITABLE, - STM_TIME_RUN_ABORTED_OTHER, - STM_TIME_WAIT_FREE_SEGMENT, - STM_TIME_WAIT_WRITE_READ, - STM_TIME_WAIT_INEVITABLE, - STM_TIME_WAIT_OTHER, - STM_TIME_SYNC_COMMIT_SOON, - STM_TIME_BOOKKEEPING, - STM_TIME_MINOR_GC, - STM_TIME_MAJOR_GC, - STM_TIME_SYNC_PAUSE, - _STM_TIME_N -}; - -#define _STM_MARKER_LEN 80 - typedef struct stm_thread_local_s { /* every thread should handle the shadow stack itself */ struct stm_shadowentry_s *shadowstack, *shadowstack_base; @@ -88,20 +66,11 @@ char *mem_clear_on_abort; size_t mem_bytes_to_clear_on_abort; /* after an abort, some details about 
the abort are stored there. - (these fields are not modified on a successful commit) */ + (this field is not modified on a successful commit) */ long last_abort__bytes_in_nursery; - /* timing information, accumulated */ - uint32_t events[_STM_TIME_N]; - float timing[_STM_TIME_N]; - double _timing_cur_start; - enum stm_time_e _timing_cur_state; - /* the marker with the longest associated time so far */ - enum stm_time_e longest_marker_state; - double longest_marker_time; - char longest_marker_self[_STM_MARKER_LEN]; - char longest_marker_other[_STM_MARKER_LEN]; /* the next fields are handled internally by the library */ int associated_segment_num; + int thread_local_counter; struct stm_thread_local_s *prev, *next; void *creating_pthread[2]; } stm_thread_local_t; @@ -155,7 +124,7 @@ #define _STM_CARD_SIZE 32 /* must be >= 32 */ #define _STM_MIN_CARD_COUNT 17 #define _STM_MIN_CARD_OBJ_SIZE (_STM_CARD_SIZE * _STM_MIN_CARD_COUNT) -#define _STM_NSE_SIGNAL_MAX _STM_TIME_N +#define _STM_NSE_SIGNAL_MAX 7 #define _STM_FAST_ALLOC (66*1024) @@ -438,20 +407,79 @@ const char *msg); -/* Temporary? */ -void stm_flush_timing(stm_thread_local_t *tl, int verbose); +/* Profiling events. In the comments: content of the markers, if any */ +enum stm_event_e { + /* always STM_TRANSACTION_START followed later by one of COMMIT or ABORT */ + STM_TRANSACTION_START, + STM_TRANSACTION_COMMIT, + STM_TRANSACTION_ABORT, + /* contention; see details at the start of contention.c */ + STM_CONTENTION_WRITE_WRITE, /* markers: self loc / other written loc */ + STM_CONTENTION_WRITE_READ, /* markers: self written loc / other missing */ + STM_CONTENTION_INEVITABLE, /* markers: self loc / other inev loc */ + + /* following a contention, we get from the same thread one of: + STM_ABORTING_OTHER_CONTENTION, STM_TRANSACTION_ABORT (self-abort), + or STM_WAIT_CONTENTION (self-wait). */ + STM_ABORTING_OTHER_CONTENTION, + + /* always one STM_WAIT_xxx followed later by STM_WAIT_DONE */ + STM_WAIT_FREE_SEGMENT, + STM_WAIT_SYNC_PAUSE, + STM_WAIT_CONTENTION, + STM_WAIT_DONE, + + /* start and end of GC cycles */ + STM_GC_MINOR_START, + STM_GC_MINOR_DONE, + STM_GC_MAJOR_START, + STM_GC_MAJOR_DONE, + + _STM_EVENT_N +}; + +#define STM_EVENT_NAMES \ + "transaction start", \ + "transaction commit", \ + "transaction abort", \ + "contention write write", \ + "contention write read", \ + "contention inevitable", \ + "aborting other contention", \ + "wait free segment", \ + "wait sync pause", \ + "wait contention", \ + "wait done", \ + "gc minor start", \ + "gc minor done", \ + "gc major start", \ + "gc major done" /* The markers pushed in the shadowstack are an odd number followed by a - regular pointer. When needed, this library invokes this callback to - turn this pair into a human-readable explanation. */ -extern void (*stmcb_expand_marker)(char *segment_base, uintptr_t odd_number, - object_t *following_object, - char *outputbuf, size_t outputbufsize); -extern void (*stmcb_debug_print)(const char *cause, double time, - const char *marker); + regular pointer. 
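The event list above deliberately drops the built-in accumulation that timing.c used to do: the library now only reports paired events (START followed by COMMIT or ABORT, WAIT_xxx followed by WAIT_DONE, GC_xxx_START followed by GC_xxx_DONE) and leaves any bookkeeping to the callback. A sketch of a callback that rebuilds a per-thread "time spent in transactions" counter from those pairs, assuming the events for a given tl are delivered in that same thread as in the code above:

#include <time.h>
#include "stmgc.h"

static __thread double tx_started;
static __thread double tx_total;     /* accumulated seconds in transactions */

static double now(void)
{
    struct timespec t;
    clock_gettime(CLOCK_MONOTONIC, &t);
    return t.tv_sec + t.tv_nsec * 1e-9;
}

static void accumulate(stm_thread_local_t *tl, enum stm_event_e event,
                       stm_loc_marker_t *markers)
{
    switch (event) {
    case STM_TRANSACTION_START:
        tx_started = now();
        break;
    case STM_TRANSACTION_COMMIT:
    case STM_TRANSACTION_ABORT:
        tx_total += now() - tx_started;
        break;
    default:
        break;              /* ignore GC, wait and contention events here */
    }
}

/* during start-up:  stmcb_timing_event = accumulate;  */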
*/ +typedef struct { + stm_thread_local_t *tl; + char *segment_base; /* base to interpret the 'object' below */ + uintptr_t odd_number; /* marker odd number, or 0 if marker is missing */ + object_t *object; /* marker object, or NULL if marker is missing */ +} stm_loc_marker_t; +extern void (*stmcb_timing_event)(stm_thread_local_t *tl, /* the local thread */ + enum stm_event_e event, + stm_loc_marker_t *markers); -/* Conventience macros to push the markers into the shadowstack */ +/* Calling this sets up a stmcb_timing_event callback that will produce + a binary file calling 'profiling_file_name'. After a fork(), it is + written to 'profiling_file_name.fork'. Call it with NULL to + stop profiling. Returns -1 in case of error (see errno then). + The optional 'expand_marker' function pointer is called to expand + the marker's odd_number and object into data, starting at the given + position and with the given maximum length. */ +int stm_set_timing_log(const char *profiling_file_name, + int expand_marker(stm_loc_marker_t *, char *, int)); + + +/* Convenience macros to push the markers into the shadowstack */ #define STM_PUSH_MARKER(tl, odd_num, p) do { \ uintptr_t _odd_num = (odd_num); \ assert(_odd_num & 1); \ @@ -476,8 +504,6 @@ _ss->ss = (object_t *)_odd_num; \ } while (0) -char *_stm_expand_marker(void); - /* ==================== END ==================== */ diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -24,12 +24,6 @@ size_t mem_bytes_to_clear_on_abort; long last_abort__bytes_in_nursery; int associated_segment_num; - uint32_t events[]; - float timing[]; - int longest_marker_state; - double longest_marker_time; - char longest_marker_self[]; - char longest_marker_other[]; ...; } stm_thread_local_t; @@ -113,34 +107,57 @@ long stm_call_on_abort(stm_thread_local_t *, void *key, void callback(void *)); long stm_call_on_commit(stm_thread_local_t *, void *key, void callback(void *)); -#define STM_TIME_OUTSIDE_TRANSACTION ... -#define STM_TIME_RUN_CURRENT ... -#define STM_TIME_RUN_COMMITTED ... -#define STM_TIME_RUN_ABORTED_WRITE_WRITE ... -#define STM_TIME_RUN_ABORTED_WRITE_READ ... -#define STM_TIME_RUN_ABORTED_INEVITABLE ... -#define STM_TIME_RUN_ABORTED_OTHER ... -#define STM_TIME_WAIT_FREE_SEGMENT ... -#define STM_TIME_WAIT_WRITE_READ ... -#define STM_TIME_WAIT_INEVITABLE ... -#define STM_TIME_WAIT_OTHER ... -#define STM_TIME_BOOKKEEPING ... -#define STM_TIME_MINOR_GC ... -#define STM_TIME_MAJOR_GC ... -#define STM_TIME_SYNC_PAUSE ... +/* Profiling events. In the comments: content of the markers, if any */ +enum stm_event_e { + /* always STM_TRANSACTION_START followed later by one of COMMIT or ABORT */ + STM_TRANSACTION_START, + STM_TRANSACTION_COMMIT, + STM_TRANSACTION_ABORT, -void stm_flush_timing(stm_thread_local_t *, int); + /* contention; see details at the start of contention.c */ + STM_CONTENTION_WRITE_WRITE, /* markers: self loc / other written loc */ + STM_CONTENTION_WRITE_READ, /* markers: self written loc / other missing */ + STM_CONTENTION_INEVITABLE, /* markers: self inev loc / other inev loc */ -void (*stmcb_expand_marker)(char *segment_base, uintptr_t odd_number, - object_t *following_object, - char *outputbuf, size_t outputbufsize); -void (*stmcb_debug_print)(const char *cause, double time, - const char *marker); + /* following a contention, we get from the same thread one of: + STM_ABORTING_OTHER_CONTENTION, STM_TRANSACTION_ABORT (self-abort), + or STM_WAIT_CONTENTION (self-wait). 
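The STM_PUSH_MARKER / STM_UPDATE_MARKER_NUM / STM_POP_MARKER macros above are how the interpreter feeds the odd-number/object pairs that the markers reported with the events refer to. A sketch of the intended call pattern around one interpreted frame; run_one_step() and the thread-local variable stm_thread_local are assumptions of this illustration, not names from the patch:

#include <stdint.h>
#include "stmgc.h"

extern __thread stm_thread_local_t stm_thread_local;   /* assumed to exist */
extern int run_one_step(uintptr_t *position);          /* hypothetical */

void run_frame(object_t *code_object)
{
    uintptr_t position = 1;              /* marker numbers must stay odd */
    STM_PUSH_MARKER(stm_thread_local, position, code_object);

    while (run_one_step(&position)) {
        /* refresh the odd number; the pushed object stays the same */
        STM_UPDATE_MARKER_NUM(stm_thread_local, position);
    }
    STM_POP_MARKER(stm_thread_local);
}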
*/ + STM_ABORTING_OTHER_CONTENTION, + + /* always one STM_WAIT_xxx followed later by STM_WAIT_DONE */ + STM_WAIT_FREE_SEGMENT, + STM_WAIT_SYNC_PAUSE, + STM_WAIT_CONTENTION, + STM_WAIT_DONE, + + /* start and end of GC cycles */ + STM_GC_MINOR_START, + STM_GC_MINOR_DONE, + STM_GC_MAJOR_START, + STM_GC_MAJOR_DONE, + ... +}; + +typedef struct { + stm_thread_local_t *tl; + /* If segment_base==NULL, the remaining fields are undefined. If non-NULL, + the rest is a marker to interpret from this segment_base addr. */ + char *segment_base; + uintptr_t odd_number; + object_t *object; +} stm_loc_marker_t; + +typedef void (*stmcb_timing_event_fn)(stm_thread_local_t *tl, + enum stm_event_e event, + stm_loc_marker_t *markers); +stmcb_timing_event_fn stmcb_timing_event; + +int stm_set_timing_log(const char *profiling_file_name, + int expand_marker(stm_loc_marker_t *, char *, int)); void stm_push_marker(stm_thread_local_t *, uintptr_t, object_t *); void stm_update_marker_num(stm_thread_local_t *, uintptr_t); void stm_pop_marker(stm_thread_local_t *); -char *_stm_expand_marker(void); """) @@ -540,8 +557,7 @@ self.current_thread = 0 def teardown_method(self, meth): - lib.stmcb_expand_marker = ffi.NULL - lib.stmcb_debug_print = ffi.NULL + lib.stmcb_timing_event = ffi.NULL tl = self.tls[self.current_thread] if lib._stm_in_transaction(tl) and lib.stm_is_inevitable(): self.commit_transaction() # must succeed! @@ -627,7 +643,7 @@ self.push_root(ffi.cast("object_t *", 8)) def check_char_everywhere(self, obj, expected_content, offset=HDR): - for i in range(len(self.tls)): + for i in range(len(self.tls) + 1): addr = lib._stm_get_segment_base(i) content = addr[int(ffi.cast("uintptr_t", obj)) + offset] assert content == expected_content diff --git a/c7/test/test_marker.py b/c7/test/test_marker.py --- a/c7/test/test_marker.py +++ b/c7/test/test_marker.py @@ -1,8 +1,41 @@ from support import * import py, time + class TestMarker(BaseTest): + def recording(self, kind): + seen = [] + @ffi.callback("stmcb_timing_event_fn") + def timing_event(tl, event, markers): + if kind == "ALL": + seen.append(event) + elif event != kind: + return + seen.append(tl) + if markers: + seen.append(markers[0].tl) + seen.append(markers[0].segment_base) + seen.append(markers[0].odd_number) + seen.append(markers[0].object) + seen.append(markers[1].tl) + seen.append(markers[1].segment_base) + seen.append(markers[1].odd_number) + seen.append(markers[1].object) + else: + seen.append(None) + lib.stmcb_timing_event = timing_event + self.timing_event_keepalive = timing_event + self.seen = seen + + def check_recording(self, i1, o1, i2, o2): + seen = self.seen + assert seen[0] == self.tls[1] + segbase = lib._stm_get_segment_base + assert seen[1:5] == [self.tls[1], segbase(2), i1, o1] + assert seen[5:9] == [self.tls[0], segbase(1), i2, o2] + assert len(seen) == 9 + def test_marker_odd_simple(self): self.start_transaction() self.push_root(ffi.cast("object_t *", 29)) @@ -13,74 +46,17 @@ assert int(ffi.cast("uintptr_t", x)) == 29 def test_abort_marker_no_shadowstack(self): - tl = self.get_stm_thread_local() - assert tl.longest_marker_state == lib.STM_TIME_OUTSIDE_TRANSACTION - assert tl.longest_marker_time == 0.0 + self.recording(lib.STM_CONTENTION_WRITE_WRITE) + p = stm_allocate_old(16) # self.start_transaction() - start = time.time() - while abs(time.time() - start) <= 0.1: - pass - self.abort_transaction() + stm_set_char(p, 'A') # - tl = self.get_stm_thread_local() - assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_OTHER - assert 0.099 <= 
tl.longest_marker_time <= 0.9 - assert tl.longest_marker_self[0] == '\x00' - assert tl.longest_marker_other[0] == '\x00' - - def test_abort_marker_shadowstack(self): + self.switch(1) self.start_transaction() - p = stm_allocate(16) - self.push_root(ffi.cast("object_t *", 29)) - self.push_root(p) - start = time.time() - while abs(time.time() - start) <= 0.1: - pass - self.abort_transaction() + py.test.raises(Conflict, stm_set_char, p, 'B') # - tl = self.get_stm_thread_local() - assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_OTHER - assert 0.099 <= tl.longest_marker_time <= 0.9 - assert tl.longest_marker_self[0] == '\x00' - assert tl.longest_marker_other[0] == '\x00' - - def test_abort_marker_no_shadowstack_cb(self): - @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") - def expand_marker(base, number, ptr, outbuf, outbufsize): - seen.append(1) - lib.stmcb_expand_marker = expand_marker - seen = [] - # - self.start_transaction() - self.abort_transaction() - # - tl = self.get_stm_thread_local() - assert tl.longest_marker_self[0] == '\x00' - assert not seen - - def test_abort_marker_shadowstack_cb(self): - @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") - def expand_marker(base, number, ptr, outbuf, outbufsize): - s = '%d %r\x00' % (number, ptr) - assert len(s) <= outbufsize - outbuf[0:len(s)] = s - lib.stmcb_expand_marker = expand_marker - # - self.start_transaction() - p = stm_allocate(16) - self.push_root(ffi.cast("object_t *", 29)) - self.push_root(p) - start = time.time() - while abs(time.time() - start) <= 0.1: - pass - self.abort_transaction() - # - tl = self.get_stm_thread_local() - assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_OTHER - assert 0.099 <= tl.longest_marker_time <= 0.9 - assert ffi.string(tl.longest_marker_self) == '29 %r' % (p,) - assert ffi.string(tl.longest_marker_other) == '' + self.check_recording(0, ffi.NULL, 0, ffi.NULL) def test_macros(self): self.start_transaction() @@ -116,72 +92,8 @@ lib.stm_pop_marker(tl) py.test.raises(EmptyStack, self.pop_root) - def test_stm_expand_marker(self): - @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") - def expand_marker(base, number, ptr, outbuf, outbufsize): - s = '%d %r\x00' % (number, ptr) - assert len(s) <= outbufsize - outbuf[0:len(s)] = s - lib.stmcb_expand_marker = expand_marker - self.start_transaction() - p = stm_allocate(16) - self.push_root(ffi.cast("object_t *", 29)) - self.push_root(p) - self.push_root(stm_allocate(32)) - self.push_root(stm_allocate(16)) - raw = lib._stm_expand_marker() - assert ffi.string(raw) == '29 %r' % (p,) - - def test_stmcb_debug_print(self): - @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") - def expand_marker(base, number, ptr, outbuf, outbufsize): - s = '<<<%d>>>\x00' % (number,) - assert len(s) <= outbufsize - outbuf[0:len(s)] = s - @ffi.callback("void(char *, double, char *)") - def debug_print(cause, time, marker): - if 0.0 < time < 1.0: - time = "time_ok" - seen.append((ffi.string(cause), time, ffi.string(marker))) - seen = [] - lib.stmcb_expand_marker = expand_marker - lib.stmcb_debug_print = debug_print - # - self.start_transaction() - p = stm_allocate(16) - self.push_root(ffi.cast("object_t *", 29)) - self.push_root(p) - self.abort_transaction() - # - assert seen == [("run aborted other", "time_ok", "<<<29>>>")] - - def test_multiple_markers(self): - @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") - def expand_marker(base, number, ptr, outbuf, outbufsize): - 
seen.append(number) - s = '%d %r\x00' % (number, ptr == ffi.NULL) - assert len(s) <= outbufsize - outbuf[0:len(s)] = s - seen = [] - lib.stmcb_expand_marker = expand_marker - # - self.start_transaction() - p = stm_allocate(16) - self.push_root(ffi.cast("object_t *", 27)) - self.push_root(p) - self.push_root(ffi.cast("object_t *", 29)) - self.push_root(ffi.cast("object_t *", ffi.NULL)) - raw = lib._stm_expand_marker() - assert ffi.string(raw) == '29 True' - assert seen == [29] - def test_double_abort_markers_cb_write_write(self): - @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") - def expand_marker(base, number, ptr, outbuf, outbufsize): - s = '%d\x00' % (number,) - assert len(s) <= outbufsize - outbuf[0:len(s)] = s - lib.stmcb_expand_marker = expand_marker + self.recording(lib.STM_CONTENTION_WRITE_WRITE) p = stm_allocate_old(16) # self.start_transaction() @@ -200,19 +112,10 @@ self.push_root(ffi.cast("object_t *", ffi.NULL)) py.test.raises(Conflict, stm_set_char, p, 'B') # - tl = self.get_stm_thread_local() - assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_WRITE_WRITE - assert ffi.string(tl.longest_marker_self) == '21' - assert ffi.string(tl.longest_marker_other) == '19' + self.check_recording(21, ffi.NULL, 19, ffi.NULL) def test_double_abort_markers_cb_inevitable(self): - @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") - def expand_marker(base, number, ptr, outbuf, outbufsize): - c = (base + int(ffi.cast("uintptr_t", ptr)))[8] - s = '%d %r\x00' % (number, c) - assert len(s) <= outbufsize - outbuf[0:len(s)] = s - lib.stmcb_expand_marker = expand_marker + self.recording(lib.STM_CONTENTION_INEVITABLE) # self.start_transaction() p = stm_allocate(16) @@ -234,18 +137,10 @@ self.push_root(ffi.cast("object_t *", p)) py.test.raises(Conflict, self.become_inevitable) # - tl = self.get_stm_thread_local() - assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_INEVITABLE - assert ffi.string(tl.longest_marker_self) == "21 'B'" - assert ffi.string(tl.longest_marker_other) == "19 'A'" + self.check_recording(21, p, 19, p) def test_read_write_contention(self): - @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") - def expand_marker(base, number, ptr, outbuf, outbufsize): - s = '%d\x00' % (number,) - assert len(s) <= outbufsize - outbuf[0:len(s)] = s - lib.stmcb_expand_marker = expand_marker + self.recording(lib.STM_CONTENTION_WRITE_READ) p = stm_allocate_old(16) # self.start_transaction() @@ -262,19 +157,10 @@ self.push_root(ffi.cast("object_t *", ffi.NULL)) py.test.raises(Conflict, self.commit_transaction) # - tl = self.get_stm_thread_local() - assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_WRITE_READ - assert ffi.string(tl.longest_marker_self) == '19' - assert ffi.string(tl.longest_marker_other) == ( - '') + self.check_recording(19, ffi.NULL, 0, ffi.NULL) def test_double_remote_markers_cb_write_write(self): - @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") - def expand_marker(base, number, ptr, outbuf, outbufsize): - s = '%d\x00' % (number,) - assert len(s) <= outbufsize - outbuf[0:len(s)] = s - lib.stmcb_expand_marker = expand_marker + self.recording(lib.STM_CONTENTION_WRITE_WRITE) p = stm_allocate_old(16) # self.start_transaction() @@ -300,19 +186,10 @@ # py.test.raises(Conflict, self.switch, 0) # - tl = self.get_stm_thread_local() - assert tl is tl0 - assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_WRITE_WRITE - assert ffi.string(tl.longest_marker_self) == '19' - assert 
ffi.string(tl.longest_marker_other) == '21' + self.check_recording(21, ffi.NULL, 19, ffi.NULL) def test_double_remote_markers_cb_write_read(self): - @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") - def expand_marker(base, number, ptr, outbuf, outbufsize): - s = '%d\x00' % (number,) - assert len(s) <= outbufsize - outbuf[0:len(s)] = s - lib.stmcb_expand_marker = expand_marker + self.recording(lib.STM_CONTENTION_WRITE_READ) p = stm_allocate_old(16) # self.start_transaction() @@ -333,8 +210,24 @@ # py.test.raises(Conflict, self.switch, 0) # - tl = self.get_stm_thread_local() - assert tl is tl0 - assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_WRITE_READ - assert ffi.string(tl.longest_marker_self)=='' - assert ffi.string(tl.longest_marker_other) == '21' + self.check_recording(21, ffi.NULL, 0, ffi.NULL) + + def test_all(self): + self.recording("ALL") + self.start_transaction() + self.commit_transaction() + self.start_transaction() + stm_major_collect() + self.abort_transaction() + assert self.seen == [ + lib.STM_TRANSACTION_START, self.tls[0], None, + lib.STM_GC_MINOR_START, self.tls[0], None, + lib.STM_GC_MINOR_DONE, self.tls[0], None, + lib.STM_TRANSACTION_COMMIT, self.tls[0], None, + lib.STM_TRANSACTION_START, self.tls[0], None, + lib.STM_GC_MINOR_START, self.tls[0], None, + lib.STM_GC_MINOR_DONE, self.tls[0], None, + lib.STM_GC_MAJOR_START, self.tls[0], None, + lib.STM_GC_MAJOR_DONE, self.tls[0], None, + lib.STM_TRANSACTION_ABORT, self.tls[0], None, + ] diff --git a/c7/test/test_prof.py b/c7/test/test_prof.py new file mode 100644 --- /dev/null +++ b/c7/test/test_prof.py @@ -0,0 +1,77 @@ +from support import * +import py, os, struct + +udir = py.path.local.make_numbered_dir(prefix = 'stmgc-') + + +def read_log(filename): + f = open(filename, 'rb') + header = f.read(16) + assert header == "STMGC-C7-PROF01\n" + result = [] + while True: + packet = f.read(15) + if not packet: break + sec, nsec, threadnum, event, len0, len1 = \ + struct.unpack("IIIBBB", packet) + result.append((sec + 0.000000001 * nsec, + threadnum, + event, + f.read(len0), + f.read(len1))) + f.close() + return result + + +class TestProf(BaseTest): + + def test_simple(self): + filename = os.path.join(str(udir), 'simple.prof') + r = lib.stm_set_timing_log(filename, ffi.NULL) + assert r == 0 + try: + self.start_transaction() + self.commit_transaction() + finally: + lib.stm_set_timing_log(ffi.NULL, ffi.NULL) + + result = read_log(filename) + assert result[0][2] == lib.STM_TRANSACTION_START + assert result[1][2] == lib.STM_GC_MINOR_START + assert result[2][2] == lib.STM_GC_MINOR_DONE + assert result[3][2] == lib.STM_TRANSACTION_COMMIT + assert len(result) == 4 + + def test_contention(self): + @ffi.callback("int(stm_loc_marker_t *, char *, int)") + def expand_marker(marker, p, s): + p[0] = chr(100 + marker.odd_number) + return 1 + filename = os.path.join(str(udir), 'contention.prof') + r = lib.stm_set_timing_log(filename, expand_marker) + assert r == 0 + try: + p = stm_allocate_old(16) + self.start_transaction() + assert stm_get_char(p) == '\x00' # read + # + self.switch(1) + self.start_transaction() + self.push_root(ffi.cast("object_t *", 19)) + self.push_root(ffi.cast("object_t *", ffi.NULL)) + stm_set_char(p, 'B') # write + py.test.raises(Conflict, self.commit_transaction) + finally: + lib.stm_set_timing_log(ffi.NULL, ffi.NULL) + + result = read_log(filename) + id0 = result[0][1] + id1 = result[1][1] + assert result[0][1:5] == (id0, lib.STM_TRANSACTION_START, '', '') + assert result[1][1:5] == 
(id1, lib.STM_TRANSACTION_START, '', '') + assert result[2][1:5] == (id1, lib.STM_GC_MINOR_START, '', '') + assert result[3][1:5] == (id1, lib.STM_GC_MINOR_DONE, '', '') + assert result[4][1:5] == (id1, lib.STM_CONTENTION_WRITE_READ, + chr(119), '') + assert result[5][1:5] == (id1, lib.STM_TRANSACTION_ABORT, '', '') + assert len(result) == 6 diff --git a/c7/test/test_timing.py b/c7/test/test_timing.py deleted file mode 100644 --- a/c7/test/test_timing.py +++ /dev/null @@ -1,97 +0,0 @@ -from support import * -import py, time - - -class TestTiming(BaseTest): - - def gettimer(self, n): - tl = self.tls[self.current_thread] - lib.stm_flush_timing(tl, 1) - return tl.events[n], tl.timing[n] - - def expect_timer(self, n, expected_time, expected_count='?'): From noreply at buildbot.pypy.org Sun Oct 5 11:53:22 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 5 Oct 2014 11:53:22 +0200 (CEST) Subject: [pypy-commit] stmgc default: Forgot to generate these events Message-ID: <20141005095322.14E141C1347@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1456:903f2a6c1ed9 Date: 2014-10-05 11:53 +0200 http://bitbucket.org/pypy/stmgc/changeset/903f2a6c1ed9/ Log: Forgot to generate these events diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -178,12 +178,16 @@ dprintf(("pausing...\n")); + timing_event(STM_SEGMENT->running_thread, STM_WAIT_CONTENTION); + cond_signal(C_AT_SAFE_POINT); STM_PSEGMENT->safe_point = SP_WAIT_FOR_C_TRANSACTION_DONE; cond_wait(C_TRANSACTION_DONE); STM_PSEGMENT->safe_point = SP_RUNNING; dprintf(("pausing done\n")); + timing_event(STM_SEGMENT->running_thread, STM_WAIT_DONE); + if (must_abort()) abort_with_mutex(); } @@ -201,6 +205,9 @@ it does. */ contmgr.other_pseg->pub.nursery_end = NSE_SIGABORT; + timing_event(STM_SEGMENT->running_thread, + STM_ABORTING_OTHER_CONTENTION); + int sp = contmgr.other_pseg->safe_point; switch (sp) { From noreply at buildbot.pypy.org Sun Oct 5 11:54:17 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 5 Oct 2014 11:54:17 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Forgot to "hg add" these files Message-ID: <20141005095417.26CDA1C1347@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r73778:9a2d8c9e9c98 Date: 2014-10-04 19:41 +0200 http://bitbucket.org/pypy/pypy/changeset/9a2d8c9e9c98/ Log: Forgot to "hg add" these files diff --git a/rpython/translator/stm/src_stm/stm/prof.c b/rpython/translator/stm/src_stm/stm/prof.c new file mode 100644 --- /dev/null +++ b/rpython/translator/stm/src_stm/stm/prof.c @@ -0,0 +1,103 @@ +/* Imported by rpython/translator/stm/import_stmgc.py */ +#include + + +static FILE *profiling_file; +static char *profiling_basefn = NULL; +static int (*profiling_expand_marker)(stm_loc_marker_t *, char *, int); + + +static void _stm_profiling_event(stm_thread_local_t *tl, + enum stm_event_e event, + stm_loc_marker_t *markers) +{ + struct buf_s { + uint32_t tv_sec; + uint32_t tv_nsec; + uint32_t thread_num; + uint8_t event; + uint8_t marker_length[2]; + char extra[256]; + } __attribute__((packed)); + + struct buf_s buf; + struct timespec t; + clock_gettime(CLOCK_MONOTONIC, &t); + buf.tv_sec = t.tv_sec; + buf.tv_nsec = t.tv_nsec; + buf.thread_num = tl->thread_local_counter; + buf.event = event; + + int len0 = 0; + int len1 = 0; + if (markers != NULL) { + if (markers[0].odd_number != 0) + len0 = profiling_expand_marker(&markers[0], buf.extra, 128); + if (markers[1].odd_number != 0) + len1 = 
profiling_expand_marker(&markers[1], buf.extra + len0, 128); + } + buf.marker_length[0] = len0; + buf.marker_length[1] = len1; + + fwrite(&buf, offsetof(struct buf_s, extra) + len0 + len1, + 1, profiling_file); +} + +static int default_expand_marker(stm_loc_marker_t *m, char *p, int s) +{ + *(uintptr_t *)p = m->odd_number; + return sizeof(uintptr_t); +} + +static bool open_timing_log(const char *filename) +{ + profiling_file = fopen(filename, "w"); + if (profiling_file == NULL) + return false; + + fwrite("STMGC-C7-PROF01\n", 16, 1, profiling_file); + stmcb_timing_event = _stm_profiling_event; + return true; +} + +static bool close_timing_log(void) +{ + if (stmcb_timing_event == &_stm_profiling_event) { + stmcb_timing_event = NULL; + fclose(profiling_file); + profiling_file = NULL; + return true; + } + return false; +} + +static void forksupport_open_new_profiling_file(void) +{ + if (close_timing_log() && profiling_basefn != NULL) { + char filename[1024]; + snprintf(filename, sizeof(filename), + "%s.fork%ld", profiling_basefn, (long)getpid()); + open_timing_log(filename); + } +} + +int stm_set_timing_log(const char *profiling_file_name, + int expand_marker(stm_loc_marker_t *, char *, int)) +{ + close_timing_log(); + free(profiling_basefn); + profiling_basefn = NULL; + + if (profiling_file_name == NULL) + return 0; + + if (!expand_marker) + expand_marker = default_expand_marker; + profiling_expand_marker = expand_marker; + + if (!open_timing_log(profiling_file_name)) + return -1; + + profiling_basefn = strdup(profiling_file_name); + return 0; +} diff --git a/rpython/translator/stm/src_stm/stm/prof.h b/rpython/translator/stm/src_stm/stm/prof.h new file mode 100644 --- /dev/null +++ b/rpython/translator/stm/src_stm/stm/prof.h @@ -0,0 +1,3 @@ +/* Imported by rpython/translator/stm/import_stmgc.py */ + +static void forksupport_open_new_profiling_file(void); From noreply at buildbot.pypy.org Sun Oct 5 11:54:18 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 5 Oct 2014 11:54:18 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/903f2a6c1ed9 Message-ID: <20141005095418.622C61C1347@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r73779:baee254ae5d2 Date: 2014-10-05 11:53 +0200 http://bitbucket.org/pypy/pypy/changeset/baee254ae5d2/ Log: import stmgc/903f2a6c1ed9 diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -57b388129192 +903f2a6c1ed9 diff --git a/rpython/translator/stm/src_stm/stm/contention.c b/rpython/translator/stm/src_stm/stm/contention.c --- a/rpython/translator/stm/src_stm/stm/contention.c +++ b/rpython/translator/stm/src_stm/stm/contention.c @@ -179,12 +179,16 @@ dprintf(("pausing...\n")); + timing_event(STM_SEGMENT->running_thread, STM_WAIT_CONTENTION); + cond_signal(C_AT_SAFE_POINT); STM_PSEGMENT->safe_point = SP_WAIT_FOR_C_TRANSACTION_DONE; cond_wait(C_TRANSACTION_DONE); STM_PSEGMENT->safe_point = SP_RUNNING; dprintf(("pausing done\n")); + timing_event(STM_SEGMENT->running_thread, STM_WAIT_DONE); + if (must_abort()) abort_with_mutex(); } @@ -202,6 +206,9 @@ it does. 
*/ contmgr.other_pseg->pub.nursery_end = NSE_SIGABORT; + timing_event(STM_SEGMENT->running_thread, + STM_ABORTING_OTHER_CONTENTION); + int sp = contmgr.other_pseg->safe_point; switch (sp) { From noreply at buildbot.pypy.org Sun Oct 5 12:07:10 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 5 Oct 2014 12:07:10 +0200 (CEST) Subject: [pypy-commit] stmgc default: Some tests for 903f2a6c1ed9 Message-ID: <20141005100710.EF87D1C3334@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1457:f89f44ca1dc9 Date: 2014-10-05 12:07 +0200 http://bitbucket.org/pypy/stmgc/changeset/f89f44ca1dc9/ Log: Some tests for 903f2a6c1ed9 diff --git a/c7/test/test_marker.py b/c7/test/test_marker.py --- a/c7/test/test_marker.py +++ b/c7/test/test_marker.py @@ -4,37 +4,38 @@ class TestMarker(BaseTest): - def recording(self, kind): + def recording(self, *kinds): seen = [] @ffi.callback("stmcb_timing_event_fn") def timing_event(tl, event, markers): - if kind == "ALL": - seen.append(event) - elif event != kind: + if len(kinds) > 0 and event not in kinds: return - seen.append(tl) if markers: - seen.append(markers[0].tl) - seen.append(markers[0].segment_base) - seen.append(markers[0].odd_number) - seen.append(markers[0].object) - seen.append(markers[1].tl) - seen.append(markers[1].segment_base) - seen.append(markers[1].odd_number) - seen.append(markers[1].object) + expanded = [] + for i in range(2): + expanded.append((markers[i].tl, + markers[i].segment_base, + markers[i].odd_number, + markers[i].object)) else: - seen.append(None) + expanded = None + seen.append((tl, event, expanded)) lib.stmcb_timing_event = timing_event self.timing_event_keepalive = timing_event self.seen = seen - def check_recording(self, i1, o1, i2, o2): + def check_recording(self, i1, o1, i2, o2, extra=None): seen = self.seen - assert seen[0] == self.tls[1] + tl, event, markers = seen[0] + assert tl == self.tls[1] segbase = lib._stm_get_segment_base - assert seen[1:5] == [self.tls[1], segbase(2), i1, o1] - assert seen[5:9] == [self.tls[0], segbase(1), i2, o2] - assert len(seen) == 9 + assert markers[0] == (self.tls[1], segbase(2), i1, o1) + assert markers[1] == (self.tls[0], segbase(1), i2, o2) + if extra is None: + assert len(seen) == 1 + else: + assert seen[1] == (self.tls[1], extra, None) + assert len(seen) == 2 def test_marker_odd_simple(self): self.start_transaction() @@ -46,7 +47,9 @@ assert int(ffi.cast("uintptr_t", x)) == 29 def test_abort_marker_no_shadowstack(self): - self.recording(lib.STM_CONTENTION_WRITE_WRITE) + self.recording(lib.STM_CONTENTION_WRITE_WRITE, + lib.STM_WAIT_CONTENTION, + lib.STM_ABORTING_OTHER_CONTENTION) p = stm_allocate_old(16) # self.start_transaction() @@ -160,7 +163,8 @@ self.check_recording(19, ffi.NULL, 0, ffi.NULL) def test_double_remote_markers_cb_write_write(self): - self.recording(lib.STM_CONTENTION_WRITE_WRITE) + self.recording(lib.STM_CONTENTION_WRITE_WRITE, + lib.STM_ABORTING_OTHER_CONTENTION) p = stm_allocate_old(16) # self.start_transaction() @@ -186,10 +190,12 @@ # py.test.raises(Conflict, self.switch, 0) # - self.check_recording(21, ffi.NULL, 19, ffi.NULL) + self.check_recording(21, ffi.NULL, 19, ffi.NULL, + extra=lib.STM_ABORTING_OTHER_CONTENTION) def test_double_remote_markers_cb_write_read(self): - self.recording(lib.STM_CONTENTION_WRITE_READ) + self.recording(lib.STM_CONTENTION_WRITE_READ, + lib.STM_ABORTING_OTHER_CONTENTION) p = stm_allocate_old(16) # self.start_transaction() @@ -210,24 +216,25 @@ # py.test.raises(Conflict, self.switch, 0) # - self.check_recording(21, 
ffi.NULL, 0, ffi.NULL) + self.check_recording(21, ffi.NULL, 0, ffi.NULL, + extra=lib.STM_ABORTING_OTHER_CONTENTION) def test_all(self): - self.recording("ALL") + self.recording() # all events self.start_transaction() self.commit_transaction() self.start_transaction() stm_major_collect() self.abort_transaction() assert self.seen == [ - lib.STM_TRANSACTION_START, self.tls[0], None, - lib.STM_GC_MINOR_START, self.tls[0], None, - lib.STM_GC_MINOR_DONE, self.tls[0], None, - lib.STM_TRANSACTION_COMMIT, self.tls[0], None, - lib.STM_TRANSACTION_START, self.tls[0], None, - lib.STM_GC_MINOR_START, self.tls[0], None, - lib.STM_GC_MINOR_DONE, self.tls[0], None, - lib.STM_GC_MAJOR_START, self.tls[0], None, - lib.STM_GC_MAJOR_DONE, self.tls[0], None, - lib.STM_TRANSACTION_ABORT, self.tls[0], None, + (self.tls[0], lib.STM_TRANSACTION_START, None), + (self.tls[0], lib.STM_GC_MINOR_START, None), + (self.tls[0], lib.STM_GC_MINOR_DONE, None), + (self.tls[0], lib.STM_TRANSACTION_COMMIT, None), + (self.tls[0], lib.STM_TRANSACTION_START, None), + (self.tls[0], lib.STM_GC_MINOR_START, None), + (self.tls[0], lib.STM_GC_MINOR_DONE, None), + (self.tls[0], lib.STM_GC_MAJOR_START, None), + (self.tls[0], lib.STM_GC_MAJOR_DONE, None), + (self.tls[0], lib.STM_TRANSACTION_ABORT, None), ] From noreply at buildbot.pypy.org Sun Oct 5 14:49:57 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 5 Oct 2014 14:49:57 +0200 (CEST) Subject: [pypy-commit] stmgc default: Must also dump the number of the other thread. Message-ID: <20141005124957.775C71C3342@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1458:32dbfbd04b6f Date: 2014-10-05 14:50 +0200 http://bitbucket.org/pypy/stmgc/changeset/32dbfbd04b6f/ Log: Must also dump the number of the other thread. diff --git a/c7/stm/prof.c b/c7/stm/prof.c --- a/c7/stm/prof.c +++ b/c7/stm/prof.c @@ -14,6 +14,7 @@ uint32_t tv_sec; uint32_t tv_nsec; uint32_t thread_num; + uint32_t other_thread_num; uint8_t event; uint8_t marker_length[2]; char extra[256]; @@ -25,11 +26,14 @@ buf.tv_sec = t.tv_sec; buf.tv_nsec = t.tv_nsec; buf.thread_num = tl->thread_local_counter; + buf.other_thread_num = 0; buf.event = event; int len0 = 0; int len1 = 0; if (markers != NULL) { + if (markers[1].tl != NULL) + buf.other_thread_num = markers[1].tl->thread_local_counter; if (markers[0].odd_number != 0) len0 = profiling_expand_marker(&markers[0], buf.extra, 128); if (markers[1].odd_number != 0) diff --git a/c7/test/test_prof.py b/c7/test/test_prof.py --- a/c7/test/test_prof.py +++ b/c7/test/test_prof.py @@ -10,12 +10,12 @@ assert header == "STMGC-C7-PROF01\n" result = [] while True: - packet = f.read(15) + packet = f.read(19) if not packet: break - sec, nsec, threadnum, event, len0, len1 = \ - struct.unpack("IIIBBB", packet) + sec, nsec, threadnum, otherthreadnum, event, len0, len1 = \ + struct.unpack("IIIIBBB", packet) result.append((sec + 0.000000001 * nsec, - threadnum, + (threadnum, otherthreadnum), event, f.read(len0), f.read(len1))) @@ -65,13 +65,13 @@ lib.stm_set_timing_log(ffi.NULL, ffi.NULL) result = read_log(filename) - id0 = result[0][1] - id1 = result[1][1] - assert result[0][1:5] == (id0, lib.STM_TRANSACTION_START, '', '') - assert result[1][1:5] == (id1, lib.STM_TRANSACTION_START, '', '') - assert result[2][1:5] == (id1, lib.STM_GC_MINOR_START, '', '') - assert result[3][1:5] == (id1, lib.STM_GC_MINOR_DONE, '', '') - assert result[4][1:5] == (id1, lib.STM_CONTENTION_WRITE_READ, + id0 = result[0][1][0] + id1 = result[1][1][0] + assert result[0][1:5] == ((id0, 0), 
lib.STM_TRANSACTION_START, '', '') + assert result[1][1:5] == ((id1, 0), lib.STM_TRANSACTION_START, '', '') + assert result[2][1:5] == ((id1, 0), lib.STM_GC_MINOR_START, '', '') + assert result[3][1:5] == ((id1, 0), lib.STM_GC_MINOR_DONE, '', '') + assert result[4][1:5] == ((id1, id0), lib.STM_CONTENTION_WRITE_READ, chr(119), '') - assert result[5][1:5] == (id1, lib.STM_TRANSACTION_ABORT, '', '') + assert result[5][1:5] == ((id1, 0), lib.STM_TRANSACTION_ABORT, '', '') assert len(result) == 6 From noreply at buildbot.pypy.org Sun Oct 5 14:51:24 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 5 Oct 2014 14:51:24 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/32dbfbd04b6f Message-ID: <20141005125124.D33261C3342@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r73780:c7d8dc261e28 Date: 2014-10-05 14:51 +0200 http://bitbucket.org/pypy/pypy/changeset/c7d8dc261e28/ Log: import stmgc/32dbfbd04b6f diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -903f2a6c1ed9 +32dbfbd04b6f diff --git a/rpython/translator/stm/src_stm/stm/prof.c b/rpython/translator/stm/src_stm/stm/prof.c --- a/rpython/translator/stm/src_stm/stm/prof.c +++ b/rpython/translator/stm/src_stm/stm/prof.c @@ -15,6 +15,7 @@ uint32_t tv_sec; uint32_t tv_nsec; uint32_t thread_num; + uint32_t other_thread_num; uint8_t event; uint8_t marker_length[2]; char extra[256]; @@ -26,11 +27,14 @@ buf.tv_sec = t.tv_sec; buf.tv_nsec = t.tv_nsec; buf.thread_num = tl->thread_local_counter; + buf.other_thread_num = 0; buf.event = event; int len0 = 0; int len1 = 0; if (markers != NULL) { + if (markers[1].tl != NULL) + buf.other_thread_num = markers[1].tl->thread_local_counter; if (markers[0].odd_number != 0) len0 = profiling_expand_marker(&markers[0], buf.extra, 128); if (markers[1].odd_number != 0) From noreply at buildbot.pypy.org Sun Oct 5 15:17:00 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 5 Oct 2014 15:17:00 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Reporting a summary of a PYPYSTM file Message-ID: <20141005131700.8B74A1C0F1D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r73781:e51b6e693dec Date: 2014-10-05 15:16 +0200 http://bitbucket.org/pypy/pypy/changeset/e51b6e693dec/ Log: Reporting a summary of a PYPYSTM file diff --git a/pypy/stm/print_stm_log.py b/pypy/stm/print_stm_log.py new file mode 100755 --- /dev/null +++ b/pypy/stm/print_stm_log.py @@ -0,0 +1,175 @@ +#!/usr/bin/env python +import sys +import struct + +# ____________________________________________________________ + +STM_TRANSACTION_START = 0 +STM_TRANSACTION_COMMIT = 1 +STM_TRANSACTION_ABORT = 2 + +# contention; see details at the start of contention.c +STM_CONTENTION_WRITE_WRITE = 3 # markers: self loc / other written loc +STM_CONTENTION_WRITE_READ = 4 # markers: self written loc / other missing +STM_CONTENTION_INEVITABLE = 5 # markers: self loc / other inev loc + +# following a contention, we get from the same thread one of +# STM_ABORTING_OTHER_CONTENTION, STM_TRANSACTION_ABORT (self-abort), +# or STM_WAIT_CONTENTION (self-wait). 
+STM_ABORTING_OTHER_CONTENTION = 6 + +# always one STM_WAIT_xxx followed later by STM_WAIT_DONE +STM_WAIT_FREE_SEGMENT = 7 +STM_WAIT_SYNC_PAUSE = 8 +STM_WAIT_CONTENTION = 9 +STM_WAIT_DONE = 10 + +# start and end of GC cycles +STM_GC_MINOR_START = 11 +STM_GC_MINOR_DONE = 12 +STM_GC_MAJOR_START = 13 +STM_GC_MAJOR_DONE = 14 + +event_name = {} +for _key, _value in globals().items(): + if _key.startswith('STM_'): + event_name[_value] = _key + +# ____________________________________________________________ + + +class LogEntry(object): + def __init__(self, timestamp, threadnum, otherthreadnum, + event, marker1, marker2): + self.timestamp = timestamp + self.threadnum = threadnum + self.otherthreadnum = otherthreadnum + self.event = event + self.marker1 = marker1 + self.marker2 = marker2 + + +def parse_log(filename): + f = open(filename, 'rb') + result = [] + try: + header = f.read(16) + if header != "STMGC-C7-PROF01\n": + raise ValueError("wrong format in file %r" % (filename,)) + result = [] + while True: + packet = f.read(19) + if not packet: break + sec, nsec, threadnum, otherthreadnum, event, len1, len2 = \ + struct.unpack("IIIIBBB", packet) + m1 = f.read(len1) + m2 = f.read(len2) + result.append(LogEntry(sec + 0.000000001 * nsec, + threadnum, otherthreadnum, event, m1, m2)) + finally: + f.close() + return result + + + +class ThreadState(object): + def __init__(self, threadnum): + self.threadnum = threadnum + self.cpu_time = 0.0 + + def transaction_start(self, entry): + self._start = entry + self._conflict = None + + def transaction_stop(self, entry): + transaction_time = entry.timestamp - self._start.timestamp + self.cpu_time += transaction_time + self._start = None + if self._conflict and entry.event == STM_TRANSACTION_ABORT: + c = self._conflict[1] + c.aborted_time += transaction_time + self._conflict = None + + def in_transaction(self): + return self._start is not None + + +class ConflictSummary(object): + def __init__(self, event, marker1, marker2): + self.event = event + self.marker1 = marker1 + self.marker2 = marker2 + self.aborted_time = 0.0 + self.paused_time = 0.0 + self.num_events = 0 + + def sortkey(self): + return self.aborted_time + self.paused_time + + +def percent(fraction, total): + r = '%.1f' % (fraction * 100.0 / total) + if len(r) > 3: + r = r.split('.')[0] + return r + '%' + +def dump(logentries): + total_time = logentries[-1].timestamp - logentries[0].timestamp + print 'Total real time: %.3fs' % (total_time,) + # + threads = {} + conflicts = {} + for entry in logentries: + if entry.event == STM_TRANSACTION_START: + t = threads.get(entry.threadnum) + if t is None: + t = threads[entry.threadnum] = ThreadState(entry.threadnum) + t.transaction_start(entry) + elif (entry.event == STM_TRANSACTION_COMMIT or + entry.event == STM_TRANSACTION_ABORT): + t = threads.get(entry.threadnum) + if t is not None and t.in_transaction(): + t.transaction_stop(entry) + elif entry.event in (STM_CONTENTION_WRITE_WRITE, + STM_CONTENTION_WRITE_READ, + STM_CONTENTION_INEVITABLE): + summary = (entry.event, entry.marker1, entry.marker2) + c = conflicts.get(summary) + if c is None: + c = conflicts[summary] = ConflictSummary(*summary) + c.num_events += 1 + t = threads.get(entry.threadnum) + if t is not None and t.in_transaction(): + t._conflict = ("local", c, entry) + elif entry.event == STM_ABORTING_OTHER_CONTENTION: + t = threads.get(entry.threadnum) + if t is not None and t._conflict and t._conflict[0] == "local": + _, c, entry = t._conflict + t._conflict = None + t2 = 
threads.get(entry.otherthreadnum) + if t2 is not None and t2.in_transaction(): + t2._conflict = ("remote", c, entry) + #elif entry.event == ...STM_WAIT... + # + total_cpu_time = sum([t.cpu_time for t in threads.values()]) + print 'Total CPU time in STM mode: %.3fs (%s)' % ( + total_cpu_time, percent(total_cpu_time, total_time)) + print + # + values = sorted(conflicts.values(), key=ConflictSummary.sortkey) + for c in values[-1:-15:-1]: + print '%.3fs lost in aborts, %.3fs paused (%s)' % ( + c.aborted_time, c.paused_time, event_name[c.event]) + print ' ', c.marker1 + if c.marker2: + print ' ', c.marker2 + print + + +def main(argv): + assert len(argv) == 1, "expected a filename argument" + dump(parse_log(argv[0])) + return 0 + +if __name__ == '__main__': + sys.exit(main(sys.argv[1:])) From noreply at buildbot.pypy.org Sun Oct 5 15:24:01 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 5 Oct 2014 15:24:01 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Print the source code line, if found Message-ID: <20141005132401.9883C1C0F1D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r73782:34ec42503109 Date: 2014-10-05 15:23 +0200 http://bitbucket.org/pypy/pypy/changeset/34ec42503109/ Log: Print the source code line, if found diff --git a/pypy/stm/print_stm_log.py b/pypy/stm/print_stm_log.py --- a/pypy/stm/print_stm_log.py +++ b/pypy/stm/print_stm_log.py @@ -1,6 +1,6 @@ #!/usr/bin/env python import sys -import struct +import struct, re, linecache # ____________________________________________________________ @@ -107,6 +107,17 @@ return self.aborted_time + self.paused_time +r_marker = re.compile(r'File "(.+)", line (\d+)') + +def print_marker(marker): + print ' ' + marker + match = r_marker.match(marker) + if match: + line = linecache.getline(match.group(1), int(match.group(2))) + line = line.strip() + if line: + print ' ' + line + def percent(fraction, total): r = '%.1f' % (fraction * 100.0 / total) if len(r) > 3: @@ -158,11 +169,11 @@ # values = sorted(conflicts.values(), key=ConflictSummary.sortkey) for c in values[-1:-15:-1]: - print '%.3fs lost in aborts, %.3fs paused (%s)' % ( - c.aborted_time, c.paused_time, event_name[c.event]) - print ' ', c.marker1 + print '%.3fs lost in aborts, %.3fs paused (%dx %s)' % ( + c.aborted_time, c.paused_time, c.num_events, event_name[c.event]) + print_marker(c.marker1) if c.marker2: - print ' ', c.marker2 + print_marker(c.marker2) print From noreply at buildbot.pypy.org Sun Oct 5 17:53:24 2014 From: noreply at buildbot.pypy.org (RonnyPfannschmidt) Date: Sun, 5 Oct 2014 17:53:24 +0200 (CEST) Subject: [pypy-commit] pypy refine-testrunner: Fix Tests Message-ID: <20141005155324.6A9291C023E@cobra.cs.uni-duesseldorf.de> Author: Ronny Pfannschmidt Branch: refine-testrunner Changeset: r73783:d37fb439e6b2 Date: 2014-10-05 17:49 +0200 http://bitbucket.org/pypy/pypy/changeset/d37fb439e6b2/ Log: Fix Tests diff --git a/testrunner/runner.py b/testrunner/runner.py --- a/testrunner/runner.py +++ b/testrunner/runner.py @@ -4,6 +4,7 @@ import Queue import py import util +import collections READ_MODE = 'rU' WRITE_MODE = 'wb' @@ -74,13 +75,20 @@ output = one_output.read(READ_MODE) else: output = "" + if logfname.check(file=1): logdata = logfname.read(READ_MODE) else: logdata = "" + + + failure, extralog = util.interpret_exitcode(exitcode, test, logdata) + junit = logfname + '.junit' + if junit.check(file=1): + pass if extralog: logdata += extralog @@ -103,7 +111,7 @@ N = run_param.parallel_runs if N > 1: - out.write("running %d parallel test 
workers\n" % N) + run_param.log("running %d parallel test workers", N) failure = False for testname in testdirs: @@ -241,7 +249,7 @@ else: out = open(opts.output, WRITE_MODE) - testdirs = [] + testdirs = deque() run_param = RunParamClass.from_options(opts, out) # the config files are python files whose run overrides the content @@ -251,7 +259,7 @@ for config_py_file in opts.config: config_py_file = renamed_configs.get(config_py_file, config_py_file) config_py_file = os.path.expanduser(config_py_file) - if py.path.local(config_py_file).check(file=1): + if os.path.isfile(config_py_file): run_param.log("using config %s", config_py_file) execfile(config_py_file, run_param.__dict__) else: diff --git a/testrunner/test/test_runner.py b/testrunner/test/test_runner.py --- a/testrunner/test/test_runner.py +++ b/testrunner/test/test_runner.py @@ -98,14 +98,14 @@ cls.real_invoke_in_thread = (runner.invoke_in_thread,) if not cls.with_thread: runner.invoke_in_thread = lambda func, args: func(*args) - + cls.udir = py.path.local.make_numbered_dir(prefix='usession-runner-', keep=3) cls.manydir = cls.udir.join('many').ensure(dir=1) cls.udir.join("conftest.py").write("pytest_plugins = 'resultlog'\n") - def fill_test_dir(test_dir, fromdir='normal'): + def fill_test_dir(test_dir, fromdir='normal'): for p in py.path.local(__file__).dirpath( 'examples', fromdir).listdir("*.py"): p.copy(test_dir.join('test_'+p.basename)) @@ -115,7 +115,7 @@ cls.one_test_dir = cls.manydir.join('one') fill_test_dir(test_normal_dir0) - + test_normal_dir1 = cls.manydir.join('two', 'test_normal1').ensure(dir=1) test_normal_dir2 = cls.manydir.join('two', 'pkg', @@ -140,8 +140,8 @@ run_param = runner.RunParam(self.one_test_dir,out) run_param.test_driver = test_driver - run_param.parallel_runs = 3 - + run_param.parallel_runs = 3 + res = runner.execute_tests(run_param, ['test_normal'], log) assert res @@ -153,7 +153,7 @@ log = log.getvalue() assert '\r\n' not in log - assert '\n' in log + assert '\n' in log log_lines = log.splitlines() assert ". 
test_normal/test_example.py::test_one" in log_lines @@ -178,7 +178,7 @@ run_param.test_driver = test_driver run_param.parallel_runs = 3 run_param.runfunc = run_param.dry_run - + res = runner.execute_tests(run_param, ['test_normal'], log) assert not res @@ -187,11 +187,11 @@ out_lines = out.getvalue().splitlines() - assert len(out_lines) == 5 + assert len(out_lines) == 6 - assert out_lines[2].startswith("++ starting") - assert out_lines[4].startswith("run [") - for line in out_lines[2:]: + assert out_lines[3].startswith("++ starting") + assert out_lines[5].startswith("run [") + for line in out_lines[3:]: assert "test_normal" in line def test_many_dirs(self): @@ -212,7 +212,7 @@ testdirs = [] run_param.collect_testdirs(testdirs) alltestdirs = testdirs[:] - + res = runner.execute_tests(run_param, testdirs, log) assert res @@ -282,7 +282,7 @@ real_collect_one_testdir(testdirs, reldir, tests) run_param.collect_one_testdir = witness_collect_one_testdir - + run_param.collect_testdirs(res) assert res == ['test_normal'] @@ -295,10 +295,10 @@ run_param.collect_one_testdir = real_collect_one_testdir res = [] run_param = runner.RunParam(self.two_test_dir, sys.stdout) - + run_param.collect_testdirs(res) - assert sorted(res) == ['pkg/test_normal2', 'test_normal1'] + assert sorted(res) == ['pkg/test_normal2', 'test_normal1'] class TestRunner(RunnerTests): From noreply at buildbot.pypy.org Sun Oct 5 17:53:30 2014 From: noreply at buildbot.pypy.org (RonnyPfannschmidt) Date: Sun, 5 Oct 2014 17:53:30 +0200 (CEST) Subject: [pypy-commit] pypy refine-testrunner: merge from default Message-ID: <20141005155330.968F61C023E@cobra.cs.uni-duesseldorf.de> Author: Ronny Pfannschmidt Branch: refine-testrunner Changeset: r73784:27f5610c0e09 Date: 2014-10-05 17:51 +0200 http://bitbucket.org/pypy/pypy/changeset/27f5610c0e09/ Log: merge from default diff too long, truncating to 2000 out of 11590 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -367,3 +367,43 @@ Detailed license information is contained in the NOTICE file in the directory. + +Licenses and Acknowledgements for Incorporated Software +======================================================= + +This section is an incomplete, but growing list of licenses and +acknowledgements for third-party software incorporated in the PyPy +distribution. + +License for 'Tcl/Tk' +-------------------- + +This copy of PyPy contains library code that may, when used, result in +the Tcl/Tk library to be loaded. PyPy also includes code that may be +regarded as being a copy of some parts of the Tcl/Tk header files. +You may see a copy of the License for Tcl/Tk in the file +`lib_pypy/_tkinter/license.terms` included here. + +License for 'bzip2' +------------------- + +This copy of PyPy may be linked (dynamically or statically) with the +bzip2 library. You may see a copy of the License for bzip2/libbzip2 at + + http://www.bzip.org/1.0.5/bzip2-manual-1.0.5.html + +License for 'openssl' +--------------------- + +This copy of PyPy may be linked (dynamically or statically) with the +openssl library. You may see a copy of the License for OpenSSL at + + https://www.openssl.org/source/license.html + +License for 'gdbm' +------------------ + +The gdbm module includes code from gdbm.h, which is distributed under +the terms of the GPL license version 2 or any later version. Thus the +gdbm module, provided in the file lib_pypy/gdbm.py, is redistributed +under the terms of the GPL license as well. 
diff --git a/_pytest/README-BEFORE-UPDATING b/_pytest/README-BEFORE-UPDATING new file mode 100644 --- /dev/null +++ b/_pytest/README-BEFORE-UPDATING @@ -0,0 +1,17 @@ +This is PyPy's code of the pytest lib. We don't expect to upgrade it +very often, but once we do: + + WARNING! + + WE HAVE MADE A FEW TWEAKS HERE! + +Please be sure that you don't just copy the newer version from +upstream without checking the few changes that we did. This +can be done like this: + + cd + hg log . -v | less + +then search for all " _pytest/" in that list to know which are the +relevant checkins. (Look for the checkins that only edit one +or two files in this directory.) diff --git a/_pytest/resultlog.py b/_pytest/resultlog.py --- a/_pytest/resultlog.py +++ b/_pytest/resultlog.py @@ -53,16 +53,24 @@ self.config = config self.logfile = logfile # preferably line buffered - def write_log_entry(self, testpath, lettercode, longrepr): - py.builtin.print_("%s %s" % (lettercode, testpath), file=self.logfile) + def write_log_entry(self, testpath, lettercode, longrepr, sections=None): + _safeprint("%s %s" % (lettercode, testpath), file=self.logfile) for line in longrepr.splitlines(): - py.builtin.print_(" %s" % line, file=self.logfile) + _safeprint(" %s" % line, file=self.logfile) + if sections is not None and ( + lettercode in ('E', 'F')): # to limit the size of logs + for title, content in sections: + _safeprint(" ---------- %s ----------" % (title,), + file=self.logfile) + for line in content.splitlines(): + _safeprint(" %s" % line, file=self.logfile) def log_outcome(self, report, lettercode, longrepr): testpath = getattr(report, 'nodeid', None) if testpath is None: testpath = report.fspath - self.write_log_entry(testpath, lettercode, longrepr) + self.write_log_entry(testpath, lettercode, longrepr, + getattr(report, 'sections', None)) def pytest_runtest_logreport(self, report): if report.when != "call" and report.passed: @@ -98,3 +106,8 @@ if path is None: path = "cwd:%s" % py.path.local() self.write_log_entry(path, '!', str(excrepr)) + +def _safeprint(s, file): + if isinstance(s, unicode): + s = s.encode('utf-8') + py.builtin.print_(s, file=file) diff --git a/lib-python/2.7/test/test_mmap.py b/lib-python/2.7/test/test_mmap.py --- a/lib-python/2.7/test/test_mmap.py +++ b/lib-python/2.7/test/test_mmap.py @@ -179,25 +179,27 @@ import sys f = open(TESTFN, "r+b") try: - m = mmap.mmap(f.fileno(), mapsize+1) - except ValueError: - # we do not expect a ValueError on Windows - # CAUTION: This also changes the size of the file on disk, and - # later tests assume that the length hasn't changed. We need to - # repair that. + try: + m = mmap.mmap(f.fileno(), mapsize+1) + except ValueError: + # we do not expect a ValueError on Windows + # CAUTION: This also changes the size of the file on disk, and + # later tests assume that the length hasn't changed. We need to + # repair that. 
+ if sys.platform.startswith('win'): + self.fail("Opening mmap with size+1 should work on Windows.") + else: + # we expect a ValueError on Unix, but not on Windows + if not sys.platform.startswith('win'): + self.fail("Opening mmap with size+1 should raise ValueError.") + m.close() + finally: + f.close() if sys.platform.startswith('win'): - self.fail("Opening mmap with size+1 should work on Windows.") - else: - # we expect a ValueError on Unix, but not on Windows - if not sys.platform.startswith('win'): - self.fail("Opening mmap with size+1 should raise ValueError.") - m.close() - f.close() - if sys.platform.startswith('win'): - # Repair damage from the resizing test. - f = open(TESTFN, 'r+b') - f.truncate(mapsize) - f.close() + # Repair damage from the resizing test. + f = open(TESTFN, 'r+b') + f.truncate(mapsize) + f.close() # Opening mmap with access=ACCESS_WRITE f = open(TESTFN, "r+b") diff --git a/lib-python/2.7/test/test_select.py b/lib-python/2.7/test/test_select.py --- a/lib-python/2.7/test/test_select.py +++ b/lib-python/2.7/test/test_select.py @@ -57,7 +57,17 @@ del a[-1] return sys.__stdout__.fileno() a[:] = [F()] * 10 - self.assertEqual(select.select([], a, []), ([], a[:5], [])) + result = select.select([], a, []) + # CPython: 'a' ends up with 5 items, because each fileno() + # removes an item and at the middle the iteration stops. + # PyPy: 'a' ends up empty, because the iteration is done on + # a copy of the original list: fileno() is called 10 times. + if test_support.check_impl_detail(cpython=True): + self.assertEqual(len(result[1]), 5) + self.assertEqual(len(a), 5) + if test_support.check_impl_detail(pypy=True): + self.assertEqual(len(result[1]), 10) + self.assertEqual(len(a), 0) def test_main(): test_support.run_unittest(SelectTestCase) diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py --- a/lib_pypy/_curses.py +++ b/lib_pypy/_curses.py @@ -286,6 +286,13 @@ lib = ffi.verify(""" +#ifdef __APPLE__ +/* the following define is necessary for OS X 10.6+; without it, the + Apple-supplied ncurses.h sets NCURSES_OPAQUE to 1, and then Python + can't get at the WINDOW flags field. */ +#define NCURSES_OPAQUE 0 +#endif + #include #include #include diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py --- a/lib_pypy/datetime.py +++ b/lib_pypy/datetime.py @@ -1242,7 +1242,7 @@ (other._hour, other._minute, other._second, other._microsecond)) if myoff is None or otoff is None: - raise TypeError("cannot compare naive and aware times") + raise TypeError("can't compare offset-naive and offset-aware times") myhhmm = self._hour * 60 + self._minute - myoff othhmm = other._hour * 60 + other._minute - otoff return _cmp((myhhmm, self._second, self._microsecond), @@ -1838,7 +1838,7 @@ other._hour, other._minute, other._second, other._microsecond)) if myoff is None or otoff is None: - raise TypeError("cannot compare naive and aware datetimes") + raise TypeError("can't compare offset-naive and offset-aware datetimes") # XXX What follows could be done more efficiently... 
diff = self - other # this will take offsets into account if diff.days < 0: @@ -1885,7 +1885,7 @@ if myoff == otoff: return base if myoff is None or otoff is None: - raise TypeError("cannot mix naive and timezone-aware time") + raise TypeError("can't subtract offset-naive and offset-aware datetimes") return base + timedelta(minutes = otoff-myoff) def __hash__(self): diff --git a/py/README-BEFORE-UPDATING b/py/README-BEFORE-UPDATING new file mode 100644 --- /dev/null +++ b/py/README-BEFORE-UPDATING @@ -0,0 +1,17 @@ +This is PyPy's code of the py lib. We don't expect to upgrade it +very often, but once we do: + + WARNING! + + WE HAVE MADE A FEW TWEAKS HERE! + +Please be sure that you don't just copy the newer version from +upstream without checking the few changes that we did. This +can be done like this: + + cd + hg log . -v | less + +then search for all " py/" in that list to know which are the +relevant checkins. (Look for the checkins that only edit one +or two files in this directory.) diff --git a/py/_path/local.py b/py/_path/local.py --- a/py/_path/local.py +++ b/py/_path/local.py @@ -750,7 +750,8 @@ mkdtemp = classmethod(mkdtemp) def make_numbered_dir(cls, prefix='session-', rootdir=None, keep=3, - lock_timeout = 172800): # two days + lock_timeout = 172800, # two days + min_timeout = 300): # five minutes """ return unique directory with a number greater than the current maximum one. The number is assumed to start directly after prefix. if keep is true directories with a number less than (maxnum-keep) @@ -818,6 +819,20 @@ for path in rootdir.listdir(): num = parse_num(path) if num is not None and num <= (maxnum - keep): + if min_timeout: + # NB: doing this is needed to prevent (or reduce + # a lot the chance of) the following situation: + # 'keep+1' processes call make_numbered_dir() at + # the same time, they create dirs, but then the + # last process notices the first dir doesn't have + # (yet) a .lock in it and kills it. + try: + t1 = path.lstat().mtime + t2 = lockfile.lstat().mtime + if abs(t2-t1) < min_timeout: + continue # skip directories too recent + except py.error.Error: + continue # failure to get a time, better skip lf = path.join('.lock') try: t1 = lf.lstat().mtime diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -65,9 +65,9 @@ # built documents. # # The short X.Y version. -version = '2.3' +version = '2.4' # The full version, including alpha/beta/rc tags. -release = '2.3.0' +release = '2.4.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -111,6 +111,10 @@ of your choice. Typical example: ``--opt=2`` gives a good (but of course slower) Python interpreter without the JIT. + Consider using PyPy instead of CPython in the above command line, + as it is much faster. (Note that ``rpython`` is a Python 2 program, + not Python 3; you need to run either PyPy 2 or CPython 2.) + .. 
_`optimization level`: config/opt.html If everything works correctly this will create an executable diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -38,14 +38,16 @@ no JIT: windows, linux, os/x sandbox: linux, os/x +* repackage and upload source tar.bz2 to bitbucket and to cobra, as some packagers + prefer a clearly labeled source package * write release announcement pypy/doc/release-x.y(.z).txt the release announcement should contain a direct link to the download page * update pypy.org (under extradoc/pypy.org), rebuild and commit * post announcement on morepypy.blogspot.com -* send announcements to pypy-dev, python-list, +* send announcements to twitter.com, pypy-dev, python-list, python-announce, python-dev ... * add a tag on the pypy/jitviewer repo that corresponds to pypy release * add a tag on the codespeed web site that corresponds to pypy release - +* revise versioning at https://readthedocs.org/projects/pypy diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. toctree:: + release-2.4.0.rst release-2.3.1.rst release-2.3.0.rst release-2.2.1.rst diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -40,7 +40,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 2.3.1`_: the latest official release +* `Release 2.4.0`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -110,7 +110,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 2.3.1`: http://pypy.org/download.html +.. _`Release 2.4.0`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html diff --git a/pypy/doc/release-2.4.0.rst b/pypy/doc/release-2.4.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.4.0.rst @@ -0,0 +1,122 @@ +================================================= +PyPy 2.4 - Snow White +================================================= + +We're pleased to announce PyPy 2.4, which contains significant performance +enhancements and bug fixes. + +You can download the PyPy 2.4.0 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project, and for those who donate to our three sub-projects. +We've shown quite a bit of progress, but we're slowly running out of funds. +Please consider donating more, or even better convince your employer to donate, +so we can finish those projects! We would like to also point out that in +September, `the Python Software Foundation`_ will `match funds`_ for +any donations up to $10k! The three sub-projects are: + +* `Py3k`_ (supporting Python 3.x): We have released a Python 3.2.5 compatible version + we call PyPy3 2.3.1, and are working toward a Python 3.3 compatible version + +* `STM`_ (software transactional memory): We have released a first working version, + and continue to try out new promising paths of achieving a fast multithreaded Python + +* `NumPy`_ which requires installation of our fork of upstream numpy, + available `on bitbucket`_ + +.. _`Py3k`: http://pypy.org/py3donate.html +.. _`STM`: http://pypy.org/tmdonate2.html +.. _`NumPy`: http://pypy.org/numpydonate.html +.. 
_`on bitbucket`: https://www.bitbucket.org/pypy/numpy +.. _`the Python Software Foundation`: https://www.python.org/psf/ +.. _`match funds`: http://morepypy.blogspot.com/2014/09/python-software-foundation-matching.html + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.4 and cpython 2.7.x`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports **x86** machines on most common operating systems +(Linux 32/64, Mac OS X 64, Windows, and OpenBSD), +as well as newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux. + +While we support 32 bit python on Windows, work on the native Windows 64 +bit python is still stalling, we would welcome a volunteer +to `handle that`_. + +.. _`pypy 2.4 and cpython 2.7.x`: http://speed.pypy.org +.. _`handle that`: http://doc.pypy.org/en/latest/windows.html#what-is-missing-for-a-full-64-bit-translation + +Highlights +========== + +Benchmarks improved after internal enhancements in string and +bytearray handling, and a major rewrite of the GIL handling. This means +that external calls are now a lot faster, especially the CFFI ones. It also +means better performance in a lot of corner cases with handling strings or +bytearrays. The main bugfix is handling of many socket objects in your +program which in the long run used to "leak" memory. + +PyPy now uses Python 2.7.8 standard library. + +We fixed a memory leak in IO in the sandbox_ code + +We welcomed more than 12 new contributors, and conducted two Google +Summer of Code projects, as well as other student projects not +directly related to Summer of Code. + + +Issues reported with our previous release were fixed after reports from users on +our new issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at +#pypy. Here is a summary of the user-facing changes; +for more information see `whats-new`_: + +* Reduced internal copying of bytearray operations + +* Tweak the internal structure of StringBuilder to speed up large string + handling, which becomes advantageous on large programs at the cost of slightly + slower small *benchmark* type programs. + +* Boost performance of thread-local variables in both unjitted and jitted code, + this mostly affects errno handling on linux, which makes external calls + faster. + +* Move to a mixed polling and mutex GIL model that make mutlithreaded jitted + code run *much* faster + +* Optimize errno handling in linux (x86 and x86-64 only) + +* Remove ctypes pythonapi and ctypes.PyDLL, which never worked on PyPy + +* Fix performance regression on ufunc(, ) in numpy + +* Classes in the ast module are now distinct from structures used by + the compiler, which simplifies and speeds up translation of our + source code to the PyPy binary interpreter + +* Upgrade stdlib from 2.7.5 to 2.7.8 + +* Win32 now links statically to zlib, expat, bzip, and openssl-1.0.1i. + No more missing DLLs + +* Many issues were resolved_ since the 2.3.1 release on June 8 + +.. _`whats-new`: http://doc.pypy.org/en/latest/whatsnew-2.4.0.html +.. _resolved: https://bitbucket.org/pypy/pypy/issues?status=resolved +.. _sandbox: http://doc.pypy.org/en/latest/sandbox.html + +We have further improvements on the way: rpython file handling, +numpy linalg compatibility, as well +as improved GC and many smaller improvements. + +Please try it out and let us know what you think. We especially welcome +success stories, we know you are using PyPy, please tell us about it! 
+ +Cheers + +The PyPy Team + diff --git a/pypy/doc/release-2.4.rst b/pypy/doc/release-2.4.rst deleted file mode 100644 --- a/pypy/doc/release-2.4.rst +++ /dev/null @@ -1,107 +0,0 @@ -================================================= -PyPy 2.4 - ???????? -================================================= - -We're pleased to announce PyPy 2.4, a significant milestone on it's own right -and the proud parent of our recent PyPy3 and STM releases. - -This release contains several improvements and bugfixes. - -You can download the PyPy 2.4 release here: - - http://pypy.org/download.html - -We would like to thank our donors for the continued support of the PyPy -project, and for those who donate to our three sub-projects. -We've shown quite a bit of progress -but we're slowly running out of funds. -Please consider donating more, or even better convince your employer to donate, -so we can finish those projects! The three sub-projects are: - -* `Py3k`_ (supporting Python 3.x): We have released a Python 3.2.5 compatable version - we call PyPy3 2.3.1, and are working toward a Python 3.3 compatable version - -* `STM`_ (software transactional memory): We have release a first working version, and -continue to try out new promising paths of acheiving a fast multithreaded python - -* `NumPy`_ which requires installation of our fork of upstream numpy, available `on bitbucket`_ - -.. _`Py3k`: http://pypy.org/py3donate.html -.. _`STM`: http://pypy.org/tmdonate2.html -.. _`NumPy`: http://pypy.org/numpydonate.html -.. _`on bitbucket`: https://www.bitbucket.org/pypy/numpy - -What is PyPy? -============= - -PyPy is a very compliant Python interpreter, almost a drop-in replacement for -CPython 2.7. It's fast (`pypy 2.3 and cpython 2.7.x`_ performance comparison; -note that cpython's speed has not changed since 2.7.2) -due to its integrated tracing JIT compiler. - -This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows, -and OpenBSD, -as well as newer ARM hardware (ARMv6 or ARMv7, with VFPv3) running Linux. - -While we support 32 bit python on Windows, work on the native Windows 64 -bit python is still stalling, we would welcome a volunteer -to `handle that`_. - -.. _`pypy 2.3 and cpython 2.7.x`: http://speed.pypy.org -.. _`handle that`: http://doc.pypy.org/en/latest/windows.html#what-is-missing-for-a-full-64-bit-translation - -Highlights -========== - -Benchmarks improved after internal improvements in string and bytearray handling, -and a major rewrite of the GIL handling. Many of these improvements are offshoots -of the STM work. - -We merged in Python's 2.7.8 stdlib in a record time of one week, proving the -maturity of our underlying RPython code base and PyPy interpreter. - -We welcomed more than 12 new contributors, and conducted two Google Summer of Code -projects XXX details? - -Issues reported with our previous release were fixed after reports from users on -our new issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at -#pypy. Here is a summary of the user-facing changes; -for more information see `whats-new`_: - -* Reduced internal copying of bytearray operations - -* Tweak the internal structure of StringBuilder to speed up large string -handling, which becomes advantageous on large programs at the cost of slightly -slower small *benchmark* type programs. 
- -* Boost performance of thread-local variables in both unjitted and jitted code - -* Move to a mixed polling and mutex GIL model that make mutli-threaded jitted - code run *much* faster - -* Optimize errno handling in linux - -* Remove ctypes pythonapi and ctypes.PyDLL, which never worked on PyPy - -* Fix performance regression on ufunc(, ) in numpy - -* Classes in the ast module are now distinct from structures used by the compiler, - which simplifies and speeds up translation of our source code to the PyPy binary - interpreter - -* Upgrade stdlib from 2.7.5 to 2.7.8 - -* - -* Many issues were resolved_ since the 2.3.1 release on June 8 - -.. _`whats-new`: http://doc.pypy.org/en/latest/whatsnew-2.3.1.html -.. _resolved: https://bitbucket.org/pypy/pypy/issues?status=resolved - -Please try it out and let us know what you think. We especially welcome -success stories, we know you are using PyPy, please tell us about it! - -Cheers - -The PyPy Team - diff --git a/pypy/doc/whatsnew-2.4.0.rst b/pypy/doc/whatsnew-2.4.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/whatsnew-2.4.0.rst @@ -0,0 +1,66 @@ +======================= +What's new in PyPy 2.4+ +======================= + +.. this is a revision shortly after release-2.3.x +.. startrev: ca9b7cf02cf4 + +.. branch: fix-bytearray-complexity +Bytearray operations no longer copy the bytearray unnecessarily + +Added support for ``__getitem__``, ``__setitem__``, ``__getslice__``, +``__setslice__``, and ``__len__`` to RPython + +.. branch: stringbuilder2-perf +Give the StringBuilder a more flexible internal structure, with a +chained list of strings instead of just one string. This make it +more efficient when building large strings, e.g. with cStringIO(). + +Also, use systematically jit.conditional_call() instead of regular +branches. This lets the JIT make more linear code, at the cost of +forcing a bit more data (to be passed as arguments to +conditional_calls). I would expect the net result to be a slight +slow-down on some simple benchmarks and a speed-up on bigger +programs. + +.. branch: ec-threadlocal +Change the executioncontext's lookup to be done by reading a thread- +local variable (which is implemented in C using '__thread' if +possible, and pthread_getspecific() otherwise). On Linux x86 and +x86-64, the JIT backend has a special optimization that lets it emit +directly a single MOV from a %gs- or %fs-based address. It seems +actually to give a good boost in performance. + +.. branch: fast-gil +A faster way to handle the GIL, particularly in JIT code. The GIL is +now a composite of two concepts: a global number (it's just set from +1 to 0 and back around CALL_RELEASE_GIL), and a real mutex. If there +are threads waiting to acquire the GIL, one of them is actively +checking the global number every 0.1 ms to 1 ms. Overall, JIT loops +full of external function calls now run a bit faster (if no thread was +started yet), or a *lot* faster (if threads were started already). + +.. branch: jit-get-errno +Optimize the errno handling in the JIT, notably around external +function calls. Linux-only. + +.. branch: disable_pythonapi +Remove non-functioning ctypes.pyhonapi and ctypes.PyDLL, document this +incompatibility with cpython. Recast sys.dllhandle to an int. + +.. branch: scalar-operations +Fix performance regression on ufunc(, ) in numpy. + +.. branch: pytest-25 +Update our copies of py.test and pylib to versions 2.5.2 and 1.4.20, +respectively. + +.. 
branch: split-ast-classes +Classes in the ast module are now distinct from structures used by the compiler. + +.. branch: stdlib-2.7.8 +Upgrades from 2.7.6 to 2.7.8 + +.. branch: cpybug-seq-radd-rmul +Fix issue #1861 - cpython compatability madness + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,62 +1,14 @@ + ======================= -What's new in PyPy 2.4+ +What's new in PyPy 2.5+ ======================= -.. this is a revision shortly after release-2.3.x -.. startrev: ca9b7cf02cf4 +.. this is a revision shortly after release-2.4.x +.. startrev: 7026746cbb1b -.. branch: fix-bytearray-complexity -Bytearray operations no longer copy the bytearray unnecessarily - -Added support for ``__getitem__``, ``__setitem__``, ``__getslice__``, -``__setslice__``, and ``__len__`` to RPython - -.. branch: stringbuilder2-perf -Give the StringBuilder a more flexible internal structure, with a -chained list of strings instead of just one string. This make it -more efficient when building large strings, e.g. with cStringIO(). - -Also, use systematically jit.conditional_call() instead of regular -branches. This lets the JIT make more linear code, at the cost of -forcing a bit more data (to be passed as arguments to -conditional_calls). I would expect the net result to be a slight -slow-down on some simple benchmarks and a speed-up on bigger -programs. - -.. branch: ec-threadlocal -Change the executioncontext's lookup to be done by reading a thread- -local variable (which is implemented in C using '__thread' if -possible, and pthread_getspecific() otherwise). On Linux x86 and -x86-64, the JIT backend has a special optimization that lets it emit -directly a single MOV from a %gs- or %fs-based address. It seems -actually to give a good boost in performance. - -.. branch: fast-gil -A faster way to handle the GIL, particularly in JIT code. The GIL is -now a composite of two concepts: a global number (it's just set from -1 to 0 and back around CALL_RELEASE_GIL), and a real mutex. If there -are threads waiting to acquire the GIL, one of them is actively -checking the global number every 0.1 ms to 1 ms. Overall, JIT loops -full of external function calls now run a bit faster (if no thread was -started yet), or a *lot* faster (if threads were started already). - -.. branch: jit-get-errno -Optimize the errno handling in the JIT, notably around external -function calls. Linux-only. - -.. branch: disable_pythonapi -Remove non-functioning ctypes.pyhonapi and ctypes.PyDLL, document this -incompatibility with cpython. Recast sys.dllhandle to an int. - -.. branch: scalar-operations -Fix performance regression on ufunc(, ) in numpy. - -.. branch: pytest-25 -Update our copies of py.test and pylib to versions 2.5.2 and 1.4.20, -respectively. - -.. branch: split-ast-classes -Classes in the ast module are now distinct from structures used by the compiler. - -.. branch: stdlib-2.7.8 -Upgrades from 2.7.6 to 2.7.8 +.. 
branch: win32-fixes5 +Fix c code generation for msvc so empty "{ }" are avoided in unions, +Avoid re-opening files created with NamedTemporaryFile, +Allocate by 4-byte chunks in rffi_platform, +Skip testing objdump if it does not exist, +and other small adjustments in own tests diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -85,10 +85,13 @@ Abridged method (for -Ojit builds using Visual Studio 2008) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Download the versions of all the external packages -from +Download the versions of all the external packages from +https://bitbucket.org/pypy/pypy/downloads/local_2.4.zip +(for 2.4 release and later) or https://bitbucket.org/pypy/pypy/downloads/local.zip -Then expand it into the base directory (base_dir) and modify your environment to reflect this:: +(for pre-2.4 versions) +Then expand it into the base directory (base_dir) and modify your environment +to reflect this:: set PATH=\bin;\tcltk\bin;%PATH% set INCLUDE=\include;\tcltk\include;%INCLUDE% diff --git a/pypy/interpreter/module.py b/pypy/interpreter/module.py --- a/pypy/interpreter/module.py +++ b/pypy/interpreter/module.py @@ -29,6 +29,17 @@ space.w_None) self.startup_called = False + def _cleanup_(self): + """Called by the annotator on prebuilt Module instances. + We don't have many such modules, but for the ones that + show up, remove their __file__ rather than translate it + statically inside the executable.""" + try: + space = self.space + space.delitem(self.w_dict, space.wrap('__file__')) + except OperationError: + pass + def install(self): """NOT_RPYTHON: installs this module into space.builtin_modules""" w_mod = self.space.wrap(self) diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -38,18 +38,15 @@ def cpython_code_signature(code): "([list-of-arg-names], vararg-name-or-None, kwarg-name-or-None)." argcount = code.co_argcount + varnames = code.co_varnames assert argcount >= 0 # annotator hint - argnames = list(code.co_varnames[:argcount]) + argnames = list(varnames[:argcount]) if code.co_flags & CO_VARARGS: - varargname = code.co_varnames[argcount] + varargname = varnames[argcount] argcount += 1 else: varargname = None - if code.co_flags & CO_VARKEYWORDS: - kwargname = code.co_varnames[argcount] - argcount += 1 - else: - kwargname = None + kwargname = varnames[argcount] if code.co_flags & CO_VARKEYWORDS else None return Signature(argnames, varargname, kwargname) diff --git a/pypy/interpreter/pyparser/parsestring.py b/pypy/interpreter/pyparser/parsestring.py --- a/pypy/interpreter/pyparser/parsestring.py +++ b/pypy/interpreter/pyparser/parsestring.py @@ -83,12 +83,6 @@ v = PyString_DecodeEscape(space, substr, 'strict', enc) return space.wrap(v) -def hexbyte(val): - result = "%x" % val - if len(result) == 1: - result = "0" + result - return result - def decode_unicode_utf8(space, s, ps, q): # ****The Python 2.7 version, producing UTF-32 escapes**** # String is utf8-encoded, but 'unicode_escape' expects @@ -108,15 +102,14 @@ # instead. 
lis.append("u005c") if ord(s[ps]) & 0x80: # XXX inefficient - w, ps = decode_utf8(space, s, ps, end, "utf-32-be") - rn = len(w) - assert rn % 4 == 0 - for i in range(0, rn, 4): - lis.append('\\U') - lis.append(hexbyte(ord(w[i]))) - lis.append(hexbyte(ord(w[i+1]))) - lis.append(hexbyte(ord(w[i+2]))) - lis.append(hexbyte(ord(w[i+3]))) + w, ps = decode_utf8(space, s, ps, end) + for c in w: + # The equivalent of %08x, which is not supported by RPython. + # 7 zeroes are enough for the unicode range, and the + # result still fits in 32-bit. + hexa = hex(ord(c) + 0x10000000) + lis.append('\\U0') + lis.append(hexa[3:]) # Skip 0x and the leading 1 else: lis.append(s[ps]) ps += 1 @@ -136,7 +129,7 @@ # note that the C code has a label here. # the logic is the same. if recode_encoding and ord(s[ps]) & 0x80: - w, ps = decode_utf8(space, s, ps, end, recode_encoding) + w, ps = decode_utf8_recode(space, s, ps, end, recode_encoding) # Append bytes to output buffer. builder.append(w) else: @@ -222,14 +215,18 @@ ch >= 'A' and ch <= 'F') -def decode_utf8(space, s, ps, end, encoding): +def decode_utf8(space, s, ps, end): assert ps >= 0 pt = ps # while (s < end && *s != '\\') s++; */ /* inefficient for u".." while ps < end and ord(s[ps]) & 0x80: ps += 1 - w_u = space.wrap(unicodehelper.decode_utf8(space, s[pt:ps])) - w_v = unicodehelper.encode(space, w_u, encoding) + u = unicodehelper.decode_utf8(space, s[pt:ps]) + return u, ps + +def decode_utf8_recode(space, s, ps, end, recode_encoding): + u, ps = decode_utf8(space, s, ps, end) + w_v = unicodehelper.encode(space, space.wrap(u), recode_encoding) v = space.str_w(w_v) return v, ps diff --git a/pypy/interpreter/pyparser/test/test_parsestring.py b/pypy/interpreter/pyparser/test/test_parsestring.py --- a/pypy/interpreter/pyparser/test/test_parsestring.py +++ b/pypy/interpreter/pyparser/test/test_parsestring.py @@ -73,11 +73,11 @@ def test_simple_enc_roundtrip(self): space = self.space - s = "'\x81'" + s = "'\x81\\t'" s = s.decode("koi8-u").encode("utf8") w_ret = parsestring.parsestr(self.space, 'koi8-u', s) ret = space.unwrap(w_ret) - assert ret == eval("# -*- coding: koi8-u -*-\n'\x81'") + assert ret == eval("# -*- coding: koi8-u -*-\n'\x81\\t'") def test_multiline_unicode_strings_with_backslash(self): space = self.space diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -945,7 +945,7 @@ prefix = udir.join('pathtest').ensure(dir=1) fake_exe = 'bin/pypy-c' if sys.platform == 'win32': - fake_exe += '.exe' + fake_exe = 'pypy-c.exe' fake_exe = prefix.join(fake_exe).ensure(file=1) expected_path = [str(prefix.join(subdir).ensure(dir=1)) for subdir in ('lib_pypy', @@ -985,6 +985,13 @@ assert sys.path == old_sys_path + [self.goal_dir] app_main.setup_bootstrap_path(self.fake_exe) + if not sys.platform == 'win32': + # an existing file is always 'executable' on windows + assert sys.executable == '' # not executable! 
+ assert sys.path == old_sys_path + [self.goal_dir] + + os.chmod(self.fake_exe, 0755) + app_main.setup_bootstrap_path(self.fake_exe) assert sys.executable == self.fake_exe assert self.goal_dir not in sys.path diff --git a/pypy/interpreter/test/test_module.py b/pypy/interpreter/test/test_module.py --- a/pypy/interpreter/test/test_module.py +++ b/pypy/interpreter/test/test_module.py @@ -1,4 +1,5 @@ - +import py +from pypy.interpreter.error import OperationError from pypy.interpreter.module import Module class TestModule: @@ -17,6 +18,18 @@ space.raises_w(space.w_AttributeError, space.delattr, w_m, w('x')) + def test___file__(self, space): + w = space.wrap + m = Module(space, space.wrap('m')) + py.test.raises(OperationError, space.getattr, w(m), w('__file__')) + m._cleanup_() + py.test.raises(OperationError, space.getattr, w(m), w('__file__')) + space.setattr(w(m), w('__file__'), w('m.py')) + space.getattr(w(m), w('__file__')) # does not raise + m._cleanup_() + py.test.raises(OperationError, space.getattr, w(m), w('__file__')) + + class AppTest_ModuleObject: def test_attr(self): m = __import__('__builtin__') diff --git a/pypy/interpreter/unicodehelper.py b/pypy/interpreter/unicodehelper.py --- a/pypy/interpreter/unicodehelper.py +++ b/pypy/interpreter/unicodehelper.py @@ -5,6 +5,7 @@ @specialize.memo() def decode_error_handler(space): + # Fast version of the "strict" errors handler. def raise_unicode_exception_decode(errors, encoding, msg, s, startingpos, endingpos): raise OperationError(space.w_UnicodeDecodeError, @@ -17,6 +18,7 @@ @specialize.memo() def encode_error_handler(space): + # Fast version of the "strict" errors handler. def raise_unicode_exception_encode(errors, encoding, msg, u, startingpos, endingpos): raise OperationError(space.w_UnicodeEncodeError, diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -8,6 +8,7 @@ from rpython.rlib.jit_libffi import (CIF_DESCRIPTION, CIF_DESCRIPTION_P, FFI_TYPE, FFI_TYPE_P, FFI_TYPE_PP, SIZE_OF_FFI_ARG) from rpython.rlib.objectmodel import we_are_translated, instantiate +from rpython.rlib.objectmodel import keepalive_until_here from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from pypy.interpreter.error import OperationError, oefmt @@ -160,6 +161,7 @@ raw_cdata = rffi.cast(rffi.CCHARPP, data)[0] lltype.free(raw_cdata, flavor='raw') lltype.free(buffer, flavor='raw') + keepalive_until_here(args_w) return w_res def get_mustfree_flag(data): diff --git a/pypy/module/_io/interp_stringio.py b/pypy/module/_io/interp_stringio.py --- a/pypy/module/_io/interp_stringio.py +++ b/pypy/module/_io/interp_stringio.py @@ -86,7 +86,7 @@ initval = space.unicode_w(w_initval) size = len(initval) self.resize_buffer(size) - self.buf = [c for c in initval] + self.buf = list(initval) pos = space.getindex_w(w_pos, space.w_TypeError) if pos < 0: raise OperationError(space.w_ValueError, diff --git a/pypy/module/_pypyjson/interp_encoder.py b/pypy/module/_pypyjson/interp_encoder.py --- a/pypy/module/_pypyjson/interp_encoder.py +++ b/pypy/module/_pypyjson/interp_encoder.py @@ -37,16 +37,14 @@ sb = StringBuilder(len(u)) sb.append_slice(s, 0, first) else: + # We used to check if 'u' contains only safe characters, and return + # 'w_string' directly. 
But this requires an extra pass over all + # characters, and the expected use case of this function, from + # json.encoder, will anyway re-encode a unicode result back to + # a string (with the ascii encoding). This requires two passes + # over the characters. So we may as well directly turn it into a + # string here --- only one pass. u = space.unicode_w(w_string) - for i in range(len(u)): - c = u[i] - if c >= u' ' and c <= u'~' and c != u'"' and c != u'\\': - pass - else: - break - else: - # the input is a unicode with only non-special ascii chars - return w_string sb = StringBuilder(len(u)) first = 0 diff --git a/pypy/module/_pypyjson/test/test__pypyjson.py b/pypy/module/_pypyjson/test/test__pypyjson.py --- a/pypy/module/_pypyjson/test/test__pypyjson.py +++ b/pypy/module/_pypyjson/test/test__pypyjson.py @@ -192,14 +192,14 @@ def test_raw_encode_basestring_ascii(self): import _pypyjson - def check(s, expected_type=str): + def check(s): s = _pypyjson.raw_encode_basestring_ascii(s) - assert type(s) is expected_type + assert type(s) is str return s assert check("") == "" - assert check(u"", expected_type=unicode) == u"" + assert check(u"") == "" assert check("abc ") == "abc " - assert check(u"abc ", expected_type=unicode) == u"abc " + assert check(u"abc ") == "abc " raises(UnicodeDecodeError, check, "\xc0") assert check("\xc2\x84") == "\\u0084" assert check("\xf0\x92\x8d\x85") == "\\ud808\\udf45" diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -759,17 +759,25 @@ # socket's timeout is in seconds, poll's timeout in ms timeout = int(sock_timeout * 1000 + 0.5) - ready = rpoll.poll(fddict, timeout) + try: + ready = rpoll.poll(fddict, timeout) + except rpoll.PollError, e: + message = e.get_msg() + raise ssl_error(space, message, e.errno) else: if MAX_FD_SIZE is not None and sock_fd >= MAX_FD_SIZE: return SOCKET_TOO_LARGE_FOR_SELECT - if writing: - r, w, e = rpoll.select([], [sock_fd], [], sock_timeout) - ready = w - else: - r, w, e = rpoll.select([sock_fd], [], [], sock_timeout) - ready = r + try: + if writing: + r, w, e = rpoll.select([], [sock_fd], [], sock_timeout) + ready = w + else: + r, w, e = rpoll.select([sock_fd], [], [], sock_timeout) + ready = r + except rpoll.SelectError as e: + message = e.get_msg() + raise ssl_error(space, message, e.errno) if ready: return SOCKET_OPERATION_OK else: diff --git a/pypy/module/_winreg/interp_winreg.py b/pypy/module/_winreg/interp_winreg.py --- a/pypy/module/_winreg/interp_winreg.py +++ b/pypy/module/_winreg/interp_winreg.py @@ -266,10 +266,16 @@ buf = None if typ == rwinreg.REG_DWORD: - if space.isinstance_w(w_value, space.w_int): + if space.is_none(w_value) or ( + space.isinstance_w(w_value, space.w_int) or + space.isinstance_w(w_value, space.w_long)): + if space.is_none(w_value): + value = r_uint(0) + else: + value = space.c_uint_w(w_value) buflen = rffi.sizeof(rwin32.DWORD) buf1 = lltype.malloc(rffi.CArray(rwin32.DWORD), 1, flavor='raw') - buf1[0] = space.uint_w(w_value) + buf1[0] = value buf = rffi.cast(rffi.CCHARP, buf1) elif typ == rwinreg.REG_SZ or typ == rwinreg.REG_EXPAND_SZ: diff --git a/pypy/module/_winreg/test/test_winreg.py b/pypy/module/_winreg/test/test_winreg.py --- a/pypy/module/_winreg/test/test_winreg.py +++ b/pypy/module/_winreg/test/test_winreg.py @@ -40,7 +40,7 @@ cls.w_tmpfilename = space.wrap(str(udir.join('winreg-temp'))) test_data = [ - ("Int Value", 45, _winreg.REG_DWORD), + ("Int Value", 0xFEDCBA98, _winreg.REG_DWORD), 
("Str Value", "A string Value", _winreg.REG_SZ), ("Unicode Value", u"A unicode Value", _winreg.REG_SZ), ("Str Expand", "The path is %path%", _winreg.REG_EXPAND_SZ), @@ -137,9 +137,11 @@ assert 0, "Did not raise" def test_SetValueEx(self): - from _winreg import CreateKey, SetValueEx, REG_BINARY + from _winreg import CreateKey, SetValueEx, REG_BINARY, REG_DWORD key = CreateKey(self.root_key, self.test_key_name) sub_key = CreateKey(key, "sub_key") + SetValueEx(sub_key, 'Int Value', 0, REG_DWORD, None) + SetValueEx(sub_key, 'Int Value', 0, REG_DWORD, 45) for name, value, type in self.test_data: SetValueEx(sub_key, name, 0, type, value) exc = raises(TypeError, SetValueEx, sub_key, 'test_name', None, diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,7 +29,7 @@ #define PY_VERSION "2.7.8" /* PyPy version as a string */ -#define PYPY_VERSION "2.4.0-alpha0" +#define PYPY_VERSION "2.5.0-alpha0" /* Subversion Revision number of this file (not of the repository). * Empty since Mercurial migration. */ diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -407,8 +407,19 @@ -------- numpy.swapaxes : equivalent function """ - if self.is_scalar(): + if axis1 == axis2: return self + n = len(self.get_shape()) + if n <= 1: + return self + if axis1 < 0: + axis1 += n + if axis2 < 0: + axis2 += n + if axis1 < 0 or axis1 >= n: + raise oefmt(space.w_ValueError, "bad axis1 argument to swapaxes") + if axis2 < 0 or axis2 >= n: + raise oefmt(space.w_ValueError, "bad axis2 argument to swapaxes") return self.implementation.swapaxes(space, self, axis1, axis2) def descr_nonzero(self, space): diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -2020,6 +2020,14 @@ def test_swapaxes(self): from numpypy import array + x = array([]) + assert x.swapaxes(0, 2) is x + x = array([[1, 2]]) + assert x.swapaxes(0, 0) is x + exc = raises(ValueError, x.swapaxes, -3, 0) + assert exc.value.message == "bad axis1 argument to swapaxes" + exc = raises(ValueError, x.swapaxes, 0, 3) + assert exc.value.message == "bad axis2 argument to swapaxes" # testcases from numpy docstring x = array([[1, 2, 3]]) assert (x.swapaxes(0, 1) == array([[1], [2], [3]])).all() diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -617,7 +617,7 @@ 'raw_store': 1, 'same_as': 2, 'setarrayitem_gc': 8, - 'setfield_gc': 21, + 'setfield_gc': 22, }) def define_argsort(): diff --git a/pypy/module/operator/__init__.py b/pypy/module/operator/__init__.py --- a/pypy/module/operator/__init__.py +++ b/pypy/module/operator/__init__.py @@ -39,7 +39,7 @@ 'irshift', 'isub', 'itruediv', 'ixor', '_length_hint'] interpleveldefs = { - '_compare_digest': 'interp_operator.compare_digest', + '_compare_digest': 'tscmp.compare_digest', } for name in interp_names: diff --git a/pypy/module/operator/app_operator.py b/pypy/module/operator/app_operator.py --- a/pypy/module/operator/app_operator.py +++ b/pypy/module/operator/app_operator.py @@ -4,7 +4,7 @@ This module exports a set of operators as functions. E.g. operator.add(x,y) is equivalent to x+y. 
''' -from __pypy__ import builtinify + import types @@ -27,7 +27,7 @@ 'getslice(a, b, c) -- Same as a[b:c].' if not isinstance(start, int) or not isinstance(end, int): raise TypeError("an integer is expected") - return a[start:end] + return a[start:end] __getslice__ = getslice def indexOf(a, b): @@ -37,7 +37,7 @@ if x == b: return index index += 1 - raise ValueError, 'sequence.index(x): x not in sequence' + raise ValueError('sequence.index(x): x not in sequence') def isMappingType(obj,): 'isMappingType(a) -- Return True if a has a mapping type, False otherwise.' @@ -58,9 +58,9 @@ def repeat(obj, num): 'repeat(a, b) -- Return a * b, where a is a sequence, and b is an integer.' if not isinstance(num, (int, long)): - raise TypeError, 'an integer is required' + raise TypeError('an integer is required') if not isSequenceType(obj): - raise TypeError, "non-sequence object can't be repeated" + raise TypeError("non-sequence object can't be repeated") return obj * num @@ -68,59 +68,85 @@ def setslice(a, b, c, d): 'setslice(a, b, c, d) -- Same as a[b:c] = d.' - a[b:c] = d + a[b:c] = d __setslice__ = setslice +def _resolve_attr_chain(chain, obj, idx=0): + obj = getattr(obj, chain[idx]) + if idx + 1 == len(chain): + return obj + else: + return _resolve_attr_chain(chain, obj, idx + 1) + + +class _simple_attrgetter(object): + def __init__(self, attr): + self._attr = attr + + def __call__(self, obj): + return getattr(obj, self._attr) + + +class _single_attrgetter(object): + def __init__(self, attrs): + self._attrs = attrs + + def __call__(self, obj): + return _resolve_attr_chain(self._attrs, obj) + + +class _multi_attrgetter(object): + def __init__(self, attrs): + self._attrs = attrs + + def __call__(self, obj): + return tuple([ + _resolve_attr_chain(attrs, obj) + for attrs in self._attrs + ]) + + def attrgetter(attr, *attrs): + if ( + not isinstance(attr, basestring) or + not all(isinstance(a, basestring) for a in attrs) + ): + def _raise_typeerror(obj): + raise TypeError( + "argument must be a string, not %r" % type(attr).__name__ + ) + return _raise_typeerror if attrs: - getters = [single_attr_getter(a) for a in (attr,) + attrs] - def getter(obj): - return tuple([getter(obj) for getter in getters]) + return _multi_attrgetter([ + a.split(".") for a in [attr] + list(attrs) + ]) + elif "." 
not in attr: + return _simple_attrgetter(attr) else: - getter = single_attr_getter(attr) - return builtinify(getter) + return _single_attrgetter(attr.split(".")) -def single_attr_getter(attr): - if not isinstance(attr, str): - if not isinstance(attr, unicode): - def _raise_typeerror(obj): - raise TypeError("argument must be a string, not %r" % - (type(attr).__name__,)) - return _raise_typeerror - attr = attr.encode('ascii') - # - def make_getter(name, prevfn=None): - if prevfn is None: - def getter(obj): - return getattr(obj, name) + +class itemgetter(object): + def __init__(self, item, *items): + self._single = not bool(items) + if self._single: + self._idx = item else: - def getter(obj): - return getattr(prevfn(obj), name) - return getter - # - last = 0 - getter = None - while True: - dot = attr.find(".", last) - if dot < 0: break - getter = make_getter(attr[last:dot], getter) - last = dot + 1 - return make_getter(attr[last:], getter) + self._idx = [item] + list(items) + def __call__(self, obj): + if self._single: + return obj[self._idx] + else: + return tuple([obj[i] for i in self._idx]) -def itemgetter(item, *items): - if items: - list_of_indices = [item] + list(items) - def getter(obj): - return tuple([obj[i] for i in list_of_indices]) - else: - def getter(obj): - return obj[item] - return builtinify(getter) +class methodcaller(object): + def __init__(self, method_name, *args, **kwargs): + self._method_name = method_name + self._args = args + self._kwargs = kwargs -def methodcaller(method_name, *args, **kwargs): - def call(obj): - return getattr(obj, method_name)(*args, **kwargs) - return builtinify(call) + def __call__(self, obj): + return getattr(obj, self._method_name)(*self._args, **self._kwargs) diff --git a/pypy/module/operator/interp_operator.py b/pypy/module/operator/interp_operator.py --- a/pypy/module/operator/interp_operator.py +++ b/pypy/module/operator/interp_operator.py @@ -1,6 +1,4 @@ -from rpython.rlib.objectmodel import specialize - -from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import unwrap_spec @@ -249,33 +247,3 @@ @unwrap_spec(default=int) def _length_hint(space, w_iterable, default): return space.wrap(space.length_hint(w_iterable, default)) - -def compare_digest(space, w_a, w_b): - if ( - space.isinstance_w(w_a, space.w_unicode) and - space.isinstance_w(w_b, space.w_unicode) - ): - return space.wrap(tscmp(space.unicode_w(w_a), space.unicode_w(w_b))) - if ( - space.isinstance_w(w_a, space.w_unicode) or - space.isinstance_w(w_b, space.w_unicode) - ): - raise oefmt( - space.w_TypeError, - "unsupported operand types(s) or combination of types: '%N' and '%N'", - w_a, - w_b, - ) - else: - return space.wrap(tscmp(space.bufferstr_w(w_a), space.bufferstr_w(w_b))) - - - at specialize.argtype(0, 1) -def tscmp(a, b): - len_a = len(a) - len_b = len(b) - length = min(len(a), len(b)) - res = len_a ^ len_b - for i in xrange(length): - res |= ord(a[i]) ^ ord(b[i]) - return res == 0 diff --git a/pypy/module/operator/test/test_operator.py b/pypy/module/operator/test/test_operator.py --- a/pypy/module/operator/test/test_operator.py +++ b/pypy/module/operator/test/test_operator.py @@ -334,3 +334,9 @@ assert operator._compare_digest(a, b) a, b = mybytes(b"foobar"), mybytes(b"foobaz") assert not operator._compare_digest(a, b) + + def test_compare_digest_unicode(self): + import operator + assert operator._compare_digest(u'asd', u'asd') + assert not operator._compare_digest(u'asd', u'qwe') + 
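# Usage sketch only (not from the diff; the sample class is invented):
# the class-based attrgetter above walks dotted names one attribute at a
# time, so attrgetter("a.b")(obj) acts like getattr(getattr(obj, "a"), "b"),
# and itemgetter/methodcaller are now plain callable objects in the same
# style instead of closures wrapped with builtinify.
class _Pair(object):
    def __init__(self, x, y):
        self.x, self.y = x, y

_nested = _Pair(_Pair(1, 2), 3)
# with the new implementations one would expect, e.g.:
#   operator.attrgetter('x.y')(_nested)      -> 2
#   operator.itemgetter(0, 2)('abc')         -> ('a', 'c')
#   operator.methodcaller('upper')('abc')    -> 'ABC'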
raises(TypeError, operator._compare_digest, u'asd', b'qwe') diff --git a/pypy/module/operator/test/test_tscmp.py b/pypy/module/operator/test/test_tscmp.py new file mode 100644 --- /dev/null +++ b/pypy/module/operator/test/test_tscmp.py @@ -0,0 +1,28 @@ +from pypy.module.operator.tscmp import pypy_tscmp, pypy_tscmp_wide + +class TestTimingSafeCompare: + tostr = str + tscmp = staticmethod(pypy_tscmp) + + def test_tscmp_neq(self): + assert not self.tscmp(self.tostr('asd'), self.tostr('qwe'), 3, 3) + + def test_tscmp_eq(self): + assert self.tscmp(self.tostr('asd'), self.tostr('asd'), 3, 3) + + def test_tscmp_len(self): + assert self.tscmp(self.tostr('asdp'), self.tostr('asdq'), 3, 3) + + def test_tscmp_nlen(self): + assert not self.tscmp(self.tostr('asd'), self.tostr('asd'), 2, 3) + + +class TestTimingSafeCompareWide(TestTimingSafeCompare): + tostr = unicode + tscmp = staticmethod(pypy_tscmp_wide) + + def test_tscmp_wide_nonascii(self): + a, b = u"\ud808\udf45", u"\ud808\udf45" + assert self.tscmp(a, b, len(a), len(b)) + a, b = u"\ud808\udf45", u"\ud808\udf45 " + assert not self.tscmp(a, b, len(a), len(b)) diff --git a/pypy/module/operator/tscmp.c b/pypy/module/operator/tscmp.c new file mode 100644 --- /dev/null +++ b/pypy/module/operator/tscmp.c @@ -0,0 +1,80 @@ +/* Derived from CPython 3.3.5's operator.c::_tscmp + */ + +#include +#include +#include "tscmp.h" + +int +pypy_tscmp(const char *a, const char *b, long len_a, long len_b) +{ + /* The volatile type declarations make sure that the compiler has no + * chance to optimize and fold the code in any way that may change + * the timing. + */ + volatile long length; + volatile const char *left; + volatile const char *right; + long i; + char result; + + /* loop count depends on length of b */ + length = len_b; + left = NULL; + right = b; + + /* don't use else here to keep the amount of CPU instructions constant, + * volatile forces re-evaluation + * */ + if (len_a == length) { + left = *((volatile const char**)&a); + result = 0; + } + if (len_a != length) { + left = b; + result = 1; + } + + for (i=0; i < length; i++) { + result |= *left++ ^ *right++; + } + + return (result == 0); +} + +int +pypy_tscmp_wide(const wchar_t *a, const wchar_t *b, long len_a, long len_b) +{ + /* The volatile type declarations make sure that the compiler has no + * chance to optimize and fold the code in any way that may change + * the timing. + */ + volatile long length; + volatile const wchar_t *left; + volatile const wchar_t *right; + long i; + wchar_t result; + + /* loop count depends on length of b */ + length = len_b; + left = NULL; + right = b; + + /* don't use else here to keep the amount of CPU instructions constant, + * volatile forces re-evaluation + * */ + if (len_a == length) { + left = *((volatile const wchar_t**)&a); + result = 0; + } + if (len_a != length) { + left = b; + result = 1; + } + + for (i=0; i < length; i++) { + result |= *left++ ^ *right++; + } + + return (result == 0); +} diff --git a/pypy/module/operator/tscmp.h b/pypy/module/operator/tscmp.h new file mode 100644 --- /dev/null +++ b/pypy/module/operator/tscmp.h @@ -0,0 +1,2 @@ +int pypy_tscmp(const char *, const char *, long, long); +int pypy_tscmp_wide(const wchar_t *, const wchar_t *, long, long); diff --git a/pypy/module/operator/tscmp.py b/pypy/module/operator/tscmp.py new file mode 100644 --- /dev/null +++ b/pypy/module/operator/tscmp.py @@ -0,0 +1,73 @@ +""" +Provides _compare_digest method, which is a safe comparing to prevent timing +attacks for the hmac module. 
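# Sketch only (plain Python, not the C helpers added above):
# pypy_tscmp / pypy_tscmp_wide follow the usual constant-time pattern --
# XOR every position and OR the results into an accumulator, so the
# running time depends only on the length of the second argument, never
# on where the first mismatch occurs.
def constant_time_equal(a, b):
    if len(a) != len(b):
        a, result = b, 1          # keep the loop length tied to len(b)
    else:
        result = 0
    for x, y in zip(a, b):
        result |= ord(x) ^ ord(y)
    return result == 0

assert constant_time_equal('asd', 'asd')
assert not constant_time_equal('asd', 'qwe')
assert not constant_time_equal('asd', 'asdp')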
+""" +import py + +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.translator.tool.cbuild import ExternalCompilationInfo + +from pypy.interpreter.error import oefmt + +cwd = py.path.local(__file__).dirpath() +eci = ExternalCompilationInfo( + includes=[cwd.join('tscmp.h')], + include_dirs=[str(cwd)], + separate_module_files=[cwd.join('tscmp.c')], + export_symbols=['pypy_tscmp', 'pypy_tscmp_wide']) + + +def llexternal(*args, **kwargs): + kwargs.setdefault('compilation_info', eci) + kwargs.setdefault('sandboxsafe', True) + return rffi.llexternal(*args, **kwargs) + + +pypy_tscmp = llexternal( + 'pypy_tscmp', + [rffi.CCHARP, rffi.CCHARP, rffi.LONG, rffi.LONG], + rffi.INT) +pypy_tscmp_wide = llexternal( + 'pypy_tscmp_wide', + [rffi.CWCHARP, rffi.CWCHARP, rffi.LONG, rffi.LONG], + rffi.INT) + + +def compare_digest(space, w_a, w_b): + """compare_digest(a, b) -> bool + + Return 'a == b'. This function uses an approach designed to prevent + timing analysis, making it appropriate for cryptography. a and b + must both be of the same type: either str (ASCII only), or any type + that supports the buffer protocol (e.g. bytes). + + Note: If a and b are of different lengths, or if an error occurs, a + timing attack could theoretically reveal information about the types + and lengths of a and b--but not their values. + """ + if (space.isinstance_w(w_a, space.w_unicode) and + space.isinstance_w(w_b, space.w_unicode)): + a = space.unicode_w(w_a) + b = space.unicode_w(w_b) + with rffi.scoped_nonmoving_unicodebuffer(a) as a_buf: + with rffi.scoped_nonmoving_unicodebuffer(b) as b_buf: + result = pypy_tscmp_wide(a_buf, b_buf, len(a), len(b)) + return space.wrap(rffi.cast(lltype.Bool, result)) + return compare_digest_buffer(space, w_a, w_b) + + +def compare_digest_buffer(space, w_a, w_b): + try: + a_buf = w_a.buffer_w(space, space.BUF_SIMPLE) + b_buf = w_b.buffer_w(space, space.BUF_SIMPLE) + except TypeError: + raise oefmt(space.w_TypeError, + "unsupported operand types(s) or combination of types: " + "'%T' and '%T'", w_a, w_b) + + a = a_buf.as_str() + b = b_buf.as_str() + with rffi.scoped_nonmovingbuffer(a) as a_buf: + with rffi.scoped_nonmovingbuffer(b) as b_buf: + result = pypy_tscmp(a_buf, b_buf, len(a), len(b)) + return space.wrap(rffi.cast(lltype.Bool, result)) diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -17,13 +17,18 @@ # now we can inline it as call assembler i = 0 j = 0 - while i < 20: + while i < 25: i += 1 j += rec(100) # ID: call_rec return j # - log = self.run(fn, [], threshold=18) - loop, = log.loops_by_filename(self.filepath) + # NB. the parameters below are a bit ad-hoc. After 16 iterations, + # the we trace from the "while" and reach a "trace too long". Then + # in the next execution, we trace the "rec" function from start; + # that's "functrace" below. Then after one or two extra iterations + # we try again from "while", and this time we succeed. + log = self.run(fn, [], threshold=20) + functrace, loop = log.loops_by_filename(self.filepath) assert loop.match_by_id('call_rec', """ ... p53 = call_assembler(..., descr=...) @@ -377,12 +382,16 @@ ... p20 = force_token() p22 = new_with_vtable(...) 
- p24 = new_array(1, descr=) + p24 = new_array_clear(1, descr=) p26 = new_with_vtable(ConstClass(W_ListObject)) {{{ setfield_gc(p0, p20, descr=) + setfield_gc(p22, ConstPtr(null), descr=) + setfield_gc(p22, ConstPtr(null), descr=) setfield_gc(p22, 1, descr=) + setfield_gc(p22, ConstPtr(null), descr=) setfield_gc(p26, ConstPtr(ptr22), descr=) + setfield_gc(p26, ConstPtr(null), descr=) setarrayitem_gc(p24, 0, p26, descr=) setfield_gc(p22, p24, descr=) }}} diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -68,10 +68,13 @@ guard_no_exception(descr=...) i12 = call(ConstClass(ll_strhash), p10, descr=) p13 = new(descr=...) - p15 = new_array(8, descr=) + p15 = new_array_clear(8, descr=) setfield_gc(p13, p15, descr=) i17 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) + {{{ setfield_gc(p13, 16, descr=) + setfield_gc(p13, 0, descr=) + }}} guard_no_exception(descr=...) p20 = new_with_vtable(ConstClass(W_IntObject)) call(ConstClass(_ll_dict_setitem_lookup_done_trampoline), p13, p10, p20, i12, i17, descr=) diff --git a/pypy/module/pypyjit/test_pypy_c/test_cprofile.py b/pypy/module/pypyjit/test_pypy_c/test_cprofile.py --- a/pypy/module/pypyjit/test_pypy_c/test_cprofile.py +++ b/pypy/module/pypyjit/test_pypy_c/test_cprofile.py @@ -1,4 +1,4 @@ -import py, sys +import py, sys, re from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC class TestCProfile(BaseTestPyPyC): @@ -26,10 +26,20 @@ for method in ['append', 'pop']: loop, = log.loops_by_id(method) print loop.ops_by_id(method) - # on 32-bit, there is f1=read_timestamp(); ...; - # f2=read_timestamp(); f3=call(llong_sub,f1,f2) - # which should turn into a single PADDQ/PSUBQ - if sys.maxint != 2147483647: - assert ' call(' not in repr(loop.ops_by_id(method)) + # on 32-bit, there is f1=call(read_timestamp); ...; + # f2=call(read_timestamp); f3=call(llong_sub,f1,f2) + # but all calls can be special-cased by the backend if + # supported. On 64-bit there is only the two calls to + # read_timestamp. + r = re.compile(r" call[(]ConstClass[(](.+?)[)]") + calls = r.findall(repr(loop.ops_by_id(method))) + if sys.maxint == 2147483647: + assert len(calls) == 6 + else: + assert len(calls) == 2 + for x in calls: + assert ('ll_read_timestamp' in x or 'llong_sub' in x + or 'llong_add' in x) + # assert ' call_may_force(' not in repr(loop.ops_by_id(method)) assert ' cond_call(' in repr(loop.ops_by_id(method)) diff --git a/pypy/module/pypyjit/test_pypy_c/test_ffi.py b/pypy/module/pypyjit/test_pypy_c/test_ffi.py --- a/pypy/module/pypyjit/test_pypy_c/test_ffi.py +++ b/pypy/module/pypyjit/test_pypy_c/test_ffi.py @@ -340,30 +340,19 @@ guard_value(p166, ConstPtr(ptr72), descr=...) p167 = call(ConstClass(_ll_0_alloc_with_del___), descr=) guard_no_exception(descr=...) - i168 = call(ConstClass(_ll_1_raw_malloc_varsize__Signed), 6, descr=) - i169 = int_add(i168, i97) - i170 = int_sub(i160, i106) - setfield_gc(p167, i168, descr=) + i112 = int_sub(i160, -32768) setfield_gc(p167, ConstPtr(null), descr=) - setfield_gc(p167, ConstPtr(ptr89), descr=) - i171 = uint_gt(i170, i108) - guard_false(i171, descr=...) - i172 = int_sub(i160, -32768) - i173 = int_and(i172, 65535) - i174 = int_add(i173, -32768) - setarrayitem_raw(i169, 0, i174, descr=) - i175 = int_add(i168, i121) - i176 = int_sub(i160, i130) - i177 = uint_gt(i176, i132) - guard_false(i177, descr=...) 
- setarrayitem_raw(i175, 0, i174, descr=) - i178 = int_add(i168, i140) - i179 = int_sub(i160, i149) - i180 = uint_gt(i179, i151) - guard_false(i180, descr=...) - setarrayitem_raw(i178, 0, i174, descr=) + setfield_gc(p167, ConstPtr(ptr85), descr=) + i114 = uint_gt(i112, 65535) + guard_false(i114, descr=...) + i115 = int_and(i112, 65535) + i116 = int_add(i115, -32768) --TICK-- - i183 = arraylen_gc(p67, descr=) - i184 = arraylen_gc(p92, descr=) + i119 = call(ConstClass(_ll_1_raw_malloc_varsize__Signed), 6, descr=) + raw_store(i119, 0, i116, descr=) + raw_store(i119, 2, i116, descr=) + raw_store(i119, 4, i116, descr=) + setfield_gc(p167, i119, descr=) + i123 = arraylen_gc(p67, descr=) jump(..., descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -1,4 +1,5 @@ from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC +from rpython.rlib.rawstorage import misaligned_is_fine class TestMicroNumPy(BaseTestPyPyC): @@ -15,6 +16,14 @@ log = self.run(main, []) assert log.result == 0 loop, = log.loops_by_filename(self.filepath) + if misaligned_is_fine: + alignment_check = "" + else: + alignment_check = """ + i93 = int_and(i79, 7) + i94 = int_is_zero(i93) + guard_true(i94, descr=...) + """ assert loop.match(""" i76 = int_lt(i71, 300) guard_true(i76, descr=...) @@ -22,6 +31,7 @@ guard_false(i77, descr=...) i78 = int_mul(i71, i61) i79 = int_add(i55, i78) + """ + alignment_check + """ f80 = raw_load(i67, i79, descr=) i81 = int_add(i71, 1) guard_not_invalidated(descr=...) @@ -44,6 +54,14 @@ log = self.run(main, []) assert log.result == 0 loop, = log.loops_by_filename(self.filepath) + if misaligned_is_fine: + alignment_check = "" + else: + alignment_check = """ + i97 = int_and(i84, 7) + i98 = int_is_zero(i97) + guard_true(i98, descr=...) + """ assert loop.match(""" i81 = int_lt(i76, 300) guard_true(i81, descr=...) @@ -51,6 +69,7 @@ guard_false(i82, descr=...) i83 = int_mul(i76, i64) i84 = int_add(i58, i83) + """ + alignment_check + """ f85 = raw_load(i70, i84, descr=) guard_not_invalidated(descr=...) f86 = float_add(f74, f85) diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -110,9 +110,12 @@ i85 = strlen(p80) p86 = new(descr=) p88 = newstr(23) - setfield_gc(..., descr=) - setfield_gc(..., descr=) - setfield_gc(..., descr=) + {{{ + setfield_gc(p86, 0, descr=) + setfield_gc(p86, p88, descr=) + setfield_gc(p86, 23, descr=) + setfield_gc(p86, 23, descr=) + }}} call(ConstClass(ll_append_res0__stringbuilderPtr_rpy_stringPtr), p86, p80, descr=) guard_no_exception(descr=...) i89 = getfield_gc(p86, descr=) diff --git a/pypy/module/select/interp_select.py b/pypy/module/select/interp_select.py --- a/pypy/module/select/interp_select.py +++ b/pypy/module/select/interp_select.py @@ -173,9 +173,9 @@ On Windows, only sockets are supported; on Unix, all file descriptors. 
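# Note, illustration only (not part of the diff): the switch from
# listview() to unpackiterable() just below means select() works on a
# snapshot of each argument, so a fileno() method that resizes the
# original list mid-call (as in test_resize_list_in_select further down)
# no longer changes which objects the call is iterating over.  Roughly:
def _snapshot(iterable):
    return [item for item in iterable]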
""" - iwtd_w = space.listview(w_iwtd) - owtd_w = space.listview(w_owtd) - ewtd_w = space.listview(w_ewtd) + iwtd_w = space.unpackiterable(w_iwtd) + owtd_w = space.unpackiterable(w_owtd) + ewtd_w = space.unpackiterable(w_ewtd) if space.is_w(w_timeout, space.w_None): timeout = -1.0 diff --git a/pypy/module/select/test/test_select.py b/pypy/module/select/test/test_select.py --- a/pypy/module/select/test/test_select.py +++ b/pypy/module/select/test/test_select.py @@ -85,17 +85,18 @@ assert owtd == [writeend] total_out += writeend.send(b'x' * 512) total_in = 0 - while True: - iwtd, owtd, ewtd = select.select([readend], [], [], 0) + while total_in < total_out: + iwtd, owtd, ewtd = select.select([readend], [], [], 5) assert owtd == ewtd == [] - if iwtd == []: - break - assert iwtd == [readend] + assert iwtd == [readend] # there is more expected data = readend.recv(4096) assert len(data) > 0 assert data == b'x' * len(data) total_in += len(data) assert total_in == total_out + iwtd, owtd, ewtd = select.select([readend], [], [], 0) + assert owtd == ewtd == [] + assert iwtd == [] # there is not more expected finally: writeend.close() readend.close() @@ -304,6 +305,20 @@ for fd in rfds: os.close(fd) + def test_resize_list_in_select(self): + import select + class Foo(object): + def fileno(self): + print len(l) + if len(l) < 100: + l.append(Foo()) + return 0 + l = [Foo()] + select.select(l, (), (), 0) + assert 1 <= len(l) <= 100 + # ^^^ CPython gives 100, PyPy gives 1. I think both are OK as + # long as there is no crash. + class AppTestSelectWithSockets(_AppTestSelect): """Same tests with connected sockets. diff --git a/pypy/module/sys/initpath.py b/pypy/module/sys/initpath.py --- a/pypy/module/sys/initpath.py +++ b/pypy/module/sys/initpath.py @@ -18,6 +18,13 @@ _WIN32 = sys.platform == 'win32' +def _exists_and_is_executable(fn): + # os.access checks using the user's real uid and gid. + # Since pypy should not be run setuid/setgid, this + # should be sufficient. + return os.path.isfile(fn) and os.access(fn, os.X_OK) + + def find_executable(executable): """ Return the absolute path of the executable, by looking into PATH and @@ -34,14 +41,14 @@ if path: for dir in path.split(os.pathsep): fn = os.path.join(dir, executable) - if os.path.isfile(fn): + if _exists_and_is_executable(fn): executable = fn break executable = rpath.rabspath(executable) # 'sys.executable' should not end up being an non-existing file; # just use '' in this case. 
(CPython issue #7774) - return executable if os.path.isfile(executable) else '' + return executable if _exists_and_is_executable(executable) else '' def _readlink_maybe(filename): diff --git a/pypy/module/sys/state.py b/pypy/module/sys/state.py --- a/pypy/module/sys/state.py +++ b/pypy/module/sys/state.py @@ -7,15 +7,15 @@ # ____________________________________________________________ # -class State: - def __init__(self, space): - self.space = space +class State: + def __init__(self, space): + self.space = space self.w_modules = space.newdict(module=True) - self.w_warnoptions = space.newlist([]) self.w_argv = space.newlist([]) - self.setinitialpath(space) + + self.setinitialpath(space) def setinitialpath(self, space): from pypy.module.sys.initpath import compute_stdlib_path @@ -25,10 +25,10 @@ path = compute_stdlib_path(self, srcdir) self.w_path = space.newlist([space.wrap(p) for p in path]) - def get(space): From noreply at buildbot.pypy.org Sun Oct 5 17:53:52 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 5 Oct 2014 17:53:52 +0200 (CEST) Subject: [pypy-commit] pypy default: Remove the extra "lock" argument to two functions from rlib.rpython. Message-ID: <20141005155352.AEAF61C023E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73785:a86675035c46 Date: 2014-10-05 17:53 +0200 http://bitbucket.org/pypy/pypy/changeset/a86675035c46/ Log: Remove the extra "lock" argument to two functions from rlib.rpython. Instead, use internal logic to create a lock if necessary. diff --git a/pypy/module/_socket/interp_func.py b/pypy/module/_socket/interp_func.py --- a/pypy/module/_socket/interp_func.py +++ b/pypy/module/_socket/interp_func.py @@ -46,9 +46,8 @@ Return the true host name, a list of aliases, and a list of IP addresses, for a host. The host argument is a string giving a host name or IP number. """ - lock = space.fromcache(State).netdb_lock try: - res = rsocket.gethostbyname_ex(host, lock) + res = rsocket.gethostbyname_ex(host) except SocketError, e: raise converted_error(space, e) return common_wrapgethost(space, res) @@ -60,9 +59,8 @@ Return the true host name, a list of aliases, and a list of IP addresses, for a host. The host argument is a string giving a host name or IP number. 
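# Simplified sketch (not the ExtRegistryEntry machinery shown below in
# rsocket.py; the names and the use of threading here are illustrative
# only): the point of this changeset is that callers stop passing a lock
# around -- rsocket itself uses a dummy lock for single-threaded builds
# and lazily creates and caches a real one when threads are enabled.
import threading

class _DummyNetdbLock(object):        # stand-in for rthread.dummy_lock
    def __enter__(self):
        return self
    def __exit__(self, *exc):
        return False

_netdb_lock_cache = [None]

def _get_netdb_lock(threads_enabled):
    if not threads_enabled:
        return _DummyNetdbLock()
    if _netdb_lock_cache[0] is None:
        _netdb_lock_cache[0] = threading.Lock()
    return _netdb_lock_cache[0]

# usage, mirroring gethostbyname_ex / gethostbyaddr:
#   with _get_netdb_lock(threads_enabled):
#       ... call the non-reentrant C-level gethostbyname() ...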
""" - lock = space.fromcache(State).netdb_lock try: - res = rsocket.gethostbyaddr(host, lock) + res = rsocket.gethostbyaddr(host) except SocketError, e: raise converted_error(space, e) return common_wrapgethost(space, res) @@ -319,10 +317,3 @@ raise OperationError(space.w_ValueError, space.wrap('Timeout value out of range')) rsocket.setdefaulttimeout(timeout) - -class State(object): - def __init__(self, space): - self.netdb_lock = None - - def startup(self, space): - self.netdb_lock = space.allocate_lock() diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py --- a/rpython/rlib/rsocket.py +++ b/rpython/rlib/rsocket.py @@ -18,9 +18,10 @@ from rpython.rlib import _rsocket_rffi as _c, jit, rgc from rpython.rlib.objectmodel import instantiate, keepalive_until_here from rpython.rlib.rarithmetic import intmask, r_uint -from rpython.rlib.rthread import dummy_lock +from rpython.rlib import rthread from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.lltypesystem.rffi import sizeof, offsetof +from rpython.rtyper.extregistry import ExtRegistryEntry # Usage of @jit.dont_look_inside in this file is possibly temporary @@ -1135,18 +1136,18 @@ paddr = h_addr_list[i] return (rffi.charp2str(hostent.c_h_name), aliases, address_list) -def gethostbyname_ex(name, lock=dummy_lock): +def gethostbyname_ex(name): # XXX use gethostbyname_r() if available instead of locks addr = gethostbyname(name) - with lock: + with _get_netdb_lock(): hostent = _c.gethostbyname(name) return gethost_common(name, hostent, addr) -def gethostbyaddr(ip, lock=dummy_lock): +def gethostbyaddr(ip): # XXX use gethostbyaddr_r() if available, instead of locks addr = makeipaddr(ip) assert isinstance(addr, IPAddress) - with lock: + with _get_netdb_lock(): p, size = addr.lock_in_addr() try: hostent = _c.gethostbyaddr(p, size, addr.family) @@ -1154,6 +1155,36 @@ addr.unlock() return gethost_common(ip, hostent, addr) +# RPython magic to make _netdb_lock turn either into a regular +# rthread.Lock or a rthread.DummyLock, depending on the config +def _get_netdb_lock(): + return rthread.dummy_lock + +class _Entry(ExtRegistryEntry): + _about_ = _get_netdb_lock + + def compute_annotation(self): + config = self.bookkeeper.annotator.translator.config + if config.translation.thread: + fn = _get_netdb_lock_thread + else: + fn = _get_netdb_lock_nothread + return self.bookkeeper.immutablevalue(fn) + +def _get_netdb_lock_nothread(): + return rthread.dummy_lock + +class _LockCache(object): + lock = None +_lock_cache = _LockCache() + + at jit.elidable +def _get_netdb_lock_thread(): + if _lock_cache.lock is None: + _lock_cache.lock = rthread.allocate_lock() + return _lock_cache.lock +# done RPython magic + def getaddrinfo(host, port_or_service, family=AF_UNSPEC, socktype=0, proto=0, flags=0, address_to_fill=None): diff --git a/rpython/rlib/test/test_rsocket.py b/rpython/rlib/test/test_rsocket.py --- a/rpython/rlib/test/test_rsocket.py +++ b/rpython/rlib/test/test_rsocket.py @@ -2,6 +2,7 @@ from rpython.rlib import rsocket from rpython.rlib.rsocket import * import socket as cpy_socket +from rpython.translator.c.test.test_genc import compile def setup_module(mod): @@ -570,4 +571,17 @@ for i in range(nthreads): threads[i].join() assert sum(result) == nthreads - + +def test_translate_netdb_lock(): + def f(): + gethostbyaddr("localhost") + return 0 + fc = compile(f, []) + assert fc() == 0 + +def test_translate_netdb_lock_thread(): + def f(): + gethostbyaddr("localhost") + return 0 + fc = compile(f, [], thread=True) + assert fc() == 0 From 
noreply at buildbot.pypy.org Sun Oct 5 17:56:53 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 5 Oct 2014 17:56:53 +0200 (CEST) Subject: [pypy-commit] pypy default: not needed any more Message-ID: <20141005155653.CE7F01C023E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73786:e1d48c6b8f2a Date: 2014-10-05 17:56 +0200 http://bitbucket.org/pypy/pypy/changeset/e1d48c6b8f2a/ Log: not needed any more diff --git a/pypy/module/_socket/__init__.py b/pypy/module/_socket/__init__.py --- a/pypy/module/_socket/__init__.py +++ b/pypy/module/_socket/__init__.py @@ -17,8 +17,6 @@ def startup(self, space): from rpython.rlib.rsocket import rsocket_startup rsocket_startup() - from pypy.module._socket.interp_func import State - space.fromcache(State).startup(space) def buildloaders(cls): from rpython.rlib import rsocket From noreply at buildbot.pypy.org Sun Oct 5 18:21:31 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 5 Oct 2014 18:21:31 +0200 (CEST) Subject: [pypy-commit] pypy var-in-Some: Don't put AnnotatedValues in ._is_type_of Message-ID: <20141005162131.53A0F1C023E@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: var-in-Some Changeset: r73787:0660898bd1b3 Date: 2014-10-04 23:32 +0100 http://bitbucket.org/pypy/pypy/changeset/0660898bd1b3/ Log: Don't put AnnotatedValues in ._is_type_of diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -243,9 +243,6 @@ def annvalue(self, arg): if isinstance(arg, Variable): - annvalue = arg.binding - if arg.binding is None: - arg.binding = AnnotatedValue(arg, None) return arg.binding else: return AnnotatedValue(arg, self.bookkeeper.immutablevalue(arg.value)) @@ -508,13 +505,13 @@ last_exception_object = annmodel.SomeType() if isinstance(last_exception_var, Constant): last_exception_object.const = last_exception_var.value + last_exception_object.is_type_of = [last_exc_value_var] - last_exception_object.is_type_of = [ - self.annvalue(last_exc_value_var)] if isinstance(last_exception_var, Variable): self.setbinding(last_exception_var, last_exception_object) if isinstance(last_exc_value_var, Variable): self.setbinding(last_exc_value_var, last_exc_value_object) + last_exception_object = annmodel.SomeType() if isinstance(last_exception_var, Constant): last_exception_object.const = last_exception_var.value @@ -536,7 +533,7 @@ elif a == last_exc_value_var: assert in_except_block cells.append(last_exc_value_object) - last_exc_value_vars.append(self.annvalue(v)) + last_exc_value_vars.append(v) else: cell = self.binding(a) if (link.exitcase, a) in knowntypedata: @@ -549,8 +546,8 @@ if hasattr(cell,'is_type_of'): renamed_is_type_of = [] for v in cell.is_type_of: - new_vs = renaming.get(v.value, []) - renamed_is_type_of += map(self.annvalue, new_vs) + new_vs = renaming.get(v,[]) + renamed_is_type_of += new_vs assert cell.knowntype is type newcell = annmodel.SomeType() if cell.is_constant(): diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -42,7 +42,7 @@ if hasattr(tgt_obj.ann, 'is_type_of') and src_obj.ann.is_constant(): add_knowntypedata( knowntypedata, True, - [inst.value for inst in tgt_obj.ann.is_type_of], + tgt_obj.ann.is_type_of, getbookkeeper().valueoftype(src_obj.ann.const)) add_knowntypedata(knowntypedata, True, [tgt_obj.value], src_obj.ann) s_nonnone = tgt_obj.ann diff --git a/rpython/annotator/test/test_annrpython.py 
b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -1402,7 +1402,7 @@ et, ev = fg.exceptblock.inputargs t = annmodel.SomeType() t.const = KeyError - t.is_type_of = [a.annvalue(ev)] + t.is_type_of = [ev] assert a.binding(et) == t assert isinstance(a.binding(ev), annmodel.SomeInstance) and a.binding(ev).classdef == a.bookkeeper.getuniqueclassdef(KeyError) @@ -1417,7 +1417,7 @@ fg = graphof(a, f) et, ev = fg.exceptblock.inputargs t = annmodel.SomeType() - t.is_type_of = [a.annvalue(ev)] + t.is_type_of = [ev] t.const = KeyError # IndexError ignored because 'dic' is a dict assert a.binding(et) == t assert isinstance(a.binding(ev), annmodel.SomeInstance) and a.binding(ev).classdef == a.bookkeeper.getuniqueclassdef(KeyError) @@ -1452,7 +1452,7 @@ fg = graphof(a, f) et, ev = fg.exceptblock.inputargs t = annmodel.SomeType() - t.is_type_of = [a.annvalue(ev)] + t.is_type_of = [ev] assert a.binding(et) == t assert isinstance(a.binding(ev), annmodel.SomeInstance) and a.binding(ev).classdef == a.bookkeeper.getuniqueclassdef(Exception) @@ -1474,7 +1474,7 @@ fg = graphof(a, f) et, ev = fg.exceptblock.inputargs t = annmodel.SomeType() - t.is_type_of = [a.annvalue(ev)] + t.is_type_of = [ev] assert a.binding(et) == t assert isinstance(a.binding(ev), annmodel.SomeInstance) and a.binding(ev).classdef == a.bookkeeper.getuniqueclassdef(Exception) diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -24,7 +24,7 @@ @op.type.register(SomeObject) def type_SomeObject(arg): r = SomeType() - r.is_type_of = [arg] + r.is_type_of = [arg.value] return r @op.bool.register(SomeObject) @@ -56,9 +56,10 @@ def issubtype(self, s_cls): if hasattr(self, 'is_type_of'): - instances = self.is_type_of - return builtin.builtin_isinstance(instances[0].ann, s_cls, - [x.value for x in instances]) + vars = self.is_type_of + annotator = getbookkeeper().annotator + return builtin.builtin_isinstance(annotator.binding(vars[0]), + s_cls, vars) if self.is_constant() and s_cls.is_constant(): return immutablevalue(issubclass(self.const, s_cls.const)) return s_Bool diff --git a/rpython/translator/goal/query.py b/rpython/translator/goal/query.py --- a/rpython/translator/goal/query.py +++ b/rpython/translator/goal/query.py @@ -48,12 +48,11 @@ s_ev = annotator.binding(ev, None) if s_et: if s_et.knowntype == type: - if isinstance(s_et, annmodel.SomeType): - if (hasattr(s_et, 'is_type_of') and - s_et.is_type_of == [annotator.annvalue(ev)]): + if s_et.__class__ == annmodel.SomeType: + if hasattr(s_et, 'is_type_of') and s_et.is_type_of == [ev]: continue else: - if isinstance(s_et, annmodel.SomePBC): + if s_et.__class__ == annmodel.SomePBC: continue yield "%s exceptblock is not completely sane" % graph.name diff --git a/rpython/translator/transform.py b/rpython/translator/transform.py --- a/rpython/translator/transform.py +++ b/rpython/translator/transform.py @@ -191,7 +191,7 @@ # fix the annotation of the exceptblock.inputargs etype, evalue = graph.exceptblock.inputargs s_type = annmodel.SomeType() - s_type.is_type_of = [self.annvalue(evalue)] + s_type.is_type_of = [evalue] s_value = annmodel.SomeInstance(self.bookkeeper.getuniqueclassdef(Exception)) self.setbinding(etype, s_type) self.setbinding(evalue, s_value) From noreply at buildbot.pypy.org Sun Oct 5 18:21:32 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 5 Oct 2014 18:21:32 +0200 (CEST) Subject: 
[pypy-commit] pypy var-in-Some: hg merge default Message-ID: <20141005162132.A88B71C023E@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: var-in-Some Changeset: r73788:d47b22ad6792 Date: 2014-10-05 04:26 +0100 http://bitbucket.org/pypy/pypy/changeset/d47b22ad6792/ Log: hg merge default diff --git a/rpython/tool/uid.py b/rpython/tool/uid.py --- a/rpython/tool/uid.py +++ b/rpython/tool/uid.py @@ -1,18 +1,6 @@ -import struct, sys +import struct -# This is temporary hack to run PyPy on PyPy -# until PyPy's struct module handle P format character. -try: - HUGEVAL_FMT = 'P' - HUGEVAL_BYTES = struct.calcsize('P') -except struct.error: - if sys.maxint <= 2147483647: - HUGEVAL_FMT = 'l' - HUGEVAL_BYTES = 4 - else: - HUGEVAL_FMT = 'q' - HUGEVAL_BYTES = 8 - +HUGEVAL_BYTES = struct.calcsize('P') HUGEVAL = 256 ** HUGEVAL_BYTES @@ -21,15 +9,7 @@ result += HUGEVAL return result -if sys.version_info < (2, 5): - def uid(obj): - """ - Return the id of an object as an unsigned number so that its hex - representation makes sense - """ - return fixid(id(obj)) -else: - uid = id # guaranteed to be positive from CPython 2.5 onwards +uid = id # guaranteed to be positive from CPython 2.5 onwards class Hashable(object): @@ -39,7 +19,7 @@ real hash/compare for immutable ones. """ __slots__ = ["key", "value"] - + def __init__(self, value): self.value = value # a concrete value # try to be smart about constant mutable or immutable values @@ -74,7 +54,7 @@ # try to limit the size of the repr to make it more readable r = repr(self.value) if (r.startswith('<') and r.endswith('>') and - hasattr(self.value, '__name__')): + hasattr(self.value, '__name__')): r = '%s %s' % (type(self.value).__name__, self.value.__name__) elif len(r) > 60 or (len(r) > 30 and type(self.value) is not str): r = r[:22] + '...' 
+ r[-7:] From noreply at buildbot.pypy.org Sun Oct 5 18:21:33 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 5 Oct 2014 18:21:33 +0200 (CEST) Subject: [pypy-commit] pypy var-in-Some: pass the annotator object down to the specialised operation analysers Message-ID: <20141005162133.D2B981C023E@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: var-in-Some Changeset: r73789:49ee4640237d Date: 2014-10-05 17:21 +0100 http://bitbucket.org/pypy/pypy/changeset/49ee4640237d/ Log: pass the annotator object down to the specialised operation analysers diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -24,7 +24,7 @@ @op.is_.register(SomeObject, SomeObject) -def is__default(obj1, obj2): +def is__default(annotator, obj1, obj2): r = SomeBool() s_obj1 = obj1.ann s_obj2 = obj2.ann @@ -59,7 +59,7 @@ def _make_cmp_annotator_default(cmp_op): @cmp_op.register(SomeObject, SomeObject) - def default_annotate(obj1, obj2): + def default_annotate(annotator, obj1, obj2): s_1, s_2 = obj1.ann, obj2.ann if s_1.is_immutable_constant() and s_2.is_immutable_constant(): return immutablevalue(cmp_op.pyfunc(s_1.const, s_2.const)) @@ -245,7 +245,7 @@ def _make_cmp_annotator_int(cmp_op): @cmp_op.register(SomeInteger, SomeInteger) - def _compare_helper(int1, int2): + def _compare_helper(annotator, int1, int2): r = SomeBool() s_int1, s_int2 = int1.ann, int2.ann if s_int1.is_immutable_constant() and s_int2.is_immutable_constant(): @@ -715,8 +715,8 @@ methodname=bltn1.methodname) @op.is_.register(SomePBC, SomePBC) -def is__PBC_PBC(pbc1, pbc2): - s = is__default(pbc1, pbc2) +def is__PBC_PBC(annotator, pbc1, pbc2): + s = is__default(annotator, pbc1, pbc2) if not s.is_constant(): if not pbc1.ann.can_be_None or not pbc2.ann.can_be_None: for desc in pbc1.ann.descriptions: diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -22,13 +22,13 @@ UNARY_OPERATIONS.remove('contains') @op.type.register(SomeObject) -def type_SomeObject(arg): +def type_SomeObject(annotator, arg): r = SomeType() r.is_type_of = [arg.value] return r @op.bool.register(SomeObject) -def bool_SomeObject(obj): +def bool_SomeObject(annotator, obj): r = SomeBool() obj.ann.bool_behavior(r) s_nonnone_obj = obj.ann @@ -40,16 +40,16 @@ return r @op.contains.register(SomeObject) -def contains_SomeObject(obj, element): +def contains_SomeObject(annotator, obj, element): return s_Bool contains_SomeObject.can_only_throw = [] @op.simple_call.register(SomeObject) -def simple_call_SomeObject(func, *args): +def simple_call_SomeObject(annotator, func, *args): return func.ann.call(simple_args([arg.ann for arg in args])) @op.call_args.register(SomeObject) -def call_args(func, *args): +def call_args(annotator, func, *args): return func.ann.call(complex_args([arg.ann for arg in args])) class __extend__(SomeObject): @@ -247,7 +247,7 @@ return SomeTuple(items) @op.contains.register(SomeList) -def contains_SomeList(obj, element): +def contains_SomeList(annotator, obj, element): obj.ann.listdef.generalize(element.ann) return s_Bool contains_SomeList.can_only_throw = [] @@ -344,7 +344,7 @@ return [] # else: no possible exception @op.contains.register(SomeDict) -def contains_SomeDict(dct, element): +def contains_SomeDict(annotator, dct, element): dct.ann.dictdef.generalize_key(element.ann) if dct.ann._is_empty(): s_bool = SomeBool() @@ -436,7 +436,7 @@ @op.contains.register(SomeString) 
@op.contains.register(SomeUnicodeString) -def contains_String(string, char): +def contains_String(annotator, string, char): if char.ann.is_constant() and char.ann.const == "\0": r = SomeBool() knowntypedata = {} @@ -445,7 +445,7 @@ r.set_knowntypedata(knowntypedata) return r else: - return contains_SomeObject(string, char) + return contains_SomeObject(annotator, string, char) contains_String.can_only_throw = [] diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -99,7 +99,7 @@ def consider(self, annotator, *args): args_s = [arg.ann for arg in args] spec = type(self).get_specialization(*args_s) - return spec(*args) + return spec(annotator, *args) def get_can_only_throw(self, annotator): return None @@ -175,7 +175,7 @@ try: impl = getattr(s_arg, cls.opname) - def specialized(arg, *other_args): + def specialized(annotator, arg, *other_args): return impl(*[x.ann for x in other_args]) try: specialized.can_only_throw = impl.can_only_throw @@ -201,7 +201,7 @@ try: impl = getattr(pair(s_arg1, s_arg2), cls.opname) - def specialized(arg1, arg2, *other_args): + def specialized(annotator, arg1, arg2, *other_args): return impl(*[x.ann for x in other_args]) try: specialized.can_only_throw = impl.can_only_throw From noreply at buildbot.pypy.org Sun Oct 5 18:36:10 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 5 Oct 2014 18:36:10 +0200 (CEST) Subject: [pypy-commit] pypy default: Pass more systematically around the ports as an RPython int. It now Message-ID: <20141005163610.1E2421C0F1D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73790:aa5d6556e1f1 Date: 2014-10-05 18:35 +0200 http://bitbucket.org/pypy/pypy/changeset/aa5d6556e1f1/ Log: Pass more systematically around the ports as an RPython int. It now gets casted to USHORT only when stored into the structure fields. diff --git a/pypy/module/_socket/interp_func.py b/pypy/module/_socket/interp_func.py --- a/pypy/module/_socket/interp_func.py +++ b/pypy/module/_socket/interp_func.py @@ -1,5 +1,6 @@ from rpython.rlib import rsocket from rpython.rlib.rsocket import SocketError, INVALID_SOCKET +from rpython.rlib.rarithmetic import intmask from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import unwrap_spec, WrappedDefault @@ -172,7 +173,7 @@ Convert a 16-bit integer from network to host byte order. """ - return space.wrap(rsocket.ntohs(x)) + return space.wrap(rsocket.ntohs(intmask(x))) @unwrap_spec(x="c_uint") def ntohl(space, x): @@ -188,7 +189,7 @@ Convert a 16-bit integer from host to network byte order. 
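# Illustration only (not part of the changeset; struct-based stand-ins):
# after this commit, port numbers are passed around as ordinary RPython
# ints and only narrowed to an unsigned 16-bit field when stored into
# the C socket structures.  What htons()/ntohs() themselves do can be
# reproduced with the struct module:
import struct

def htons_demo(port):
    assert 0 <= port <= 0xFFFF
    return struct.unpack('=H', struct.pack('!H', port))[0]

def ntohs_demo(value):
    assert 0 <= value <= 0xFFFF
    return struct.unpack('!H', struct.pack('=H', value))[0]

assert ntohs_demo(htons_demo(8080)) == 8080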
""" - return space.wrap(rsocket.htons(x)) + return space.wrap(rsocket.htons(intmask(x))) @unwrap_spec(x="c_uint") def htonl(space, x): diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -109,10 +109,11 @@ # XXX Hack to seperate rpython and pypy def make_ushort_port(space, port): + assert isinstance(port, int) if port < 0 or port > 0xffff: raise OperationError(space.w_OverflowError, space.wrap( "port must be 0-65535.")) - return rffi.cast(rffi.USHORT, port) + return port def make_unsigned_flowinfo(space, flowinfo): if flowinfo < 0 or flowinfo > 0xfffff: diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py --- a/rpython/rlib/rsocket.py +++ b/rpython/rlib/rsocket.py @@ -52,6 +52,7 @@ def ntohs(x): + assert isinstance(x, int) return rffi.cast(lltype.Signed, _c.ntohs(x)) def ntohl(x): @@ -59,6 +60,7 @@ return rffi.cast(lltype.Unsigned, _c.ntohl(x)) def htons(x): + assert isinstance(x, int) return rffi.cast(lltype.Signed, _c.htons(x)) def htonl(x): @@ -221,7 +223,6 @@ def get_protocol(self): a = self.lock(_c.sockaddr_ll) proto = rffi.getintfield(a, 'c_sll_protocol') - proto = rffi.cast(rffi.USHORT, proto) res = ntohs(proto) self.unlock() return res @@ -257,7 +258,6 @@ def __init__(self, host, port): makeipaddr(host, self) a = self.lock(_c.sockaddr_in) - port = rffi.cast(rffi.USHORT, port) rffi.setintfield(a, 'c_sin_port', htons(port)) self.unlock() @@ -269,7 +269,7 @@ def get_port(self): a = self.lock(_c.sockaddr_in) - port = ntohs(a.c_sin_port) + port = ntohs(rffi.getintfield(a, 'c_sin_port')) self.unlock() return port @@ -322,7 +322,7 @@ def get_port(self): a = self.lock(_c.sockaddr_in6) - port = ntohs(a.c_sin6_port) + port = ntohs(rffi.getintfield(a, 'c_sin6_port')) self.unlock() return port @@ -1232,13 +1232,13 @@ servent = _c.getservbyname(name, proto) if not servent: raise RSocketError("service/proto not found") - port = rffi.cast(rffi.UINT, servent.c_s_port) + port = rffi.getintfield(servent, 'c_s_port') return ntohs(port) def getservbyport(port, proto=None): # This function is only called from pypy/module/_socket and the range of # port is checked there - port = rffi.cast(rffi.USHORT, port) + assert isinstance(port, int) servent = _c.getservbyport(htons(port), proto) if not servent: raise RSocketError("port/proto not found") From noreply at buildbot.pypy.org Sun Oct 5 19:14:55 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 5 Oct 2014 19:14:55 +0200 (CEST) Subject: [pypy-commit] pypy var-in-Some: kill 3rd argument of RPythonAnnotator.binding() Message-ID: <20141005171455.388261C023E@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: var-in-Some Changeset: r73791:61604ad10046 Date: 2014-10-05 18:14 +0100 http://bitbucket.org/pypy/pypy/changeset/61604ad10046/ Log: kill 3rd argument of RPythonAnnotator.binding() Create RPythonAnnotator.annotation() diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -18,7 +18,6 @@ log = py.log.Producer("annrpython") py.log.setconsumer("annrpython", ansi_log) -FAIL = object() class RPythonAnnotator(object): """Block annotator for RPython. 
@@ -143,7 +142,7 @@ # recursively proceed until no more pending block is left if complete_now: self.complete() - return self.binding(flowgraph.getreturnvar(), None) + return self.annotation(flowgraph.getreturnvar()) def gettype(self, variable): """Return the known type of a control flow graph variable, @@ -226,21 +225,25 @@ # policy-dependent computation self.bookkeeper.compute_at_fixpoint() - def binding(self, arg, default=FAIL): + def annotation(self, arg): "Gives the SomeValue corresponding to the given Variable or Constant." if isinstance(arg, Variable): annvalue = arg.binding - if annvalue is not None: - return annvalue.ann - if default is not FAIL: - return default - else: - raise KeyError + if annvalue is None: + return None + return annvalue.ann elif isinstance(arg, Constant): return self.bookkeeper.immutablevalue(arg.value) else: raise TypeError('Variable or Constant expected, got %r' % (arg,)) + def binding(self, arg): + "Gives the SomeValue corresponding to the given Variable or Constant." + s_arg = self.annotation(arg) + if s_arg is None: + raise KeyError + return s_arg + def annvalue(self, arg): if isinstance(arg, Variable): return arg.binding diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -89,14 +89,14 @@ newblocks = self.annotator.added_blocks if newblocks is None: newblocks = self.annotator.annotated # all of them - binding = self.annotator.binding + annotation = self.annotator.annotation for block in newblocks: for op in block.operations: if op.opname in ('simple_call', 'call_args'): yield op # some blocks are partially annotated - if binding(op.result, None) is None: + if annotation(op.result) is None: break # ignore the unannotated part for call_op in call_sites(): @@ -144,15 +144,17 @@ def consider_call_site(self, call_op): from rpython.rtyper.llannotation import SomeLLADTMeth, lltype_to_annotation - binding = self.annotator.binding - s_callable = binding(call_op.args[0]) - args_s = [binding(arg) for arg in call_op.args[1:]] + annotation = self.annotator.annotation + s_callable = annotation(call_op.args[0]) + args_s = [annotation(arg) for arg in call_op.args[1:]] if isinstance(s_callable, SomeLLADTMeth): adtmeth = s_callable s_callable = self.immutablevalue(adtmeth.func) args_s = [lltype_to_annotation(adtmeth.ll_ptrtype)] + args_s if isinstance(s_callable, SomePBC): - s_result = binding(call_op.result, s_ImpossibleValue) + s_result = annotation(call_op.result) + if s_result is None: + s_result = s_ImpossibleValue args = call_op.build_args(args_s) self.consider_call_site_for_pbc(s_callable, args, s_result, call_op) @@ -500,8 +502,9 @@ # needed by some kinds of specialization. 
fn, block, i = self.position_key op = block.operations[i] - s_previous_result = self.annotator.binding(op.result, - s_ImpossibleValue) + s_previous_result = self.annotator.annotation(op.result) + if s_previous_result is None: + s_previous_result = s_ImpossibleValue else: if emulated is True: whence = None diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -17,7 +17,6 @@ from rpython.annotator import model as annmodel, unaryop, binaryop from rpython.rtyper.llannotation import SomePtr, lltype_to_annotation -from rpython.annotator.annrpython import FAIL from rpython.flowspace.model import Variable, Constant, SpaceOperation, c_last_exception from rpython.rtyper.annlowlevel import annotate_lowlevel_helper, LowLevelAnnotatorPolicy from rpython.rtyper.error import TyperError @@ -152,8 +151,12 @@ assert result is not None # recursive getrepr()! return result - def binding(self, var, default=FAIL): - s_obj = self.annotator.binding(var, default) + def annotation(self, var): + s_obj = self.annotator.annotation(var) + return s_obj + + def binding(self, var): + s_obj = self.annotator.binding(var) return s_obj def bindingrepr(self, var): @@ -635,7 +638,7 @@ ARG_GCSTRUCT = GCSTRUCT args_s = [SomePtr(Ptr(ARG_GCSTRUCT))] graph = self.annotate_helper(func, args_s) - s = self.annotator.binding(graph.getreturnvar()) + s = self.annotation(graph.getreturnvar()) if (not isinstance(s, SomePtr) or s.ll_ptrtype != Ptr(RuntimeTypeInfo)): raise TyperError("runtime type info function %r returns %r, " @@ -882,7 +885,9 @@ newargs_v = [] for v in args_v: if v.concretetype is Void: - s_value = rtyper.binding(v, default=annmodel.s_None) + s_value = rtyper.annotation(v) + if s_value is None: + s_value = annmodel.s_None if not s_value.is_constant(): raise TyperError("non-constant variable of type Void") if not isinstance(s_value, (annmodel.SomePBC, annmodel.SomeNone)): diff --git a/rpython/translator/goal/query.py b/rpython/translator/goal/query.py --- a/rpython/translator/goal/query.py +++ b/rpython/translator/goal/query.py @@ -33,7 +33,7 @@ try: for block in g.iterblocks(): for v in block.getvariables(): - s = annotator.binding(v, None) + s = annotator.annotation(v) if s and s.__class__ == annmodel.SomeObject and s.knowntype != type: raise Found except Found: @@ -44,8 +44,8 @@ annotator = translator.annotator for graph in translator.graphs: et, ev = graph.exceptblock.inputargs - s_et = annotator.binding(et, None) - s_ev = annotator.binding(ev, None) + s_et = annotator.annotation(et) + s_ev = annotator.annotation(ev) if s_et: if s_et.knowntype == type: if s_et.__class__ == annmodel.SomeType: diff --git a/rpython/translator/transform.py b/rpython/translator/transform.py --- a/rpython/translator/transform.py +++ b/rpython/translator/transform.py @@ -89,8 +89,8 @@ for i in range(len(block.operations)): op = block.operations[i] if op.opname == 'mul': - s0 = self.binding(op.args[0], None) - s1 = self.binding(op.args[1], None) + s0 = self.annotation(op.args[0]) + s1 = self.annotation(op.args[1]) if (isinstance(s0, annmodel.SomeChar) and isinstance(s1, annmodel.SomeInteger)): mul_sources[op.result] = op.args[0], op.args[1] From noreply at buildbot.pypy.org Sun Oct 5 20:22:54 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 5 Oct 2014 20:22:54 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Add "all_threads" parameter to faulthandler.enable(). 
Message-ID: <20141005182254.30AA71C023E@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r73792:827283240a44 Date: 2014-09-18 00:33 +0200 http://bitbucket.org/pypy/pypy/changeset/827283240a44/ Log: Add "all_threads" parameter to faulthandler.enable(). diff --git a/pypy/module/faulthandler/interp_faulthandler.py b/pypy/module/faulthandler/interp_faulthandler.py --- a/pypy/module/faulthandler/interp_faulthandler.py +++ b/pypy/module/faulthandler/interp_faulthandler.py @@ -6,6 +6,8 @@ from rpython.translator.tool.cbuild import ExternalCompilationInfo from pypy.interpreter.error import OperationError, oefmt +MAX_NTHREADS = 100 + cwd = py.path.local(__file__).dirpath() eci = ExternalCompilationInfo( includes=[cwd.join('faulthandler.h')], @@ -43,12 +45,18 @@ class FatalErrorState(object): def __init__(self, space): self.enabled = False + self.all_threads = True -def enable(space): - space.fromcache(FatalErrorState).enabled = True + at unwrap_spec(w_file=WrappedDefault(None), + w_all_threads=WrappedDefault(True)) +def enable(space, w_file, w_all_threads): + state = space.fromcache(FatalErrorState) + state.enabled = True + state.all_threads = bool(space.int_w(w_all_threads)) def disable(space): - space.fromcache(FatalErrorState).enabled = False + state = space.fromcache(FatalErrorState) + state.enabled = False def is_enabled(space): return space.wrap(space.fromcache(FatalErrorState).enabled) @@ -60,25 +68,40 @@ @unwrap_spec(w_file=WrappedDefault(None), w_all_threads=WrappedDefault(True)) def dump_traceback(space, w_file, w_all_threads): - ec = space.getexecutioncontext() - ecs = space.threadlocals.getallvalues() + current_ec = space.getexecutioncontext() + if space.int_w(w_all_threads): + ecs = space.threadlocals.getallvalues() + else: + ecs = {0: current_ec} if space.is_none(w_file): w_file = space.sys.get('stderr') fd = space.c_filedescriptor_w(w_file) - frame = ec.gettopframe() - while frame: - code = frame.pycode - lineno = frame.get_last_lineno() - if code: - os.write(fd, "File \"%s\", line %s in %s\n" % ( - code.co_filename, lineno, code.co_name)) + nthreads = 0 + for thread_ident, ec in ecs.items(): + if nthreads: + os.write(fd, "\n") + if nthreads >= MAX_NTHREADS: + os.write(fd, "...\n") + break + if ec is current_ec: + os.write(fd, "Current thread 0x%x:\n" % thread_ident) else: - os.write(fd, "File ???, line %s in ???\n" % ( - lineno,)) + os.write(fd, "Thread 0x%x:\n" % thread_ident) - frame = frame.f_backref() + frame = ec.gettopframe() + while frame: + code = frame.pycode + lineno = frame.get_last_lineno() + if code: + os.write(fd, "File \"%s\", line %s in %s\n" % ( + code.co_filename, lineno, code.co_name)) + else: + os.write(fd, "File ???, line %s in ???\n" % ( + lineno,)) + + frame = frame.f_backref() @unwrap_spec(w_release_gil=WrappedDefault(False)) diff --git a/pypy/module/faulthandler/test/test_faulthander.py b/pypy/module/faulthandler/test/test_faulthander.py --- a/pypy/module/faulthandler/test/test_faulthander.py +++ b/pypy/module/faulthandler/test/test_faulthander.py @@ -4,13 +4,14 @@ } def test_enable(self): - import faulthandler + import faulthandler, sys faulthandler.enable() assert faulthandler.is_enabled() is True + faulthandler.enable(file=sys.stderr, all_threads=True) faulthandler.disable() assert faulthandler.is_enabled() is False def test_dump_traceback(self): - import faulthandler + import faulthandler, sys faulthandler.dump_traceback() - + faulthandler.dump_traceback(file=sys.stderr, all_threads=True) diff --git 
a/pypy/module/faulthandler/test/test_ztranslation.py b/pypy/module/faulthandler/test/test_ztranslation.py new file mode 100644 --- /dev/null +++ b/pypy/module/faulthandler/test/test_ztranslation.py @@ -0,0 +1,4 @@ +from pypy.objspace.fake.checkmodule import checkmodule + +def test_faulthandler_translates(): + checkmodule('faulthandler') From noreply at buildbot.pypy.org Sun Oct 5 20:22:55 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 5 Oct 2014 20:22:55 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Ensure that _decimal exception types have a __module__, otherwise this confuses traceback.py. Message-ID: <20141005182255.8E9F21C023E@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r73793:bef69d4879cd Date: 2014-09-18 00:52 +0200 http://bitbucket.org/pypy/pypy/changeset/bef69d4879cd/ Log: Ensure that _decimal exception types have a __module__, otherwise this confuses traceback.py. diff --git a/pypy/module/_decimal/interp_signals.py b/pypy/module/_decimal/interp_signals.py --- a/pypy/module/_decimal/interp_signals.py +++ b/pypy/module/_decimal/interp_signals.py @@ -77,51 +77,55 @@ class SignalState: def __init__(self, space): + w_typedict = space.newdict() + space.setitem(w_typedict, + space.wrap("__module__"), space.wrap("decimal")) + self.w_DecimalException = space.call_function( space.w_type, space.wrap("DecimalException"), space.newtuple([space.w_ArithmeticError]), - space.newdict()) + w_typedict) self.w_Clamped = space.call_function( space.w_type, space.wrap("Clamped"), space.newtuple([self.w_DecimalException]), - space.newdict()) + w_typedict) self.w_Rounded = space.call_function( space.w_type, space.wrap("Rounded"), space.newtuple([self.w_DecimalException]), - space.newdict()) + w_typedict) self.w_Inexact = space.call_function( space.w_type, space.wrap("Inexact"), space.newtuple([self.w_DecimalException]), - space.newdict()) + w_typedict) self.w_Subnormal = space.call_function( space.w_type, space.wrap("Subnormal"), space.newtuple([self.w_DecimalException]), - space.newdict()) + w_typedict) self.w_Underflow = space.call_function( space.w_type, space.wrap("Underflow"), space.newtuple([self.w_Inexact, self.w_Rounded, self.w_Subnormal]), - space.newdict()) + w_typedict) self.w_Overflow = space.call_function( space.w_type, space.wrap("Overflow"), space.newtuple([self.w_Inexact, self.w_Rounded]), - space.newdict()) + w_typedict) self.w_DivisionByZero = space.call_function( space.w_type, space.wrap("DivisionByZero"), space.newtuple([self.w_DecimalException, space.w_ZeroDivisionError]), - space.newdict()) + w_typedict) self.w_InvalidOperation = space.call_function( space.w_type, space.wrap("InvalidOperation"), space.newtuple([self.w_DecimalException]), - space.newdict()) + w_typedict) self.w_FloatOperation = space.call_function( space.w_type, space.wrap("FloatOperation"), space.newtuple([self.w_DecimalException, space.w_TypeError]), - space.newdict()) + w_typedict) self.w_SignalTuple = space.newtuple([ getattr(self, 'w_' + name) @@ -142,7 +146,7 @@ w_bases = space.newtuple([self.w_InvalidOperation]) setattr(self, 'w_' + name, space.call_function( - space.w_type, space.wrap(name), w_bases, space.newdict())) + space.w_type, space.wrap(name), w_bases, w_typedict)) def get(space): return space.fromcache(SignalState) diff --git a/pypy/module/_decimal/test/test_module.py b/pypy/module/_decimal/test/test_module.py --- a/pypy/module/_decimal/test/test_module.py +++ b/pypy/module/_decimal/test/test_module.py @@ -50,6 +50,7 @@ ex = 
getattr(_decimal, name) assert issubclass(ex, _decimal.DecimalException) assert issubclass(ex, ArithmeticError) + assert ex.__module__ == 'decimal' def test_exception_hierarchy(self): import _decimal as decimal From noreply at buildbot.pypy.org Sun Oct 5 20:22:56 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 5 Oct 2014 20:22:56 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Refresh libmpdec with the version shipped with cpython-3.3.5 Message-ID: <20141005182256.E12EB1C023E@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r73794:a9f2aa1662c0 Date: 2014-09-19 00:00 +0200 http://bitbucket.org/pypy/pypy/changeset/a9f2aa1662c0/ Log: Refresh libmpdec with the version shipped with cpython-3.3.5 diff --git a/rpython/translator/c/src/libmpdec/basearith.c b/rpython/translator/c/src/libmpdec/basearith.c --- a/rpython/translator/c/src/libmpdec/basearith.c +++ b/rpython/translator/c/src/libmpdec/basearith.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * Copyright (c) 2008-2016 Stefan Krah. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions diff --git a/rpython/translator/c/src/libmpdec/basearith.h b/rpython/translator/c/src/libmpdec/basearith.h --- a/rpython/translator/c/src/libmpdec/basearith.h +++ b/rpython/translator/c/src/libmpdec/basearith.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * Copyright (c) 2008-2016 Stefan Krah. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions diff --git a/rpython/translator/c/src/libmpdec/bits.h b/rpython/translator/c/src/libmpdec/bits.h --- a/rpython/translator/c/src/libmpdec/bits.h +++ b/rpython/translator/c/src/libmpdec/bits.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * Copyright (c) 2008-2016 Stefan Krah. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions diff --git a/rpython/translator/c/src/libmpdec/constants.c b/rpython/translator/c/src/libmpdec/constants.c --- a/rpython/translator/c/src/libmpdec/constants.c +++ b/rpython/translator/c/src/libmpdec/constants.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * Copyright (c) 2008-2016 Stefan Krah. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions diff --git a/rpython/translator/c/src/libmpdec/constants.h b/rpython/translator/c/src/libmpdec/constants.h --- a/rpython/translator/c/src/libmpdec/constants.h +++ b/rpython/translator/c/src/libmpdec/constants.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * Copyright (c) 2008-2016 Stefan Krah. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions diff --git a/rpython/translator/c/src/libmpdec/context.c b/rpython/translator/c/src/libmpdec/context.c --- a/rpython/translator/c/src/libmpdec/context.c +++ b/rpython/translator/c/src/libmpdec/context.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * Copyright (c) 2008-2016 Stefan Krah. 
All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions diff --git a/rpython/translator/c/src/libmpdec/convolute.c b/rpython/translator/c/src/libmpdec/convolute.c --- a/rpython/translator/c/src/libmpdec/convolute.c +++ b/rpython/translator/c/src/libmpdec/convolute.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * Copyright (c) 2008-2016 Stefan Krah. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions diff --git a/rpython/translator/c/src/libmpdec/convolute.h b/rpython/translator/c/src/libmpdec/convolute.h --- a/rpython/translator/c/src/libmpdec/convolute.h +++ b/rpython/translator/c/src/libmpdec/convolute.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * Copyright (c) 2008-2016 Stefan Krah. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions diff --git a/rpython/translator/c/src/libmpdec/crt.c b/rpython/translator/c/src/libmpdec/crt.c --- a/rpython/translator/c/src/libmpdec/crt.c +++ b/rpython/translator/c/src/libmpdec/crt.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * Copyright (c) 2008-2016 Stefan Krah. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions diff --git a/rpython/translator/c/src/libmpdec/crt.h b/rpython/translator/c/src/libmpdec/crt.h --- a/rpython/translator/c/src/libmpdec/crt.h +++ b/rpython/translator/c/src/libmpdec/crt.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * Copyright (c) 2008-2016 Stefan Krah. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions diff --git a/rpython/translator/c/src/libmpdec/difradix2.c b/rpython/translator/c/src/libmpdec/difradix2.c --- a/rpython/translator/c/src/libmpdec/difradix2.c +++ b/rpython/translator/c/src/libmpdec/difradix2.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * Copyright (c) 2008-2016 Stefan Krah. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions diff --git a/rpython/translator/c/src/libmpdec/difradix2.h b/rpython/translator/c/src/libmpdec/difradix2.h --- a/rpython/translator/c/src/libmpdec/difradix2.h +++ b/rpython/translator/c/src/libmpdec/difradix2.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * Copyright (c) 2008-2016 Stefan Krah. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions diff --git a/rpython/translator/c/src/libmpdec/fnt.c b/rpython/translator/c/src/libmpdec/fnt.c --- a/rpython/translator/c/src/libmpdec/fnt.c +++ b/rpython/translator/c/src/libmpdec/fnt.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * Copyright (c) 2008-2016 Stefan Krah. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions diff --git a/rpython/translator/c/src/libmpdec/fnt.h b/rpython/translator/c/src/libmpdec/fnt.h --- a/rpython/translator/c/src/libmpdec/fnt.h +++ b/rpython/translator/c/src/libmpdec/fnt.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * Copyright (c) 2008-2016 Stefan Krah. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions diff --git a/rpython/translator/c/src/libmpdec/fourstep.c b/rpython/translator/c/src/libmpdec/fourstep.c --- a/rpython/translator/c/src/libmpdec/fourstep.c +++ b/rpython/translator/c/src/libmpdec/fourstep.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * Copyright (c) 2008-2016 Stefan Krah. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions diff --git a/rpython/translator/c/src/libmpdec/fourstep.h b/rpython/translator/c/src/libmpdec/fourstep.h --- a/rpython/translator/c/src/libmpdec/fourstep.h +++ b/rpython/translator/c/src/libmpdec/fourstep.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * Copyright (c) 2008-2016 Stefan Krah. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions diff --git a/rpython/translator/c/src/libmpdec/io.c b/rpython/translator/c/src/libmpdec/io.c --- a/rpython/translator/c/src/libmpdec/io.c +++ b/rpython/translator/c/src/libmpdec/io.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * Copyright (c) 2008-2016 Stefan Krah. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions diff --git a/rpython/translator/c/src/libmpdec/io.h b/rpython/translator/c/src/libmpdec/io.h --- a/rpython/translator/c/src/libmpdec/io.h +++ b/rpython/translator/c/src/libmpdec/io.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * Copyright (c) 2008-2016 Stefan Krah. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions diff --git a/rpython/translator/c/src/libmpdec/memory.c b/rpython/translator/c/src/libmpdec/memory.c --- a/rpython/translator/c/src/libmpdec/memory.c +++ b/rpython/translator/c/src/libmpdec/memory.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * Copyright (c) 2008-2016 Stefan Krah. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions diff --git a/rpython/translator/c/src/libmpdec/memory.h b/rpython/translator/c/src/libmpdec/memory.h --- a/rpython/translator/c/src/libmpdec/memory.h +++ b/rpython/translator/c/src/libmpdec/memory.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * Copyright (c) 2008-2016 Stefan Krah. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions diff --git a/rpython/translator/c/src/libmpdec/mpdecimal.c b/rpython/translator/c/src/libmpdec/mpdecimal.c --- a/rpython/translator/c/src/libmpdec/mpdecimal.c +++ b/rpython/translator/c/src/libmpdec/mpdecimal.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * Copyright (c) 2008-2016 Stefan Krah. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -97,6 +97,8 @@ mpd_ssize_t exp); static inline mpd_ssize_t _mpd_real_size(mpd_uint_t *data, mpd_ssize_t size); +static int _mpd_cmp_abs(const mpd_t *a, const mpd_t *b); + static void _mpd_qadd(mpd_t *result, const mpd_t *a, const mpd_t *b, const mpd_context_t *ctx, uint32_t *status); static inline void _mpd_qmul(mpd_t *result, const mpd_t *a, const mpd_t *b, @@ -111,6 +113,17 @@ /******************************************************************************/ +/* Version */ +/******************************************************************************/ + +const char * +mpd_version(void) +{ + return MPD_VERSION; +} + + +/******************************************************************************/ /* Performance critical inline functions */ /******************************************************************************/ @@ -379,42 +392,42 @@ /* Dynamic decimal */ ALWAYS_INLINE int -mpd_isdynamic(mpd_t *dec) +mpd_isdynamic(const mpd_t *dec) { return !(dec->flags & MPD_STATIC); } /* Static decimal */ ALWAYS_INLINE int -mpd_isstatic(mpd_t *dec) +mpd_isstatic(const mpd_t *dec) { return dec->flags & MPD_STATIC; } /* Data of decimal is dynamic */ ALWAYS_INLINE int -mpd_isdynamic_data(mpd_t *dec) +mpd_isdynamic_data(const mpd_t *dec) { return !(dec->flags & MPD_DATAFLAGS); } /* Data of decimal is static */ ALWAYS_INLINE int -mpd_isstatic_data(mpd_t *dec) +mpd_isstatic_data(const mpd_t *dec) { return dec->flags & MPD_STATIC_DATA; } /* Data of decimal is shared */ ALWAYS_INLINE int -mpd_isshared_data(mpd_t *dec) +mpd_isshared_data(const mpd_t *dec) { return dec->flags & MPD_SHARED_DATA; } /* Data of decimal is const */ ALWAYS_INLINE int -mpd_isconst_data(mpd_t *dec) +mpd_isconst_data(const mpd_t *dec) { return dec->flags & MPD_CONST_DATA; } @@ -584,7 +597,7 @@ /* Copy sign from another decimal */ ALWAYS_INLINE void -mpd_signcpy(mpd_t *result, mpd_t *a) +mpd_signcpy(mpd_t *result, const mpd_t *a) { uint8_t sign = a->flags&MPD_NEG; @@ -1345,6 +1358,91 @@ return MPD_SSIZE_MAX; } +#if defined(CONFIG_32) && !defined(LEGACY_COMPILER) +/* + * Quietly get a uint64_t from a decimal. If the operation is impossible, + * MPD_Invalid_operation is set. 
+ */ +static uint64_t +_c32_qget_u64(int use_sign, const mpd_t *a, uint32_t *status) +{ + MPD_NEW_STATIC(tmp,0,0,20,3); + mpd_context_t maxcontext; + uint64_t ret; + + tmp_data[0] = 709551615; + tmp_data[1] = 446744073; + tmp_data[2] = 18; + + if (mpd_isspecial(a)) { + *status |= MPD_Invalid_operation; + return UINT64_MAX; + } + if (mpd_iszero(a)) { + return 0; + } + if (use_sign && mpd_isnegative(a)) { + *status |= MPD_Invalid_operation; + return UINT64_MAX; + } + if (!_mpd_isint(a)) { + *status |= MPD_Invalid_operation; + return UINT64_MAX; + } + + if (_mpd_cmp_abs(a, &tmp) > 0) { + *status |= MPD_Invalid_operation; + return UINT64_MAX; + } + + mpd_maxcontext(&maxcontext); + mpd_qrescale(&tmp, a, 0, &maxcontext, &maxcontext.status); + maxcontext.status &= ~MPD_Rounded; + if (maxcontext.status != 0) { + *status |= (maxcontext.status|MPD_Invalid_operation); /* GCOV_NOT_REACHED */ + return UINT64_MAX; /* GCOV_NOT_REACHED */ + } + + ret = 0; + switch (tmp.len) { + case 3: + ret += (uint64_t)tmp_data[2] * 1000000000000000000ULL; + case 2: + ret += (uint64_t)tmp_data[1] * 1000000000ULL; + case 1: + ret += tmp_data[0]; + break; + default: + abort(); /* GCOV_NOT_REACHED */ + } + + return ret; +} + +static int64_t +_c32_qget_i64(const mpd_t *a, uint32_t *status) +{ + uint64_t u; + int isneg; + + u = _c32_qget_u64(0, a, status); + if (*status&MPD_Invalid_operation) { + return INT64_MAX; + } + + isneg = mpd_isnegative(a); + if (u <= INT64_MAX) { + return isneg ? -((int64_t)u) : (int64_t)u; + } + else if (isneg && u+(INT64_MIN+INT64_MAX) == INT64_MAX) { + return INT64_MIN; + } + + *status |= MPD_Invalid_operation; + return INT64_MAX; +} +#endif /* CONFIG_32 && !LEGACY_COMPILER */ + #ifdef CONFIG_64 /* quietly get a uint64_t from a decimal */ uint64_t @@ -1359,7 +1457,57 @@ { return mpd_qget_ssize(a, status); } + +/* quietly get a uint32_t from a decimal */ +uint32_t +mpd_qget_u32(const mpd_t *a, uint32_t *status) +{ + uint64_t x = mpd_qget_uint(a, status); + + if (*status&MPD_Invalid_operation) { + return UINT32_MAX; + } + if (x > UINT32_MAX) { + *status |= MPD_Invalid_operation; + return UINT32_MAX; + } + + return (uint32_t)x; +} + +/* quietly get an int32_t from a decimal */ +int32_t +mpd_qget_i32(const mpd_t *a, uint32_t *status) +{ + int64_t x = mpd_qget_ssize(a, status); + + if (*status&MPD_Invalid_operation) { + return INT32_MAX; + } + if (x < INT32_MIN || x > INT32_MAX) { + *status |= MPD_Invalid_operation; + return INT32_MAX; + } + + return (int32_t)x; +} #else +#ifndef LEGACY_COMPILER +/* quietly get a uint64_t from a decimal */ +uint64_t +mpd_qget_u64(const mpd_t *a, uint32_t *status) +{ + return _c32_qget_u64(1, a, status); +} + +/* quietly get an int64_t from a decimal */ +int64_t +mpd_qget_i64(const mpd_t *a, uint32_t *status) +{ + return _c32_qget_i64(a, status); +} +#endif + /* quietly get a uint32_t from a decimal */ uint32_t mpd_qget_u32(const mpd_t *a, uint32_t *status) @@ -3054,9 +3202,9 @@ } static inline void -_mpd_ptrswap(mpd_t **a, mpd_t **b) -{ - mpd_t *t = *a; +_mpd_ptrswap(const mpd_t **a, const mpd_t **b) +{ + const mpd_t *t = *a; *a = *b; *b = t; } @@ -3084,7 +3232,7 @@ _mpd_qaddsub(mpd_t *result, const mpd_t *a, const mpd_t *b, uint8_t sign_b, const mpd_context_t *ctx, uint32_t *status) { - mpd_t *big, *small; + const mpd_t *big, *small; MPD_NEW_STATIC(big_aligned,0,0,0,0); MPD_NEW_CONST(tiny,0,0,1,1,1,1); mpd_uint_t carry; @@ -3094,7 +3242,7 @@ /* compare exponents */ - big = (mpd_t *)a; small = (mpd_t *)b; + big = a; small = b; if (big->exp != small->exp) { if 
(small->exp > big->exp) { _mpd_ptrswap(&big, &small); @@ -3386,6 +3534,34 @@ { mpd_qadd_uint(result, a, b, ctx, status); } +#elif !defined(LEGACY_COMPILER) +/* Add decimal and int64_t. */ +void +mpd_qadd_i64(mpd_t *result, const mpd_t *a, int64_t b, + const mpd_context_t *ctx, uint32_t *status) +{ + mpd_context_t maxcontext; + MPD_NEW_STATIC(bb,0,0,0,0); + + mpd_maxcontext(&maxcontext); + mpd_qset_i64(&bb, b, &maxcontext, status); + mpd_qadd(result, a, &bb, ctx, status); + mpd_del(&bb); +} + +/* Add decimal and uint64_t. */ +void +mpd_qadd_u64(mpd_t *result, const mpd_t *a, uint64_t b, + const mpd_context_t *ctx, uint32_t *status) +{ + mpd_context_t maxcontext; + MPD_NEW_STATIC(bb,0,0,0,0); + + mpd_maxcontext(&maxcontext); + mpd_qset_u64(&bb, b, &maxcontext, status); + mpd_qadd(result, a, &bb, ctx, status); + mpd_del(&bb); +} #endif /* Subtract int32_t from decimal. */ @@ -3420,6 +3596,34 @@ { mpd_qsub_uint(result, a, b, ctx, status); } +#elif !defined(LEGACY_COMPILER) +/* Subtract int64_t from decimal. */ +void +mpd_qsub_i64(mpd_t *result, const mpd_t *a, int64_t b, + const mpd_context_t *ctx, uint32_t *status) +{ + mpd_context_t maxcontext; + MPD_NEW_STATIC(bb,0,0,0,0); + + mpd_maxcontext(&maxcontext); + mpd_qset_i64(&bb, b, &maxcontext, status); + mpd_qsub(result, a, &bb, ctx, status); + mpd_del(&bb); +} + +/* Subtract uint64_t from decimal. */ +void +mpd_qsub_u64(mpd_t *result, const mpd_t *a, uint64_t b, + const mpd_context_t *ctx, uint32_t *status) +{ + mpd_context_t maxcontext; + MPD_NEW_STATIC(bb,0,0,0,0); + + mpd_maxcontext(&maxcontext); + mpd_qset_u64(&bb, b, &maxcontext, status); + mpd_qsub(result, a, &bb, ctx, status); + mpd_del(&bb); +} #endif @@ -3871,6 +4075,34 @@ { mpd_qdiv_uint(result, a, b, ctx, status); } +#elif !defined(LEGACY_COMPILER) +/* Divide decimal by int64_t. */ +void +mpd_qdiv_i64(mpd_t *result, const mpd_t *a, int64_t b, + const mpd_context_t *ctx, uint32_t *status) +{ + mpd_context_t maxcontext; + MPD_NEW_STATIC(bb,0,0,0,0); + + mpd_maxcontext(&maxcontext); + mpd_qset_i64(&bb, b, &maxcontext, status); + mpd_qdiv(result, a, &bb, ctx, status); + mpd_del(&bb); +} + +/* Divide decimal by uint64_t. */ +void +mpd_qdiv_u64(mpd_t *result, const mpd_t *a, uint64_t b, + const mpd_context_t *ctx, uint32_t *status) +{ + mpd_context_t maxcontext; + MPD_NEW_STATIC(bb,0,0,0,0); + + mpd_maxcontext(&maxcontext); + mpd_qset_u64(&bb, b, &maxcontext, status); + mpd_qdiv(result, a, &bb, ctx, status); + mpd_del(&bb); +} #endif /* Pad the result with trailing zeros if it has fewer digits than prec. */ @@ -4189,21 +4421,22 @@ const mpd_context_t *ctx, uint32_t *status) { uint32_t workstatus = 0; - mpd_t *cc = (mpd_t *)c; + mpd_t *cc = NULL; if (result == c) { if ((cc = mpd_qncopy(c)) == NULL) { mpd_seterror(result, MPD_Malloc_error, status); return; } + c = cc; } _mpd_qmul(result, a, b, ctx, &workstatus); if (!(workstatus&MPD_Invalid_operation)) { - mpd_qadd(result, result, cc, ctx, &workstatus); - } - - if (cc != c) mpd_del(cc); + mpd_qadd(result, result, c, ctx, &workstatus); + } + + if (cc) mpd_del(cc); *status |= workstatus; } @@ -5495,7 +5728,7 @@ _mpd_qmul(mpd_t *result, const mpd_t *a, const mpd_t *b, const mpd_context_t *ctx, uint32_t *status) { - mpd_t *big = (mpd_t *)a, *small = (mpd_t *)b; + const mpd_t *big = a, *small = b; mpd_uint_t *rdata = NULL; mpd_uint_t rbuf[MPD_MINALLOC_MAX]; mpd_size_t rsize, i; @@ -5664,6 +5897,34 @@ { mpd_qmul_uint(result, a, b, ctx, status); } +#elif !defined(LEGACY_COMPILER) +/* Multiply decimal and int64_t. 
*/ +void +mpd_qmul_i64(mpd_t *result, const mpd_t *a, int64_t b, + const mpd_context_t *ctx, uint32_t *status) +{ + mpd_context_t maxcontext; + MPD_NEW_STATIC(bb,0,0,0,0); + + mpd_maxcontext(&maxcontext); + mpd_qset_i64(&bb, b, &maxcontext, status); + mpd_qmul(result, a, &bb, ctx, status); + mpd_del(&bb); +} + +/* Multiply decimal and uint64_t. */ +void +mpd_qmul_u64(mpd_t *result, const mpd_t *a, uint64_t b, + const mpd_context_t *ctx, uint32_t *status) +{ + mpd_context_t maxcontext; + MPD_NEW_STATIC(bb,0,0,0,0); + + mpd_maxcontext(&maxcontext); + mpd_qset_u64(&bb, b, &maxcontext, status); + mpd_qmul(result, a, &bb, ctx, status); + mpd_del(&bb); +} #endif /* Like the minus operator. */ diff --git a/rpython/translator/c/src/libmpdec/mpdecimal.h b/rpython/translator/c/src/libmpdec/mpdecimal.h --- a/rpython/translator/c/src/libmpdec/mpdecimal.h +++ b/rpython/translator/c/src/libmpdec/mpdecimal.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * Copyright (c) 2008-2016 Stefan Krah. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -32,7 +32,10 @@ #ifdef __cplusplus extern "C" { -#define __STDC_LIMIT_MACROS + #ifndef __STDC_LIMIT_MACROS + #define __STDC_LIMIT_MACROS + #define MPD_CLEAR_STDC_LIMIT_MACROS + #endif #endif @@ -103,6 +106,19 @@ /******************************************************************************/ +/* Version */ +/******************************************************************************/ + +#define MPD_MAJOR_VERSION 2 +#define MPD_MINOR_VERSION 4 +#define MPD_MICRO_VERSION 0 + +#define MPD_VERSION "2.4.0" + +const char *mpd_version(void); + + +/******************************************************************************/ /* Configuration */ /******************************************************************************/ @@ -244,7 +260,7 @@ extern const char *mpd_clamp_string[MPD_CLAMP_GUARD]; -typedef struct { +typedef struct mpd_context_t { mpd_ssize_t prec; /* precision */ mpd_ssize_t emax; /* max positive exp */ mpd_ssize_t emin; /* min negative exp */ @@ -356,7 +372,7 @@ #define MPD_DATAFLAGS (MPD_STATIC_DATA|MPD_SHARED_DATA|MPD_CONST_DATA) /* mpd_t */ -typedef struct { +typedef struct mpd_t { uint8_t flags; mpd_ssize_t exp; mpd_ssize_t digits; @@ -374,7 +390,7 @@ /******************************************************************************/ /* format specification */ -typedef struct { +typedef struct mpd_spec_t { mpd_ssize_t min_width; /* minimum field width */ mpd_ssize_t prec; /* fraction digits or significant digits */ char type; /* conversion specifier */ @@ -393,7 +409,7 @@ mpd_ssize_t mpd_to_eng_size(char **res, const mpd_t *dec, int fmt); int mpd_validate_lconv(mpd_spec_t *spec); int mpd_parse_fmt_str(mpd_spec_t *spec, const char *fmt, int caps); -char * mpd_qformat_spec(const mpd_t *dec, const mpd_spec_t *spec, const mpd_context_t *ctx, uint32_t *status); +char *mpd_qformat_spec(const mpd_t *dec, const mpd_spec_t *spec, const mpd_context_t *ctx, uint32_t *status); char *mpd_qformat(const mpd_t *dec, const char *fmt, const mpd_context_t *ctx, uint32_t *status); #define MPD_NUM_FLAGS 15 @@ -440,13 +456,19 @@ mpd_uint_t mpd_qget_uint(const mpd_t *dec, uint32_t *status); mpd_uint_t mpd_qabs_uint(const mpd_t *dec, uint32_t *status); +int32_t mpd_qget_i32(const mpd_t *dec, uint32_t *status); +uint32_t mpd_qget_u32(const mpd_t *dec, uint32_t *status); +#ifndef LEGACY_COMPILER +int64_t mpd_qget_i64(const mpd_t 
*dec, uint32_t *status); +uint64_t mpd_qget_u64(const mpd_t *dec, uint32_t *status); +#endif /* quiet functions */ int mpd_qcheck_nan(mpd_t *nanresult, const mpd_t *a, const mpd_context_t *ctx, uint32_t *status); int mpd_qcheck_nans(mpd_t *nanresult, const mpd_t *a, const mpd_t *b, const mpd_context_t *ctx, uint32_t *status); void mpd_qfinalize(mpd_t *result, const mpd_context_t *ctx, uint32_t *status); -const char * mpd_class(const mpd_t *a, const mpd_context_t *ctx); +const char *mpd_class(const mpd_t *a, const mpd_context_t *ctx); int mpd_qcopy(mpd_t *result, const mpd_t *a, uint32_t *status); mpd_t *mpd_qncopy(const mpd_t *a); @@ -531,6 +553,17 @@ void mpd_qsqrt(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx, uint32_t *status); void mpd_qinvroot(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx, uint32_t *status); +#ifndef LEGACY_COMPILER +void mpd_qadd_i64(mpd_t *result, const mpd_t *a, int64_t b, const mpd_context_t *ctx, uint32_t *status); +void mpd_qadd_u64(mpd_t *result, const mpd_t *a, uint64_t b, const mpd_context_t *ctx, uint32_t *status); +void mpd_qsub_i64(mpd_t *result, const mpd_t *a, int64_t b, const mpd_context_t *ctx, uint32_t *status); +void mpd_qsub_u64(mpd_t *result, const mpd_t *a, uint64_t b, const mpd_context_t *ctx, uint32_t *status); +void mpd_qmul_i64(mpd_t *result, const mpd_t *a, int64_t b, const mpd_context_t *ctx, uint32_t *status); +void mpd_qmul_u64(mpd_t *result, const mpd_t *a, uint64_t b, const mpd_context_t *ctx, uint32_t *status); +void mpd_qdiv_i64(mpd_t *result, const mpd_t *a, int64_t b, const mpd_context_t *ctx, uint32_t *status); +void mpd_qdiv_u64(mpd_t *result, const mpd_t *a, uint64_t b, const mpd_context_t *ctx, uint32_t *status); +#endif + size_t mpd_sizeinbase(const mpd_t *a, uint32_t base); void mpd_qimport_u16(mpd_t *result, const uint16_t *srcdata, size_t srclen, @@ -549,7 +582,7 @@ /* Signalling functions */ /******************************************************************************/ -char * mpd_format(const mpd_t *dec, const char *fmt, mpd_context_t *ctx); +char *mpd_format(const mpd_t *dec, const char *fmt, mpd_context_t *ctx); void mpd_import_u16(mpd_t *result, const uint16_t *srcdata, size_t srclen, uint8_t srcsign, uint32_t base, mpd_context_t *ctx); void mpd_import_u32(mpd_t *result, const uint32_t *srcdata, size_t srclen, uint8_t srcsign, uint32_t base, mpd_context_t *ctx); size_t mpd_export_u16(uint16_t **rdata, size_t rlen, uint32_t base, const mpd_t *src, mpd_context_t *ctx); @@ -574,6 +607,12 @@ mpd_ssize_t mpd_get_ssize(const mpd_t *a, mpd_context_t *ctx); mpd_uint_t mpd_get_uint(const mpd_t *a, mpd_context_t *ctx); mpd_uint_t mpd_abs_uint(const mpd_t *a, mpd_context_t *ctx); +int32_t mpd_get_i32(const mpd_t *a, mpd_context_t *ctx); +uint32_t mpd_get_u32(const mpd_t *a, mpd_context_t *ctx); +#ifndef LEGACY_COMPILER +int64_t mpd_get_i64(const mpd_t *a, mpd_context_t *ctx); +uint64_t mpd_get_u64(const mpd_t *a, mpd_context_t *ctx); +#endif void mpd_and(mpd_t *result, const mpd_t *a, const mpd_t *b, mpd_context_t *ctx); void mpd_copy(mpd_t *result, const mpd_t *a, mpd_context_t *ctx); void mpd_canonical(mpd_t *result, const mpd_t *a, mpd_context_t *ctx); @@ -644,6 +683,17 @@ void mpd_sqrt(mpd_t *result, const mpd_t *a, mpd_context_t *ctx); void mpd_invroot(mpd_t *result, const mpd_t *a, mpd_context_t *ctx); +#ifndef LEGACY_COMPILER +void mpd_add_i64(mpd_t *result, const mpd_t *a, int64_t b, mpd_context_t *ctx); +void mpd_add_u64(mpd_t *result, const mpd_t *a, uint64_t b, mpd_context_t *ctx); +void 
mpd_sub_i64(mpd_t *result, const mpd_t *a, int64_t b, mpd_context_t *ctx); +void mpd_sub_u64(mpd_t *result, const mpd_t *a, uint64_t b, mpd_context_t *ctx); +void mpd_div_i64(mpd_t *result, const mpd_t *a, int64_t b, mpd_context_t *ctx); +void mpd_div_u64(mpd_t *result, const mpd_t *a, uint64_t b, mpd_context_t *ctx); +void mpd_mul_i64(mpd_t *result, const mpd_t *a, int64_t b, mpd_context_t *ctx); +void mpd_mul_u64(mpd_t *result, const mpd_t *a, uint64_t b, mpd_context_t *ctx); +#endif + /******************************************************************************/ /* Configuration specific */ @@ -652,36 +702,8 @@ #ifdef CONFIG_64 void mpd_qsset_i64(mpd_t *result, int64_t a, const mpd_context_t *ctx, uint32_t *status); void mpd_qsset_u64(mpd_t *result, uint64_t a, const mpd_context_t *ctx, uint32_t *status); -int64_t mpd_qget_i64(const mpd_t *dec, uint32_t *status); -uint64_t mpd_qget_u64(const mpd_t *dec, uint32_t *status); - -void mpd_qadd_i64(mpd_t *result, const mpd_t *a, int64_t b, const mpd_context_t *ctx, uint32_t *status); -void mpd_qadd_u64(mpd_t *result, const mpd_t *a, uint64_t b, const mpd_context_t *ctx, uint32_t *status); -void mpd_qsub_i64(mpd_t *result, const mpd_t *a, int64_t b, const mpd_context_t *ctx, uint32_t *status); -void mpd_qsub_u64(mpd_t *result, const mpd_t *a, uint64_t b, const mpd_context_t *ctx, uint32_t *status); -void mpd_qmul_i64(mpd_t *result, const mpd_t *a, int64_t b, const mpd_context_t *ctx, uint32_t *status); -void mpd_qmul_u64(mpd_t *result, const mpd_t *a, uint64_t b, const mpd_context_t *ctx, uint32_t *status); -void mpd_qdiv_i64(mpd_t *result, const mpd_t *a, int64_t b, const mpd_context_t *ctx, uint32_t *status); -void mpd_qdiv_u64(mpd_t *result, const mpd_t *a, uint64_t b, const mpd_context_t *ctx, uint32_t *status); - void mpd_sset_i64(mpd_t *result, int64_t a, mpd_context_t *ctx); void mpd_sset_u64(mpd_t *result, uint64_t a, mpd_context_t *ctx); -int64_t mpd_get_i64(const mpd_t *a, mpd_context_t *ctx); -uint64_t mpd_get_u64(const mpd_t *a, mpd_context_t *ctx); - -void mpd_add_i64(mpd_t *result, const mpd_t *a, int64_t b, mpd_context_t *ctx); -void mpd_add_u64(mpd_t *result, const mpd_t *a, uint64_t b, mpd_context_t *ctx); -void mpd_sub_i64(mpd_t *result, const mpd_t *a, int64_t b, mpd_context_t *ctx); -void mpd_sub_u64(mpd_t *result, const mpd_t *a, uint64_t b, mpd_context_t *ctx); -void mpd_div_i64(mpd_t *result, const mpd_t *a, int64_t b, mpd_context_t *ctx); -void mpd_div_u64(mpd_t *result, const mpd_t *a, uint64_t b, mpd_context_t *ctx); -void mpd_mul_i64(mpd_t *result, const mpd_t *a, int64_t b, mpd_context_t *ctx); -void mpd_mul_u64(mpd_t *result, const mpd_t *a, uint64_t b, mpd_context_t *ctx); -#else -int32_t mpd_qget_i32(const mpd_t *dec, uint32_t *status); -uint32_t mpd_qget_u32(const mpd_t *dec, uint32_t *status); -int32_t mpd_get_i32(const mpd_t *a, mpd_context_t *ctx); -uint32_t mpd_get_u32(const mpd_t *a, mpd_context_t *ctx); #endif @@ -731,12 +753,12 @@ /* 1 if dec is positive, -1 if dec is negative */ EXTINLINE int mpd_arith_sign(const mpd_t *dec); EXTINLINE long mpd_radix(void); -EXTINLINE int mpd_isdynamic(mpd_t *dec); -EXTINLINE int mpd_isstatic(mpd_t *dec); -EXTINLINE int mpd_isdynamic_data(mpd_t *dec); -EXTINLINE int mpd_isstatic_data(mpd_t *dec); -EXTINLINE int mpd_isshared_data(mpd_t *dec); -EXTINLINE int mpd_isconst_data(mpd_t *dec); +EXTINLINE int mpd_isdynamic(const mpd_t *dec); +EXTINLINE int mpd_isstatic(const mpd_t *dec); +EXTINLINE int mpd_isdynamic_data(const mpd_t *dec); +EXTINLINE int mpd_isstatic_data(const 
mpd_t *dec); +EXTINLINE int mpd_isshared_data(const mpd_t *dec); +EXTINLINE int mpd_isconst_data(const mpd_t *dec); EXTINLINE mpd_ssize_t mpd_trail_zeros(const mpd_t *dec); @@ -748,7 +770,7 @@ EXTINLINE void mpd_setdigits(mpd_t *result); EXTINLINE void mpd_set_sign(mpd_t *result, uint8_t sign); /* copy sign from another decimal */ -EXTINLINE void mpd_signcpy(mpd_t *result, mpd_t *a); +EXTINLINE void mpd_signcpy(mpd_t *result, const mpd_t *a); EXTINLINE void mpd_set_infinity(mpd_t *result); EXTINLINE void mpd_set_qnan(mpd_t *result); EXTINLINE void mpd_set_snan(mpd_t *result); @@ -815,6 +837,10 @@ #ifdef __cplusplus + #ifdef MPD_CLEAR_STDC_LIMIT_MACROS + #undef MPD_CLEAR_STDC_LIMIT_MACROS + #undef __STDC_LIMIT_MACROS + #endif } /* END extern "C" */ #endif diff --git a/rpython/translator/c/src/libmpdec/numbertheory.c b/rpython/translator/c/src/libmpdec/numbertheory.c --- a/rpython/translator/c/src/libmpdec/numbertheory.c +++ b/rpython/translator/c/src/libmpdec/numbertheory.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * Copyright (c) 2008-2016 Stefan Krah. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions diff --git a/rpython/translator/c/src/libmpdec/numbertheory.h b/rpython/translator/c/src/libmpdec/numbertheory.h --- a/rpython/translator/c/src/libmpdec/numbertheory.h +++ b/rpython/translator/c/src/libmpdec/numbertheory.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * Copyright (c) 2008-2016 Stefan Krah. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions diff --git a/rpython/translator/c/src/libmpdec/sixstep.c b/rpython/translator/c/src/libmpdec/sixstep.c --- a/rpython/translator/c/src/libmpdec/sixstep.c +++ b/rpython/translator/c/src/libmpdec/sixstep.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * Copyright (c) 2008-2016 Stefan Krah. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions diff --git a/rpython/translator/c/src/libmpdec/sixstep.h b/rpython/translator/c/src/libmpdec/sixstep.h --- a/rpython/translator/c/src/libmpdec/sixstep.h +++ b/rpython/translator/c/src/libmpdec/sixstep.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * Copyright (c) 2008-2016 Stefan Krah. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions diff --git a/rpython/translator/c/src/libmpdec/transpose.c b/rpython/translator/c/src/libmpdec/transpose.c --- a/rpython/translator/c/src/libmpdec/transpose.c +++ b/rpython/translator/c/src/libmpdec/transpose.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * Copyright (c) 2008-2016 Stefan Krah. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -169,7 +169,7 @@ /* * Transpose 2^n * 2^n matrix. For cache efficiency, the matrix is split into * square blocks with side length 'SIDE'. First, the blocks are transposed, - * then a square tranposition is done on each individual block. + * then a square transposition is done on each individual block. 
*/ static void squaretrans_pow2(mpd_uint_t *matrix, mpd_size_t size) diff --git a/rpython/translator/c/src/libmpdec/transpose.h b/rpython/translator/c/src/libmpdec/transpose.h --- a/rpython/translator/c/src/libmpdec/transpose.h +++ b/rpython/translator/c/src/libmpdec/transpose.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * Copyright (c) 2008-2016 Stefan Krah. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions diff --git a/rpython/translator/c/src/libmpdec/typearith.h b/rpython/translator/c/src/libmpdec/typearith.h --- a/rpython/translator/c/src/libmpdec/typearith.h +++ b/rpython/translator/c/src/libmpdec/typearith.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * Copyright (c) 2008-2016 Stefan Krah. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions diff --git a/rpython/translator/c/src/libmpdec/umodarith.h b/rpython/translator/c/src/libmpdec/umodarith.h --- a/rpython/translator/c/src/libmpdec/umodarith.h +++ b/rpython/translator/c/src/libmpdec/umodarith.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * Copyright (c) 2008-2016 Stefan Krah. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions diff --git a/rpython/translator/c/src/libmpdec/vccompat.h b/rpython/translator/c/src/libmpdec/vccompat.h --- a/rpython/translator/c/src/libmpdec/vccompat.h +++ b/rpython/translator/c/src/libmpdec/vccompat.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * Copyright (c) 2008-2016 Stefan Krah. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions diff --git a/rpython/translator/c/src/libmpdec/vcdiv64.asm b/rpython/translator/c/src/libmpdec/vcdiv64.asm --- a/rpython/translator/c/src/libmpdec/vcdiv64.asm +++ b/rpython/translator/c/src/libmpdec/vcdiv64.asm @@ -1,5 +1,5 @@ ; -; Copyright (c) 2008-2012 Stefan Krah. All rights reserved. +; Copyright (c) 2008-2016 Stefan Krah. All rights reserved. 
; ; Redistribution and use in source and binary forms, with or without ; modification, are permitted provided that the following conditions From noreply at buildbot.pypy.org Sun Oct 5 20:22:58 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 5 Oct 2014 20:22:58 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Add _decimal.__version__ and __libmpdec_version__ Message-ID: <20141005182258.3A4901C023E@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r73795:d7400a663d3b Date: 2014-09-19 00:04 +0200 http://bitbucket.org/pypy/pypy/changeset/d7400a663d3b/ Log: Add _decimal.__version__ and __libmpdec_version__ diff --git a/pypy/module/_decimal/__init__.py b/pypy/module/_decimal/__init__.py --- a/pypy/module/_decimal/__init__.py +++ b/pypy/module/_decimal/__init__.py @@ -24,6 +24,9 @@ 'MIN_ETINY': 'space.wrap(interp_decimal.MIN_ETINY)', 'HAVE_THREADS': 'space.wrap(space.config.translation.thread)', + + '__version__': 'space.wrap(interp_decimal.VERSION)', + '__libmpdec_version__': 'space.wrap(interp_decimal.LIBMPDEC_VERSION)', } for name in rmpdec.ROUND_CONSTANTS: interpleveldefs[name] = 'space.wrap(%r)' % name diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -12,6 +12,9 @@ from pypy.objspace.std.floatobject import HASH_MODULUS, HASH_INF, HASH_NAN from pypy.module._decimal import interp_context +VERSION = "1.7" +LIBMPDEC_VERSION = rffi.charp2str(rmpdec.mpd_version()) + if HASH_MODULUS == 2**31 - 1: INVERSE_10_MODULUS = 1503238553 elif HASH_MODULUS == 2**61 - 1: diff --git a/pypy/module/_decimal/test/test_module.py b/pypy/module/_decimal/test/test_module.py --- a/pypy/module/_decimal/test/test_module.py +++ b/pypy/module/_decimal/test/test_module.py @@ -9,6 +9,11 @@ import _decimal assert isinstance(_decimal.Decimal, type) + def test_versions(self): + import _decimal + assert isinstance(_decimal.__version__, str) + assert isinstance(_decimal.__libmpdec_version__, str) + def test_context(self): import _decimal context = _decimal.Context( diff --git a/rpython/rlib/rmpdec.py b/rpython/rlib/rmpdec.py --- a/rpython/rlib/rmpdec.py +++ b/rpython/rlib/rmpdec.py @@ -64,6 +64,7 @@ "mpd_qand", "mpd_qor", "mpd_qxor", "mpd_qcopy_sign", "mpd_qcopy_abs", "mpd_qcopy_negate", "mpd_qround_to_int", "mpd_qround_to_intx", + "mpd_version", ], compile_extra=compile_extra, libraries=['m'], @@ -393,3 +394,5 @@ mpd_qround_to_intx = external( 'mpd_qround_to_intx', [MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) + +mpd_version = external('mpd_version', [], rffi.CCHARP, macro=True) From noreply at buildbot.pypy.org Sun Oct 5 20:22:59 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 5 Oct 2014 20:22:59 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: hg merge py3.3 Message-ID: <20141005182259.8854F1C023E@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r73796:0bc729d9c30e Date: 2014-09-19 01:17 +0200 http://bitbucket.org/pypy/pypy/changeset/0bc729d9c30e/ Log: hg merge py3.3 diff --git a/pypy/module/faulthandler/interp_faulthandler.py b/pypy/module/faulthandler/interp_faulthandler.py --- a/pypy/module/faulthandler/interp_faulthandler.py +++ b/pypy/module/faulthandler/interp_faulthandler.py @@ -6,6 +6,8 @@ from rpython.translator.tool.cbuild import ExternalCompilationInfo from pypy.interpreter.error import OperationError, oefmt +MAX_NTHREADS = 100 + 
cwd = py.path.local(__file__).dirpath() eci = ExternalCompilationInfo( includes=[cwd.join('faulthandler.h')], @@ -43,12 +45,18 @@ class FatalErrorState(object): def __init__(self, space): self.enabled = False + self.all_threads = True -def enable(space): - space.fromcache(FatalErrorState).enabled = True + at unwrap_spec(w_file=WrappedDefault(None), + w_all_threads=WrappedDefault(True)) +def enable(space, w_file, w_all_threads): + state = space.fromcache(FatalErrorState) + state.enabled = True + state.all_threads = bool(space.int_w(w_all_threads)) def disable(space): - space.fromcache(FatalErrorState).enabled = False + state = space.fromcache(FatalErrorState) + state.enabled = False def is_enabled(space): return space.wrap(space.fromcache(FatalErrorState).enabled) @@ -60,25 +68,40 @@ @unwrap_spec(w_file=WrappedDefault(None), w_all_threads=WrappedDefault(True)) def dump_traceback(space, w_file, w_all_threads): - ec = space.getexecutioncontext() - ecs = space.threadlocals.getallvalues() + current_ec = space.getexecutioncontext() + if space.int_w(w_all_threads): + ecs = space.threadlocals.getallvalues() + else: + ecs = {0: current_ec} if space.is_none(w_file): w_file = space.sys.get('stderr') fd = space.c_filedescriptor_w(w_file) - frame = ec.gettopframe() - while frame: - code = frame.pycode - lineno = frame.get_last_lineno() - if code: - os.write(fd, "File \"%s\", line %s in %s\n" % ( - code.co_filename, lineno, code.co_name)) + nthreads = 0 + for thread_ident, ec in ecs.items(): + if nthreads: + os.write(fd, "\n") + if nthreads >= MAX_NTHREADS: + os.write(fd, "...\n") + break + if ec is current_ec: + os.write(fd, "Current thread 0x%x:\n" % thread_ident) else: - os.write(fd, "File ???, line %s in ???\n" % ( - lineno,)) + os.write(fd, "Thread 0x%x:\n" % thread_ident) - frame = frame.f_backref() + frame = ec.gettopframe() + while frame: + code = frame.pycode + lineno = frame.get_last_lineno() + if code: + os.write(fd, "File \"%s\", line %s in %s\n" % ( + code.co_filename, lineno, code.co_name)) + else: + os.write(fd, "File ???, line %s in ???\n" % ( + lineno,)) + + frame = frame.f_backref() @unwrap_spec(w_release_gil=WrappedDefault(False)) diff --git a/pypy/module/faulthandler/test/test_faulthander.py b/pypy/module/faulthandler/test/test_faulthander.py --- a/pypy/module/faulthandler/test/test_faulthander.py +++ b/pypy/module/faulthandler/test/test_faulthander.py @@ -4,13 +4,14 @@ } def test_enable(self): - import faulthandler + import faulthandler, sys faulthandler.enable() assert faulthandler.is_enabled() is True + faulthandler.enable(file=sys.stderr, all_threads=True) faulthandler.disable() assert faulthandler.is_enabled() is False def test_dump_traceback(self): - import faulthandler + import faulthandler, sys faulthandler.dump_traceback() - + faulthandler.dump_traceback(file=sys.stderr, all_threads=True) diff --git a/pypy/module/faulthandler/test/test_ztranslation.py b/pypy/module/faulthandler/test/test_ztranslation.py new file mode 100644 --- /dev/null +++ b/pypy/module/faulthandler/test/test_ztranslation.py @@ -0,0 +1,4 @@ +from pypy.objspace.fake.checkmodule import checkmodule + +def test_faulthandler_translates(): + checkmodule('faulthandler') From noreply at buildbot.pypy.org Sun Oct 5 20:23:00 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 5 Oct 2014 20:23:00 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Implement Context.__reduce__ to fix copy.copy() Message-ID: <20141005182300.BA0151C023E@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: 
decimal-libmpdec Changeset: r73797:e08246460e6a Date: 2014-09-19 19:02 +0200 http://bitbucket.org/pypy/pypy/changeset/e08246460e6a/ Log: Implement Context.__reduce__ to fix copy.copy() diff --git a/pypy/module/_decimal/interp_context.py b/pypy/module/_decimal/interp_context.py --- a/pypy/module/_decimal/interp_context.py +++ b/pypy/module/_decimal/interp_context.py @@ -232,8 +232,7 @@ return interp_decimal.decimal_from_float( space, None, w_value, self, exact=False) - def descr_repr(self, space): - # Rounding string. + def _rounding_string(self, space): rounding = rffi.cast(lltype.Signed, self.ctx.c_round) for name, value in ROUND_CONSTANTS: if value == rounding: @@ -242,15 +241,32 @@ else: raise oefmt(space.w_RuntimeError, "bad rounding value") + return round_string + + def descr_repr(self, space): flags = interp_signals.flags_as_string(self.ctx.c_status) traps = interp_signals.flags_as_string(self.ctx.c_traps) return space.wrap("Context(prec=%s, rounding=%s, Emin=%s, Emax=%s, " "capitals=%s, clamp=%s, flags=%s, traps=%s)" % ( - self.ctx.c_prec, round_string, + self.ctx.c_prec, self._rounding_string(space), self.ctx.c_emin, self.ctx.c_emax, self.capitals, rffi.cast(lltype.Signed, self.ctx.c_clamp), flags, traps)) + def descr_reduce(self, space): + return space.newtuple([ + space.type(self), + space.newtuple([ + space.wrap(self.ctx.c_prec), + space.wrap(self._rounding_string(space)), + space.wrap(self.ctx.c_emin), + space.wrap(self.ctx.c_emax), + space.wrap(self.capitals), + space.wrap(self.ctx.c_clamp), + interp_signals.flags_as_list(space, self.ctx.c_status), + interp_signals.flags_as_list(space, self.ctx.c_traps), + ])]) + def divmod_w(self, space, w_x, w_y): from pypy.module._decimal import interp_decimal return interp_decimal.W_Decimal.divmod_impl(space, self, w_x, w_y) @@ -353,6 +369,7 @@ clamp=GetSetProperty(W_Context.get_clamp, W_Context.set_clamp), # __repr__ = interp2app(W_Context.descr_repr), + __reduce__ = interp2app(W_Context.descr_reduce), # copy=interp2app(W_Context.copy_w), clear_flags=interp2app(W_Context.clear_flags_w), diff --git a/pypy/module/_decimal/interp_signals.py b/pypy/module/_decimal/interp_signals.py --- a/pypy/module/_decimal/interp_signals.py +++ b/pypy/module/_decimal/interp_signals.py @@ -60,6 +60,15 @@ raise oefmt(space.w_KeyError, "invalid error flag") +def flags_as_list(space, flags): + result_w = [] + flags = rffi.cast(lltype.Signed, flags) + for (name, flag) in SIGNAL_MAP: + if flag & flags: + w_exc = getattr(get(space), 'w_' + name) + result_w.append(w_exc) + return space.newlist(result_w) + def flags_as_string(flags): builder = rstring.StringBuilder(30) builder.append('[') diff --git a/pypy/module/_decimal/test/test_context.py b/pypy/module/_decimal/test/test_context.py --- a/pypy/module/_decimal/test/test_context.py +++ b/pypy/module/_decimal/test/test_context.py @@ -24,9 +24,20 @@ def assertIs(space, w_x, w_y): assert space.is_w(w_x, w_y) cls.w_assertIs = space.wrap(gateway.interp2app(assertIs)) + def assertIsInstance(space, w_x, w_y): + assert space.isinstance_w(w_x, w_y) + cls.w_assertIsInstance = space.wrap( + gateway.interp2app(assertIsInstance)) cls.w_assertRaises = space.appexec([], """(): return raises""") + def test_deepcopy(self): + import copy + c = self.decimal.DefaultContext.copy() + c.traps[self.decimal.InvalidOperation] = False + nc = copy.deepcopy(c) + assert nc.traps[self.decimal.InvalidOperation] == False + def test_context_repr(self): c = self.decimal.DefaultContext.copy() From noreply at buildbot.pypy.org Sun Oct 5 20:23:02 2014 
From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 5 Oct 2014 20:23:02 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Fix Context traps when passed as a dictionary. Message-ID: <20141005182302.037581C023E@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r73798:8ca6f0d4f9a2 Date: 2014-09-19 20:39 +0200 http://bitbucket.org/pypy/pypy/changeset/8ca6f0d4f9a2/ Log: Fix Context traps when passed as a dictionary. diff --git a/pypy/module/_decimal/interp_context.py b/pypy/module/_decimal/interp_context.py --- a/pypy/module/_decimal/interp_context.py +++ b/pypy/module/_decimal/interp_context.py @@ -129,10 +129,16 @@ if not space.is_none(w_clamp): self.set_clamp(space, w_clamp) if not space.is_none(w_flags): - flags = interp_signals.list_as_flags(space, w_flags) + if space.isinstance_w(w_flags, space.w_list): + flags = interp_signals.list_as_flags(space, w_flags) + else: + flags = interp_signals.dict_as_flags(space, w_flags) rffi.setintfield(self.ctx, 'c_status', flags) if not space.is_none(w_traps): - flags = interp_signals.list_as_flags(space, w_traps) + if space.isinstance_w(w_traps, space.w_list): + flags = interp_signals.list_as_flags(space, w_traps) + else: + flags = interp_signals.dict_as_flags(space, w_traps) rffi.setintfield(self.ctx, 'c_traps', flags) def addstatus(self, space, status): diff --git a/pypy/module/_decimal/interp_signals.py b/pypy/module/_decimal/interp_signals.py --- a/pypy/module/_decimal/interp_signals.py +++ b/pypy/module/_decimal/interp_signals.py @@ -4,7 +4,7 @@ from rpython.rlib.unroll import unrolling_iterable from pypy.interpreter.error import oefmt, OperationError -SIGNAL_MAP = unrolling_iterable([ +SIGNAL_LIST = [ ('InvalidOperation', rmpdec.MPD_IEEE_Invalid_operation), ('FloatOperation', rmpdec.MPD_Float_operation), ('DivisionByZero', rmpdec.MPD_Division_by_zero), @@ -14,7 +14,9 @@ ('Inexact', rmpdec.MPD_Inexact), ('Rounded', rmpdec.MPD_Rounded), ('Clamped', rmpdec.MPD_Clamped), - ]) + ] +SIGNAL_MAP = unrolling_iterable(SIGNAL_LIST) + # Exceptions that inherit from InvalidOperation COND_MAP = unrolling_iterable([ ('InvalidOperation', rmpdec.MPD_Invalid_operation), @@ -53,6 +55,23 @@ flags |= exception_as_flag(space, w_item) return flags +def dict_as_flags(space, w_dict): + if space.len_w(w_dict) != len(SIGNAL_LIST): + raise oefmt(space.w_KeyError, + "invalid signal dict") + flags = 0 + for name, flag in SIGNAL_MAP: + try: + w_value = space.getitem(w_dict, getattr(get(space), 'w_' + name)) + except OperationError as e: + if e.match(space, space.w_KeyError): + raise oefmt(space.w_KeyError, + "invalid signal dict") + raise + if space.bool_w(w_value): + flags |= flag + return flags + def exception_as_flag(space, w_exc): for name, flag in SIGNAL_MAP: if space.is_w(w_exc, getattr(get(space), 'w_' + name)): @@ -78,7 +97,7 @@ if flag & flags: if not first: builder.append(', ') - first = False + first = False builder.append(value) builder.append(']') return builder.build() diff --git a/pypy/module/_decimal/test/test_context.py b/pypy/module/_decimal/test/test_context.py --- a/pypy/module/_decimal/test/test_context.py +++ b/pypy/module/_decimal/test/test_context.py @@ -31,6 +31,11 @@ cls.w_assertRaises = space.appexec([], """(): return raises""") + def test_dict_flags(self): + c = self.decimal.Context(traps=dict.fromkeys( + self.decimal.DefaultContext.traps.keys(), 0)) + assert c.traps[self.decimal.Rounded] == False + def test_deepcopy(self): import copy c = self.decimal.DefaultContext.copy() From noreply at 
buildbot.pypy.org Sun Oct 5 20:23:03 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 5 Oct 2014 20:23:03 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Implement Decimal.__format__. Message-ID: <20141005182303.520CF1C023E@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r73799:874ecb09ba3b Date: 2014-09-24 08:46 +0200 http://bitbucket.org/pypy/pypy/changeset/874ecb09ba3b/ Log: Implement Decimal.__format__. diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -262,6 +262,104 @@ ctx, status_ptr) return w_result + def _recode_to_utf8(self, ptr): + s = rffi.charp2str(ptr) + if len(s) == 0 or (len(s) == 1 and 32 <= ord(s[0]) < 128): + return None, ptr + u = locale_decode(s) + s = u.encode('utf-8') + ptr = rffi.str2charp(s) + return ptr, ptr + + @unwrap_spec(fmt=unicode) + def descr_format(self, space, fmt, w_override=None): + fmt = fmt.encode('utf-8') + context = interp_context.getcontext(space) + + replace_fillchar = False + if fmt and fmt[0] == '\0': + # NUL fill character: must be replaced with a valid UTF-8 char + # before calling mpd_parse_fmt_str(). + replace_fillchar = True + fmt = '_' + fmt[1:] + + dot_buf = sep_buf = grouping_buf = lltype.nullptr(rffi.CCHARP.TO) + spec = lltype.malloc(rmpdec.MPD_SPEC_PTR.TO, flavor='raw') + try: + if not rmpdec.mpd_parse_fmt_str(spec, fmt, context.capitals): + raise oefmt(space.w_ValueError, "invalid format string") + if replace_fillchar: + # In order to avoid clobbering parts of UTF-8 thousands + # separators or decimal points when the substitution is + # reversed later, the actual placeholder must be an invalid + # UTF-8 byte. + spec.c_fill[0] = '\xff' + spec.c_fill[1] = '\0' + + if w_override: + # Values for decimal_point, thousands_sep and grouping can + # be explicitly specified in the override dict. These values + # take precedence over the values obtained from localeconv() + # in mpd_parse_fmt_str(). The feature is not documented and + # is only used in test_decimal. 
+ try: + w_dot = space.getitem( + w_override, space.wrap("decimal_point")) + except OperationError as e: + if not e.match(space, space.w_KeyError): + raise + else: + dot_buf = rffi.str2charp(space.str_w(w_dot)) + spec.c_dot = dot_buf + try: + w_sep = space.getitem( + w_override, space.wrap("thousands_sep")) + except OperationError as e: + if not e.match(space, space.w_KeyError): + raise + else: + sep_buf = rffi.str2charp(space.str_w(w_sep)) + spec.c_sep = sep_buf + try: + w_grouping = space.getitem( + w_override, space.wrap("grouping")) + except OperationError as e: + if not e.match(space, space.w_KeyError): + raise + else: + grouping_buf = rffi.str2charp(space.str_w(w_grouping)) + spec.c_grouping = grouping_buf + if rmpdec.mpd_validate_lconv(spec) < 0: + raise oefmt(space.w_ValueError, "invalid override dict") + else: + dot_buf, spec.c_dot = self._recode_to_utf8(spec.c_dot) + sep_buf, spec.c_sep = self._recode_to_utf8(spec.c_sep) + + with context.catch_status(space) as (ctx, status_ptr): + decstring = rmpdec.mpd_qformat_spec( + self.mpd, spec, context.ctx, status_ptr) + status = rffi.cast(lltype.Signed, status_ptr[0]) + if not decstring: + if status & rmpdec.MPD_Malloc_error: + raise OperationError(space.w_MemoryError, space.w_None) + else: + raise oefmt(space.w_ValueError, + "format specification exceeds " + "internal limits of _decimal") + finally: + lltype.free(spec, flavor='raw') + if dot_buf: + lltype.free(dot_buf, flavor='raw') + if sep_buf: + lltype.free(sep_buf, flavor='raw') + if grouping_buf: + lltype.free(grouping_buf, flavor='raw') + + ret = rffi.charp2str(decstring) + if replace_fillchar: + ret = ret.replace('\xff', '\0') + return space.wrap(ret.decode('utf-8')) + def compare(self, space, w_other, op): context = interp_context.getcontext(space) w_err, w_self, w_other = convert_binop_cmp( @@ -954,6 +1052,7 @@ __floor__ = interp2app(W_Decimal.descr_floor), __ceil__ = interp2app(W_Decimal.descr_ceil), __round__ = interp2app(W_Decimal.descr_round), + __format__ = interp2app(W_Decimal.descr_format), # __eq__ = interp2app(W_Decimal.descr_eq), __ne__ = interp2app(W_Decimal.descr_ne), diff --git a/pypy/module/_decimal/test/test_decimal.py b/pypy/module/_decimal/test/test_decimal.py --- a/pypy/module/_decimal/test/test_decimal.py +++ b/pypy/module/_decimal/test/test_decimal.py @@ -1044,3 +1044,243 @@ c.traps[Inexact] = True raises(Inexact, Decimal("999.9").to_integral_exact, ROUND_UP) + def test_formatting(self): + Decimal = self.decimal.Decimal + + # triples giving a format, a Decimal, and the expected result + test_values = [ + ('e', '0E-15', '0e-15'), + ('e', '2.3E-15', '2.3e-15'), + ('e', '2.30E+2', '2.30e+2'), # preserve significant zeros + ('e', '2.30000E-15', '2.30000e-15'), + ('e', '1.23456789123456789e40', '1.23456789123456789e+40'), + ('e', '1.5', '1.5e+0'), + ('e', '0.15', '1.5e-1'), + ('e', '0.015', '1.5e-2'), + ('e', '0.0000000000015', '1.5e-12'), + ('e', '15.0', '1.50e+1'), + ('e', '-15', '-1.5e+1'), + ('e', '0', '0e+0'), + ('e', '0E1', '0e+1'), + ('e', '0.0', '0e-1'), + ('e', '0.00', '0e-2'), + ('.6e', '0E-15', '0.000000e-9'), + ('.6e', '0', '0.000000e+6'), + ('.6e', '9.999999', '9.999999e+0'), + ('.6e', '9.9999999', '1.000000e+1'), + ('.6e', '-1.23e5', '-1.230000e+5'), + ('.6e', '1.23456789e-3', '1.234568e-3'), + ('f', '0', '0'), + ('f', '0.0', '0.0'), + ('f', '0E-2', '0.00'), + ('f', '0.00E-8', '0.0000000000'), + ('f', '0E1', '0'), # loses exponent information + ('f', '3.2E1', '32'), + ('f', '3.2E2', '320'), + ('f', '3.20E2', '320'), + ('f', '3.200E2', '320.0'), + 
('f', '3.2E-6', '0.0000032'), + ('.6f', '0E-15', '0.000000'), # all zeros treated equally + ('.6f', '0E1', '0.000000'), + ('.6f', '0', '0.000000'), + ('.0f', '0', '0'), # no decimal point + ('.0f', '0e-2', '0'), + ('.0f', '3.14159265', '3'), + ('.1f', '3.14159265', '3.1'), + ('.4f', '3.14159265', '3.1416'), + ('.6f', '3.14159265', '3.141593'), + ('.7f', '3.14159265', '3.1415926'), # round-half-even! + ('.8f', '3.14159265', '3.14159265'), + ('.9f', '3.14159265', '3.141592650'), + + ('g', '0', '0'), + ('g', '0.0', '0.0'), + ('g', '0E1', '0e+1'), + ('G', '0E1', '0E+1'), + ('g', '0E-5', '0.00000'), + ('g', '0E-6', '0.000000'), + ('g', '0E-7', '0e-7'), + ('g', '-0E2', '-0e+2'), + ('.0g', '3.14159265', '3'), # 0 sig fig -> 1 sig fig + ('.0n', '3.14159265', '3'), # same for 'n' + ('.1g', '3.14159265', '3'), + ('.2g', '3.14159265', '3.1'), + ('.5g', '3.14159265', '3.1416'), + ('.7g', '3.14159265', '3.141593'), + ('.8g', '3.14159265', '3.1415926'), # round-half-even! + ('.9g', '3.14159265', '3.14159265'), + ('.10g', '3.14159265', '3.14159265'), # don't pad + + ('%', '0E1', '0%'), + ('%', '0E0', '0%'), + ('%', '0E-1', '0%'), + ('%', '0E-2', '0%'), + ('%', '0E-3', '0.0%'), + ('%', '0E-4', '0.00%'), + + ('.3%', '0', '0.000%'), # all zeros treated equally + ('.3%', '0E10', '0.000%'), + ('.3%', '0E-10', '0.000%'), + ('.3%', '2.34', '234.000%'), + ('.3%', '1.234567', '123.457%'), + ('.0%', '1.23', '123%'), + + ('e', 'NaN', 'NaN'), + ('f', '-NaN123', '-NaN123'), + ('+g', 'NaN456', '+NaN456'), + ('.3e', 'Inf', 'Infinity'), + ('.16f', '-Inf', '-Infinity'), + ('.0g', '-sNaN', '-sNaN'), + + ('', '1.00', '1.00'), + + # test alignment and padding + ('6', '123', ' 123'), + ('<6', '123', '123 '), + ('>6', '123', ' 123'), + ('^6', '123', ' 123 '), + ('=+6', '123', '+ 123'), + ('#<10', 'NaN', 'NaN#######'), + ('#<10', '-4.3', '-4.3######'), + ('#<+10', '0.0130', '+0.0130###'), + ('#< 10', '0.0130', ' 0.0130###'), + ('@>10', '-Inf', '@-Infinity'), + ('#>5', '-Inf', '-Infinity'), + ('?^5', '123', '?123?'), + ('%^6', '123', '%123%%'), + (' ^6', '-45.6', '-45.6 '), + ('/=10', '-45.6', '-/////45.6'), + ('/=+10', '45.6', '+/////45.6'), + ('/= 10', '45.6', ' /////45.6'), + ('\x00=10', '-inf', '-\x00Infinity'), + ('\x00^16', '-inf', '\x00\x00\x00-Infinity\x00\x00\x00\x00'), + ('\x00>10', '1.2345', '\x00\x00\x00\x001.2345'), + ('\x00<10', '1.2345', '1.2345\x00\x00\x00\x00'), + + # thousands separator + (',', '1234567', '1,234,567'), + (',', '123456', '123,456'), + (',', '12345', '12,345'), + (',', '1234', '1,234'), + (',', '123', '123'), + (',', '12', '12'), + (',', '1', '1'), + (',', '0', '0'), + (',', '-1234567', '-1,234,567'), + (',', '-123456', '-123,456'), + ('7,', '123456', '123,456'), + ('8,', '123456', ' 123,456'), + ('08,', '123456', '0,123,456'), # special case: extra 0 needed + ('+08,', '123456', '+123,456'), # but not if there's a sign + (' 08,', '123456', ' 123,456'), + ('08,', '-123456', '-123,456'), + ('+09,', '123456', '+0,123,456'), + # ... with fractional part... 
+ ('07,', '1234.56', '1,234.56'), + ('08,', '1234.56', '1,234.56'), + ('09,', '1234.56', '01,234.56'), + ('010,', '1234.56', '001,234.56'), + ('011,', '1234.56', '0,001,234.56'), + ('012,', '1234.56', '0,001,234.56'), + ('08,.1f', '1234.5', '01,234.5'), + # no thousands separators in fraction part + (',', '1.23456789', '1.23456789'), + (',%', '123.456789', '12,345.6789%'), + (',e', '123456', '1.23456e+5'), + (',E', '123456', '1.23456E+5'), + + # issue 6850 + ('a=-7.0', '0.12345', 'aaaa0.1'), + ] + for fmt, d, result in test_values: + self.assertEqual(format(Decimal(d), fmt), result) + + # bytes format argument + self.assertRaises(TypeError, Decimal(1).__format__, b'-020') + + def test_n_format(self): + Decimal = self.decimal.Decimal + + try: + from locale import CHAR_MAX + except ImportError: + self.skipTest('locale.CHAR_MAX not available') + + def make_grouping(lst): + return ''.join([chr(x) for x in lst]) + + def get_fmt(x, override=None, fmt='n'): + return Decimal(x).__format__(fmt, override) + + # Set up some localeconv-like dictionaries + en_US = { + 'decimal_point' : '.', + 'grouping' : make_grouping([3, 3, 0]), + 'thousands_sep' : ',' + } + + fr_FR = { + 'decimal_point' : ',', + 'grouping' : make_grouping([CHAR_MAX]), + 'thousands_sep' : '' + } + + ru_RU = { + 'decimal_point' : ',', + 'grouping': make_grouping([3, 3, 0]), + 'thousands_sep' : ' ' + } + + crazy = { + 'decimal_point' : '&', + 'grouping': make_grouping([1, 4, 2, CHAR_MAX]), + 'thousands_sep' : '-' + } + + dotsep_wide = { + 'decimal_point' : b'\xc2\xbf'.decode('utf-8'), + 'grouping': make_grouping([3, 3, 0]), + 'thousands_sep' : b'\xc2\xb4'.decode('utf-8') + } + + self.assertEqual(get_fmt(Decimal('12.7'), en_US), '12.7') + self.assertEqual(get_fmt(Decimal('12.7'), fr_FR), '12,7') + self.assertEqual(get_fmt(Decimal('12.7'), ru_RU), '12,7') + self.assertEqual(get_fmt(Decimal('12.7'), crazy), '1-2&7') + + self.assertEqual(get_fmt(123456789, en_US), '123,456,789') + self.assertEqual(get_fmt(123456789, fr_FR), '123456789') + self.assertEqual(get_fmt(123456789, ru_RU), '123 456 789') + self.assertEqual(get_fmt(1234567890123, crazy), '123456-78-9012-3') + + self.assertEqual(get_fmt(123456789, en_US, '.6n'), '1.23457e+8') + self.assertEqual(get_fmt(123456789, fr_FR, '.6n'), '1,23457e+8') + self.assertEqual(get_fmt(123456789, ru_RU, '.6n'), '1,23457e+8') + self.assertEqual(get_fmt(123456789, crazy, '.6n'), '1&23457e+8') + + # zero padding + self.assertEqual(get_fmt(1234, fr_FR, '03n'), '1234') + self.assertEqual(get_fmt(1234, fr_FR, '04n'), '1234') + self.assertEqual(get_fmt(1234, fr_FR, '05n'), '01234') + self.assertEqual(get_fmt(1234, fr_FR, '06n'), '001234') + + self.assertEqual(get_fmt(12345, en_US, '05n'), '12,345') + self.assertEqual(get_fmt(12345, en_US, '06n'), '12,345') + self.assertEqual(get_fmt(12345, en_US, '07n'), '012,345') + self.assertEqual(get_fmt(12345, en_US, '08n'), '0,012,345') + self.assertEqual(get_fmt(12345, en_US, '09n'), '0,012,345') + self.assertEqual(get_fmt(12345, en_US, '010n'), '00,012,345') + + self.assertEqual(get_fmt(123456, crazy, '06n'), '1-2345-6') + self.assertEqual(get_fmt(123456, crazy, '07n'), '1-2345-6') + self.assertEqual(get_fmt(123456, crazy, '08n'), '1-2345-6') + self.assertEqual(get_fmt(123456, crazy, '09n'), '01-2345-6') + self.assertEqual(get_fmt(123456, crazy, '010n'), '0-01-2345-6') + self.assertEqual(get_fmt(123456, crazy, '011n'), '0-01-2345-6') + self.assertEqual(get_fmt(123456, crazy, '012n'), '00-01-2345-6') + self.assertEqual(get_fmt(123456, crazy, '013n'), '000-01-2345-6') 
+ + # wide char separator and decimal point + self.assertEqual(get_fmt(Decimal('-1.5'), dotsep_wide, '020n'), + '-0\u00b4000\u00b4000\u00b4000\u00b4001\u00bf5') + diff --git a/rpython/rlib/rmpdec.py b/rpython/rlib/rmpdec.py --- a/rpython/rlib/rmpdec.py +++ b/rpython/rlib/rmpdec.py @@ -64,6 +64,7 @@ "mpd_qand", "mpd_qor", "mpd_qxor", "mpd_qcopy_sign", "mpd_qcopy_abs", "mpd_qcopy_negate", "mpd_qround_to_int", "mpd_qround_to_intx", + "mpd_parse_fmt_str", "mpd_qformat_spec", "mpd_validate_lconv", "mpd_version", ], compile_extra=compile_extra, @@ -140,6 +141,13 @@ ('allcr', lltype.Signed), ]) + MPD_SPEC_T = platform.Struct('mpd_spec_t', + [('dot', rffi.CCHARP), + ('sep', rffi.CCHARP), + ('grouping', rffi.CCHARP), + ('fill', rffi.CFixedArray(rffi.CHAR, 5)), + ]) + globals().update(platform.configure(CConfig)) @@ -150,6 +158,7 @@ MPD_PTR = lltype.Ptr(MPD_T) MPD_CONTEXT_PTR = lltype.Ptr(MPD_CONTEXT_T) +MPD_SPEC_PTR = lltype.Ptr(MPD_SPEC_T) # Initialization mpd_qset_ssize = external( @@ -395,4 +404,12 @@ 'mpd_qround_to_intx', [MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) +mpd_parse_fmt_str = external( + 'mpd_parse_fmt_str', [MPD_SPEC_PTR, rffi.CCHARP, rffi.INT], rffi.INT) +mpd_qformat_spec = external( + 'mpd_qformat_spec', [MPD_PTR, MPD_SPEC_PTR, MPD_CONTEXT_PTR, rffi.UINTP], + rffi.CCHARP) +mpd_validate_lconv = external( + 'mpd_validate_lconv', [MPD_SPEC_PTR], rffi.INT) + mpd_version = external('mpd_version', [], rffi.CCHARP, macro=True) From noreply at buildbot.pypy.org Sun Oct 5 20:23:04 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 5 Oct 2014 20:23:04 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Add Decimal.compare() Message-ID: <20141005182304.9622A1C023E@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r73800:0c6834800875 Date: 2014-09-27 17:43 +0200 http://bitbucket.org/pypy/pypy/changeset/0c6834800875/ Log: Add Decimal.compare() diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -799,8 +799,25 @@ return unary_method(space, mpd_func, w_self, w_context) return interp2app(descr_method) +# Binary function with an optional context arg. 
+def binary_method(space, mpd_func, w_self, w_other, w_context): + self = space.interp_w(W_Decimal, w_self) + context = convert_context(space, w_context) + w_a, w_b = convert_binop_raise(space, context, w_self, w_other) + w_result = W_Decimal.allocate(space) + with context.catch_status(space) as (ctx, status_ptr): + mpd_func(w_result.mpd, w_a.mpd, w_b.mpd, ctx, status_ptr) + return w_result + +def make_binary_method(mpd_func_name): + mpd_func = getattr(rmpdec, mpd_func_name) + @func_renamer('descr_%s' % mpd_func_name) + def descr_method(space, w_self, w_other, w_context=None): + return binary_method(space, mpd_func, w_self, w_other, w_context) + return interp2app(descr_method) + def convert_context(space, w_context): - if w_context is None: + if space.is_none(w_context): return interp_context.getcontext(space) return space.interp_w(interp_context.W_Context, w_context) @@ -1099,6 +1116,9 @@ to_integral_exact = interp2app(W_Decimal.to_integral_exact_w), to_integral_value = interp2app(W_Decimal.to_integral_w), sqrt = make_unary_method('mpd_qsqrt'), + # Binary arithmetic functions, optional context arg + compare = make_binary_method('mpd_qcompare'), + compare_signal = make_binary_method('mpd_qcompare_signal'), # Ternary arithmetic functions, optional context arg fma = interp2app(W_Decimal.fma_w), # Boolean functions, no context arg From noreply at buildbot.pypy.org Sun Oct 5 20:23:05 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 5 Oct 2014 20:23:05 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Not tested, does not translate: remove code. Message-ID: <20141005182305.CBF581C023E@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r73801:c43923fde747 Date: 2014-09-27 21:12 +0200 http://bitbucket.org/pypy/pypy/changeset/c43923fde747/ Log: Not tested, does not translate: remove code. diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -266,8 +266,8 @@ s = rffi.charp2str(ptr) if len(s) == 0 or (len(s) == 1 and 32 <= ord(s[0]) < 128): return None, ptr - u = locale_decode(s) - s = u.encode('utf-8') + # XXX use mbstowcs() + s = s ptr = rffi.str2charp(s) return ptr, ptr From noreply at buildbot.pypy.org Sun Oct 5 20:23:07 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 5 Oct 2014 20:23:07 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: More functions Message-ID: <20141005182307.154851C023E@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r73802:f104883f9752 Date: 2014-09-28 18:38 +0200 http://bitbucket.org/pypy/pypy/changeset/f104883f9752/ Log: More functions diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -265,7 +265,7 @@ def _recode_to_utf8(self, ptr): s = rffi.charp2str(ptr) if len(s) == 0 or (len(s) == 1 and 32 <= ord(s[0]) < 128): - return None, ptr + return lltype.nullptr(rffi.CCHARP.TO), ptr # XXX use mbstowcs() s = s ptr = rffi.str2charp(s) @@ -816,6 +816,22 @@ return binary_method(space, mpd_func, w_self, w_other, w_context) return interp2app(descr_method) +# Binary function, optional context arg for conversion errors. 
+def binary_method_noctx(space, mpd_func, w_self, w_other, w_context): + self = space.interp_w(W_Decimal, w_self) + context = convert_context(space, w_context) + w_a, w_b = convert_binop_raise(space, context, w_self, w_other) + w_result = W_Decimal.allocate(space) + mpd_func(w_result.mpd, w_a.mpd, w_b.mpd) + return w_result + +def make_binary_method_noctx(mpd_func_name): + mpd_func = getattr(rmpdec, mpd_func_name) + @func_renamer('descr_%s' % mpd_func_name) + def descr_method(space, w_self, w_other, w_context=None): + return binary_method_noctx(space, mpd_func, w_self, w_other, w_context) + return interp2app(descr_method) + def convert_context(space, w_context): if space.is_none(w_context): return interp_context.getcontext(space) @@ -1134,6 +1150,8 @@ is_normal = interp2app(W_Decimal.is_normal_w), is_subnormal = interp2app(W_Decimal.is_subnormal_w), # Binary functions, optional context arg for conversion errors + compare_total = make_binary_method_noctx('mpd_compare_total'), + compare_total_mag = make_binary_method_noctx('mpd_compare_total_mag'), copy_sign = interp2app(W_Decimal.copy_sign_w), # as_tuple = interp2app(W_Decimal.as_tuple_w), diff --git a/rpython/rlib/rmpdec.py b/rpython/rlib/rmpdec.py --- a/rpython/rlib/rmpdec.py +++ b/rpython/rlib/rmpdec.py @@ -52,6 +52,7 @@ "mpd_isnormal", "mpd_issubnormal", "mpd_isspecial", "mpd_iscanonical", "mpd_isnan", "mpd_issnan", "mpd_isqnan", "mpd_qcmp", "mpd_qcompare", "mpd_qcompare_signal", + "mpd_compare_total", "mpd_compare_total_mag", "mpd_qmin", "mpd_qmax", "mpd_qmin_mag", "mpd_qmax_mag", "mpd_qnext_minus", "mpd_qnext_plus", "mpd_qnext_toward", "mpd_qquantize", "mpd_qreduce", @@ -277,6 +278,12 @@ mpd_qcompare_signal = external( 'mpd_qcompare_signal', [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) +mpd_compare_total = external( + 'mpd_compare_total', + [MPD_PTR, MPD_PTR, MPD_PTR], lltype.Void) +mpd_compare_total_mag = external( + 'mpd_compare_total_mag', + [MPD_PTR, MPD_PTR, MPD_PTR], lltype.Void) mpd_qmin = external( 'mpd_qmin', From noreply at buildbot.pypy.org Sun Oct 5 20:23:08 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 5 Oct 2014 20:23:08 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Decimal.to_eng_string() Message-ID: <20141005182308.3D5701C023E@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r73803:7838c75545bd Date: 2014-09-28 15:35 +0200 http://bitbucket.org/pypy/pypy/changeset/7838c75545bd/ Log: Decimal.to_eng_string() diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -497,6 +497,19 @@ cp = rmpdec.mpd_class(self.mpd, context.ctx) return space.wrap(rffi.charp2str(cp)) + def to_eng_string_w(self, space, w_context=None): + context = interp_context.ensure_context(space, w_context) + with lltype.scoped_alloc(rffi.CCHARPP.TO, 1) as cp_ptr: + size = rmpdec.mpd_to_eng_size(cp_ptr, self.mpd, context.capitals) + if size < 0: + raise OperationError(space.w_MemoryError, space.w_None) + cp = cp_ptr[0] + try: + result = rffi.charpsize2str(cp, size) + finally: + rmpdec.mpd_free(cp) + return space.wrap(result) # Convert bytes to unicode + def to_integral_w(self, space, w_rounding=None, w_context=None): context = interp_context.ensure_context(space, w_context) w_workctx = context.copy_w(space) @@ -1128,6 +1141,7 @@ next_plus = make_unary_method('mpd_qnext_plus'), normalize = make_unary_method('mpd_qreduce'), 
number_class = interp2app(W_Decimal.number_class_w), + to_eng_string = interp2app(W_Decimal.to_eng_string_w), to_integral = interp2app(W_Decimal.to_integral_w), to_integral_exact = interp2app(W_Decimal.to_integral_exact_w), to_integral_value = interp2app(W_Decimal.to_integral_w), diff --git a/rpython/rlib/rmpdec.py b/rpython/rlib/rmpdec.py --- a/rpython/rlib/rmpdec.py +++ b/rpython/rlib/rmpdec.py @@ -46,7 +46,7 @@ "mpd_qsetprec", "mpd_qsetemin", "mpd_qsetemax", "mpd_qsetround", "mpd_qsetclamp", "mpd_maxcontext", "mpd_qnew", "mpd_del", - "mpd_to_sci", "mpd_to_sci_size", + "mpd_to_sci", "mpd_to_sci_size", "mpd_to_eng_size", "mpd_iszero", "mpd_isnegative", "mpd_issigned", "mpd_isfinite", "mpd_isinfinite", "mpd_isnormal", "mpd_issubnormal", "mpd_isspecial", "mpd_iscanonical", @@ -244,6 +244,8 @@ 'mpd_to_sci', [MPD_PTR, rffi.INT], rffi.CCHARP) mpd_to_sci_size = external( 'mpd_to_sci_size', [rffi.CCHARPP, MPD_PTR, rffi.INT], rffi.SSIZE_T) +mpd_to_eng_size = external( + 'mpd_to_eng_size', [rffi.CCHARPP, MPD_PTR, rffi.INT], rffi.SSIZE_T) # Operations mpd_iszero = external( From noreply at buildbot.pypy.org Sun Oct 5 20:23:09 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 5 Oct 2014 20:23:09 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Decimal.real and .imag Message-ID: <20141005182309.6B4C51C023E@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r73804:18a9e5d3a851 Date: 2014-09-28 17:04 +0200 http://bitbucket.org/pypy/pypy/changeset/18a9e5d3a851/ Log: Decimal.real and .imag diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -360,6 +360,14 @@ ret = ret.replace('\xff', '\0') return space.wrap(ret.decode('utf-8')) + def get_real(self, space): + context = interp_context.getcontext(space) + return decimal_from_decimal(space, None, self, context, exact=True) + + def get_imag(self, space): + context = interp_context.getcontext(space) + return decimal_from_ssize(space, None, 0, context, exact=True) + def compare(self, space, w_other, op): context = interp_context.getcontext(space) w_err, w_self, w_other = convert_binop_cmp( @@ -1100,6 +1108,9 @@ __round__ = interp2app(W_Decimal.descr_round), __format__ = interp2app(W_Decimal.descr_format), # + real = GetSetProperty(W_Decimal.get_real), + imag = GetSetProperty(W_Decimal.get_imag), + # __eq__ = interp2app(W_Decimal.descr_eq), __ne__ = interp2app(W_Decimal.descr_ne), __le__ = interp2app(W_Decimal.descr_le), diff --git a/pypy/module/_decimal/test/test_decimal.py b/pypy/module/_decimal/test/test_decimal.py --- a/pypy/module/_decimal/test/test_decimal.py +++ b/pypy/module/_decimal/test/test_decimal.py @@ -330,6 +330,12 @@ assert str(nc.create_decimal(Decimal('NaN12345'))) == 'NaN' assert nc.flags[InvalidOperation] + def test_complex(self): + Decimal = self.decimal.Decimal + d = Decimal("2.34") + assert d.real == d + assert d.imag == 0 + def test_operations(self): Decimal = self.decimal.Decimal From noreply at buildbot.pypy.org Sun Oct 5 20:23:10 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 5 Oct 2014 20:23:10 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: More methods Message-ID: <20141005182310.9F8C91C023E@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r73805:669e787ee92e Date: 2014-09-28 17:08 +0200 http://bitbucket.org/pypy/pypy/changeset/669e787ee92e/ Log: More methods diff 
--git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -1160,6 +1160,11 @@ # Binary arithmetic functions, optional context arg compare = make_binary_method('mpd_qcompare'), compare_signal = make_binary_method('mpd_qcompare_signal'), + max = make_binary_method('mpd_qmax'), + max_mag = make_binary_method('mpd_qmax_mag'), + min = make_binary_method('mpd_qmin'), + next_toward = make_binary_method('mpd_qnext_toward'), + remainder_near = make_binary_method('mpd_qrem_near'), # Ternary arithmetic functions, optional context arg fma = interp2app(W_Decimal.fma_w), # Boolean functions, no context arg From noreply at buildbot.pypy.org Sun Oct 5 20:23:11 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 5 Oct 2014 20:23:11 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Progress Message-ID: <20141005182311.DFC5C1C023E@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r73806:f9ce042d71da Date: 2014-09-28 21:39 +0200 http://bitbucket.org/pypy/pypy/changeset/f9ce042d71da/ Log: Progress diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -580,6 +580,10 @@ context = interp_context.ensure_context(space, w_context) return space.wrap(bool(rmpdec.mpd_issubnormal(self.mpd, context.ctx))) + def conjugate_w(self, space): + context = interp_context.getcontext(space) + return decimal_from_decimal(space, None, self, context, exact=True) + def as_tuple_w(self, space): "Return the DecimalTuple representation of a Decimal" w_sign = space.wrap(rmpdec.mpd_sign(self.mpd)) @@ -1163,8 +1167,15 @@ max = make_binary_method('mpd_qmax'), max_mag = make_binary_method('mpd_qmax_mag'), min = make_binary_method('mpd_qmin'), + min_mag = make_binary_method('mpd_qmin_mag'), next_toward = make_binary_method('mpd_qnext_toward'), remainder_near = make_binary_method('mpd_qrem_near'), + logical_and = make_binary_method('mpd_qand'), + logical_or = make_binary_method('mpd_qor'), + logical_xor = make_binary_method('mpd_qxor'), + rotate = make_binary_method('mpd_qrotate'), + scaleb = make_binary_method('mpd_qscaleb'), + shift = make_binary_method('mpd_qshift'), # Ternary arithmetic functions, optional context arg fma = interp2app(W_Decimal.fma_w), # Boolean functions, no context arg @@ -1179,6 +1190,8 @@ # Boolean functions, optional context arg is_normal = interp2app(W_Decimal.is_normal_w), is_subnormal = interp2app(W_Decimal.is_subnormal_w), + # Unary functions, no context arg + conjugate = interp2app(W_Decimal.conjugate_w), # Binary functions, optional context arg for conversion errors compare_total = make_binary_method_noctx('mpd_compare_total'), compare_total_mag = make_binary_method_noctx('mpd_compare_total_mag'), diff --git a/pypy/module/_decimal/test/test_decimal.py b/pypy/module/_decimal/test/test_decimal.py --- a/pypy/module/_decimal/test/test_decimal.py +++ b/pypy/module/_decimal/test/test_decimal.py @@ -335,6 +335,7 @@ d = Decimal("2.34") assert d.real == d assert d.imag == 0 + assert d.conjugate() == d def test_operations(self): Decimal = self.decimal.Decimal diff --git a/rpython/rlib/rmpdec.py b/rpython/rlib/rmpdec.py --- a/rpython/rlib/rmpdec.py +++ b/rpython/rlib/rmpdec.py @@ -63,6 +63,7 @@ "mpd_qexp", "mpd_qln", "mpd_qlog10", "mpd_qlogb", "mpd_qsqrt", "mpd_qinvert", "mpd_qand", "mpd_qor", "mpd_qxor", + 
"mpd_qrotate", "mpd_qscaleb", "mpd_qshift", "mpd_qcopy_sign", "mpd_qcopy_abs", "mpd_qcopy_negate", "mpd_qround_to_int", "mpd_qround_to_intx", "mpd_parse_fmt_str", "mpd_qformat_spec", "mpd_validate_lconv", @@ -406,6 +407,16 @@ [MPD_PTR, MPD_PTR, rffi.UINTP], lltype.Void) +mpd_qrotate = external( + 'mpd_qrotate', + [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) +mpd_qscaleb = external( + 'mpd_qscaleb', + [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) +mpd_qshift = external( + 'mpd_qshift', + [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) + mpd_qround_to_int = external( 'mpd_qround_to_int', [MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) From noreply at buildbot.pypy.org Sun Oct 5 20:23:13 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 5 Oct 2014 20:23:13 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: More methods Message-ID: <20141005182313.18BAA1C023E@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r73807:a75ccd9fcb14 Date: 2014-09-30 10:38 +0200 http://bitbucket.org/pypy/pypy/changeset/a75ccd9fcb14/ Log: More methods diff --git a/pypy/module/_decimal/interp_context.py b/pypy/module/_decimal/interp_context.py --- a/pypy/module/_decimal/interp_context.py +++ b/pypy/module/_decimal/interp_context.py @@ -306,6 +306,19 @@ ctx, status_ptr) return w_result + def apply_w(self, space, w_v): + from pypy.module._decimal import interp_decimal + w_a = interp_decimal.convert_op_raise(space, self, w_v) + return w_a.apply(space, self) + + def copy_abs_w(self, space, w_v): + from pypy.module._decimal import interp_decimal + w_a = interp_decimal.convert_op_raise(space, self, w_v) + w_result = interp_decimal.W_Decimal.allocate(space) + with self.catch_status(space) as (ctx, status_ptr): + rmpdec.mpd_qcopy_abs(w_result.mpd, w_a.mpd, status_ptr) + return w_result + def descr_new_context(space, w_subtype, __args__): w_result = space.allocate_instance(W_Context, w_subtype) W_Context.__init__(w_result, space) @@ -338,6 +351,19 @@ return w_result return interp2app(func_w) +def make_binary_method_noctx(mpd_func_name): + mpd_func = getattr(rmpdec, mpd_func_name) + @unwrap_spec(w_context=W_Context) + @func_renamer('descr_%s' % mpd_func_name) + def func_w(space, w_context, w_x, w_y): + from pypy.module._decimal import interp_decimal + w_a, w_b = interp_decimal.convert_binop_raise( + space, w_context, w_x, w_y) + w_result = interp_decimal.W_Decimal.allocate(space) + mpd_func(w_result.mpd, w_a.mpd, w_b.mpd) + return w_result + return interp2app(func_w) + def make_bool_method(mpd_func_name): mpd_func = getattr(rmpdec, mpd_func_name) @unwrap_spec(w_context=W_Context) @@ -430,6 +456,13 @@ is_nan=make_bool_method_noctx('mpd_isnan'), is_qnan=make_bool_method_noctx('mpd_isqnan'), is_snan=make_bool_method_noctx('mpd_issnan'), + # Functions with a single decimal argument + _apply=interp2app(W_Context.apply_w), + apply=interp2app(W_Context.apply_w), + copy_abs=interp2app(W_Context.copy_abs_w), + # Functions with two decimal arguments + compare_total = make_binary_method_noctx('mpd_compare_total'), + compare_total_mag = make_binary_method_noctx('mpd_compare_total_mag'), ) diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -544,6 +544,18 @@ w_workctx.ctx, status_ptr) return w_result + def quantize_w(self, space, w_exp, w_rounding=None, 
w_context=None): + context = interp_context.ensure_context(space, w_context) + w_workctx = context.copy_w(space) + if not space.is_none(w_rounding): + w_workctx.set_rounding(space, w_rounding) + w_a, w_b = convert_binop_raise(space, context, self, w_exp) + w_result = W_Decimal.allocate(space) + with context.catch_status(space) as (ctx, status_ptr): + rmpdec.mpd_qquantize(w_result.mpd, w_a.mpd, w_b.mpd, + w_workctx.ctx, status_ptr) + return w_result + # Ternary arithmetic functions, optional context arg def fma_w(self, space, w_other, w_third, w_context=None): context = interp_context.ensure_context(space, w_context) @@ -1169,6 +1181,7 @@ min = make_binary_method('mpd_qmin'), min_mag = make_binary_method('mpd_qmin_mag'), next_toward = make_binary_method('mpd_qnext_toward'), + quantize = interp2app(W_Decimal.quantize_w), remainder_near = make_binary_method('mpd_qrem_near'), logical_and = make_binary_method('mpd_qand'), logical_or = make_binary_method('mpd_qor'), diff --git a/pypy/module/_decimal/test/test_decimal.py b/pypy/module/_decimal/test/test_decimal.py --- a/pypy/module/_decimal/test/test_decimal.py +++ b/pypy/module/_decimal/test/test_decimal.py @@ -330,6 +330,32 @@ assert str(nc.create_decimal(Decimal('NaN12345'))) == 'NaN' assert nc.flags[InvalidOperation] + def test_quantize(self): + Decimal = self.decimal.Decimal + Context = self.decimal.Context + InvalidOperation = self.decimal.InvalidOperation + + c = Context(Emax=99999, Emin=-99999) + self.assertEqual( + Decimal('7.335').quantize(Decimal('.01')), + Decimal('7.34') + ) + self.assertEqual( + Decimal('7.335').quantize(Decimal('.01'), + rounding=self.decimal.ROUND_DOWN), + Decimal('7.33') + ) + self.assertRaises( + InvalidOperation, + Decimal("10e99999").quantize, Decimal('1e100000'), context=c + ) + + c = Context() + d = Decimal("0.871831e800") + x = d.quantize(context=c, exp=Decimal("1e797"), + rounding=self.decimal.ROUND_DOWN) + self.assertEqual(x, Decimal('8.71E+799')) + def test_complex(self): Decimal = self.decimal.Decimal d = Decimal("2.34") From noreply at buildbot.pypy.org Sun Oct 5 20:23:14 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 5 Oct 2014 20:23:14 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Fix comparison with complex Message-ID: <20141005182314.3AC6C1C023E@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r73808:37fcab3a88c9 Date: 2014-09-30 10:52 +0200 http://bitbucket.org/pypy/pypy/changeset/37fcab3a88c9/ Log: Fix comparison with complex diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -770,7 +770,8 @@ new_status = (rmpdec.MPD_Float_operation | rffi.cast(lltype.Signed, context.ctx.c_status)) context.ctx.c_status = rffi.cast(rffi.UINT, new_status) - w_w = decimal_from_float(space, None, w_w, context, exact=True) + w_w = decimal_from_float(space, None, space.wrap(real), + context, exact=True) else: return space.w_NotImplemented, None, None elif space.isinstance_w(w_w, space.fromcache(State).w_Rational): diff --git a/pypy/module/_decimal/test/test_decimal.py b/pypy/module/_decimal/test/test_decimal.py --- a/pypy/module/_decimal/test/test_decimal.py +++ b/pypy/module/_decimal/test/test_decimal.py @@ -21,6 +21,12 @@ cls.w_assertEqual = space.appexec([], """(): def assertEqual(x, y): assert x == y return assertEqual""") + cls.w_assertIs = space.appexec([], """(): + def assertIs(x, y): assert x is y + return 
assertIs""") + cls.w_assertNotEqual = space.appexec([], """(): + def assertNotEqual(x, y): assert x != y + return assertNotEqual""") cls.w_assertRaises = space.appexec([], """(): return raises""") def test_explicit_empty(self): @@ -888,6 +894,26 @@ doit(c, signal=FloatOperation) test_containers(c, signal=FloatOperation) + def test_decimal_complex_comparison(self): + Decimal = self.decimal.Decimal + + da = Decimal('0.25') + db = Decimal('3.0') + self.assertNotEqual(da, (1.5+0j)) + self.assertNotEqual((1.5+0j), da) + self.assertEqual(da, (0.25+0j)) + self.assertEqual((0.25+0j), da) + self.assertEqual((3.0+0j), db) + self.assertEqual(db, (3.0+0j)) + + self.assertNotEqual(db, (3.0+1j)) + self.assertNotEqual((3.0+1j), db) + + self.assertIs(db.__lt__(3.0+0j), NotImplemented) + self.assertIs(db.__le__(3.0+0j), NotImplemented) + self.assertIs(db.__gt__(3.0+0j), NotImplemented) + self.assertIs(db.__le__(3.0+0j), NotImplemented) + def test_decimal_fraction_comparison(self): C = self.decimal D = self.decimal.Decimal From noreply at buildbot.pypy.org Sun Oct 5 20:23:15 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 5 Oct 2014 20:23:15 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Fix, and add pickle support. Message-ID: <20141005182315.6EC9B1C023E@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r73809:5cbebee1fe07 Date: 2014-09-30 23:17 +0200 http://bitbucket.org/pypy/pypy/changeset/5cbebee1fe07/ Log: Fix, and add pickle support. diff --git a/pypy/module/_decimal/interp_context.py b/pypy/module/_decimal/interp_context.py --- a/pypy/module/_decimal/interp_context.py +++ b/pypy/module/_decimal/interp_context.py @@ -178,7 +178,7 @@ "valid range for prec is [1, MAX_PREC]") def get_rounding(self, space): - return space.wrap(rmpdec.mpd_getround(self.ctx)) + return space.wrap(self._rounding_string(space)) def set_rounding(self, space, w_rounding): rounding = space.str_w(w_rounding) @@ -387,7 +387,7 @@ return interp2app(func_w) W_Context.typedef = TypeDef( - 'Context', + '_decimal.Context', __new__ = interp2app(descr_new_context), __init__ = interp2app(W_Context.descr_init), # Attributes diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -360,6 +360,10 @@ ret = ret.replace('\xff', '\0') return space.wrap(ret.decode('utf-8')) + def descr_reduce(self, space): + return space.newtuple([space.type(self), + space.newtuple([space.str(self)])]) + def get_real(self, space): context = interp_context.getcontext(space) return decimal_from_decimal(space, None, self, context, exact=True) @@ -1111,7 +1115,7 @@ exact=True) W_Decimal.typedef = TypeDef( - 'Decimal', + '_decimal.Decimal', __new__ = interp2app(descr_new_decimal), __str__ = interp2app(W_Decimal.descr_str), __repr__ = interp2app(W_Decimal.descr_repr), @@ -1124,6 +1128,7 @@ __ceil__ = interp2app(W_Decimal.descr_ceil), __round__ = interp2app(W_Decimal.descr_round), __format__ = interp2app(W_Decimal.descr_format), + __reduce__ = interp2app(W_Decimal.descr_reduce), # real = GetSetProperty(W_Decimal.get_real), imag = GetSetProperty(W_Decimal.get_imag), diff --git a/pypy/module/_decimal/test/test_context.py b/pypy/module/_decimal/test/test_context.py --- a/pypy/module/_decimal/test/test_context.py +++ b/pypy/module/_decimal/test/test_context.py @@ -43,6 +43,31 @@ nc = copy.deepcopy(c) assert nc.traps[self.decimal.InvalidOperation] == False + def 
test_none_args(self): + Context = self.decimal.Context + InvalidOperation = self.decimal.InvalidOperation + DivisionByZero = self.decimal.DivisionByZero + Overflow = self.decimal.Overflow + + def assert_signals(context, attr, expected): + d = getattr(context, attr) + self.assertTrue( + all(d[s] if s in expected else not d[s] for s in d)) + + c1 = Context() + c2 = Context(prec=None, rounding=None, Emax=None, Emin=None, + capitals=None, clamp=None, flags=None, traps=None) + for c in [c1, c2]: + self.assertEqual(c.prec, 28) + self.assertEqual(c.rounding, self.decimal.ROUND_HALF_EVEN) + self.assertEqual(c.Emax, 999999) + self.assertEqual(c.Emin, -999999) + self.assertEqual(c.capitals, 1) + self.assertEqual(c.clamp, 0) + assert_signals(c, 'flags', []) + assert_signals(c, 'traps', [InvalidOperation, DivisionByZero, + Overflow]) + def test_context_repr(self): c = self.decimal.DefaultContext.copy() @@ -374,3 +399,19 @@ z = y.copy_sign(Decimal(1)) self.assertEqual(z, x) + def test_pickle(self): + import pickle + Context = self.decimal.Context + + # Round trip + c = Context() + e = pickle.loads(pickle.dumps(c)) + + self.assertEqual(c.prec, e.prec) + self.assertEqual(c.Emin, e.Emin) + self.assertEqual(c.Emax, e.Emax) + self.assertEqual(c.rounding, e.rounding) + self.assertEqual(c.capitals, e.capitals) + self.assertEqual(c.clamp, e.clamp) + self.assertEqual(c.flags, e.flags) + self.assertEqual(c.traps, e.traps) diff --git a/pypy/module/_decimal/test/test_decimal.py b/pypy/module/_decimal/test/test_decimal.py --- a/pypy/module/_decimal/test/test_decimal.py +++ b/pypy/module/_decimal/test/test_decimal.py @@ -1343,3 +1343,10 @@ self.assertEqual(get_fmt(Decimal('-1.5'), dotsep_wide, '020n'), '-0\u00b4000\u00b4000\u00b4000\u00b4001\u00bf5') + def test_pickle(self): + import pickle + Decimal = self.decimal.Decimal + d = Decimal('-3.141590000') + p = pickle.dumps(d) + e = pickle.loads(p) + self.assertEqual(d, e) From noreply at buildbot.pypy.org Sun Oct 5 20:23:16 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 5 Oct 2014 20:23:16 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Fix cross-pickling between _decimal and decimal.py Message-ID: <20141005182316.991F01C023E@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r73810:0bde01cb23ae Date: 2014-10-01 19:08 +0200 http://bitbucket.org/pypy/pypy/changeset/0bde01cb23ae/ Log: Fix cross-pickling between _decimal and decimal.py diff --git a/pypy/module/_decimal/interp_context.py b/pypy/module/_decimal/interp_context.py --- a/pypy/module/_decimal/interp_context.py +++ b/pypy/module/_decimal/interp_context.py @@ -387,7 +387,7 @@ return interp2app(func_w) W_Context.typedef = TypeDef( - '_decimal.Context', + 'decimal.Context', __new__ = interp2app(descr_new_context), __init__ = interp2app(W_Context.descr_init), # Attributes diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -1115,7 +1115,7 @@ exact=True) W_Decimal.typedef = TypeDef( - '_decimal.Decimal', + 'decimal.Decimal', __new__ = interp2app(descr_new_decimal), __str__ = interp2app(W_Decimal.descr_str), __repr__ = interp2app(W_Decimal.descr_repr), From noreply at buildbot.pypy.org Sun Oct 5 20:23:17 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 5 Oct 2014 20:23:17 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: add Decimal.same_quantum Message-ID: 
<20141005182317.D1B671C023E@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r73811:95ba229db13d Date: 2014-10-01 19:23 +0200 http://bitbucket.org/pypy/pypy/changeset/95ba229db13d/ Log: add Decimal.same_quantum diff --git a/pypy/module/_decimal/interp_context.py b/pypy/module/_decimal/interp_context.py --- a/pypy/module/_decimal/interp_context.py +++ b/pypy/module/_decimal/interp_context.py @@ -277,6 +277,12 @@ from pypy.module._decimal import interp_decimal return interp_decimal.W_Decimal.divmod_impl(space, self, w_x, w_y) + def same_quantum_w(self, space, w_v, w_w): + from pypy.module._decimal import interp_decimal + w_a, w_b = interp_decimal.convert_binop_raise(space, self, w_v, w_w) + result = rmpdec.mpd_same_quantum(w_a.mpd, w_b.mpd) + return space.wrap(bool(result)) + # Ternary operations def power_w(self, space, w_a, w_b, w_modulo=None): from pypy.module._decimal import interp_decimal @@ -443,6 +449,7 @@ logical_and=make_binary_method('mpd_qand'), logical_or=make_binary_method('mpd_qor'), logical_xor=make_binary_method('mpd_qxor'), + same_quantum = interp2app(W_Context.same_quantum_w), # Ternary operations power=interp2app(W_Context.power_w), fma=interp2app(W_Context.fma_w), diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -495,13 +495,19 @@ def copy_sign_w(self, space, w_other, w_context=None): context = convert_context(space, w_context) - w_other = convert_op_raise(space, context, w_other) + w_a, w_b = convert_binop_raise(space, context, self, w_other) w_result = W_Decimal.allocate(space) with context.catch_status(space) as (ctx, status_ptr): - rmpdec.mpd_qcopy_sign(w_result.mpd, self.mpd, w_other.mpd, + rmpdec.mpd_qcopy_sign(w_result.mpd, w_a.mpd, w_b.mpd, status_ptr) return w_result + def same_quantum_w(self, space, w_other, w_context=None): + context = convert_context(space, w_context) + w_a, w_b = convert_binop_raise(space, context, self, w_other) + result = rmpdec.mpd_same_quantum(w_a.mpd, w_b.mpd) + return space.wrap(bool(result)) + # Unary arithmetic functions, optional context arg def number_class_w(self, space, w_context=None): @@ -1215,6 +1221,7 @@ compare_total = make_binary_method_noctx('mpd_compare_total'), compare_total_mag = make_binary_method_noctx('mpd_compare_total_mag'), copy_sign = interp2app(W_Decimal.copy_sign_w), + same_quantum = interp2app(W_Decimal.same_quantum_w), # as_tuple = interp2app(W_Decimal.as_tuple_w), from_float = interp2app(decimal_from_float_w, as_classmethod=True), diff --git a/rpython/rlib/rmpdec.py b/rpython/rlib/rmpdec.py --- a/rpython/rlib/rmpdec.py +++ b/rpython/rlib/rmpdec.py @@ -41,7 +41,7 @@ "mpd_qcopy", "mpd_qncopy", "mpd_setspecial", "mpd_clear_flags", "mpd_qimport_u32", "mpd_qexport_u32", "mpd_qexport_u16", "mpd_set_sign", "mpd_set_positive", "mpd_sign", "mpd_qfinalize", - "mpd_class", + "mpd_class", "mpd_same_quantum", "mpd_getprec", "mpd_getemin", "mpd_getemax", "mpd_getround", "mpd_getclamp", "mpd_qsetprec", "mpd_qsetemin", "mpd_qsetemax", "mpd_qsetround", "mpd_qsetclamp", "mpd_maxcontext", @@ -203,6 +203,8 @@ 'mpd_qfinalize', [MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) mpd_class = external( 'mpd_class', [MPD_PTR, MPD_CONTEXT_PTR], rffi.CCHARP) +mpd_same_quantum = external( + 'mpd_same_quantum', [MPD_PTR, MPD_PTR], rffi.INT) # Context operations mpd_getprec = external( From noreply at buildbot.pypy.org Sun Oct 5 20:23:19 2014 From: noreply at 
buildbot.pypy.org (amauryfa) Date: Sun, 5 Oct 2014 20:23:19 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: more methods Message-ID: <20141005182319.1D1481C023E@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r73812:7fc8de349403 Date: 2014-10-02 19:27 +0200 http://bitbucket.org/pypy/pypy/changeset/7fc8de349403/ Log: more methods diff --git a/pypy/module/_decimal/interp_context.py b/pypy/module/_decimal/interp_context.py --- a/pypy/module/_decimal/interp_context.py +++ b/pypy/module/_decimal/interp_context.py @@ -273,10 +273,29 @@ interp_signals.flags_as_list(space, self.ctx.c_traps), ])]) + def number_class_w(self, space, w_v): + from pypy.module._decimal import interp_decimal + w_a = interp_decimal.convert_op_raise(space, self, w_v) + cp = rmpdec.mpd_class(w_a.mpd, self.ctx) + return space.wrap(rffi.charp2str(cp)) + + def to_eng_string_w(self, space, w_v): + from pypy.module._decimal import interp_decimal + w_a = interp_decimal.convert_op_raise(space, self, w_v) + return w_a.to_eng_string_w(space, self) + def divmod_w(self, space, w_x, w_y): from pypy.module._decimal import interp_decimal return interp_decimal.W_Decimal.divmod_impl(space, self, w_x, w_y) + def copy_sign_w(self, space, w_v, w_w): + from pypy.module._decimal import interp_decimal + w_a, w_b = interp_decimal.convert_binop_raise(space, self, w_v, w_w) + w_result = interp_decimal.W_Decimal.allocate(space) + with self.catch_status(space) as (ctx, status_ptr): + rmpdec.mpd_qcopy_sign(w_result.mpd, w_a.mpd, w_b.mpd, status_ptr) + return w_result + def same_quantum_w(self, space, w_v, w_w): from pypy.module._decimal import interp_decimal w_a, w_b = interp_decimal.convert_binop_raise(space, self, w_v, w_w) @@ -325,6 +344,19 @@ rmpdec.mpd_qcopy_abs(w_result.mpd, w_a.mpd, status_ptr) return w_result + def copy_decimal_w(self, space, w_v): + from pypy.module._decimal import interp_decimal + w_a = interp_decimal.convert_op_raise(space, self, w_v) + return w_a + + def copy_negate_w(self, space, w_v): + from pypy.module._decimal import interp_decimal + w_a = interp_decimal.convert_op_raise(space, self, w_v) + w_result = interp_decimal.W_Decimal.allocate(space) + with self.catch_status(space) as (ctx, status_ptr): + rmpdec.mpd_qcopy_negate(w_result.mpd, w_a.mpd, status_ptr) + return w_result + def descr_new_context(space, w_subtype, __args__): w_result = space.allocate_instance(W_Context, w_subtype) W_Context.__init__(w_result, space) @@ -428,7 +460,10 @@ to_integral_exact=make_unary_method('mpd_qround_to_intx'), to_integral_value=make_unary_method('mpd_qround_to_int', tag='value'), sqrt=make_unary_method('mpd_qsqrt'), + logb=make_unary_method('mpd_qlogb'), logical_invert=make_unary_method('mpd_qinvert'), + number_class=interp2app(W_Context.number_class_w), + to_eng_string=interp2app(W_Context.to_eng_string_w), # Binary Operations add=make_binary_method('mpd_qadd'), subtract=make_binary_method('mpd_qsub'), @@ -446,10 +481,14 @@ quantize=make_binary_method('mpd_qquantize'), remainder=make_binary_method('mpd_qrem'), remainder_near=make_binary_method('mpd_qrem_near'), + copy_sign = interp2app(W_Context.copy_sign_w), logical_and=make_binary_method('mpd_qand'), logical_or=make_binary_method('mpd_qor'), logical_xor=make_binary_method('mpd_qxor'), + rotate=make_binary_method('mpd_qrotate'), same_quantum = interp2app(W_Context.same_quantum_w), + scaleb=make_binary_method('mpd_qscaleb'), + shift=make_binary_method('mpd_qshift'), # Ternary operations power=interp2app(W_Context.power_w), 
fma=interp2app(W_Context.fma_w), @@ -467,6 +506,8 @@ _apply=interp2app(W_Context.apply_w), apply=interp2app(W_Context.apply_w), copy_abs=interp2app(W_Context.copy_abs_w), + copy_decimal=interp2app(W_Context.copy_decimal_w), + copy_negate=interp2app(W_Context.copy_negate_w), # Functions with two decimal arguments compare_total = make_binary_method_noctx('mpd_compare_total'), compare_total_mag = make_binary_method_noctx('mpd_compare_total_mag'), diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -606,6 +606,13 @@ context = interp_context.getcontext(space) return decimal_from_decimal(space, None, self, context, exact=True) + def adjusted_w(self, space): + if rmpdec.mpd_isspecial(self.mpd): + ret = 0 + else: + ret = rmpdec.mpd_adjexp(self.mpd) + return space.wrap(ret) + def as_tuple_w(self, space): "Return the DecimalTuple representation of a Decimal" w_sign = space.wrap(rmpdec.mpd_sign(self.mpd)) @@ -1216,6 +1223,7 @@ is_normal = interp2app(W_Decimal.is_normal_w), is_subnormal = interp2app(W_Decimal.is_subnormal_w), # Unary functions, no context arg + adjusted = interp2app(W_Decimal.adjusted_w), conjugate = interp2app(W_Decimal.conjugate_w), # Binary functions, optional context arg for conversion errors compare_total = make_binary_method_noctx('mpd_compare_total'), diff --git a/rpython/rlib/rmpdec.py b/rpython/rlib/rmpdec.py --- a/rpython/rlib/rmpdec.py +++ b/rpython/rlib/rmpdec.py @@ -41,7 +41,7 @@ "mpd_qcopy", "mpd_qncopy", "mpd_setspecial", "mpd_clear_flags", "mpd_qimport_u32", "mpd_qexport_u32", "mpd_qexport_u16", "mpd_set_sign", "mpd_set_positive", "mpd_sign", "mpd_qfinalize", - "mpd_class", "mpd_same_quantum", + "mpd_class", "mpd_same_quantum", "mpd_adjexp", "mpd_getprec", "mpd_getemin", "mpd_getemax", "mpd_getround", "mpd_getclamp", "mpd_qsetprec", "mpd_qsetemin", "mpd_qsetemax", "mpd_qsetround", "mpd_qsetclamp", "mpd_maxcontext", @@ -205,6 +205,8 @@ 'mpd_class', [MPD_PTR, MPD_CONTEXT_PTR], rffi.CCHARP) mpd_same_quantum = external( 'mpd_same_quantum', [MPD_PTR, MPD_PTR], rffi.INT) +mpd_adjexp = external( + 'mpd_adjexp', [MPD_PTR], rffi.SSIZE_T) # Context operations mpd_getprec = external( From noreply at buildbot.pypy.org Sun Oct 5 20:23:20 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 5 Oct 2014 20:23:20 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: more methods Message-ID: <20141005182320.414ED1C023E@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r73813:468148a8888b Date: 2014-10-02 19:38 +0200 http://bitbucket.org/pypy/pypy/changeset/468148a8888b/ Log: more methods diff --git a/pypy/module/_decimal/interp_context.py b/pypy/module/_decimal/interp_context.py --- a/pypy/module/_decimal/interp_context.py +++ b/pypy/module/_decimal/interp_context.py @@ -284,6 +284,11 @@ w_a = interp_decimal.convert_op_raise(space, self, w_v) return w_a.to_eng_string_w(space, self) + def to_sci_string_w(self, space, w_v): + from pypy.module._decimal import interp_decimal + w_a = interp_decimal.convert_op_raise(space, self, w_v) + return w_a.descr_str(space, self) + def divmod_w(self, space, w_x, w_y): from pypy.module._decimal import interp_decimal return interp_decimal.W_Decimal.divmod_impl(space, self, w_x, w_y) @@ -464,6 +469,7 @@ logical_invert=make_unary_method('mpd_qinvert'), number_class=interp2app(W_Context.number_class_w), to_eng_string=interp2app(W_Context.to_eng_string_w), + 
to_sci_string=interp2app(W_Context.to_sci_string_w), # Binary Operations add=make_binary_method('mpd_qadd'), subtract=make_binary_method('mpd_qsub'), diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -80,8 +80,8 @@ rmpdec.mpd_qfinalize(w_result.mpd, context.ctx, status_ptr) return w_result - def descr_str(self, space): - context = interp_context.getcontext(space) + def descr_str(self, space, w_context=None): + context = convert_context(space, w_context) with lltype.scoped_alloc(rffi.CCHARPP.TO, 1) as cp_ptr: size = rmpdec.mpd_to_sci_size(cp_ptr, self.mpd, context.capitals) if size < 0: From noreply at buildbot.pypy.org Sun Oct 5 20:23:21 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 5 Oct 2014 20:23:21 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: More methods Message-ID: <20141005182321.91EDE1C023E@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r73814:5a97de386f52 Date: 2014-10-04 21:23 +0200 http://bitbucket.org/pypy/pypy/changeset/5a97de386f52/ Log: More methods diff --git a/pypy/module/_decimal/interp_context.py b/pypy/module/_decimal/interp_context.py --- a/pypy/module/_decimal/interp_context.py +++ b/pypy/module/_decimal/interp_context.py @@ -307,6 +307,17 @@ result = rmpdec.mpd_same_quantum(w_a.mpd, w_b.mpd) return space.wrap(bool(result)) + def etiny_w(self, space): + return space.wrap(rmpdec.mpd_etiny(self.ctx)) + + def etop_w(self, space): + return space.wrap(rmpdec.mpd_etop(self.ctx)) + + def radix_w(self, space): + from pypy.module._decimal import interp_decimal + return interp_decimal.decimal_from_ssize( + space, None, 10, self, exact=True) + # Ternary operations def power_w(self, space, w_a, w_b, w_modulo=None): from pypy.module._decimal import interp_decimal @@ -341,6 +352,12 @@ w_a = interp_decimal.convert_op_raise(space, self, w_v) return w_a.apply(space, self) + def canonical_w(self, space, w_v): + from pypy.module._decimal import interp_decimal + # Just check the type + space.interp_w(interp_decimal.W_Decimal, w_v) + return w_v + def copy_abs_w(self, space, w_v): from pypy.module._decimal import interp_decimal w_a = interp_decimal.convert_op_raise(space, self, w_v) @@ -498,6 +515,10 @@ # Ternary operations power=interp2app(W_Context.power_w), fma=interp2app(W_Context.fma_w), + # No argument + Etiny=interp2app(W_Context.etiny_w), + Etop=interp2app(W_Context.etop_w), + radix=interp2app(W_Context.radix_w), # Boolean operations is_signed=make_bool_method_noctx('mpd_issigned'), is_zero=make_bool_method_noctx('mpd_iszero'), @@ -511,6 +532,7 @@ # Functions with a single decimal argument _apply=interp2app(W_Context.apply_w), apply=interp2app(W_Context.apply_w), + canonical=interp2app(W_Context.canonical_w), copy_abs=interp2app(W_Context.copy_abs_w), copy_decimal=interp2app(W_Context.copy_decimal_w), copy_negate=interp2app(W_Context.copy_negate_w), diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -606,6 +606,10 @@ context = interp_context.getcontext(space) return decimal_from_decimal(space, None, self, context, exact=True) + def radix_w(self, space): + context = interp_context.getcontext(space) + return decimal_from_ssize(space, None, 10, context, exact=True) + def adjusted_w(self, space): if rmpdec.mpd_isspecial(self.mpd): ret = 0 @@ 
-1225,6 +1229,7 @@ # Unary functions, no context arg adjusted = interp2app(W_Decimal.adjusted_w), conjugate = interp2app(W_Decimal.conjugate_w), + radix = interp2app(W_Decimal.radix_w), # Binary functions, optional context arg for conversion errors compare_total = make_binary_method_noctx('mpd_compare_total'), compare_total_mag = make_binary_method_noctx('mpd_compare_total_mag'), diff --git a/rpython/rlib/rmpdec.py b/rpython/rlib/rmpdec.py --- a/rpython/rlib/rmpdec.py +++ b/rpython/rlib/rmpdec.py @@ -41,7 +41,7 @@ "mpd_qcopy", "mpd_qncopy", "mpd_setspecial", "mpd_clear_flags", "mpd_qimport_u32", "mpd_qexport_u32", "mpd_qexport_u16", "mpd_set_sign", "mpd_set_positive", "mpd_sign", "mpd_qfinalize", - "mpd_class", "mpd_same_quantum", "mpd_adjexp", + "mpd_class", "mpd_same_quantum", "mpd_adjexp", "mpd_etiny", "mpd_etop", "mpd_getprec", "mpd_getemin", "mpd_getemax", "mpd_getround", "mpd_getclamp", "mpd_qsetprec", "mpd_qsetemin", "mpd_qsetemax", "mpd_qsetround", "mpd_qsetclamp", "mpd_maxcontext", @@ -207,6 +207,10 @@ 'mpd_same_quantum', [MPD_PTR, MPD_PTR], rffi.INT) mpd_adjexp = external( 'mpd_adjexp', [MPD_PTR], rffi.SSIZE_T) +mpd_etiny = external( + 'mpd_etiny', [MPD_CONTEXT_PTR], rffi.SSIZE_T) +mpd_etop = external( + 'mpd_etop', [MPD_CONTEXT_PTR], rffi.SSIZE_T) # Context operations mpd_getprec = external( From noreply at buildbot.pypy.org Sun Oct 5 20:23:22 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 5 Oct 2014 20:23:22 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: More fixes Message-ID: <20141005182322.CD84F1C023E@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r73815:ce9baaf3a921 Date: 2014-10-05 20:09 +0200 http://bitbucket.org/pypy/pypy/changeset/ce9baaf3a921/ Log: More fixes diff --git a/pypy/module/_decimal/interp_context.py b/pypy/module/_decimal/interp_context.py --- a/pypy/module/_decimal/interp_context.py +++ b/pypy/module/_decimal/interp_context.py @@ -531,7 +531,7 @@ is_snan=make_bool_method_noctx('mpd_issnan'), # Functions with a single decimal argument _apply=interp2app(W_Context.apply_w), - apply=interp2app(W_Context.apply_w), + # apply=interp2app(W_Context.apply_w), canonical=interp2app(W_Context.canonical_w), copy_abs=interp2app(W_Context.copy_abs_w), copy_decimal=interp2app(W_Context.copy_decimal_w), diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -1030,7 +1030,9 @@ def decimal_from_decimal(space, w_subtype, w_value, context, exact=True): assert isinstance(w_value, W_Decimal) if exact: - if space.is_w(w_subtype, space.gettypeobject(W_Decimal.typedef)): + if (space.is_w(w_subtype, space.gettypeobject(W_Decimal.typedef)) and + space.is_w(space.type(w_value), + space.gettypeobject(W_Decimal.typedef))): return w_value w_result = W_Decimal.allocate(space, w_subtype) with interp_context.ConvContext( From noreply at buildbot.pypy.org Sun Oct 5 20:23:24 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 5 Oct 2014 20:23:24 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: merge heads Message-ID: <20141005182324.40FD31C023E@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r73816:4b30aa9e63e5 Date: 2014-10-05 20:20 +0200 http://bitbucket.org/pypy/pypy/changeset/4b30aa9e63e5/ Log: merge heads diff --git a/pypy/module/faulthandler/interp_faulthandler.py b/pypy/module/faulthandler/interp_faulthandler.py --- 
a/pypy/module/faulthandler/interp_faulthandler.py +++ b/pypy/module/faulthandler/interp_faulthandler.py @@ -6,6 +6,8 @@ from rpython.translator.tool.cbuild import ExternalCompilationInfo from pypy.interpreter.error import OperationError, oefmt +MAX_NTHREADS = 100 + cwd = py.path.local(__file__).dirpath() eci = ExternalCompilationInfo( includes=[cwd.join('faulthandler.h')], @@ -43,12 +45,18 @@ class FatalErrorState(object): def __init__(self, space): self.enabled = False + self.all_threads = True -def enable(space): - space.fromcache(FatalErrorState).enabled = True + at unwrap_spec(w_file=WrappedDefault(None), + w_all_threads=WrappedDefault(True)) +def enable(space, w_file, w_all_threads): + state = space.fromcache(FatalErrorState) + state.enabled = True + state.all_threads = bool(space.int_w(w_all_threads)) def disable(space): - space.fromcache(FatalErrorState).enabled = False + state = space.fromcache(FatalErrorState) + state.enabled = False def is_enabled(space): return space.wrap(space.fromcache(FatalErrorState).enabled) @@ -60,25 +68,40 @@ @unwrap_spec(w_file=WrappedDefault(None), w_all_threads=WrappedDefault(True)) def dump_traceback(space, w_file, w_all_threads): - ec = space.getexecutioncontext() - ecs = space.threadlocals.getallvalues() + current_ec = space.getexecutioncontext() + if space.int_w(w_all_threads): + ecs = space.threadlocals.getallvalues() + else: + ecs = {0: current_ec} if space.is_none(w_file): w_file = space.sys.get('stderr') fd = space.c_filedescriptor_w(w_file) - frame = ec.gettopframe() - while frame: - code = frame.pycode - lineno = frame.get_last_lineno() - if code: - os.write(fd, "File \"%s\", line %s in %s\n" % ( - code.co_filename, lineno, code.co_name)) + nthreads = 0 + for thread_ident, ec in ecs.items(): + if nthreads: + os.write(fd, "\n") + if nthreads >= MAX_NTHREADS: + os.write(fd, "...\n") + break + if ec is current_ec: + os.write(fd, "Current thread 0x%x:\n" % thread_ident) else: - os.write(fd, "File ???, line %s in ???\n" % ( - lineno,)) + os.write(fd, "Thread 0x%x:\n" % thread_ident) - frame = frame.f_backref() + frame = ec.gettopframe() + while frame: + code = frame.pycode + lineno = frame.get_last_lineno() + if code: + os.write(fd, "File \"%s\", line %s in %s\n" % ( + code.co_filename, lineno, code.co_name)) + else: + os.write(fd, "File ???, line %s in ???\n" % ( + lineno,)) + + frame = frame.f_backref() @unwrap_spec(w_release_gil=WrappedDefault(False)) diff --git a/pypy/module/faulthandler/test/test_faulthander.py b/pypy/module/faulthandler/test/test_faulthander.py --- a/pypy/module/faulthandler/test/test_faulthander.py +++ b/pypy/module/faulthandler/test/test_faulthander.py @@ -4,13 +4,14 @@ } def test_enable(self): - import faulthandler + import faulthandler, sys faulthandler.enable() assert faulthandler.is_enabled() is True + faulthandler.enable(file=sys.stderr, all_threads=True) faulthandler.disable() assert faulthandler.is_enabled() is False def test_dump_traceback(self): - import faulthandler + import faulthandler, sys faulthandler.dump_traceback() - + faulthandler.dump_traceback(file=sys.stderr, all_threads=True) diff --git a/pypy/module/faulthandler/test/test_ztranslation.py b/pypy/module/faulthandler/test/test_ztranslation.py new file mode 100644 --- /dev/null +++ b/pypy/module/faulthandler/test/test_ztranslation.py @@ -0,0 +1,4 @@ +from pypy.objspace.fake.checkmodule import checkmodule + +def test_faulthandler_translates(): + checkmodule('faulthandler') From noreply at buildbot.pypy.org Sun Oct 5 20:23:27 2014 From: noreply at 
buildbot.pypy.org (amauryfa) Date: Sun, 5 Oct 2014 20:23:27 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: hg merge py3.3 Message-ID: <20141005182327.242721C023E@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r73817:8f8d7ad8360d Date: 2014-10-05 20:21 +0200 http://bitbucket.org/pypy/pypy/changeset/8f8d7ad8360d/ Log: hg merge py3.3 diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -60,7 +60,8 @@ skip=None): self.basename = basename self._usemodules = usemodules.split() + [ - '_socket', 'binascii', 'rctime', 'select', 'signal'] + '_socket', 'binascii', 'rctime', + 'select', 'signal', 'faulthandler'] if not sys.platform == 'win32': self._usemodules.extend(['_posixsubprocess', 'fcntl']) self._compiler = compiler diff --git a/lib_pypy/ctypes_support.py b/lib_pypy/ctypes_support.py --- a/lib_pypy/ctypes_support.py +++ b/lib_pypy/ctypes_support.py @@ -22,7 +22,7 @@ def _where_is_errno(): return standard_c_lib._errno() -elif sys.platform in ('linux2', 'freebsd6'): +elif sys.platform in ('linux', 'freebsd6'): standard_c_lib.__errno_location.restype = ctypes.POINTER(ctypes.c_int) standard_c_lib.__errno_location.argtypes = None def _where_is_errno(): diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -85,10 +85,13 @@ Abridged method (for -Ojit builds using Visual Studio 2008) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Download the versions of all the external packages -from +Download the versions of all the external packages from +https://bitbucket.org/pypy/pypy/downloads/local_2.4.zip +(for 2.4 release and later) or https://bitbucket.org/pypy/pypy/downloads/local.zip -Then expand it into the base directory (base_dir) and modify your environment to reflect this:: +(for pre-2.4 versions) +Then expand it into the base directory (base_dir) and modify your environment +to reflect this:: set PATH=\bin;\tcltk\bin;%PATH% set INCLUDE=\include;\tcltk\include;%INCLUDE% diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -46,6 +46,7 @@ def cpython_code_signature(code): "([list-of-arg-names], vararg-name-or-None, kwarg-name-or-None)." 
argcount = code.co_argcount + varnames = code.co_varnames if we_are_translated(): kwonlyargcount = code.co_kwonlyargcount else: @@ -53,16 +54,18 @@ kwonlyargcount = getattr(code, 'co_kwonlyargcount', 0) assert argcount >= 0 # annotator hint assert kwonlyargcount >= 0 - argnames = list(code.co_varnames[:argcount]) - kwonlyargs = list(code.co_varnames[argcount:argcount + kwonlyargcount]) + argnames = list(varnames[:argcount]) + if argcount < len(varnames): + kwonlyargs = list(varnames[argcount:argcount + kwonlyargcount]) + else: + kwonlyargs = None if code.co_flags & CO_VARARGS: - varargname = code.co_varnames[argcount] + varargname = varnames[argcount] argcount += 1 else: varargname = None if code.co_flags & CO_VARKEYWORDS: kwargname = code.co_varnames[argcount + kwonlyargcount] - argcount += 1 else: kwargname = None return Signature(argnames, varargname, kwargname, kwonlyargs) diff --git a/pypy/interpreter/pyparser/test/test_parsestring.py b/pypy/interpreter/pyparser/test/test_parsestring.py --- a/pypy/interpreter/pyparser/test/test_parsestring.py +++ b/pypy/interpreter/pyparser/test/test_parsestring.py @@ -100,11 +100,11 @@ def test_simple_enc_roundtrip(self): space = self.space - s = "'\x81'" + s = "'\x81\\t'" s = s.decode("koi8-u").encode("utf8") w_ret = parsestring.parsestr(self.space, 'koi8-u', s) ret = space.unwrap(w_ret) - assert ret == eval("# -*- coding: koi8-u -*-\nu'\x81'") + assert ret == eval("# -*- coding: koi8-u -*-\nu'\x81\\t'") def test_multiline_unicode_strings_with_backslash(self): space = self.space diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -1064,7 +1064,7 @@ prefix = udir.join('pathtest').ensure(dir=1) fake_exe = 'bin/pypy-c' if sys.platform == 'win32': - fake_exe += '.exe' + fake_exe = 'pypy-c.exe' fake_exe = prefix.join(fake_exe).ensure(file=1) expected_path = [str(prefix.join(subdir).ensure(dir=1)) for subdir in ('lib_pypy', @@ -1104,8 +1104,10 @@ assert sys.path == old_sys_path + [self.goal_dir] app_main.setup_bootstrap_path(self.fake_exe) - assert sys.executable == '' # not executable! - assert sys.path == old_sys_path + [self.goal_dir] + if not sys.platform == 'win32': + # an existing file is always 'executable' on windows + assert sys.executable == '' # not executable! 
+ assert sys.path == old_sys_path + [self.goal_dir] os.chmod(self.fake_exe, 0o755) app_main.setup_bootstrap_path(self.fake_exe) diff --git a/pypy/interpreter/test/test_code.py b/pypy/interpreter/test/test_code.py --- a/pypy/interpreter/test/test_code.py +++ b/pypy/interpreter/test/test_code.py @@ -194,3 +194,9 @@ # CO_NESTED assert d['f'](4).__code__.co_flags & 0x10 assert d['f'].__code__.co_flags & 0x10 == 0 + + def test_issue1844(self): + import types + args = (1, 0, 1, 0, 0, b'', (), (), (), '', 'operator', 0, b'') + # previously raised a MemoryError when translated + types.CodeType(*args) diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -8,6 +8,7 @@ from rpython.rlib.jit_libffi import (CIF_DESCRIPTION, CIF_DESCRIPTION_P, FFI_TYPE, FFI_TYPE_P, FFI_TYPE_PP, SIZE_OF_FFI_ARG) from rpython.rlib.objectmodel import we_are_translated, instantiate +from rpython.rlib.objectmodel import keepalive_until_here from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from pypy.interpreter.error import OperationError, oefmt @@ -160,6 +161,7 @@ raw_cdata = rffi.cast(rffi.CCHARPP, data)[0] lltype.free(raw_cdata, flavor='raw') lltype.free(buffer, flavor='raw') + keepalive_until_here(args_w) return w_res def get_mustfree_flag(data): diff --git a/pypy/module/operator/app_operator.py b/pypy/module/operator/app_operator.py --- a/pypy/module/operator/app_operator.py +++ b/pypy/module/operator/app_operator.py @@ -4,7 +4,8 @@ This module exports a set of operators as functions. E.g. operator.add(x,y) is equivalent to x+y. ''' -from __pypy__ import builtinify + +import types def countOf(a,b): @@ -15,51 +16,78 @@ count += 1 return count +def _resolve_attr_chain(chain, obj, idx=0): + obj = getattr(obj, chain[idx]) + if idx + 1 == len(chain): + return obj + else: + return _resolve_attr_chain(chain, obj, idx + 1) + + +class _simple_attrgetter(object): + def __init__(self, attr): + self._attr = attr + + def __call__(self, obj): + return getattr(obj, self._attr) + + +class _single_attrgetter(object): + def __init__(self, attrs): + self._attrs = attrs + + def __call__(self, obj): + return _resolve_attr_chain(self._attrs, obj) + + +class _multi_attrgetter(object): + def __init__(self, attrs): + self._attrs = attrs + + def __call__(self, obj): + return tuple([ + _resolve_attr_chain(attrs, obj) + for attrs in self._attrs + ]) + + def attrgetter(attr, *attrs): + if ( + not isinstance(attr, str) or + not all(isinstance(a, str) for a in attrs) + ): + raise TypeError("attribute name must be a string, not %r" % + type(attr).__name__) if attrs: - getters = [single_attr_getter(a) for a in (attr,) + attrs] - def getter(obj): - return tuple([getter(obj) for getter in getters]) + return _multi_attrgetter([ + a.split(".") for a in [attr] + list(attrs) + ]) + elif "." 
not in attr: + return _simple_attrgetter(attr) else: - getter = single_attr_getter(attr) - return builtinify(getter) + return _single_attrgetter(attr.split(".")) -def single_attr_getter(attr): - if not isinstance(attr, str): - raise TypeError("attribute name must be a string, not {!r}".format( - type(attr).__name__)) - # - def make_getter(name, prevfn=None): - if prevfn is None: - def getter(obj): - return getattr(obj, name) + +class itemgetter(object): + def __init__(self, item, *items): + self._single = not bool(items) + if self._single: + self._idx = item else: - def getter(obj): - return getattr(prevfn(obj), name) - return getter - # - last = 0 - getter = None - while True: - dot = attr.find(".", last) - if dot < 0: break - getter = make_getter(attr[last:dot], getter) - last = dot + 1 - return make_getter(attr[last:], getter) + self._idx = [item] + list(items) + def __call__(self, obj): + if self._single: + return obj[self._idx] + else: + return tuple([obj[i] for i in self._idx]) -def itemgetter(item, *items): - if items: - list_of_indices = [item] + list(items) - def getter(obj): - return tuple([obj[i] for i in list_of_indices]) - else: - def getter(obj): - return obj[item] - return builtinify(getter) +class methodcaller(object): + def __init__(self, method_name, *args, **kwargs): + self._method_name = method_name + self._args = args + self._kwargs = kwargs -def methodcaller(method_name, *args, **kwargs): - def call(obj): - return getattr(obj, method_name)(*args, **kwargs) - return builtinify(call) + def __call__(self, obj): + return getattr(obj, self._method_name)(*self._args, **self._kwargs) diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -168,6 +168,12 @@ # not visible via os, inconsistency in nt: if hasattr(posix, '_getfullpathname'): interpleveldefs['_getfullpathname'] = 'interp_posix._getfullpathname' + if os.name == 'nt': + interpleveldefs.update({ + '_getfileinformation': 'interp_posix._getfileinformation', + # XXX: currently broken + #'_getfinalpathname': 'interp_posix._getfinalpathname', + }) if hasattr(os, 'chroot'): interpleveldefs['chroot'] = 'interp_posix.chroot' diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -5,8 +5,7 @@ from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import r_longlong from rpython.rlib.unroll import unrolling_iterable -from rpython.rtyper.module import ll_os_stat -from rpython.rtyper.module.ll_os import RegisterOs +from rpython.rtyper.module import ll_os, ll_os_stat from pypy.interpreter.gateway import unwrap_spec, WrappedDefault from pypy.interpreter.error import OperationError, wrap_oserror, wrap_oserror2 @@ -1234,7 +1233,7 @@ raise wrap_oserror(space, e) def declare_new_w_star(name): - if name in RegisterOs.w_star_returning_int: + if name in ll_os.RegisterOs.w_star_returning_int: @unwrap_spec(status=c_int) def WSTAR(space, status): return space.wrap(getattr(os, name)(status)) @@ -1246,7 +1245,7 @@ WSTAR.func_name = name return WSTAR -for name in RegisterOs.w_star: +for name in ll_os.RegisterOs.w_star: if hasattr(os, name): func = declare_new_w_star(name) globals()[name] = func @@ -1412,3 +1411,25 @@ if codeset: return space.wrap(codeset) return space.w_None + +if _WIN32: + @unwrap_spec(fd=c_int) + def _getfileinformation(space, fd): + try: + info = ll_os._getfileinformation(fd) + except OSError as e: + 
raise wrap_oserror(space, e) + return space.newtuple([space.wrap(info[0]), + space.wrap(info[1]), + space.wrap(info[2])]) + + def _getfinalpathname(space, w_path): + path = space.unicode_w(w_path) + try: + result = ll_os._getfinalpathname(path) + except ll_os.LLNotImplemented as e: + raise OperationError(space.w_NotImplementedError, + space.wrap(e.msg)) + except OSError as e: + raise wrap_oserror2(space, e, w_path) + return space.wrap(result) diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -1069,6 +1069,24 @@ # just ensure it returns something reasonable assert encoding is None or type(encoding) is str + if os.name == 'nt': + def test__getfileinformation(self): + import os + path = os.path.join(self.pdir, 'file1') + with open(path) as fp: + info = self.posix._getfileinformation(fp.fileno()) + assert len(info) == 3 + assert all(isinstance(obj, int) for obj in info) + + def test__getfinalpathname(self): + import os + path = os.path.join(self.pdir, 'file1') + try: + result = self.posix._getfinalpathname(path) + except NotImplementedError: + skip("_getfinalpathname not supported on this platform") + assert os.path.exists(result) + def test_rtld_constants(self): # check presence of major RTLD_* constants self.posix.RTLD_LAZY @@ -1076,6 +1094,7 @@ self.posix.RTLD_GLOBAL self.posix.RTLD_LOCAL + class AppTestEnvironment(object): def setup_class(cls): cls.space = space diff --git a/pypy/module/sys/__init__.py b/pypy/module/sys/__init__.py --- a/pypy/module/sys/__init__.py +++ b/pypy/module/sys/__init__.py @@ -23,7 +23,7 @@ '__name__' : '(space.wrap("sys"))', '__doc__' : '(space.wrap("PyPy sys module"))', - 'platform' : 'space.wrap(sys.platform)', + 'platform' : 'space.wrap(system.PLATFORM)', 'maxsize' : 'space.wrap(sys.maxint)', 'byteorder' : 'space.wrap(sys.byteorder)', 'maxunicode' : 'space.wrap(vm.MAXUNICODE)', diff --git a/pypy/module/sys/system.py b/pypy/module/sys/system.py --- a/pypy/module/sys/system.py +++ b/pypy/module/sys/system.py @@ -1,4 +1,6 @@ """Information about the current system.""" +import sys + from pypy.objspace.std.complexobject import HASH_IMAG from pypy.objspace.std.floatobject import HASH_INF, HASH_NAN from pypy.objspace.std.intobject import HASH_MODULUS @@ -6,6 +8,7 @@ from rpython.rlib import rbigint, rfloat from rpython.rtyper.lltypesystem import lltype, rffi +PLATFORM = 'linux' if sys.platform.startswith('linux') else sys.platform app = gateway.applevel(""" "NOT_RPYTHON" diff --git a/pypy/module/sys/test/test_sysmodule.py b/pypy/module/sys/test/test_sysmodule.py --- a/pypy/module/sys/test/test_sysmodule.py +++ b/pypy/module/sys/test/test_sysmodule.py @@ -127,9 +127,9 @@ assert isinstance(sys.__stdin__, io.IOBase) assert sys.__stderr__.errors == 'backslashreplace' - assert sys.__stdin__.name == "" - assert sys.__stdout__.name == "" - assert sys.__stderr__.name == "" + #assert sys.__stdin__.name == "" + #assert sys.__stdout__.name == "" + #assert sys.__stderr__.name == "" if self.appdirect and not isinstance(sys.stdin, io.IOBase): return diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -306,10 +306,16 @@ import imp argparse = imp.load_source('argparse', 'lib-python/2.7/argparse.py') if sys.platform == 'win32': - pypy_exe = 'pypy.exe' - license_base = os.path.join(basedir, r'..\..\..\local') # as on buildbot YMMV + pypy_exe = 'pypy3.exe' + for p 
in [os.path.join(basedir, r'..\..\..\local'), #buildbot + os.path.join(basedir, r'..\local')]: # pypy/doc/windows.rst + if os.path.exists(p): + license_base = p + break + else: + license_base = 'unkown' else: - pypy_exe = 'pypy' + pypy_exe = 'pypy3' license_base = '/usr/share/doc' parser = argparse.ArgumentParser() args = list(args) @@ -380,5 +386,21 @@ if __name__ == '__main__': import sys + if sys.platform == 'win32': + # Try to avoid opeing a dialog box if one of the + # subprocesses causes a system error + import ctypes + winapi = ctypes.windll.kernel32 + SetErrorMode = winapi.SetErrorMode + SetErrorMode.argtypes=[ctypes.c_int] + + SEM_FAILCRITICALERRORS = 1 + SEM_NOGPFAULTERRORBOX = 2 + SEM_NOOPENFILEERRORBOX = 0x8000 + flags = SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX + #Since there is no GetErrorMode, do a double Set + old_mode = SetErrorMode(flags) + SetErrorMode(old_mode | flags) + retval, _ = package(*sys.argv[1:]) sys.exit(retval) diff --git a/pypy/tool/release/test/test_package.py b/pypy/tool/release/test/test_package.py --- a/pypy/tool/release/test/test_package.py +++ b/pypy/tool/release/test/test_package.py @@ -115,15 +115,21 @@ check(pypy, 0755) def test_generate_license(): - from os.path import dirname, abspath, join + from os.path import dirname, abspath, join, exists class Options(object): pass options = Options() basedir = dirname(dirname(dirname(dirname(dirname(abspath(__file__)))))) options.no_tk = False if sys.platform == 'win32': - # as on buildbot YMMV - options.license_base = join(basedir, r'..\..\..\local') + for p in [join(basedir, r'..\..\..\local'), #buildbot + join(basedir, r'..\local')]: # pypy/doc/windows.rst + if exists(p): + license_base = p + break + else: + license_base = 'unkown' + options.license_base = license_base else: options.license_base = '/usr/share/doc' license = package.generate_license(py.path.local(basedir), options) diff --git a/rpython/jit/backend/arm/callbuilder.py b/rpython/jit/backend/arm/callbuilder.py --- a/rpython/jit/backend/arm/callbuilder.py +++ b/rpython/jit/backend/arm/callbuilder.py @@ -92,7 +92,8 @@ self.mc.LDR_ri(r.r7.value, r.r5.value) # change 'rpy_fastgil' to 0 (it should be non-zero right now) - self.mc.DMB() + if self.asm.cpu.cpuinfo.arch_version >= 7: + self.mc.DMB() self.mc.gen_load_int(r.r6.value, fastgil) self.mc.MOV_ri(r.ip.value, 0) self.mc.STR_ri(r.ip.value, r.r6.value) @@ -112,7 +113,8 @@ self.mc.STREX(r.r3.value, r.ip.value, r.r6.value, c=c.EQ) # try to claim the lock self.mc.CMP_ri(r.r3.value, 0, cond=c.EQ) # did this succeed? - self.mc.DMB() + if self.asm.cpu.cpuinfo.arch_version >= 7: + self.mc.DMB() # the success of the lock acquisition is defined by # 'EQ is true', or equivalently by 'r3 == 0'. # diff --git a/rpython/jit/backend/arm/codebuilder.py b/rpython/jit/backend/arm/codebuilder.py --- a/rpython/jit/backend/arm/codebuilder.py +++ b/rpython/jit/backend/arm/codebuilder.py @@ -333,6 +333,8 @@ | (rn & 0xF) << 16) def DMB(self): + # ARMv7 only. I guess ARMv6 CPUs cannot be used in symmetric + # multi-processing at all? That would make this instruction unneeded. 
# note: 'cond' is only permitted on Thumb here, but don't # write literally 0xf57ff05f, because it's larger than 31 bits c = cond.AL diff --git a/rpython/jit/backend/arm/instructions.py b/rpython/jit/backend/arm/instructions.py --- a/rpython/jit/backend/arm/instructions.py +++ b/rpython/jit/backend/arm/instructions.py @@ -142,6 +142,7 @@ #'VCVT' : {'opc1':0xB, 'opc2':0xE, 'opc3':0x1, 'base': False}, } +# ARMv7 only simd_instructions_3regs = { 'VADD_i64': {'A': 0x8, 'B': 0, 'U': 0}, 'VSUB_i64': {'A': 0x8, 'B': 0, 'U': 1}, diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -1,3 +1,4 @@ +from rpython.rlib import rgc from rpython.rlib.rarithmetic import ovfcheck from rpython.rtyper.lltypesystem import llmemory from rpython.jit.metainterp import history @@ -390,8 +391,8 @@ val = op.getarg(0) if val not in self.write_barrier_applied: v = op.getarg(1) - if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and - bool(v.value)): # store a non-NULL + if (isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and + rgc.needs_write_barrier(v.value))): self.gen_write_barrier(val) #op = op.copy_and_change(rop.SETFIELD_RAW) self.newops.append(op) @@ -400,8 +401,8 @@ val = op.getarg(0) if val not in self.write_barrier_applied: v = op.getarg(2) - if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and - bool(v.value)): # store a non-NULL + if (isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and + rgc.needs_write_barrier(v.value))): self.gen_write_barrier_array(val, op.getarg(1)) #op = op.copy_and_change(rop.SET{ARRAYITEM,INTERIORFIELD}_RAW) self.newops.append(op) diff --git a/rpython/rlib/rdynload.py b/rpython/rlib/rdynload.py --- a/rpython/rlib/rdynload.py +++ b/rpython/rlib/rdynload.py @@ -158,3 +158,4 @@ return res LoadLibrary = rwin32.LoadLibrary + GetModuleHandle = rwin32.GetModuleHandle diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -86,6 +86,14 @@ collect(i) i += 1 +def needs_write_barrier(obj): + """ We need to emit write barrier if the right hand of assignment + is in nursery, used by the JIT for handling set*_gc(Const) + """ + if not obj: + return False + return can_move(obj) + def _heap_stats(): raise NotImplementedError # can't be run directly diff --git a/rpython/rlib/rwin32.py b/rpython/rlib/rwin32.py --- a/rpython/rlib/rwin32.py +++ b/rpython/rlib/rwin32.py @@ -130,6 +130,7 @@ # is hidden by operations in ll2ctypes. Call it now. 
GetLastError() + GetModuleHandle = winexternal('GetModuleHandleA', [rffi.CCHARP], HMODULE) LoadLibrary = winexternal('LoadLibraryA', [rffi.CCHARP], HMODULE) GetProcAddress = winexternal('GetProcAddress', [HMODULE, rffi.CCHARP], diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py --- a/rpython/rtyper/lltypesystem/ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/ll2ctypes.py @@ -361,7 +361,9 @@ functype = ctypes.CFUNCTYPE if sys.platform == 'win32': from rpython.rlib.clibffi import FFI_STDCALL, FFI_DEFAULT_ABI - if getattr(T.TO, 'ABI', FFI_DEFAULT_ABI) == FFI_STDCALL: + # XXX: + #if getattr(T.TO, 'ABI', FFI_DEFAULT_ABI) == FFI_STDCALL: + if getattr(T.TO, 'ABI', FFI_DEFAULT_ABI) == 'FFI_STDCALL': # for win32 system call functype = ctypes.WINFUNCTYPE argtypes = [get_ctypes_type(ARG) for ARG in T.TO.ARGS diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py --- a/rpython/rtyper/lltypesystem/lltype.py +++ b/rpython/rtyper/lltypesystem/lltype.py @@ -1,16 +1,17 @@ -from types import NoneType, MethodType import weakref +from types import MethodType, NoneType + +from rpython.annotator.bookkeeper import analyzer_for, immutablevalue from rpython.annotator.model import ( - SomeInteger, SomeBool, SomeObject, AnnotatorError) + AnnotatorError, SomeBool, SomeInteger, SomeObject) +from rpython.rlib.objectmodel import Symbolic from rpython.rlib.rarithmetic import ( - r_int, r_uint, intmask, r_singlefloat, r_ulonglong, r_longlong, - r_longfloat, r_longlonglong, base_int, normalizedinttype, longlongmask, - longlonglongmask, maxint, is_valid_int, is_emulated_long) -from rpython.rlib.objectmodel import Symbolic + base_int, intmask, is_emulated_long, is_valid_int, longlonglongmask, + longlongmask, maxint, normalizedinttype, r_int, r_longfloat, r_longlong, + r_longlonglong, r_singlefloat, r_uint, r_ulonglong) +from rpython.rtyper.extregistry import ExtRegistryEntry +from rpython.tool import leakfinder from rpython.tool.identity_dict import identity_dict -from rpython.tool import leakfinder -from rpython.annotator.bookkeeper import analyzer_for, immutablevalue -from rpython.rtyper.extregistry import ExtRegistryEntry class State(object): pass @@ -313,14 +314,12 @@ except KeyError: return ContainerType.__getattr__(self, name) - def _nofield(self, name): raise AttributeError('struct %s has no field %r' % (self._name, name)) def _names_without_voids(self): - names_without_voids = [name for name in self._names if self._flds[name] is not Void] - return names_without_voids + return [name for name in self._names if self._flds[name] is not Void] def _str_fields_without_voids(self): return ', '.join(['%s: %s' % (name, self._flds[name]) @@ -576,8 +575,10 @@ _gckind = 'raw' def __init__(self, tag, hints={}): - """ if hints['render_structure'] is set, the type is internal and not considered - to come from somewhere else (it should be rendered as a structure) """ + """If hints['render_structure'] is set, the type is internal and + not considered to come from somewhere else (it should be + rendered as a structure) + """ self.tag = tag self.__name__ = tag self.hints = frozendict(hints) @@ -675,7 +676,8 @@ _numbertypes = {int: Number("Signed", int, intmask)} _numbertypes[r_int] = _numbertypes[int] -_numbertypes[r_longlonglong] = Number("SignedLongLongLong", r_longlonglong, longlonglongmask) +_numbertypes[r_longlonglong] = Number("SignedLongLongLong", r_longlonglong, + longlonglongmask) if r_longlong is not r_int: _numbertypes[r_longlong] = 
Number("SignedLongLong", r_longlong, longlongmask) @@ -702,8 +704,8 @@ UnsignedLongLong = build_number("UnsignedLongLong", r_ulonglong) Float = Primitive("Float", 0.0) # C type 'double' -SingleFloat = Primitive("SingleFloat", r_singlefloat(0.0)) # C type 'float' -LongFloat = Primitive("LongFloat", r_longfloat(0.0)) # C type 'long double' +SingleFloat = Primitive("SingleFloat", r_singlefloat(0.0)) # 'float' +LongFloat = Primitive("LongFloat", r_longfloat(0.0)) # 'long double' r_singlefloat._TYPE = SingleFloat Char = Primitive("Char", '\x00') @@ -876,9 +878,11 @@ @analyzer_for(cast_primitive) def ann_cast_primitive(T, s_v): - from rpython.rtyper.llannotation import annotation_to_lltype, ll_to_annotation + from rpython.rtyper.llannotation import ( + annotation_to_lltype, ll_to_annotation) assert T.is_constant() - return ll_to_annotation(cast_primitive(T.const, annotation_to_lltype(s_v)._defl())) + return ll_to_annotation(cast_primitive(T.const, + annotation_to_lltype(s_v)._defl())) def _cast_whatever(TGT, value): @@ -905,7 +909,8 @@ elif TGT == llmemory.Address and isinstance(ORIG, Ptr): return llmemory.cast_ptr_to_adr(value) elif TGT == Signed and isinstance(ORIG, Ptr) and ORIG.TO._gckind == 'raw': - return llmemory.cast_adr_to_int(llmemory.cast_ptr_to_adr(value), 'symbolic') + return llmemory.cast_adr_to_int(llmemory.cast_ptr_to_adr(value), + 'symbolic') raise TypeError("don't know how to cast from %r to %r" % (ORIG, TGT)) @@ -1176,8 +1181,8 @@ except DelayedPointer: return True # assume it's not a delayed null - # _setobj, _getobj and _obj0 are really _internal_ implementations details of _ptr, - # use _obj if necessary instead ! + # _setobj, _getobj and _obj0 are really _internal_ implementations + # details of _ptr, use _obj if necessary instead ! def _setobj(self, pointing_to, solid=False): if pointing_to is None: obj0 = None @@ -1244,12 +1249,12 @@ if T1 == T2: setattr(self._obj, field_name, val) else: - raise TypeError("%r instance field %r:\n" - "expects %r\n" - " got %r" % (self._T, field_name, T1, T2)) + raise TypeError( + "%r instance field %r:\nexpects %r\n got %r" % + (self._T, field_name, T1, T2)) return - raise AttributeError("%r instance has no field %r" % (self._T, - field_name)) + raise AttributeError("%r instance has no field %r" % + (self._T, field_name)) def __getitem__(self, i): # ! can only return basic or ptr ! if isinstance(self._T, (Array, FixedSizeArray)): @@ -1266,7 +1271,8 @@ if isinstance(self._T, (Array, FixedSizeArray)): T1 = self._T.OF if isinstance(T1, ContainerType): - raise TypeError("cannot directly assign to container array items") + raise TypeError("cannot directly assign to container array " + "items") T2 = typeOf(val) if T2 != T1: from rpython.rtyper.lltypesystem import rffi @@ -1316,7 +1322,8 @@ from rpython.rtyper.lltypesystem import rffi if isinstance(self._T, FuncType): if len(args) != len(self._T.ARGS): - raise TypeError("calling %r with wrong argument number: %r" % (self._T, args)) + raise TypeError("calling %r with wrong argument number: %r" % + (self._T, args)) for i, a, ARG in zip(range(len(self._T.ARGS)), args, self._T.ARGS): if typeOf(a) != ARG: # ARG could be Void @@ -1415,11 +1422,13 @@ raise RuntimeError("widening to trash: %r" % self) PARENTTYPE = struc._parent_type if getattr(parent, PARENTTYPE._names[0]) != struc: - raise InvalidCast(CURTYPE, PTRTYPE) # xxx different exception perhaps? + # xxx different exception perhaps? 
+ raise InvalidCast(CURTYPE, PTRTYPE) struc = parent u -= 1 if PARENTTYPE != PTRTYPE.TO: - raise RuntimeError("widening %r inside %r instead of %r" % (CURTYPE, PARENTTYPE, PTRTYPE.TO)) + raise RuntimeError("widening %r inside %r instead of %r" % + (CURTYPE, PARENTTYPE, PTRTYPE.TO)) return _ptr(PTRTYPE, struc, solid=self._solid) def _cast_to_int(self, check=True): @@ -1430,7 +1439,9 @@ return obj # special case for cast_int_to_ptr() results obj = normalizeptr(self, check)._getobj(check) if isinstance(obj, int): - return obj # special case for cast_int_to_ptr() results put into opaques + # special case for cast_int_to_ptr() results put into + # opaques + return obj if getattr(obj, '_read_directly_intval', False): return obj.intval # special case for _llgcopaque result = intmask(obj._getid()) @@ -1468,7 +1479,8 @@ """XXX A nice docstring here""" T = typeOf(val) if isinstance(T, ContainerType): - if self._T._gckind == 'gc' and T._gckind == 'raw' and not isinstance(T, OpaqueType): + if (self._T._gckind == 'gc' and T._gckind == 'raw' and + not isinstance(T, OpaqueType)): val = _interior_ptr(T, self._obj, [offset]) else: val = _ptr(Ptr(T), val, solid=self._solid) @@ -1531,12 +1543,14 @@ setattr(example, s_attr.const, v_lltype._defl()) def call(self, args): - from rpython.rtyper.llannotation import annotation_to_lltype, ll_to_annotation + from rpython.rtyper.llannotation import ( + annotation_to_lltype, ll_to_annotation) args_s, kwds_s = args.unpack() if kwds_s: raise Exception("keyword arguments to call to a low-level fn ptr") info = 'argument to ll function pointer call' - llargs = [annotation_to_lltype(s_arg, info)._defl() for s_arg in args_s] + llargs = [annotation_to_lltype(s_arg, info)._defl() + for s_arg in args_s] v = self.ll_ptrtype._example()(*llargs) return ll_to_annotation(v) @@ -1593,7 +1607,6 @@ return val - assert not '__dict__' in dir(_interior_ptr) class _container(object): @@ -1721,11 +1734,13 @@ __slots__ = ('_hash_cache_', '_compilation_info') - def __new__(self, TYPE, n=None, initialization=None, parent=None, parentindex=None): + def __new__(self, TYPE, n=None, initialization=None, parent=None, + parentindex=None): my_variety = _struct_variety(TYPE._names) return object.__new__(my_variety) - def __init__(self, TYPE, n=None, initialization=None, parent=None, parentindex=None): + def __init__(self, TYPE, n=None, initialization=None, parent=None, + parentindex=None): _parentable.__init__(self, TYPE) if n is not None and TYPE._arrayfld is None: raise TypeError("%r is not variable-sized" % (TYPE,)) @@ -1734,9 +1749,11 @@ first, FIRSTTYPE = TYPE._first_struct() for fld, typ in TYPE._flds.items(): if fld == TYPE._arrayfld: - value = _array(typ, n, initialization=initialization, parent=self, parentindex=fld) + value = _array(typ, n, initialization=initialization, + parent=self, parentindex=fld) else: - value = typ._allocate(initialization=initialization, parent=self, parentindex=fld) + value = typ._allocate(initialization=initialization, + parent=self, parentindex=fld) setattr(self, fld, value) if parent is not None: self._setparentstructure(parent, parentindex) @@ -1795,7 +1812,8 @@ __slots__ = ('items',) - def __init__(self, TYPE, n, initialization=None, parent=None, parentindex=None): + def __init__(self, TYPE, n, initialization=None, parent=None, + parentindex=None): if not is_valid_int(n): raise TypeError("array length must be an int") if n < 0: @@ -1964,7 +1982,8 @@ if not key._was_freed(): newcache[key] = value except RuntimeError: - pass # ignore "accessing subxxx, but already 
gc-ed parent" + # ignore "accessing subxxx, but already gc-ed parent" + pass if newcache: _subarray._cache[T] = newcache else: @@ -2020,8 +2039,10 @@ attrs.setdefault('_name', '?') attrs.setdefault('_callable', None) self.__dict__.update(attrs) - if '_callable' in attrs and hasattr(attrs['_callable'], '_compilation_info'): - self.__dict__['compilation_info'] = attrs['_callable']._compilation_info + if '_callable' in attrs and hasattr(attrs['_callable'], + '_compilation_info'): + self.__dict__['compilation_info'] = \ + attrs['_callable']._compilation_info def __repr__(self): return '<%s>' % (self,) @@ -2126,8 +2147,8 @@ return _ptr(Ptr(T), o, solid) @analyzer_for(malloc) -def ann_malloc(s_T, s_n=None, s_flavor=None, s_zero=None, s_track_allocation=None, - s_add_memory_pressure=None): +def ann_malloc(s_T, s_n=None, s_flavor=None, s_zero=None, + s_track_allocation=None, s_add_memory_pressure=None): assert (s_n is None or s_n.knowntype == int or issubclass(s_n.knowntype, base_int)) assert s_T.is_constant() @@ -2303,7 +2324,8 @@ @analyzer_for(runtime_type_info) def ann_runtime_type_info(s_p): - assert isinstance(s_p, SomePtr), "runtime_type_info of non-pointer: %r" % s_p + assert isinstance(s_p, SomePtr), \ + "runtime_type_info of non-pointer: %r" % s_p return SomePtr(typeOf(runtime_type_info(s_p.ll_ptrtype._example()))) diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -105,6 +105,12 @@ _CYGWIN = sys.platform == 'cygwin' +# plain NotImplementedError is invalid RPython +class LLNotImplemented(NotImplementedError): + + def __init__(self, msg): + self.msg = msg + class CConfig: """ Definitions for platform integration. @@ -1179,7 +1185,7 @@ condition=sys.platform=='win32') def register_posix__getfullpathname(self, traits): # this nt function is not exposed via os, but needed - # to get a correct implementation of os.abspath + # to get a correct implementation of os.path.abspath from rpython.rtyper.module.ll_win32file import make_getfullpathname_impl getfullpathname_llimpl = make_getfullpathname_impl(traits) @@ -1963,10 +1969,12 @@ return OsEnvironController() # ____________________________________________________________ -# Support for the WindowsError exception +# Support for the WindowsError exception and misc functions if sys.platform == 'win32': from rpython.rlib import rwin32 + from rpython.rtyper.module.ll_win32file import ( + make__getfileinformation_impl, make__getfinalpathname_impl) class RegisterFormatError(BaseLazyRegistering): def __init__(self): @@ -1977,3 +1985,6 @@ return extdef([lltype.Signed], str, "rwin32_FormatError", llimpl=rwin32.llimpl_FormatError) + + _getfileinformation = make__getfileinformation_impl(UnicodeTraits()) + _getfinalpathname = make__getfinalpathname_impl(UnicodeTraits()) diff --git a/rpython/rtyper/module/ll_os_stat.py b/rpython/rtyper/module/ll_os_stat.py --- a/rpython/rtyper/module/ll_os_stat.py +++ b/rpython/rtyper/module/ll_os_stat.py @@ -186,7 +186,10 @@ _name_struct_stat = '_stati64' INCLUDES = ['sys/types.h', 'sys/stat.h', 'sys/statvfs.h'] else: - _name_struct_stat = 'stat' + if sys.platform.startswith('linux'): + _name_struct_stat = 'stat64' + else: + _name_struct_stat = 'stat' INCLUDES = ['sys/types.h', 'sys/stat.h', 'sys/statvfs.h', 'unistd.h'] compilation_info = ExternalCompilationInfo( diff --git a/rpython/rtyper/module/ll_win32file.py b/rpython/rtyper/module/ll_win32file.py --- a/rpython/rtyper/module/ll_win32file.py +++ 
b/rpython/rtyper/module/ll_win32file.py @@ -55,6 +55,15 @@ FILE_TYPE_CHAR = platform.ConstantInteger('FILE_TYPE_CHAR') FILE_TYPE_PIPE = platform.ConstantInteger('FILE_TYPE_PIPE') + FILE_WRITE_ATTRIBUTES = platform.ConstantInteger( + 'FILE_WRITE_ATTRIBUTES') + OPEN_EXISTING = platform.ConstantInteger( + 'OPEN_EXISTING') + FILE_FLAG_BACKUP_SEMANTICS = platform.ConstantInteger( + 'FILE_FLAG_BACKUP_SEMANTICS') + VOLUME_NAME_DOS = platform.ConstantInteger('VOLUME_NAME_DOS') + VOLUME_NAME_NT = platform.ConstantInteger('VOLUME_NAME_NT') + WIN32_FILE_ATTRIBUTE_DATA = platform.Struct( 'WIN32_FILE_ATTRIBUTE_DATA', [('dwFileAttributes', rwin32.DWORD), @@ -67,14 +76,15 @@ BY_HANDLE_FILE_INFORMATION = platform.Struct( 'BY_HANDLE_FILE_INFORMATION', [('dwFileAttributes', rwin32.DWORD), + ('ftCreationTime', rwin32.FILETIME), + ('ftLastAccessTime', rwin32.FILETIME), + ('ftLastWriteTime', rwin32.FILETIME), + ('dwVolumeSerialNumber', rwin32.DWORD), ('nFileSizeHigh', rwin32.DWORD), ('nFileSizeLow', rwin32.DWORD), ('nNumberOfLinks', rwin32.DWORD), ('nFileIndexHigh', rwin32.DWORD), - ('nFileIndexLow', rwin32.DWORD), - ('ftCreationTime', rwin32.FILETIME), - ('ftLastAccessTime', rwin32.FILETIME), - ('ftLastWriteTime', rwin32.FILETIME)]) + ('nFileIndexLow', rwin32.DWORD)]) config = platform.configure(CConfig) @@ -92,6 +102,8 @@ INVALID_FILE_ATTRIBUTES _S_IFDIR _S_IFREG _S_IFCHR _S_IFIFO FILE_TYPE_UNKNOWN FILE_TYPE_CHAR FILE_TYPE_PIPE + FILE_WRITE_ATTRIBUTES OPEN_EXISTING FILE_FLAG_BACKUP_SEMANTICS + VOLUME_NAME_DOS VOLUME_NAME_NT ERROR_FILE_NOT_FOUND ERROR_NO_MORE_FILES ERROR_SHARING_VIOLATION '''.split(): @@ -163,6 +175,13 @@ [traits.CCHARP, traits.CCHARP], rwin32.BOOL) + CreateFile = external( + 'CreateFile' + apisuffix, + [traits.CCHARP, rwin32.DWORD, rwin32.DWORD, + rwin32.LPSECURITY_ATTRIBUTES, rwin32.DWORD, rwin32.DWORD, + rwin32.HANDLE], + rwin32.HANDLE) + DeleteFile = external( 'DeleteFile' + suffix, [traits.CCHARP], @@ -173,7 +192,29 @@ [traits.CCHARP, traits.CCHARP], rwin32.BOOL) - return Win32Traits + GETFINALPATHNAMEBYHANDLE_TP = lltype.Ptr(lltype.FuncType( + [rwin32.HANDLE, traits.CCHARP, rwin32.DWORD, rwin32.DWORD], + rwin32.DWORD, abi='FFI_STDCALL')) + # dynamically loaded + GetFinalPathNameByHandle = lltype.nullptr( + GETFINALPATHNAMEBYHANDLE_TP.TO) + + def check_GetFinalPathNameByHandle(self): + if self.GetFinalPathNameByHandle: + return True + + from rpython.rlib.rdynload import GetModuleHandle, dlsym + hKernel32 = GetModuleHandle("KERNEL32") + try: + func = dlsym(hKernel32, 'GetFinalPathNameByHandle' + suffix) + except KeyError: + return False + + self.GetFinalPathNameByHandle = rffi.cast( + Win32Traits.GETFINALPATHNAMEBYHANDLE_TP, func) + return True + + return Win32Traits() #_______________________________________________________________ # listdir @@ -336,27 +377,6 @@ win32traits = make_win32_traits(traits) from rpython.rtyper.module.ll_os_stat import time_t_to_FILE_TIME - class CConfig: - _compilation_info_ = ExternalCompilationInfo( - includes = ['windows.h'], - ) - - FILE_WRITE_ATTRIBUTES = platform.ConstantInteger( - 'FILE_WRITE_ATTRIBUTES') - OPEN_EXISTING = platform.ConstantInteger( - 'OPEN_EXISTING') - FILE_FLAG_BACKUP_SEMANTICS = platform.ConstantInteger( - 'FILE_FLAG_BACKUP_SEMANTICS') - globals().update(platform.configure(CConfig)) - - CreateFile = rffi.llexternal( - 'CreateFile' + win32traits.apisuffix, - [traits.CCHARP, rwin32.DWORD, rwin32.DWORD, - rwin32.LPSECURITY_ATTRIBUTES, rwin32.DWORD, rwin32.DWORD, - rwin32.HANDLE], - rwin32.HANDLE, - calling_conv='win') - GetSystemTime = 
rffi.llexternal( 'GetSystemTime', [lltype.Ptr(rwin32.SYSTEMTIME)], @@ -381,10 +401,10 @@ @specialize.argtype(1) def os_utime_llimpl(path, tp): - hFile = CreateFile(path, - FILE_WRITE_ATTRIBUTES, 0, - None, OPEN_EXISTING, - FILE_FLAG_BACKUP_SEMANTICS, + hFile = win32traits.CreateFile(path, + win32traits.FILE_WRITE_ATTRIBUTES, 0, + None, win32traits.OPEN_EXISTING, + win32traits.FILE_FLAG_BACKUP_SEMANTICS, rwin32.NULL_HANDLE) if hFile == rwin32.INVALID_HANDLE_VALUE: raise rwin32.lastWindowsError() @@ -413,3 +433,68 @@ lltype.free(mtime, flavor='raw') return os_utime_llimpl + +#_______________________________________________________________ +# _getfileinformation (py3) + +def make__getfileinformation_impl(traits): + from rpython.rlib import rwin32 + win32traits = make_win32_traits(traits) + + def _getfileinformation_llimpl(fd): + hFile = rwin32.get_osfhandle(fd) + with lltype.scoped_alloc( + win32traits.BY_HANDLE_FILE_INFORMATION) as info: + if win32traits.GetFileInformationByHandle(hFile, info) == 0: + raise rwin32.lastWindowsError("_getfileinformation") + return (rffi.cast(lltype.Signed, info.c_dwVolumeSerialNumber), + rffi.cast(lltype.Signed, info.c_nFileIndexHigh), + rffi.cast(lltype.Signed, info.c_nFileIndexLow)) + + return _getfileinformation_llimpl + +#_______________________________________________________________ +# _getfinalpathname (py3) + +def make__getfinalpathname_impl(traits): + from rpython.rlib import rwin32 + from rpython.rtyper.module.ll_os import LLNotImplemented + assert traits.str is unicode, 'Currently only handles unicode paths' + win32traits = make_win32_traits(traits) + + def _getfinalpathname_llimpl(path): + if not win32traits.check_GetFinalPathNameByHandle(): + raise LLNotImplemented("GetFinalPathNameByHandle not available on " + "this platform") + + hFile = win32traits.CreateFile(path, 0, 0, None, + win32traits.OPEN_EXISTING, + win32traits.FILE_FLAG_BACKUP_SEMANTICS, + rwin32.NULL_HANDLE) + if hFile == rwin32.INVALID_HANDLE_VALUE: + raise rwin32.lastWindowsError("CreateFile") + + VOLUME_NAME_DOS = rffi.cast(rwin32.DWORD, win32traits.VOLUME_NAME_DOS) + try: + size = win32traits.GetFinalPathNameByHandle( + hFile, + lltype.nullptr(traits.CCHARP.TO), + rffi.cast(rwin32.DWORD, 0), + VOLUME_NAME_DOS) + if size == 0: + raise rwin32.lastWindowsError("GetFinalPathNameByHandle") + + with lltype.scoped_alloc(traits.CCHARP.TO, size + 1) as target_path: + result = win32traits.GetFinalPathNameByHandle( + hFile, + target_path, + size, + VOLUME_NAME_DOS) + if result == 0: + raise rwin32.lastWindowsError("GetFinalPathNameByHandle") + return traits.charpsize2str(target_path, + rffi.cast(lltype.Signed, result)) + finally: + rwin32.CloseHandle(hFile) + + return _getfinalpathname_llimpl diff --git a/rpython/rtyper/module/support.py b/rpython/rtyper/module/support.py --- a/rpython/rtyper/module/support.py +++ b/rpython/rtyper/module/support.py @@ -49,6 +49,7 @@ CHAR = rffi.CHAR CCHARP = rffi.CCHARP charp2str = staticmethod(rffi.charp2str) + charpsize2str = staticmethod(rffi.charpsize2str) scoped_str2charp = staticmethod(rffi.scoped_str2charp) str2charp = staticmethod(rffi.str2charp) free_charp = staticmethod(rffi.free_charp) @@ -68,6 +69,7 @@ CHAR = rffi.WCHAR_T CCHARP = rffi.CWCHARP charp2str = staticmethod(rffi.wcharp2unicode) + charpsize2str = staticmethod(rffi.wcharpsize2unicode) str2charp = staticmethod(rffi.unicode2wcharp) scoped_str2charp = staticmethod(rffi.scoped_unicode2wcharp) free_charp = staticmethod(rffi.free_wcharp) diff --git 
a/rpython/rtyper/module/test/test_ll_win32file.py b/rpython/rtyper/module/test/test_ll_win32file.py new file mode 100644 --- /dev/null +++ b/rpython/rtyper/module/test/test_ll_win32file.py @@ -0,0 +1,26 @@ +import os + +import py + +from rpython.rtyper.module import ll_os + +if not ll_os._WIN32: + py.test.skip("requires Windows") + + +def test__getfileinformation(): + with open(__file__) as fp: + stat = os.fstat(fp.fileno()) + info = ll_os._getfileinformation(fp.fileno()) + serial, high, low = info + assert type(serial) in (int, long) + assert (high << 32) + low == stat.st_ino + + +def test__getfinalpathname(): + path = __file__.decode('mbcs') + try: + result = ll_os._getfinalpathname(path) + except ll_os.LLNotImplemented: + py.test.skip("_getfinalpathname not supported on this platform") + assert os.path.exists(result) diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py --- a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -476,11 +476,12 @@ shutil_copy(str(soname), str(newsoname)) self.log.info("copied: %s" % (newsoname,)) if sys.platform == 'win32': - # copy the import library as well - libname = soname.new(ext='lib') - newlibname = newexename.new(basename=soname.basename) - shutil.copyfile(str(libname), str(newlibname.new(ext='lib'))) - self.log.info("copied: %s" % (newlibname,)) + ext_to_copy = ['lib', 'pdb'] + for ext in ext_to_copy: + name = soname.new(ext=ext) + newname = newexename.new(basename=soname.basename) + shutil.copyfile(str(name), str(newname.new(ext=ext))) + self.log.info("copied: %s" % (newname,)) self.c_entryp = newexename self.log.info('usession directory: %s' % (udir,)) self.log.info("created: %s" % (self.c_entryp,)) diff --git a/rpython/translator/platform/test/test_makefile.py b/rpython/translator/platform/test/test_makefile.py --- a/rpython/translator/platform/test/test_makefile.py +++ b/rpython/translator/platform/test/test_makefile.py @@ -44,6 +44,7 @@ assert res.returncode == 0 def test_900_files(self): + tmpdir = udir.join('test_900_files').ensure(dir=1) txt = '#include \n' for i in range(900): txt += 'int func%03d();\n' % i @@ -52,11 +53,11 @@ txt += ' j += func%03d();\n' % i txt += ' printf("%d\\n", j);\n' txt += ' return 0;};\n' - cfile = udir.join('test_900_files.c') + cfile = tmpdir.join('test_900_files.c') cfile.write(txt) cfiles = [cfile] for i in range(900): - cfile2 = udir.join('implement%03d.c' %i) + cfile2 = tmpdir.join('implement%03d.c' %i) cfile2.write(''' int func%03d() { @@ -64,10 +65,10 @@ } ''' % (i, i)) cfiles.append(cfile2) - mk = self.platform.gen_makefile(cfiles, ExternalCompilationInfo(), path=udir) + mk = self.platform.gen_makefile(cfiles, ExternalCompilationInfo(), path=tmpdir) mk.write() self.platform.execute_makefile(mk) - res = self.platform.execute(udir.join('test_900_files')) + res = self.platform.execute(tmpdir.join('test_900_files')) self.check_res(res, '%d\n' %sum(range(900))) def test_precompiled_headers(self): diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -203,6 +203,9 @@ # the assembler still has the old behavior that all options # must come first, and after the file name all options are ignored. # So please be careful with the order of parameters! 
;-) + pdb_dir = oname.dirname + if pdb_dir: + compile_args += ['/Fd%s\\' % (pdb_dir,)] args = ['/nologo', '/c'] + compile_args + ['/Fo%s' % (oname,), str(cfile)] self._execute_c_compiler(cc, args, oname) return oname @@ -407,7 +410,7 @@ 'int main(int argc, char* argv[]) ' '{ return $(PYPY_MAIN_FUNCTION)(argc, argv); } > $@') m.rule('$(DEFAULT_TARGET)', ['$(TARGET)', 'main.obj'], - ['$(CC_LINK) /nologo main.obj $(SHARED_IMPORT_LIB) /out:$@ /MANIFEST /MANIFESTFILE:$*.manifest', + ['$(CC_LINK) /nologo /debug main.obj $(SHARED_IMPORT_LIB) /out:$@ /MANIFEST /MANIFESTFILE:$*.manifest', 'mt.exe -nologo -manifest $*.manifest -outputresource:$@;1', ]) m.rule('debugmode_$(DEFAULT_TARGET)', ['debugmode_$(TARGET)', 'main.obj'], diff --git a/rpython/translator/sandbox/rsandbox.py b/rpython/translator/sandbox/rsandbox.py --- a/rpython/translator/sandbox/rsandbox.py +++ b/rpython/translator/sandbox/rsandbox.py @@ -60,8 +60,7 @@ def need_more_data(self): buflen = self.buflen - buf = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw') - try: + with lltype.scoped_alloc(rffi.CCHARP.TO, buflen) as buf: buflen = rffi.cast(rffi.SIZE_T, buflen) count = ll_read_not_sandboxed(self.fd, buf, buflen) count = rffi.cast(lltype.Signed, count) @@ -69,20 +68,15 @@ raise IOError self.buf += ''.join([buf[i] for i in range(count)]) self.buflen *= 2 - finally: - lltype.free(buf, flavor='raw') def sandboxed_io(buf): STDIN = 0 STDOUT = 1 # send the buffer with the marshalled fnname and input arguments to STDOUT - p = lltype.malloc(rffi.CCHARP.TO, len(buf), flavor='raw') - try: + with lltype.scoped_alloc(rffi.CCHARP.TO, len(buf)) as p: for i in range(len(buf)): p[i] = buf[i] writeall_not_sandboxed(STDOUT, p, len(buf)) - finally: - lltype.free(p, flavor='raw') # build a Loader that will get the answer from STDIN loader = FdLoader(STDIN) # check for errors @@ -108,9 +102,8 @@ @signature(types.str(), returns=types.impossible()) def not_implemented_stub(msg): STDERR = 2 - buf = rffi.str2charp(msg + '\n') - writeall_not_sandboxed(STDERR, buf, len(msg) + 1) - rffi.free_charp(buf) + with rffi.scoped_str2charp(msg + '\n') as buf: + writeall_not_sandboxed(STDERR, buf, len(msg) + 1) raise RuntimeError(msg) # XXX in RPython, the msg is ignored at the moment dump_string = rmarshal.get_marshaller(str) diff --git a/rpython/translator/test/test_driver.py b/rpython/translator/test/test_driver.py --- a/rpython/translator/test/test_driver.py +++ b/rpython/translator/test/test_driver.py @@ -55,12 +55,15 @@ src_name = udir.join('src/dydy2.exe') dll_name = udir.join('src/pypy.dll') lib_name = udir.join('src/pypy.lib') + pdb_name = udir.join('src/pypy.pdb') src_name.ensure() src_name.write('exe') dll_name.ensure() dll_name.write('dll') lib_name.ensure() lib_name.write('lib') + pdb_name.ensure() + pdb_name.write('pdb') dst_name.ensure() class CBuilder(object): @@ -75,6 +78,8 @@ assert dst_name.new(ext='lib').read() == 'lib' def test_shutil_copy(): + if os.name == 'nt': + py.test.skip('Windows cannot copy or rename to an in-use file') a = udir.join('file_a') b = udir.join('file_a') a.write('hello') From noreply at buildbot.pypy.org Sun Oct 5 21:03:03 2014 From: noreply at buildbot.pypy.org (rguillebert) Date: Sun, 5 Oct 2014 21:03:03 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: More slides Message-ID: <20141005190303.CF2EF1C023E@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: extradoc Changeset: r5422:2e225431af6a Date: 2014-10-01 16:20 +0200 http://bitbucket.org/pypy/extradoc/changeset/2e225431af6a/ Log: More slides diff --git 
a/talk/pyconie2014/talk.rst b/talk/pyconie2014/talk.rst --- a/talk/pyconie2014/talk.rst +++ b/talk/pyconie2014/talk.rst @@ -1,4 +1,4 @@ -.. include:: beamerdefs.tx +.. include:: beamerdefs.txt PyPy : A fast Python Virtual Machine ==================================== @@ -73,4 +73,52 @@ - If the type of the object is different from the type in the trace, go back to the interpreter : "guard failure" -- If a guard fails too many times, optimize the trace for the other types frequently encountered +- If a guard fails too many times, generate traces for the other types frequently encountered + +Compatibility +------------- + +- Fully compatible with CPython 2.7 & 3.2 (minus bugs & implementation specific features) + +- Partial and slow support of the C-API + +- Alternatives might exist + +Ecosystem +--------- + +- Just my opinion + +- We should move away from the C-API + + * Makes assumptions on refcounting, object layout, the GIL + + * The future of Python is bound to the future of CPython (a more than 20 years old interpreter) + + * It's hard for a new Python VM without C extension support to get traction (not only PyPy) + +- This doesn't mean we should lose Python's ability to interface with C easily + +CFFI +---- + +- Where do we go from here ? + +- CFFI is a fairly new way of interacting with C in an implementation independant way + +- Very fast on PyPy + +- Decently fast on CPython + +- The Jython project is working on fast support + +CFFI +---- + +- More convenient, safer, faster than ctypes + +- Python functions can be exposed to C easily + +- Already used by pyopenssl, psycopg2cffi, pygame_cffi, lxml_cffi + +- Other tools could be built on top of it (Cython cffi backend ?) From noreply at buildbot.pypy.org Sun Oct 5 21:03:05 2014 From: noreply at buildbot.pypy.org (rguillebert) Date: Sun, 5 Oct 2014 21:03:05 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Final (hopefully) version Message-ID: <20141005190305.127E51C023E@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: extradoc Changeset: r5423:a2242bbae59d Date: 2014-10-05 20:56 +0200 http://bitbucket.org/pypy/extradoc/changeset/a2242bbae59d/ Log: Final (hopefully) version diff --git a/talk/pyconie2014/talk.rst b/talk/pyconie2014/talk.rst --- a/talk/pyconie2014/talk.rst +++ b/talk/pyconie2014/talk.rst @@ -44,7 +44,7 @@ - Removes overhead when unnecessary -- But these Python features remain available (pdb) +- But Python features which need require overhead remain available (frame introspection, pdb) RPython ------- @@ -67,29 +67,38 @@ How --- +- Generates linear traces from loops + +- Specializes traces on types + - Removes boxing, integer objects become machine integers -- Specializes trace on types, helps speed-up method lookup - -- If the type of the object is different from the type in the trace, go back to the interpreter : "guard failure" +- If the type of the object is different from the type in the trace being executed, go back to the interpreter : "guard failure" - If a guard fails too many times, generate traces for the other types frequently encountered Compatibility ------------- -- Fully compatible with CPython 2.7 & 3.2 (minus bugs & implementation specific features) +- Fully compatible with CPython 2.7 & 3.2 (minus implementation details) - Partial and slow support of the C-API - Alternatives might exist +Future +------ + +- More Python 3 + +- NumPyPy + +- STM + Ecosystem --------- -- Just my opinion - -- We should move away from the C-API +- We should (slowly, incrementally) move away from the C 
extension API * Makes assumptions on refcounting, object layout, the GIL @@ -99,8 +108,10 @@ - This doesn't mean we should lose Python's ability to interface with C easily -CFFI ----- +- CFFI is the PyPy team's attempt at solving this + +CFFI (1/2) +---------- - Where do we go from here ? @@ -110,15 +121,22 @@ - Decently fast on CPython -- The Jython project is working on fast support +- The Jython project is working on support -CFFI ----- +CFFI (2/2) +---------- - More convenient, safer, faster than ctypes -- Python functions can be exposed to C easily +- Can call C functions easily, API and ABI mode + +- Python functions can be exposed to C - Already used by pyopenssl, psycopg2cffi, pygame_cffi, lxml_cffi - Other tools could be built on top of it (Cython cffi backend ?) + +Questions +--------- + +- Questions ? From noreply at buildbot.pypy.org Sun Oct 5 21:03:06 2014 From: noreply at buildbot.pypy.org (rguillebert) Date: Sun, 5 Oct 2014 21:03:06 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Merge heads Message-ID: <20141005190306.243491C023E@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: extradoc Changeset: r5424:12b314a72e1f Date: 2014-10-05 21:00 +0200 http://bitbucket.org/pypy/extradoc/changeset/12b314a72e1f/ Log: Merge heads diff --git a/sprintinfo/warsaw-2014/announcement.txt b/sprintinfo/warsaw-2014/announcement.txt --- a/sprintinfo/warsaw-2014/announcement.txt +++ b/sprintinfo/warsaw-2014/announcement.txt @@ -38,8 +38,8 @@ ------------ The sprint will happen within a room of Warsaw University. The -address is Pasteura 5 (which is a form of "Pasteur street"), room 550. -The person of contact is Maciej Fijalkowski. +address is Pasteura 5 (which is a form of "Pasteur street"), dept. of +Physics, room 450. The person of contact is Maciej Fijalkowski. -------------- diff --git a/sprintinfo/warsaw-2014/people.txt b/sprintinfo/warsaw-2014/people.txt --- a/sprintinfo/warsaw-2014/people.txt +++ b/sprintinfo/warsaw-2014/people.txt @@ -9,7 +9,10 @@ ==================== ============== ======================= Name Arrive/Depart Accomodation ==================== ============== ======================= -Armin Rigo 20/10-2X/10 with fijal? +Armin Rigo 20/10-28/10 with fijal +Maciej Fijalkowski 20/10-30/10 private Romain Guillebert 19/10-26-10 ??? Manuel Jacob 20/10-26/10 ? (shared hotel room?) +Kostia Lopuhin +Antonio Cuni 20/10-26/10 ibis Reduta http://www.ibis.com/gb/hotel-7125-ibis-warszawa-reduta/index.shtml ==================== ============== ======================= From noreply at buildbot.pypy.org Sun Oct 5 21:03:07 2014 From: noreply at buildbot.pypy.org (rguillebert) Date: Sun, 5 Oct 2014 21:03:07 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Update accomodation Message-ID: <20141005190307.3F06C1C023E@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: extradoc Changeset: r5425:e020c4c36d7f Date: 2014-10-05 21:03 +0200 http://bitbucket.org/pypy/extradoc/changeset/e020c4c36d7f/ Log: Update accomodation diff --git a/sprintinfo/warsaw-2014/people.txt b/sprintinfo/warsaw-2014/people.txt --- a/sprintinfo/warsaw-2014/people.txt +++ b/sprintinfo/warsaw-2014/people.txt @@ -11,8 +11,8 @@ ==================== ============== ======================= Armin Rigo 20/10-28/10 with fijal Maciej Fijalkowski 20/10-30/10 private -Romain Guillebert 19/10-26-10 ??? -Manuel Jacob 20/10-26/10 ? (shared hotel room?) 
+Romain Guillebert 19/10-26-10 ibis Reduta with mjacob +Manuel Jacob 20/10-26/10 ibis Reduta with rguillebert Kostia Lopuhin Antonio Cuni 20/10-26/10 ibis Reduta http://www.ibis.com/gb/hotel-7125-ibis-warszawa-reduta/index.shtml ==================== ============== ======================= From noreply at buildbot.pypy.org Sun Oct 5 22:11:15 2014 From: noreply at buildbot.pypy.org (rguillebert) Date: Sun, 5 Oct 2014 22:11:15 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Move the "future" slide to the end Message-ID: <20141005201115.526281C023E@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: extradoc Changeset: r5426:52815725aaa2 Date: 2014-10-05 22:11 +0200 http://bitbucket.org/pypy/extradoc/changeset/52815725aaa2/ Log: Move the "future" slide to the end diff --git a/talk/pyconie2014/talk.rst b/talk/pyconie2014/talk.rst --- a/talk/pyconie2014/talk.rst +++ b/talk/pyconie2014/talk.rst @@ -86,15 +86,6 @@ - Alternatives might exist -Future ------- - -- More Python 3 - -- NumPyPy - -- STM - Ecosystem --------- @@ -136,6 +127,17 @@ - Other tools could be built on top of it (Cython cffi backend ?) +Future +------ + +- Python 3.3 + +- NumPyPy + +- STM + +- You can donate to help the progress of these features : pypy.org + Questions --------- From noreply at buildbot.pypy.org Sun Oct 5 22:28:13 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 5 Oct 2014 22:28:13 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: add myself Message-ID: <20141005202813.BCC341C023E@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: extradoc Changeset: r5427:bd935d2329f9 Date: 2014-10-05 23:28 +0300 http://bitbucket.org/pypy/extradoc/changeset/bd935d2329f9/ Log: add myself diff --git a/sprintinfo/warsaw-2014/people.txt b/sprintinfo/warsaw-2014/people.txt --- a/sprintinfo/warsaw-2014/people.txt +++ b/sprintinfo/warsaw-2014/people.txt @@ -15,4 +15,5 @@ Manuel Jacob 20/10-26/10 ibis Reduta with rguillebert Kostia Lopuhin Antonio Cuni 20/10-26/10 ibis Reduta http://www.ibis.com/gb/hotel-7125-ibis-warszawa-reduta/index.shtml +Matti Picus 20/10-20/10 just a long layover between flights ==================== ============== ======================= From noreply at buildbot.pypy.org Mon Oct 6 00:33:47 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 6 Oct 2014 00:33:47 +0200 (CEST) Subject: [pypy-commit] pypy var-in-Some: kill AnnotatedValue; store the annotation directly on the Variable Message-ID: <20141005223347.BB1711C0F1D@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: var-in-Some Changeset: r73818:d41673491dc8 Date: 2014-10-05 23:33 +0100 http://bitbucket.org/pypy/pypy/changeset/d41673491dc8/ Log: kill AnnotatedValue; store the annotation directly on the Variable diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -11,7 +11,6 @@ from rpython.translator import simplify, transform from rpython.annotator import model as annmodel, signature from rpython.annotator.argument import simple_args -from rpython.annotator.value import AnnotatedValue from rpython.annotator.bookkeeper import Bookkeeper import py @@ -150,9 +149,9 @@ if isinstance(variable, Constant): return type(variable.value) elif isinstance(variable, Variable): - cell = variable.binding - if cell: - return cell.ann.knowntype + s_variable = variable.annotation + if s_variable: + return s_variable.knowntype else: return object else: @@ -220,7 +219,7 @@ raise annmodel.AnnotatorError(text) for graph in newgraphs: 
v = graph.getreturnvar() - if v.binding is None: + if v.annotation is None: self.setbinding(v, annmodel.s_ImpossibleValue) # policy-dependent computation self.bookkeeper.compute_at_fixpoint() @@ -228,10 +227,7 @@ def annotation(self, arg): "Gives the SomeValue corresponding to the given Variable or Constant." if isinstance(arg, Variable): - annvalue = arg.binding - if annvalue is None: - return None - return annvalue.ann + return arg.annotation elif isinstance(arg, Constant): return self.bookkeeper.immutablevalue(arg.value) else: @@ -244,27 +240,18 @@ raise KeyError return s_arg - def annvalue(self, arg): - if isinstance(arg, Variable): - return arg.binding - else: - return AnnotatedValue(arg, self.bookkeeper.immutablevalue(arg.value)) - def typeannotation(self, t): return signature.annotation(t, self.bookkeeper) def setbinding(self, arg, s_value): - if arg.binding is None: - arg.binding = AnnotatedValue(arg, s_value) - else: - value = arg.binding - if value.ann is not None: - assert s_value.contains(value.ann) - value.ann = s_value + s_old = arg.annotation + if s_old is not None: + assert s_value.contains(s_old) + arg.annotation = s_value def transfer_binding(self, v_target, v_source): - assert v_source.binding is not None - v_target.binding = AnnotatedValue(v_target, v_source.binding.ann) + assert v_source.annotation is not None + v_target.annotation = v_source.annotation def warning(self, msg, pos=None): if pos is None: @@ -583,18 +570,16 @@ #___ creating the annotations based on operations ______ def consider_op(self, op): - argcells = [self.annvalue(a) for a in op.args] - # let's be careful about avoiding propagated SomeImpossibleValues # to enter an op; the latter can result in violations of the # more general results invariant: e.g. if SomeImpossibleValue enters is_ # is_(SomeImpossibleValue, None) -> SomeBool # is_(SomeInstance(not None), None) -> SomeBool(const=False) ... 
# boom -- in the assert of setbinding() - for arg in argcells: - if isinstance(arg.ann, annmodel.SomeImpossibleValue): + for arg in op.args: + if isinstance(self.annotation(arg), annmodel.SomeImpossibleValue): raise BlockedInference(self, op, -1) - resultcell = op.consider(self, *argcells) + resultcell = op.consider(self, *op.args) if resultcell is None: resultcell = annmodel.s_ImpossibleValue elif resultcell == annmodel.s_ImpossibleValue: diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -26,8 +26,8 @@ @op.is_.register(SomeObject, SomeObject) def is__default(annotator, obj1, obj2): r = SomeBool() - s_obj1 = obj1.ann - s_obj2 = obj2.ann + s_obj1 = annotator.annotation(obj1) + s_obj2 = annotator.annotation(obj2) if s_obj2.is_constant(): if s_obj1.is_constant(): r.const = s_obj1.const is s_obj2.const @@ -39,18 +39,17 @@ knowntypedata = {} def bind(src_obj, tgt_obj): - if hasattr(tgt_obj.ann, 'is_type_of') and src_obj.ann.is_constant(): + if hasattr(annotator.annotation(tgt_obj), 'is_type_of') and annotator.annotation(src_obj).is_constant(): add_knowntypedata( knowntypedata, True, - tgt_obj.ann.is_type_of, - getbookkeeper().valueoftype(src_obj.ann.const)) - add_knowntypedata(knowntypedata, True, [tgt_obj.value], src_obj.ann) - s_nonnone = tgt_obj.ann - if (src_obj.ann.is_constant() and src_obj.ann.const is None and - tgt_obj.ann.can_be_none()): - s_nonnone = tgt_obj.ann.nonnoneify() - add_knowntypedata(knowntypedata, - False, [tgt_obj.value], s_nonnone) + annotator.annotation(tgt_obj).is_type_of, + getbookkeeper().valueoftype(annotator.annotation(src_obj).const)) + add_knowntypedata(knowntypedata, True, [tgt_obj], annotator.annotation(src_obj)) + s_nonnone = annotator.annotation(tgt_obj) + if (annotator.annotation(src_obj).is_constant() and annotator.annotation(src_obj).const is None and + annotator.annotation(tgt_obj).can_be_none()): + s_nonnone = annotator.annotation(tgt_obj).nonnoneify() + add_knowntypedata(knowntypedata, False, [tgt_obj], s_nonnone) bind(obj2, obj1) bind(obj1, obj2) @@ -60,7 +59,7 @@ def _make_cmp_annotator_default(cmp_op): @cmp_op.register(SomeObject, SomeObject) def default_annotate(annotator, obj1, obj2): - s_1, s_2 = obj1.ann, obj2.ann + s_1, s_2 = annotator.annotation(obj1), annotator.annotation(obj2) if s_1.is_immutable_constant() and s_2.is_immutable_constant(): return immutablevalue(cmp_op.pyfunc(s_1.const, s_2.const)) else: @@ -247,7 +246,7 @@ @cmp_op.register(SomeInteger, SomeInteger) def _compare_helper(annotator, int1, int2): r = SomeBool() - s_int1, s_int2 = int1.ann, int2.ann + s_int1, s_int2 = annotator.annotation(int1), annotator.annotation(int2) if s_int1.is_immutable_constant() and s_int2.is_immutable_constant(): r.const = cmp_op.pyfunc(s_int1.const, s_int2.const) # @@ -269,21 +268,21 @@ if s_int0.knowntype is bool: return int return s_int0.knowntype - if s_int1.nonneg and isinstance(int2.value, Variable): + if s_int1.nonneg and isinstance(int2, Variable): case = cmp_op.opname in ('lt', 'le', 'eq') - add_knowntypedata(knowntypedata, case, [int2.value], + add_knowntypedata(knowntypedata, case, [int2], SomeInteger(nonneg=True, knowntype=tointtype(s_int2))) - if s_int2.nonneg and isinstance(int1.value, Variable): + if s_int2.nonneg and isinstance(int1, Variable): case = cmp_op.opname in ('gt', 'ge', 'eq') - add_knowntypedata(knowntypedata, case, [int1.value], + add_knowntypedata(knowntypedata, case, [int1], SomeInteger(nonneg=True, 
knowntype=tointtype(s_int1))) r.set_knowntypedata(knowntypedata) # a special case for 'x < 0' or 'x >= 0', # where 0 is a flow graph Constant # (in this case we are sure that it cannot become a r_uint later) - if (isinstance(int2.value, Constant) and - type(int2.value.value) is int and # filter out Symbolics - int2.value.value == 0): + if (isinstance(int2, Constant) and + type(int2.value) is int and # filter out Symbolics + int2.value == 0): if s_int1.nonneg: if cmp_op.opname == 'lt': r.const = False @@ -718,9 +717,9 @@ def is__PBC_PBC(annotator, pbc1, pbc2): s = is__default(annotator, pbc1, pbc2) if not s.is_constant(): - if not pbc1.ann.can_be_None or not pbc2.ann.can_be_None: - for desc in pbc1.ann.descriptions: - if desc in pbc2.ann.descriptions: + if not annotator.annotation(pbc1).can_be_None or not annotator.annotation(pbc2).can_be_None: + for desc in annotator.annotation(pbc1).descriptions: + if desc in annotator.annotation(pbc2).descriptions: break else: s.const = False # no common desc in the two sets diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -861,7 +861,7 @@ all_vars = set().union(*[block.getvariables() for block in graph.iterblocks()]) print all_vars for var in all_vars: - s_value = var.binding.ann + s_value = var.annotation if isinstance(s_value, annmodel.SomeList): assert not s_value.listdef.listitem.resized assert not s_value.listdef.listitem.mutated diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -24,18 +24,18 @@ @op.type.register(SomeObject) def type_SomeObject(annotator, arg): r = SomeType() - r.is_type_of = [arg.value] + r.is_type_of = [arg] return r @op.bool.register(SomeObject) def bool_SomeObject(annotator, obj): r = SomeBool() - obj.ann.bool_behavior(r) - s_nonnone_obj = obj.ann + annotator.annotation(obj).bool_behavior(r) + s_nonnone_obj = annotator.annotation(obj) if s_nonnone_obj.can_be_none(): s_nonnone_obj = s_nonnone_obj.nonnoneify() knowntypedata = {} - add_knowntypedata(knowntypedata, True, [obj.value], s_nonnone_obj) + add_knowntypedata(knowntypedata, True, [obj], s_nonnone_obj) r.set_knowntypedata(knowntypedata) return r @@ -46,11 +46,11 @@ @op.simple_call.register(SomeObject) def simple_call_SomeObject(annotator, func, *args): - return func.ann.call(simple_args([arg.ann for arg in args])) + return annotator.annotation(func).call(simple_args([annotator.annotation(arg) for arg in args])) @op.call_args.register(SomeObject) def call_args(annotator, func, *args): - return func.ann.call(complex_args([arg.ann for arg in args])) + return annotator.annotation(func).call(complex_args([annotator.annotation(arg) for arg in args])) class __extend__(SomeObject): @@ -248,7 +248,7 @@ @op.contains.register(SomeList) def contains_SomeList(annotator, obj, element): - obj.ann.listdef.generalize(element.ann) + annotator.annotation(obj).listdef.generalize(annotator.annotation(element)) return s_Bool contains_SomeList.can_only_throw = [] @@ -345,8 +345,8 @@ @op.contains.register(SomeDict) def contains_SomeDict(annotator, dct, element): - dct.ann.dictdef.generalize_key(element.ann) - if dct.ann._is_empty(): + annotator.annotation(dct).dictdef.generalize_key(annotator.annotation(element)) + if annotator.annotation(dct)._is_empty(): s_bool = SomeBool() s_bool.const = False return s_bool @@ -437,11 +437,11 @@ 
@op.contains.register(SomeString) @op.contains.register(SomeUnicodeString) def contains_String(annotator, string, char): - if char.ann.is_constant() and char.ann.const == "\0": + if annotator.annotation(char).is_constant() and annotator.annotation(char).const == "\0": r = SomeBool() knowntypedata = {} - add_knowntypedata(knowntypedata, False, [string.value], - string.ann.nonnulify()) + add_knowntypedata(knowntypedata, False, [string], + annotator.annotation(string).nonnulify()) r.set_knowntypedata(knowntypedata) return r else: diff --git a/rpython/annotator/value.py b/rpython/annotator/value.py deleted file mode 100644 --- a/rpython/annotator/value.py +++ /dev/null @@ -1,9 +0,0 @@ -""" AnnotatedValue """ - -class AnnotatedValue(object): - def __init__(self, value, annotation): - self.value = value - self.ann = annotation - - def __repr__(self): - return "AnnotatedValue(%s, %r)" % (self.value, self.ann) diff --git a/rpython/flowspace/model.py b/rpython/flowspace/model.py --- a/rpython/flowspace/model.py +++ b/rpython/flowspace/model.py @@ -250,7 +250,7 @@ class Variable(object): - __slots__ = ["_name", "_nr", "binding", "concretetype"] + __slots__ = ["_name", "_nr", "annotation", "concretetype"] dummyname = 'v' namesdict = {dummyname: (dummyname, 0)} @@ -273,7 +273,7 @@ def __init__(self, name=None): self._name = self.dummyname self._nr = -1 - self.binding = None + self.annotation = None # numbers are bound lazily, when the name is requested if name is not None: self.rename(name) diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -97,7 +97,7 @@ return None def consider(self, annotator, *args): - args_s = [arg.ann for arg in args] + args_s = [annotator.annotation(arg) for arg in args] spec = type(self).get_specialization(*args_s) return spec(annotator, *args) @@ -166,7 +166,7 @@ raise AnnotatorError("Unknown operation") def get_can_only_throw(self, annotator): - args_s = [annotator.binding(v) for v in self.args] + args_s = [annotator.annotation(v) for v in self.args] spec = type(self).get_specialization(*args_s) return read_can_only_throw(spec, args_s[0]) @@ -176,7 +176,7 @@ impl = getattr(s_arg, cls.opname) def specialized(annotator, arg, *other_args): - return impl(*[x.ann for x in other_args]) + return impl(*[annotator.annotation(x) for x in other_args]) try: specialized.can_only_throw = impl.can_only_throw except AttributeError: @@ -202,7 +202,7 @@ impl = getattr(pair(s_arg1, s_arg2), cls.opname) def specialized(annotator, arg1, arg2, *other_args): - return impl(*[x.ann for x in other_args]) + return impl(*[annotator.annotation(x) for x in other_args]) try: specialized.can_only_throw = impl.can_only_throw except AttributeError: @@ -212,7 +212,7 @@ return cls._registry[type(s_arg1), type(s_arg2)] def get_can_only_throw(self, annotator): - args_s = [annotator.binding(v) for v in self.args] + args_s = [annotator.annotation(v) for v in self.args] spec = type(self).get_specialization(*args_s) return read_can_only_throw(spec, args_s[0], args_s[1]) @@ -457,7 +457,7 @@ canraise = [] def consider(self, annotator, *args): - return SomeTuple(items=[arg.ann for arg in args]) + return SomeTuple(items=[annotator.annotation(arg) for arg in args]) class NewList(HLOperation): @@ -465,7 +465,7 @@ canraise = [] def consider(self, annotator, *args): - return annotator.bookkeeper.newlist(*[arg.ann for arg in args]) + return annotator.bookkeeper.newlist(*[annotator.annotation(arg) for arg in args]) class 
Pow(PureOperation): diff --git a/rpython/rtyper/callparse.py b/rpython/rtyper/callparse.py --- a/rpython/rtyper/callparse.py +++ b/rpython/rtyper/callparse.py @@ -19,7 +19,7 @@ def getrresult(rtyper, graph): """Return the repr of the result variable of the 'graph'.""" - if graph.getreturnvar().binding is not None: + if graph.getreturnvar().annotation is not None: return rtyper.bindingrepr(graph.getreturnvar()) else: return lltype.Void diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -496,7 +496,7 @@ hop.r_result, op.opname, resulttype)) # figure out if the resultvar is a completely fresh Variable or not if (isinstance(resultvar, Variable) and - resultvar.binding is None and + resultvar.annotation is None and resultvar not in varmapping): # fresh Variable: rename it to the previously existing op.result varmapping[resultvar] = op.result diff --git a/rpython/translator/transform.py b/rpython/translator/transform.py --- a/rpython/translator/transform.py +++ b/rpython/translator/transform.py @@ -124,14 +124,14 @@ elif op.opname == 'contains' and op.args[0] in newlist_sources: items = {} for v in newlist_sources[op.args[0]]: - s = self.binding(v) + s = self.annotation(v) if not s.is_immutable_constant(): break items[s.const] = None else: # all arguments of the newlist are annotation constants op.args[0] = Constant(items) - s_dict = self.binding(op.args[0]) + s_dict = self.annotation(op.args[0]) s_dict.dictdef.generalize_key(self.binding(op.args[1])) @@ -168,9 +168,9 @@ "Fix a block whose end can never be reached at run-time." # search the operation that cannot succeed can_succeed = [op for op in block.operations - if op.result.binding is not None] + if op.result.annotation is not None] cannot_succeed = [op for op in block.operations - if op.result.binding is None] + if op.result.annotation is None] n = len(can_succeed) # check consistency assert can_succeed == block.operations[:n] diff --git a/rpython/translator/unsimplify.py b/rpython/translator/unsimplify.py --- a/rpython/translator/unsimplify.py +++ b/rpython/translator/unsimplify.py @@ -6,8 +6,7 @@ """Make a copy of the Variable v, preserving annotations and concretetype.""" assert isinstance(v, Variable) newvar = Variable(v) - if annotator is not None and v.binding is not None: - annotator.transfer_binding(newvar, v) + newvar.annotation = v.annotation if hasattr(v, 'concretetype'): newvar.concretetype = v.concretetype return newvar From noreply at buildbot.pypy.org Mon Oct 6 05:12:04 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 6 Oct 2014 05:12:04 +0200 (CEST) Subject: [pypy-commit] pypy var-in-Some: replace unsimplify.copyvar() with Variable.copy() Message-ID: <20141006031204.351691C0EC8@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: var-in-Some Changeset: r73819:831df4bed8a4 Date: 2014-10-06 01:57 +0100 http://bitbucket.org/pypy/pypy/changeset/831df4bed8a4/ Log: replace unsimplify.copyvar() with Variable.copy() diff --git a/rpython/flowspace/model.py b/rpython/flowspace/model.py --- a/rpython/flowspace/model.py +++ b/rpython/flowspace/model.py @@ -312,6 +312,15 @@ def foldable(self): return False + def copy(self): + """Make a copy of the Variable, preserving annotations and concretetype.""" + newvar = Variable(self) + newvar.annotation = self.annotation + if hasattr(self, 'concretetype'): + newvar.concretetype = self.concretetype + return newvar + + class Constant(Hashable): __slots__ = ["concretetype"] diff --git 
a/rpython/memory/gctransform/asmgcroot.py b/rpython/memory/gctransform/asmgcroot.py --- a/rpython/memory/gctransform/asmgcroot.py +++ b/rpython/memory/gctransform/asmgcroot.py @@ -10,7 +10,7 @@ BaseFrameworkGCTransformer, BaseRootWalker) from rpython.rtyper.llannotation import SomeAddress from rpython.rtyper.rbuiltin import gen_cast -from rpython.translator.unsimplify import copyvar, varoftype +from rpython.translator.unsimplify import varoftype from rpython.translator.tool.cbuild import ExternalCompilationInfo import sys @@ -140,7 +140,7 @@ block1 = Block([]) reloadedvars = [] for v, c_p in zip(block2.inputargs, sra): - v = copyvar(None, v) + v = v.copy() if isinstance(v.concretetype, lltype.Ptr): w = varoftype(llmemory.Address) else: diff --git a/rpython/translator/backendopt/inline.py b/rpython/translator/backendopt/inline.py --- a/rpython/translator/backendopt/inline.py +++ b/rpython/translator/backendopt/inline.py @@ -8,7 +8,7 @@ from rpython.translator.backendopt.canraise import RaiseAnalyzer from rpython.translator.backendopt.support import log, find_loop_blocks from rpython.translator.simplify import join_blocks, cleanup_graph, get_graph -from rpython.translator.unsimplify import copyvar, split_block +from rpython.translator.unsimplify import split_block class CannotInline(Exception): @@ -236,14 +236,13 @@ if isinstance(var, Constant): return var if var not in self.varmap: - self.varmap[var] = copyvar(None, var) + self.varmap[var] = var.copy() return self.varmap[var] def passon_vars(self, cache_key): if cache_key in self._passon_vars: return self._passon_vars[cache_key] - result = [copyvar(None, var) - for var in self.original_passon_vars] + result = [var.copy() for var in self.original_passon_vars] self._passon_vars[cache_key] = result return result @@ -362,8 +361,8 @@ exc_match.concretetype = typeOf(exc_match.value) blocks = [] for i, link in enumerate(afterblock.exits[1:]): - etype = copyvar(None, copiedexceptblock.inputargs[0]) - evalue = copyvar(None, copiedexceptblock.inputargs[1]) + etype = copiedexceptblock.inputargs[0].copy() + evalue = copiedexceptblock.inputargs[1].copy() passon_vars = self.passon_vars(i) block = Block([etype, evalue] + passon_vars) res = Variable() diff --git a/rpython/translator/backendopt/ssa.py b/rpython/translator/backendopt/ssa.py --- a/rpython/translator/backendopt/ssa.py +++ b/rpython/translator/backendopt/ssa.py @@ -158,8 +158,6 @@ 'graph_or_blocks' can be a graph, or just a dict that lists some blocks from a graph, as follows: {block: reachable-from-outside-flag}. 
""" - from rpython.translator.unsimplify import copyvar - entrymap = mkinsideentrymap(graph_or_blocks) builder = DataFlowFamilyBuilder(graph_or_blocks) variable_families = builder.get_variable_families() @@ -203,7 +201,7 @@ except KeyError: raise Exception("SSA_to_SSI failed: no way to give a value to" " %r in %r" % (v, block)) - w = copyvar(annotator, v) + w = v.copy() variable_families.union(v, w) block.renamevariables({v: w}) block.inputargs.append(w) diff --git a/rpython/translator/exceptiontransform.py b/rpython/translator/exceptiontransform.py --- a/rpython/translator/exceptiontransform.py +++ b/rpython/translator/exceptiontransform.py @@ -1,5 +1,5 @@ from rpython.translator.simplify import join_blocks, cleanup_graph -from rpython.translator.unsimplify import copyvar, varoftype +from rpython.translator.unsimplify import varoftype from rpython.translator.unsimplify import insert_empty_block, split_block from rpython.translator.backendopt import canraise, inline from rpython.flowspace.model import Block, Constant, Variable, Link, \ @@ -305,8 +305,7 @@ reraise = self.comes_from_last_exception(entrymap, link) result = Variable() result.concretetype = lltype.Void - block = Block([copyvar(None, v) - for v in graph.exceptblock.inputargs]) + block = Block([v.copy() for v in graph.exceptblock.inputargs]) if reraise: block.operations = [ SpaceOperation("direct_call", @@ -345,7 +344,7 @@ inlined, the correct exception matching blocks are produced.""" # XXX slightly annoying: construct a graph by hand # but better than the alternative - result = copyvar(None, op.result) + result = op.result.copy() opargs = [] inputargs = [] callargs = [] @@ -435,7 +434,7 @@ result_i = l0.args.index(v_result) v_result_after = normalafterblock.inputargs[result_i] else: - v_result_after = copyvar(None, v_result) + v_result_after = v_result.copy() l0.args.append(v_result) normalafterblock.inputargs.append(v_result_after) if true_zero: diff --git a/rpython/translator/unsimplify.py b/rpython/translator/unsimplify.py --- a/rpython/translator/unsimplify.py +++ b/rpython/translator/unsimplify.py @@ -2,15 +2,6 @@ SpaceOperation, c_last_exception, checkgraph) -def copyvar(annotator, v): - """Make a copy of the Variable v, preserving annotations and concretetype.""" - assert isinstance(v, Variable) - newvar = Variable(v) - newvar.annotation = v.annotation - if hasattr(v, 'concretetype'): - newvar.concretetype = v.concretetype - return newvar - def varoftype(concretetype, name=None): var = Variable(name) var.concretetype = concretetype @@ -30,7 +21,7 @@ vars = [v for v, keep in vars.items() if keep] mapping = {} for v in vars: - mapping[v] = copyvar(annotator, v) + mapping[v] = v.copy() newblock = Block(vars) newblock.operations.extend(newops) newblock.closeblock(Link(link.args, link.target)) @@ -40,7 +31,7 @@ return newblock def insert_empty_startblock(annotator, graph): - vars = [copyvar(annotator, v) for v in graph.startblock.inputargs] + vars = [v.copy() for v in graph.startblock.inputargs] newblock = Block(vars) newblock.closeblock(Link(vars, graph.startblock)) graph.startblock = newblock @@ -71,7 +62,7 @@ if var in vars_produced_in_new_block: return var if var not in varmap: - varmap[var] = copyvar(annotator, var) + varmap[var] = var.copy() return varmap[var] moved_operations = block.operations[index:] new_moved_ops = [] @@ -145,7 +136,7 @@ annhelper.finish() entry_point = translator.entry_point_graph - args = [copyvar(translator.annotator, v) for v in entry_point.getargs()] + args = [v.copy() for v in 
entry_point.getargs()] extrablock = Block(args) v_none = varoftype(lltype.Void) newop = SpaceOperation('direct_call', [c_initial_func], v_none) @@ -168,7 +159,7 @@ annhelper.finish() entry_point = translator.entry_point_graph - v = copyvar(translator.annotator, entry_point.getreturnvar()) + v = entry_point.getreturnvar().copy() extrablock = Block([v]) v_none = varoftype(lltype.Void) newop = SpaceOperation('direct_call', [c_final_func], v_none) From noreply at buildbot.pypy.org Mon Oct 6 05:12:05 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 6 Oct 2014 05:12:05 +0200 (CEST) Subject: [pypy-commit] pypy var-in-Some: fix dotviewer Message-ID: <20141006031205.82F811C0EC8@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: var-in-Some Changeset: r73820:433946e5b1de Date: 2014-10-06 04:04 +0100 http://bitbucket.org/pypy/pypy/changeset/433946e5b1de/ Log: fix dotviewer diff --git a/rpython/translator/tool/graphpage.py b/rpython/translator/tool/graphpage.py --- a/rpython/translator/tool/graphpage.py +++ b/rpython/translator/tool/graphpage.py @@ -122,8 +122,8 @@ for v in link.getextravars(): vars[v] = True for var in vars: - if var.binding is not None: - s_value = var.binding.ann + s_value = var.annotation + if s_value is not None: info = '%s: %s' % (var.name, s_value) annotationcolor = getattr(s_value, 'annotationcolor', None) self.links[var.name] = info, annotationcolor From noreply at buildbot.pypy.org Mon Oct 6 07:03:02 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 6 Oct 2014 07:03:02 +0200 (CEST) Subject: [pypy-commit] pypy default: test for valid fd earlier Message-ID: <20141006050302.2FEF41C0548@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r73821:ab66c5140bd6 Date: 2014-10-06 00:09 +0300 http://bitbucket.org/pypy/pypy/changeset/ab66c5140bd6/ Log: test for valid fd earlier diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -116,6 +116,8 @@ validate_fd(fileno(fp)) return _feof(fp) +def is_valid_fp(fp): + return is_valid_fd(fileno(fp)) constant_names = """ Py_TPFLAGS_READY Py_TPFLAGS_READYING Py_TPFLAGS_HAVE_GETCHARBUFFER diff --git a/pypy/module/cpyext/eval.py b/pypy/module/cpyext/eval.py --- a/pypy/module/cpyext/eval.py +++ b/pypy/module/cpyext/eval.py @@ -3,7 +3,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( cpython_api, CANNOT_FAIL, CONST_STRING, FILEP, fread, feof, Py_ssize_tP, - cpython_struct) + cpython_struct, is_valid_fp) from pypy.module.cpyext.pyobject import PyObject, borrow_from from pypy.module.cpyext.pyerrors import PyErr_SetFromErrno from pypy.module.cpyext.funcobject import PyCodeObject @@ -154,6 +154,10 @@ source = "" filename = rffi.charp2str(filename) buf = lltype.malloc(rffi.CCHARP.TO, BUF_SIZE, flavor='raw') + if not is_valid_fp(fp): + PyErr_SetFromErrno(space, space.w_IOError) + lltype.free(buf, flavor='raw') + return None try: while True: count = fread(buf, 1, BUF_SIZE, fp) diff --git a/pypy/module/cpyext/test/test_eval.py b/pypy/module/cpyext/test/test_eval.py --- a/pypy/module/cpyext/test/test_eval.py +++ b/pypy/module/cpyext/test/test_eval.py @@ -89,12 +89,12 @@ rffi.free_charp(buf) assert 0 == run("42 * 43") - + assert -1 == run("4..3 * 43") - + assert api.PyErr_Occurred() api.PyErr_Clear() - + def test_run_string(self, space, api): def run(code, start, w_globals, w_locals): buf = rffi.str2charp(code) From noreply at buildbot.pypy.org Mon Oct 6 07:03:03 2014 From: noreply at buildbot.pypy.org 
(mattip) Date: Mon, 6 Oct 2014 07:03:03 +0200 (CEST) Subject: [pypy-commit] pypy default: change order Message-ID: <20141006050303.5C14A1C0548@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r73822:5c0e512cf6dc Date: 2014-10-06 08:00 +0300 http://bitbucket.org/pypy/pypy/changeset/5c0e512cf6dc/ Log: change order diff --git a/pypy/module/cpyext/eval.py b/pypy/module/cpyext/eval.py --- a/pypy/module/cpyext/eval.py +++ b/pypy/module/cpyext/eval.py @@ -155,8 +155,8 @@ filename = rffi.charp2str(filename) buf = lltype.malloc(rffi.CCHARP.TO, BUF_SIZE, flavor='raw') if not is_valid_fp(fp): + lltype.free(buf, flavor='raw') PyErr_SetFromErrno(space, space.w_IOError) - lltype.free(buf, flavor='raw') return None try: while True: From noreply at buildbot.pypy.org Mon Oct 6 10:38:42 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 6 Oct 2014 10:38:42 +0200 (CEST) Subject: [pypy-commit] pypy default: Issue #1880: test and fix Message-ID: <20141006083842.55BDE1C0548@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73823:c8871c379375 Date: 2014-10-06 10:38 +0200 http://bitbucket.org/pypy/pypy/changeset/c8871c379375/ Log: Issue #1880: test and fix diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -402,8 +402,10 @@ The value argument can either be an integer or a string. """ try: - optval = space.int_w(w_optval) - except: + optval = space.c_int_w(w_optval) + except OperationError, e: + if e.async(space): + raise optval = space.str_w(w_optval) try: self.sock.setsockopt(level, optname, optval) diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -498,6 +498,13 @@ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) reuse = s.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) assert reuse == 0 + # + raises(TypeError, s.setsockopt, socket.SOL_SOCKET, + socket.SO_REUSEADDR, 2 ** 31) + raises(TypeError, s.setsockopt, socket.SOL_SOCKET, + socket.SO_REUSEADDR, 2 ** 32 + 1) + assert s.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 0 + # s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) reuse = s.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) assert reuse != 0 From noreply at buildbot.pypy.org Mon Oct 6 10:42:58 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 6 Oct 2014 10:42:58 +0200 (CEST) Subject: [pypy-commit] pypy default: Issue #1881: tentative fix Message-ID: <20141006084258.A33AE1C0548@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73824:7ee38900bceb Date: 2014-10-06 10:42 +0200 http://bitbucket.org/pypy/pypy/changeset/7ee38900bceb/ Log: Issue #1881: tentative fix diff --git a/rpython/rlib/rmmap.py b/rpython/rlib/rmmap.py --- a/rpython/rlib/rmmap.py +++ b/rpython/rlib/rmmap.py @@ -134,8 +134,8 @@ if _CYGWIN: # XXX: macro=True hack for newer versions of Cygwin (as of 12/2012) - c_malloc, _ = external('malloc', [size_t], PTR, macro=True) - c_free, _ = external('free', [PTR], lltype.Void, macro=True) + _, c_malloc_safe = external('malloc', [size_t], PTR, macro=True) + _, c_free_safe = external('free', [PTR], lltype.Void, macro=True) c_memmove, _ = external('memmove', [PTR, PTR, size_t], lltype.Void) @@ -709,7 +709,7 @@ # XXX: JIT memory should be using mmap MAP_PRIVATE with # PROT_EXEC but Cygwin's fork() fails. 
mprotect() # cannot be used, but seems to be unnecessary there. - res = c_malloc(map_size) + res = c_malloc_safe(map_size) if res == rffi.cast(PTR, 0): raise MemoryError return res @@ -726,7 +726,7 @@ alloc._annenforceargs_ = (int,) if _CYGWIN: - free = c_free + free = c_free_safe else: free = c_munmap_safe From noreply at buildbot.pypy.org Mon Oct 6 15:11:48 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 6 Oct 2014 15:11:48 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Update Message-ID: <20141006131148.408491C0469@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r73825:7ea8a80131f3 Date: 2014-10-06 15:11 +0200 http://bitbucket.org/pypy/pypy/changeset/7ea8a80131f3/ Log: Update diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -121,6 +121,28 @@ there are in PyPy-STM crashes related to markers (with the JIT?). Also, some markers logic is missing with the JIT. +------------------------------------------------------------ + +GC: call __del__() + +------------------------------------------------------------ + +look at jit.elidables as a way to specify "this function can run +earlier, in a separate transaction". Useful to avoid pointless +conflicts in cases the jit.edliable update some cache, like with +MapDictStrategy. + +------------------------------------------------------------ + +dicts: have an implementation that follows the principles in +stmgc/hashtable/design.txt + +------------------------------------------------------------ + +replace "atomic transactions" with better management of thread.locks. + +------------------------------------------------------------ + @@ -211,10 +233,6 @@ ------------------------------------------------------------ -GC: major collections; call __del__() - ------------------------------------------------------------- - JIT: finish (missing: the call in execute_token(), reorganize pypy source, ?) 
------------------------------------------------------------ From noreply at buildbot.pypy.org Mon Oct 6 15:22:27 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 6 Oct 2014 15:22:27 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: A simple check for consistency, should fail at some point if the format doesn't match Message-ID: <20141006132227.EDB751C0F1D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r73826:176c74c9bf1f Date: 2014-10-06 15:22 +0200 http://bitbucket.org/pypy/pypy/changeset/176c74c9bf1f/ Log: A simple check for consistency, should fail at some point if the format doesn't match diff --git a/pypy/stm/print_stm_log.py b/pypy/stm/print_stm_log.py --- a/pypy/stm/print_stm_log.py +++ b/pypy/stm/print_stm_log.py @@ -30,6 +30,9 @@ STM_GC_MAJOR_START = 13 STM_GC_MAJOR_DONE = 14 +_STM_EVENT_N = 15 + + event_name = {} for _key, _value in globals().items(): if _key.startswith('STM_'): @@ -62,6 +65,8 @@ if not packet: break sec, nsec, threadnum, otherthreadnum, event, len1, len2 = \ struct.unpack("IIIIBBB", packet) + if event >= _STM_EVENT_N: + raise ValueError("the file %r appears corrupted") m1 = f.read(len1) m2 = f.read(len2) result.append(LogEntry(sec + 0.000000001 * nsec, From noreply at buildbot.pypy.org Mon Oct 6 16:00:05 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 6 Oct 2014 16:00:05 +0200 (CEST) Subject: [pypy-commit] stmgc finalizer: Branch to implement finalizers Message-ID: <20141006140005.5AC011D37BA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: finalizer Changeset: r1459:6997a6d94dfd Date: 2014-10-06 15:39 +0200 http://bitbucket.org/pypy/stmgc/changeset/6997a6d94dfd/ Log: Branch to implement finalizers From noreply at buildbot.pypy.org Mon Oct 6 16:00:06 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 6 Oct 2014 16:00:06 +0200 (CEST) Subject: [pypy-commit] stmgc finalizer: Start on light finalizers Message-ID: <20141006140006.7064A1D37BA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: finalizer Changeset: r1460:97dc15596c92 Date: 2014-10-06 15:59 +0200 http://bitbucket.org/pypy/stmgc/changeset/97dc15596c92/ Log: Start on light finalizers diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -198,6 +198,9 @@ /* marker where this thread became inevitable */ stm_loc_marker_t marker_inev; + + /* Lightweight finalizers */ + struct list_s *young_objects_with_light_finalizers; }; enum /* safe_point */ { diff --git a/c7/stm/finalizer.c b/c7/stm/finalizer.c new file mode 100644 --- /dev/null +++ b/c7/stm/finalizer.c @@ -0,0 +1,20 @@ + + +void (*stmcb_light_finalizer)(object_t *); + +void stm_enable_light_finalizer(object_t *obj) +{ + STM_PSEGMENT->young_objects_with_light_finalizers = list_append( + STM_PSEGMENT->young_objects_with_light_finalizers, (uintptr_t)obj); +} + +static void deal_with_young_objects_with_finalizers(void) +{ + struct list_s *lst = STM_PSEGMENT->young_objects_with_light_finalizers; + long i, count = list_count(lst); + for (i = 0; i < count; i++) { + object_t* obj = (object_t *)list_item(lst, i); + stmcb_light_finalizer(obj); + } + list_clear(lst); +} diff --git a/c7/stm/finalizer.h b/c7/stm/finalizer.h new file mode 100644 --- /dev/null +++ b/c7/stm/finalizer.h @@ -0,0 +1,2 @@ + +static void deal_with_young_objects_with_finalizers(void); diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -559,6 +559,7 @@ /* now all surviving nursery objects have been moved out */ stm_move_young_weakrefs(); + 
deal_with_young_objects_with_finalizers(); throw_away_nursery(get_priv_segment(STM_SEGMENT->segment_num)); diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -128,6 +128,7 @@ pr->nursery_objects_shadows = tree_create(); pr->callbacks_on_commit_and_abort[0] = tree_create(); pr->callbacks_on_commit_and_abort[1] = tree_create(); + pr->young_objects_with_light_finalizers = list_create(); pr->overflow_number = GCFLAG_OVERFLOW_NUMBER_bit0 * i; highest_overflow_number = pr->overflow_number; pr->pub.transaction_read_version = 0xff; @@ -169,6 +170,7 @@ tree_free(pr->nursery_objects_shadows); tree_free(pr->callbacks_on_commit_and_abort[0]); tree_free(pr->callbacks_on_commit_and_abort[1]); + list_free(pr->young_objects_with_light_finalizers); } munmap(stm_object_pages, TOTAL_MEMORY); diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -16,6 +16,7 @@ #include "stm/weakref.h" #include "stm/marker.h" #include "stm/prof.h" +#include "stm/finalizer.h" #include "stm/misc.c" #include "stm/list.c" @@ -37,3 +38,4 @@ #include "stm/marker.c" #include "stm/prof.c" #include "stm/rewind_setjmp.c" +#include "stm/finalizer.c" diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -505,6 +505,13 @@ } while (0) +/* Support for light finalizers. This is a simple version of + finalizers that guarantees not to do anything fancy, like not + resurrecting objects. */ +void (*stmcb_light_finalizer)(object_t *); +void stm_enable_light_finalizer(object_t *); + + /* ==================== END ==================== */ #endif diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -158,6 +158,9 @@ void stm_push_marker(stm_thread_local_t *, uintptr_t, object_t *); void stm_update_marker_num(stm_thread_local_t *, uintptr_t); void stm_pop_marker(stm_thread_local_t *); + +void (*stmcb_light_finalizer)(object_t *); +void stm_enable_light_finalizer(object_t *); """) diff --git a/c7/test/test_finalizer.py b/c7/test/test_finalizer.py new file mode 100644 --- /dev/null +++ b/c7/test/test_finalizer.py @@ -0,0 +1,33 @@ +from support import * +import py + + +class TestFinalizer(BaseTest): + + def setup_method(self, meth): + BaseTest.setup_method(self, meth) + # + @ffi.callback("void(object_t *)") + def light_finalizer(obj): + self.light_finalizers_called.append(obj) + self.light_finalizers_called = [] + lib.stmcb_light_finalizer = light_finalizer + self._light_finalizer_keepalive = light_finalizer + + def expect_finalized(self, objs): + assert self.light_finalizers_called == objs + self.light_finalizers_called = [] + + def test_no_finalizer(self): + self.start_transaction() + lp1 = stm_allocate(48) + self.commit_transaction() + self.expect_finalized([]) + + def test_young_light_finalizer(self): + self.start_transaction() + lp1 = stm_allocate(48) + lib.stm_enable_light_finalizer(lp1) + self.expect_finalized([]) + self.commit_transaction() + self.expect_finalized([lp1]) From noreply at buildbot.pypy.org Mon Oct 6 16:11:25 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 6 Oct 2014 16:11:25 +0200 (CEST) Subject: [pypy-commit] stmgc finalizer: in-progress Message-ID: <20141006141125.67EA81C0469@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: finalizer Changeset: r1461:453148969a4a Date: 2014-10-06 16:11 +0200 http://bitbucket.org/pypy/stmgc/changeset/453148969a4a/ Log: in-progress diff --git a/c7/stm/finalizer.c b/c7/stm/finalizer.c --- a/c7/stm/finalizer.c +++ b/c7/stm/finalizer.c @@ -14,7 +14,14 @@ 
long i, count = list_count(lst); for (i = 0; i < count; i++) { object_t* obj = (object_t *)list_item(lst, i); - stmcb_light_finalizer(obj); + object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)obj; + if (pforwarded_array[0] != GCWORD_MOVED) { + /* not moved: the object dies */ + stmcb_light_finalizer(obj); + } + else { + /*...*/ + } } list_clear(lst); } diff --git a/c7/test/test_finalizer.py b/c7/test/test_finalizer.py --- a/c7/test/test_finalizer.py +++ b/c7/test/test_finalizer.py @@ -31,3 +31,11 @@ self.expect_finalized([]) self.commit_transaction() self.expect_finalized([lp1]) + + def test_young_light_finalizer_dont_die(self): + self.start_transaction() + lp1 = stm_allocate(48) + lib.stm_enable_light_finalizer(lp1) + self.push_root(lp1) # stays alive + self.commit_transaction() + self.expect_finalized([]) From noreply at buildbot.pypy.org Mon Oct 6 16:17:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 6 Oct 2014 16:17:21 +0200 (CEST) Subject: [pypy-commit] stmgc finalizer: in-progress Message-ID: <20141006141721.EBC381C0F1D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: finalizer Changeset: r1462:9541076ea2ae Date: 2014-10-06 16:17 +0200 http://bitbucket.org/pypy/stmgc/changeset/9541076ea2ae/ Log: in-progress diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -201,6 +201,7 @@ /* Lightweight finalizers */ struct list_s *young_objects_with_light_finalizers; + struct list_s *old_objects_with_light_finalizers; }; enum /* safe_point */ { diff --git a/c7/stm/finalizer.c b/c7/stm/finalizer.c --- a/c7/stm/finalizer.c +++ b/c7/stm/finalizer.c @@ -4,8 +4,14 @@ void stm_enable_light_finalizer(object_t *obj) { - STM_PSEGMENT->young_objects_with_light_finalizers = list_append( - STM_PSEGMENT->young_objects_with_light_finalizers, (uintptr_t)obj); + if (_is_young(obj)) { + STM_PSEGMENT->young_objects_with_light_finalizers = list_append( + STM_PSEGMENT->young_objects_with_light_finalizers, (uintptr_t)obj); + } + else { + STM_PSEGMENT->old_objects_with_light_finalizers = list_append( + STM_PSEGMENT->old_objects_with_light_finalizers, (uintptr_t)obj); + } } static void deal_with_young_objects_with_finalizers(void) @@ -14,13 +20,18 @@ long i, count = list_count(lst); for (i = 0; i < count; i++) { object_t* obj = (object_t *)list_item(lst, i); + assert(_is_young(obj)); + object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)obj; if (pforwarded_array[0] != GCWORD_MOVED) { /* not moved: the object dies */ stmcb_light_finalizer(obj); } else { - /*...*/ + obj = pforwarded_array[1]; /* moved location */ + assert(!_is_young(obj)); + STM_PSEGMENT->old_objects_with_light_finalizers = list_append( + STM_PSEGMENT->old_objects_with_light_finalizers, (uintptr_t)obj); } } list_clear(lst); diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -129,6 +129,7 @@ pr->callbacks_on_commit_and_abort[0] = tree_create(); pr->callbacks_on_commit_and_abort[1] = tree_create(); pr->young_objects_with_light_finalizers = list_create(); + pr->old_objects_with_light_finalizers = list_create(); pr->overflow_number = GCFLAG_OVERFLOW_NUMBER_bit0 * i; highest_overflow_number = pr->overflow_number; pr->pub.transaction_read_version = 0xff; @@ -171,6 +172,7 @@ tree_free(pr->callbacks_on_commit_and_abort[0]); tree_free(pr->callbacks_on_commit_and_abort[1]); list_free(pr->young_objects_with_light_finalizers); + list_free(pr->old_objects_with_light_finalizers); } munmap(stm_object_pages, TOTAL_MEMORY); diff --git 
a/c7/test/test_finalizer.py b/c7/test/test_finalizer.py --- a/c7/test/test_finalizer.py +++ b/c7/test/test_finalizer.py @@ -39,3 +39,13 @@ self.push_root(lp1) # stays alive self.commit_transaction() self.expect_finalized([]) + + def test_old_light_finalizer(self): + self.start_transaction() + lp1 = stm_allocate(48) + self.push_root(lp1) + stm_minor_collect() + lp1 = self.pop_root() + lib.stm_enable_light_finalizer(lp1) + self.commit_transaction() + self.expect_finalized([]) From noreply at buildbot.pypy.org Mon Oct 6 16:30:32 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 6 Oct 2014 16:30:32 +0200 (CEST) Subject: [pypy-commit] stmgc finalizer: Major GC. Message-ID: <20141006143032.2D4571C0F1D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: finalizer Changeset: r1463:a02c1ad79557 Date: 2014-10-06 16:30 +0200 http://bitbucket.org/pypy/stmgc/changeset/a02c1ad79557/ Log: Major GC. diff --git a/c7/stm/finalizer.c b/c7/stm/finalizer.c --- a/c7/stm/finalizer.c +++ b/c7/stm/finalizer.c @@ -36,3 +36,30 @@ } list_clear(lst); } + +static void deal_with_old_objects_with_finalizers(void) +{ + long j; + for (j = 1; j <= NB_SEGMENTS; j++) { + struct stm_priv_segment_info_s *pseg = get_priv_segment(j); + + struct list_s *lst = pseg->old_objects_with_light_finalizers; + long i, count = list_count(lst); + lst->count = 0; + for (i = 0; i < count; i++) { + object_t* obj = (object_t *)list_item(lst, i); + if (!mark_visited_test(obj)) { + /* not marked: object dies */ + /* we're calling the light finalizer is a random thread, + but it should work, because it was dead already at the + start of that thread's transaction, so any thread should + see the same, old content */ + stmcb_light_finalizer(obj); + } + else { + /* object survives */ + list_set_item(lst, lst->count++, (uintptr_t)obj); + } + } + } +} diff --git a/c7/stm/finalizer.h b/c7/stm/finalizer.h --- a/c7/stm/finalizer.h +++ b/c7/stm/finalizer.h @@ -1,2 +1,3 @@ static void deal_with_young_objects_with_finalizers(void); +static void deal_with_old_objects_with_finalizers(void); diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -626,8 +626,9 @@ mark_visit_from_roots(); LIST_FREE(mark_objects_to_trace); - /* weakrefs: */ + /* weakrefs and old light finalizers */ stm_visit_old_weakrefs(); + deal_with_old_objects_with_finalizers(); /* cleanup */ clean_up_segment_lists(); diff --git a/c7/test/test_finalizer.py b/c7/test/test_finalizer.py --- a/c7/test/test_finalizer.py +++ b/c7/test/test_finalizer.py @@ -32,7 +32,7 @@ self.commit_transaction() self.expect_finalized([lp1]) - def test_young_light_finalizer_dont_die(self): + def test_young_light_finalizer_survives(self): self.start_transaction() lp1 = stm_allocate(48) lib.stm_enable_light_finalizer(lp1) @@ -49,3 +49,27 @@ lib.stm_enable_light_finalizer(lp1) self.commit_transaction() self.expect_finalized([]) + + def test_old_light_finalizer(self): + self.start_transaction() + lp1 = stm_allocate(48) + lib.stm_enable_light_finalizer(lp1) + self.push_root(lp1) + stm_minor_collect() + lp1 = self.pop_root() + self.expect_finalized([]) + stm_major_collect() + self.expect_finalized([lp1]) + self.commit_transaction() + + def test_old_light_finalizer_survives(self): + self.start_transaction() + lp1 = stm_allocate(48) + lib.stm_enable_light_finalizer(lp1) + self.push_root(lp1) + stm_minor_collect() + lp1 = self.pop_root() + self.push_root(lp1) + stm_major_collect() + self.commit_transaction() + self.expect_finalized([]) From noreply at buildbot.pypy.org Mon Oct 
6 17:39:29 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 6 Oct 2014 17:39:29 +0200 (CEST) Subject: [pypy-commit] stmgc finalizer: Copying directly the algorithm from pypy's minimark.py. Compiles but not Message-ID: <20141006153929.191631C023E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: finalizer Changeset: r1464:b18fbe29f7f7 Date: 2014-10-06 17:39 +0200 http://bitbucket.org/pypy/stmgc/changeset/b18fbe29f7f7/ Log: Copying directly the algorithm from pypy's minimark.py. Compiles but not tested so far (and missing ways to register non-light finalizers). diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -199,9 +199,10 @@ /* marker where this thread became inevitable */ stm_loc_marker_t marker_inev; - /* Lightweight finalizers */ + /* finalizers */ struct list_s *young_objects_with_light_finalizers; struct list_s *old_objects_with_light_finalizers; + struct list_s *objects_with_finalizers; }; enum /* safe_point */ { diff --git a/c7/stm/finalizer.c b/c7/stm/finalizer.c --- a/c7/stm/finalizer.c +++ b/c7/stm/finalizer.c @@ -4,18 +4,15 @@ void stm_enable_light_finalizer(object_t *obj) { - if (_is_young(obj)) { - STM_PSEGMENT->young_objects_with_light_finalizers = list_append( - STM_PSEGMENT->young_objects_with_light_finalizers, (uintptr_t)obj); - } - else { - STM_PSEGMENT->old_objects_with_light_finalizers = list_append( - STM_PSEGMENT->old_objects_with_light_finalizers, (uintptr_t)obj); - } + if (_is_young(obj)) + LIST_APPEND(STM_PSEGMENT->young_objects_with_light_finalizers, obj); + else + LIST_APPEND(STM_PSEGMENT->old_objects_with_light_finalizers, obj); } static void deal_with_young_objects_with_finalizers(void) { + /* for light finalizers */ struct list_s *lst = STM_PSEGMENT->young_objects_with_light_finalizers; long i, count = list_count(lst); for (i = 0; i < count; i++) { @@ -30,8 +27,7 @@ else { obj = pforwarded_array[1]; /* moved location */ assert(!_is_young(obj)); - STM_PSEGMENT->old_objects_with_light_finalizers = list_append( - STM_PSEGMENT->old_objects_with_light_finalizers, (uintptr_t)obj); + LIST_APPEND(STM_PSEGMENT->old_objects_with_light_finalizers, obj); } } list_clear(lst); @@ -39,6 +35,7 @@ static void deal_with_old_objects_with_finalizers(void) { + /* for light finalizers */ long j; for (j = 1; j <= NB_SEGMENTS; j++) { struct stm_priv_segment_info_s *pseg = get_priv_segment(j); @@ -63,3 +60,131 @@ } } } + + +/************************************************************/ +/* Algorithm for regular (non-light) finalizers. + Follows closely pypy/doc/discussion/finalizer-order.rst + as well as rpython/memory/gc/minimark.py. +*/ + +static inline int _finalization_state(object_t *obj) +{ + /* Returns the state, "0", 1, 2 or 3, as per finalizer-order.rst. + One difference is that the official state 0 is returned here + as a number that is <= 0. 
*/ + uintptr_t lock_idx = mark_loc(obj); + return write_locks[lock_idx] - (WL_FINALIZ_ORDER_1 - 1); +} + +static void _bump_finalization_state_from_0_to_1(object_t *obj) +{ + uintptr_t lock_idx = mark_loc(obj); + assert(write_locks[lock_idx] < WL_FINALIZ_ORDER_1); + write_locks[lock_idx] = WL_FINALIZ_ORDER_1; +} + +static struct list_s *_finalizer_tmpstack; + +static inline void append_to_finalizer_tmpstack(object_t **pobj) +{ + object_t *obj = *pobj; + if (obj != NULL) + LIST_APPEND(_finalizer_tmpstack, obj); +} + +static void _recursively_bump_finalization_state(object_t *obj, int from_state, + struct list_s *tmpstack) +{ + assert(_finalization_state(obj) == from_state); + assert(list_is_empty(tmpstack)); + _finalizer_tmpstack = tmpstack; + + while (1) { + if (_finalization_state(obj) == from_state) { + /* bump to the next state */ + write_locks[mark_loc(obj)]++; + + /* trace */ + struct object_s *realobj = + (struct object_s *)REAL_ADDRESS(stm_object_pages, obj); + stmcb_trace(realobj, &append_to_finalizer_tmpstack); + } + + if (list_is_empty(tmpstack)) + break; + + obj = (object_t *)list_pop_item(tmpstack); + } +} + +static void deal_with_objects_with_finalizers(void) +{ + /* for non-light finalizers */ + + /* there is one 'objects_with_finalizers' list per segment, but it + doesn't really matter: all objects are considered equal, and if + they survive, they are added again into one list that is attached + at the end to an arbitrary segment. */ + struct list_s *new_with_finalizer = list_create(); + struct list_s *marked = list_create(); + struct list_s *pending = list_create(); + struct list_s *tmpstack = list_create(); + + long j; + for (j = 1; j <= NB_SEGMENTS; j++) { + struct stm_priv_segment_info_s *pseg = get_priv_segment(j); + + struct list_s *lst = pseg->objects_with_finalizers; + long i, count = list_count(lst); + for (i = 0; i < count; i++) { + object_t *x = (object_t *)list_item(lst, i); + + assert(_finalization_state(x) != 1); + if (_finalization_state(x) >= 2) { + LIST_APPEND(new_with_finalizer, x); + continue; + } + LIST_APPEND(marked, x); + LIST_APPEND(pending, x); + while (!list_is_empty(pending)) { + object_t *y = (object_t *)list_pop_item(pending); + int state = _finalization_state(y); + if (state <= 0) { + _bump_finalization_state_from_0_to_1(y); + /* trace into the 'pending' list */ + struct object_s *realobj = + (struct object_s *)REAL_ADDRESS(stm_object_pages, y); + _finalizer_tmpstack = pending; + stmcb_trace(realobj, &append_to_finalizer_tmpstack); + } + else if (state == 2) { + _recursively_bump_finalization_state(y, 2, tmpstack); + } + } + _recursively_bump_finalization_state(x, 1, tmpstack); + } + list_clear(lst); + } + + long i, count = list_count(marked); + for (i = 0; i < count; i++) { + object_t *x = (object_t *)list_item(marked, i); + + int state = _finalization_state(x); + assert(state >= 2); + if (state == 2) { + LIST_APPEND(run_finalizers, x); + _recursively_bump_finalization_state(x, 2, tmpstack); + } + else { + LIST_APPEND(new_with_finalizer, x); + } + } + + list_free(tmpstack); + list_free(pending); + list_free(marked); + list_free(get_priv_segment(1)->objects_with_finalizers); + get_priv_segment(1)->objects_with_finalizers = new_with_finalizer; +} diff --git a/c7/stm/finalizer.h b/c7/stm/finalizer.h --- a/c7/stm/finalizer.h +++ b/c7/stm/finalizer.h @@ -1,3 +1,6 @@ static void deal_with_young_objects_with_finalizers(void); static void deal_with_old_objects_with_finalizers(void); +static void deal_with_objects_with_finalizers(void); + +static struct 
list_s *run_finalizers; diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -161,7 +161,11 @@ static struct list_s *mark_objects_to_trace; -#define WL_VISITED 255 +#define WL_FINALIZ_ORDER_1 253 +#define WL_FINALIZ_ORDER_2 254 +#define WL_FINALIZ_ORDER_3 WL_VISITED + +#define WL_VISITED 255 static inline uintptr_t mark_loc(object_t *obj) @@ -626,6 +630,11 @@ mark_visit_from_roots(); LIST_FREE(mark_objects_to_trace); + /* finalizer support: will mark as WL_VISITED all objects with a + finalizer and all objects reachable from there, and also moves + some objects from 'objects_with_finalizers' to 'run_finalizers'. */ + deal_with_objects_with_finalizers(); + /* weakrefs and old light finalizers */ stm_visit_old_weakrefs(); deal_with_old_objects_with_finalizers(); diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -113,7 +113,7 @@ /* Initialize STM_PSEGMENT */ struct stm_priv_segment_info_s *pr = get_priv_segment(i); - assert(1 <= i && i < 255); /* 255 is WL_VISITED in gcpage.c */ + assert(1 <= i && i < 253); /* 253 is WL_FINALIZ_ORDER_1 in gcpage.c */ pr->write_lock_num = i; pr->pub.segment_num = i; pr->pub.segment_base = segment_base; @@ -130,10 +130,12 @@ pr->callbacks_on_commit_and_abort[1] = tree_create(); pr->young_objects_with_light_finalizers = list_create(); pr->old_objects_with_light_finalizers = list_create(); + pr->objects_with_finalizers = list_create(); pr->overflow_number = GCFLAG_OVERFLOW_NUMBER_bit0 * i; highest_overflow_number = pr->overflow_number; pr->pub.transaction_read_version = 0xff; } + run_finalizers = list_create(); /* The pages are shared lazily, as remap_file_pages() takes a relatively long time for each page. @@ -173,7 +175,9 @@ tree_free(pr->callbacks_on_commit_and_abort[1]); list_free(pr->young_objects_with_light_finalizers); list_free(pr->old_objects_with_light_finalizers); + list_free(pr->objects_with_finalizers); } + list_free(run_finalizers); munmap(stm_object_pages, TOTAL_MEMORY); stm_object_pages = NULL; From noreply at buildbot.pypy.org Mon Oct 6 17:43:12 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 6 Oct 2014 17:43:12 +0200 (CEST) Subject: [pypy-commit] pypy default: fix msvc compiler choice override from CC environment variable Message-ID: <20141006154312.51ED31C023E@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r73827:63b7ea1c8c2d Date: 2014-10-06 18:27 +0300 http://bitbucket.org/pypy/pypy/changeset/63b7ea1c8c2d/ Log: fix msvc compiler choice override from CC environment variable diff --git a/rpython/translator/platform/__init__.py b/rpython/translator/platform/__init__.py --- a/rpython/translator/platform/__init__.py +++ b/rpython/translator/platform/__init__.py @@ -354,8 +354,10 @@ platform = pick_platform(new_platform, cc) if not platform: raise ValueError("pick_platform(%r, %s) failed"%(new_platform, cc)) - log.msg("Set platform with %r cc=%s, using cc=%r" % (new_platform, cc, - getattr(platform, 'cc','Unknown'))) + log.msg("Set platform with %r cc=%s, using cc=%r, version=%r" % (new_platform, cc, + getattr(platform, 'cc','Unknown'), + getattr(platform, 'version','Unknown'), + )) if new_platform == 'host': global host diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -14,9 +14,11 @@ if not cc: cc = os.environ.get('CC','') if not cc: - return MsvcPlatform(cc=cc, x64=x64_flag) + return 
MsvcPlatform(x64=x64_flag) elif cc.startswith('mingw') or cc == 'gcc': return MingwPlatform(cc) + else: + return MsvcPlatform(cc=cc, x64=x64_flag) try: subprocess.check_output([cc, '--version']) except: @@ -108,11 +110,14 @@ def __init__(self, cc=None, x64=False): self.x64 = x64 - msvc_compiler_environ = find_msvc_env(x64) - Platform.__init__(self, 'cl.exe') - if msvc_compiler_environ: - self.c_environ = os.environ.copy() - self.c_environ.update(msvc_compiler_environ) + if cc is None: + msvc_compiler_environ = find_msvc_env(x64) + Platform.__init__(self, 'cl.exe') + if msvc_compiler_environ: + self.c_environ = os.environ.copy() + self.c_environ.update(msvc_compiler_environ) + else: + self.cc = cc # detect version of current compiler returncode, stdout, stderr = _run_subprocess(self.cc, '', From noreply at buildbot.pypy.org Mon Oct 6 17:43:13 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 6 Oct 2014 17:43:13 +0200 (CEST) Subject: [pypy-commit] pypy default: document how to use a different compiler for testing and translating on windows Message-ID: <20141006154313.7DEDD1C023E@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r73828:f2834301312e Date: 2014-10-06 18:42 +0300 http://bitbucket.org/pypy/pypy/changeset/f2834301312e/ Log: document how to use a different compiler for testing and translating on windows diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -37,6 +37,13 @@ using a 32 bit Python and vice versa. By default pypy is built using the Multi-threaded DLL (/MD) runtime environment. +If you wish to override this detection method to use a different compiler +(mingw or a different version of MSVC): + +* set up the PATH and other environment variables as needed +* set the `CC` environment variable to compiler exe to be used, + for a different version of MSVC `SET CC=cl.exe`. + **Note:** PyPy is currently not supported for 64 bit Python, and translation will fail in this case. @@ -264,7 +271,7 @@ Since hacking on PyPy means running tests, you will need a way to specify the mingw compiler when hacking (as opposed to translating). As of March 2012, --cc is not a valid option for pytest.py. However if you set an -environment variable CC to the compliter exe, testing will use it. +environment variable CC to the compiler exe, testing will use it. .. _`mingw32 build`: http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win32/Automated%20Builds .. 
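
Taken together, the two changesets above make the Windows toolchain choice follow one rule: with no CC set, an MSVC environment is auto-detected; CC starting with "mingw" or equal to "gcc" selects the mingw platform; any other CC value (for example SET CC=cl.exe) is used directly as the MSVC compiler. A rough Python sketch of that rule, with an invented function name and return values rather than the real rpython.translator.platform objects:

    import os

    def pick_windows_compiler(x64_flag=False):
        cc = os.environ.get('CC', '')
        if not cc:
            return ('msvc', 'cl.exe', x64_flag)   # auto-detected MSVC environment
        if cc.startswith('mingw') or cc == 'gcc':
            return ('mingw', cc, x64_flag)        # gcc/mingw toolchain
        return ('msvc', cc, x64_flag)             # explicit MSVC, e.g. SET CC=cl.exe

In practice this means setting CC in the shell before running the test suite or the translation is enough to switch compilers, as the windows.rst text above says.
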
_`mingw64 build`: http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win64/Automated%20Builds From noreply at buildbot.pypy.org Mon Oct 6 17:52:01 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 6 Oct 2014 17:52:01 +0200 (CEST) Subject: [pypy-commit] stmgc finalizer: Fixes by self-review Message-ID: <20141006155201.B4C821C023E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: finalizer Changeset: r1465:c33f019f2ec4 Date: 2014-10-06 17:52 +0200 http://bitbucket.org/pypy/stmgc/changeset/c33f019f2ec4/ Log: Fixes by self-review diff --git a/c7/stm/finalizer.c b/c7/stm/finalizer.c --- a/c7/stm/finalizer.c +++ b/c7/stm/finalizer.c @@ -84,21 +84,30 @@ write_locks[lock_idx] = WL_FINALIZ_ORDER_1; } -static struct list_s *_finalizer_tmpstack; +static struct list_s *_finalizer_tmpstack, *_finalizer_emptystack; -static inline void append_to_finalizer_tmpstack(object_t **pobj) +static inline void _append_to_finalizer_tmpstack(object_t **pobj) { object_t *obj = *pobj; if (obj != NULL) LIST_APPEND(_finalizer_tmpstack, obj); } -static void _recursively_bump_finalization_state(object_t *obj, int from_state, - struct list_s *tmpstack) +static inline struct list_s *finalizer_trace(object_t *obj, struct list_s *lst) { + struct object_s *realobj = + (struct object_s *)REAL_ADDRESS(stm_object_pages, obj); + _finalizer_tmpstack = lst; + stmcb_trace(realobj, &_append_to_finalizer_tmpstack); + return _finalizer_tmpstack; +} + +static void _recursively_bump_finalization_state(object_t *obj, int from_state) +{ + struct list_s *tmpstack = _finalizer_emptystack; + assert(list_is_empty(tmpstack)); + assert(_finalization_state(obj) == from_state); - assert(list_is_empty(tmpstack)); - _finalizer_tmpstack = tmpstack; while (1) { if (_finalization_state(obj) == from_state) { @@ -106,9 +115,7 @@ write_locks[mark_loc(obj)]++; /* trace */ - struct object_s *realobj = - (struct object_s *)REAL_ADDRESS(stm_object_pages, obj); - stmcb_trace(realobj, &append_to_finalizer_tmpstack); + tmpstack = finalizer_trace(obj, tmpstack); } if (list_is_empty(tmpstack)) @@ -116,6 +123,7 @@ obj = (object_t *)list_pop_item(tmpstack); } + _finalizer_emptystack = tmpstack; } static void deal_with_objects_with_finalizers(void) @@ -129,7 +137,7 @@ struct list_s *new_with_finalizer = list_create(); struct list_s *marked = list_create(); struct list_s *pending = list_create(); - struct list_s *tmpstack = list_create(); + LIST_CREATE(_finalizer_emptystack); long j; for (j = 1; j <= NB_SEGMENTS; j++) { @@ -152,17 +160,13 @@ int state = _finalization_state(y); if (state <= 0) { _bump_finalization_state_from_0_to_1(y); - /* trace into the 'pending' list */ - struct object_s *realobj = - (struct object_s *)REAL_ADDRESS(stm_object_pages, y); - _finalizer_tmpstack = pending; - stmcb_trace(realobj, &append_to_finalizer_tmpstack); + pending = finalizer_trace(y, pending); } else if (state == 2) { - _recursively_bump_finalization_state(y, 2, tmpstack); + _recursively_bump_finalization_state(y, 2); } } - _recursively_bump_finalization_state(x, 1, tmpstack); + _recursively_bump_finalization_state(x, 1); } list_clear(lst); } @@ -175,14 +179,14 @@ assert(state >= 2); if (state == 2) { LIST_APPEND(run_finalizers, x); - _recursively_bump_finalization_state(x, 2, tmpstack); + _recursively_bump_finalization_state(x, 2); } else { LIST_APPEND(new_with_finalizer, x); } } - list_free(tmpstack); + LIST_FREE(_finalizer_emptystack); list_free(pending); list_free(marked); list_free(get_priv_segment(1)->objects_with_finalizers); diff --git 
a/c7/test/test_finalizer.py b/c7/test/test_finalizer.py --- a/c7/test/test_finalizer.py +++ b/c7/test/test_finalizer.py @@ -2,7 +2,7 @@ import py -class TestFinalizer(BaseTest): +class TestLightFinalizer(BaseTest): def setup_method(self, meth): BaseTest.setup_method(self, meth) From noreply at buildbot.pypy.org Mon Oct 6 18:23:36 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 6 Oct 2014 18:23:36 +0200 (CEST) Subject: [pypy-commit] stmgc finalizer: Write down some rules, and figured out that there is a problem with Message-ID: <20141006162336.7A9E21C0469@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: finalizer Changeset: r1466:d5a95166f741 Date: 2014-10-06 18:23 +0200 http://bitbucket.org/pypy/stmgc/changeset/d5a95166f741/ Log: Write down some rules, and figured out that there is a problem with light finalizers too diff --git a/c7/stm/finalizer.c b/c7/stm/finalizer.c --- a/c7/stm/finalizer.c +++ b/c7/stm/finalizer.c @@ -1,6 +1,7 @@ void (*stmcb_light_finalizer)(object_t *); +void (*stmcb_finalizer)(object_t *); void stm_enable_light_finalizer(object_t *obj) { @@ -10,6 +11,11 @@ LIST_APPEND(STM_PSEGMENT->old_objects_with_light_finalizers, obj); } +object_t *stm_allocate_with_finalizer(ssize_t size_rounded_up) +{ + abort(); // NOT IMPLEMENTED +} + static void deal_with_young_objects_with_finalizers(void) { /* for light finalizers */ @@ -47,7 +53,7 @@ object_t* obj = (object_t *)list_item(lst, i); if (!mark_visited_test(obj)) { /* not marked: object dies */ - /* we're calling the light finalizer is a random thread, + /* we're calling the light finalizer in a random thread, but it should work, because it was dead already at the start of that thread's transaction, so any thread should see the same, old content */ diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -511,6 +511,18 @@ void (*stmcb_light_finalizer)(object_t *); void stm_enable_light_finalizer(object_t *); +/* Support for regular finalizers. Unreachable objects with + finalizers are kept alive, as well as everything they point to, and + stmcb_finalizer() is called after the major GC. If there are + several objects with finalizers that reference each other in a + well-defined order (i.e. there are no cycles), then they are + finalized in order from outermost to innermost (i.e. starting with + the ones that are unreachable even from others). The finalizer is + called in a random thread, except for objects that have been + created by the current transaction in a thread; in that case, only + that thread can call the finalizer. 
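
For the hook being declared here, the tests added in this same changeset show the intended usage from the cffi test harness. A hedged sketch along those lines; it assumes the 'ffi' and 'lib' objects built by c7/test/support.py, and note that in this changeset stm_allocate_with_finalizer() itself is still a stub that aborts:

    from support import ffi, lib       # cffi wrapper built by c7/test/support.py

    finalized = []

    @ffi.callback("void(object_t *)")
    def my_finalizer(obj):
        finalized.append(obj)

    lib.stmcb_finalizer = my_finalizer # install the hook...
    _keepalive = my_finalizer          # ...and keep the callback object alive,
                                       # exactly as the tests do
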
*/ +void (*stmcb_finalizer)(object_t *); +object_t *stm_allocate_with_finalizer(ssize_t size_rounded_up); /* ==================== END ==================== */ diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -31,6 +31,7 @@ /*void stm_write(object_t *obj); use _checked_stm_write() instead */ object_t *stm_allocate(ssize_t size_rounded_up); object_t *stm_allocate_weakref(ssize_t size_rounded_up); +object_t *stm_allocate_with_finalizer(ssize_t size_rounded_up); object_t *_stm_allocate_old(ssize_t size_rounded_up); /*void stm_write_card(); use _checked_stm_write_card() instead */ diff --git a/c7/test/test_finalizer.py b/c7/test/test_finalizer.py --- a/c7/test/test_finalizer.py +++ b/c7/test/test_finalizer.py @@ -73,3 +73,32 @@ stm_major_collect() self.commit_transaction() self.expect_finalized([]) + + +class TestRegularFinalizer(BaseTest): + + def setup_method(self, meth): + BaseTest.setup_method(self, meth) + # + @ffi.callback("void(object_t *)") + def finalizer(obj): + self.finalizers_called.append(obj) + self.finalizers_called = [] + lib.stmcb_finalizer = finalizer + self._finalizer_keepalive = finalizer + + def expect_finalized(self, objs): + assert self.finalizers_called == objs + self.finalizers_called = [] + + def test_no_finalizer(self): + self.start_transaction() + lp1 = stm_allocate(48) + stm_major_collect() + self.expect_finalized([]) + + def test_no_finalizer_in_minor_collection(self): + self.start_transaction() + lp1 = stm_allocate_with_finalizer(48) + stm_minor_collect() + self.expect_finalized([]) From noreply at buildbot.pypy.org Mon Oct 6 18:53:02 2014 From: noreply at buildbot.pypy.org (kostialopuhin) Date: Mon, 6 Oct 2014 18:53:02 +0200 (CEST) Subject: [pypy-commit] jitviewer css_tweaks: fix fixed header positioning - always stick to the right Message-ID: <20141006165302.7CEC21D287D@cobra.cs.uni-duesseldorf.de> Author: Konstantin Lopuhin Branch: css_tweaks Changeset: r262:b72955d20598 Date: 2014-10-05 13:30 +0400 http://bitbucket.org/pypy/jitviewer/changeset/b72955d20598/ Log: fix fixed header positioning - always stick to the right diff --git a/_jitviewer/static/style.css b/_jitviewer/static/style.css --- a/_jitviewer/static/style.css +++ b/_jitviewer/static/style.css @@ -76,8 +76,8 @@ width: 360px; position: fixed; - top: 30px; - left: 920px; + top: 10px; + right: 10px; padding: 5px; border: 1px solid #cacaca; From noreply at buildbot.pypy.org Mon Oct 6 18:53:03 2014 From: noreply at buildbot.pypy.org (kostialopuhin) Date: Mon, 6 Oct 2014 18:53:03 +0200 (CEST) Subject: [pypy-commit] jitviewer css_tweaks: tweak fixed header padding a bit Message-ID: <20141006165303.9A64F1D287D@cobra.cs.uni-duesseldorf.de> Author: Konstantin Lopuhin Branch: css_tweaks Changeset: r263:2c7924df1337 Date: 2014-10-05 13:31 +0400 http://bitbucket.org/pypy/jitviewer/changeset/2c7924df1337/ Log: tweak fixed header padding a bit diff --git a/_jitviewer/static/style.css b/_jitviewer/static/style.css --- a/_jitviewer/static/style.css +++ b/_jitviewer/static/style.css @@ -235,6 +235,11 @@ .menu { background: #cccccc; text-color: red; + padding-left: 5px; +} +header>a, +header>div { + padding-left: 5px; } h1 { From noreply at buildbot.pypy.org Mon Oct 6 18:53:04 2014 From: noreply at buildbot.pypy.org (kostialopuhin) Date: Mon, 6 Oct 2014 18:53:04 +0200 (CEST) Subject: [pypy-commit] jitviewer css_tweaks: instead of huge top margin, add a little margin between top header and main area Message-ID: 
<20141006165304.998C11D287D@cobra.cs.uni-duesseldorf.de> Author: Konstantin Lopuhin Branch: css_tweaks Changeset: r264:ef5d93e0d0ff Date: 2014-10-05 13:32 +0400 http://bitbucket.org/pypy/jitviewer/changeset/ef5d93e0d0ff/ Log: instead of huge top margin, add a little margin between top header and main area diff --git a/_jitviewer/static/style.css b/_jitviewer/static/style.css --- a/_jitviewer/static/style.css +++ b/_jitviewer/static/style.css @@ -18,7 +18,7 @@ line-height: 22px; margin-left: 0px; - margin-top: 60px; + margin-top: 0px; } #single_loop { float: right; /*fijal, Po co ci to?*/ @@ -32,6 +32,7 @@ #filter { margin-left: 15px; + margin-top: 15px; } From noreply at buildbot.pypy.org Mon Oct 6 18:53:05 2014 From: noreply at buildbot.pypy.org (kostialopuhin) Date: Mon, 6 Oct 2014 18:53:05 +0200 (CEST) Subject: [pypy-commit] jitviewer css_tweaks: add favicon Message-ID: <20141006165305.999A71D287D@cobra.cs.uni-duesseldorf.de> Author: Konstantin Lopuhin Branch: css_tweaks Changeset: r265:774420e52ac4 Date: 2014-10-05 13:50 +0400 http://bitbucket.org/pypy/jitviewer/changeset/774420e52ac4/ Log: add favicon diff --git a/_jitviewer/static/favicon.ico b/_jitviewer/static/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..00cf381e7e01e4603affa3da5a27d99b17943bc0 GIT binary patch [cut] diff --git a/_jitviewer/templates/index.html b/_jitviewer/templates/index.html --- a/_jitviewer/templates/index.html +++ b/_jitviewer/templates/index.html @@ -1,6 +1,7 @@ PyPy JIT Viewer: {{ filename }} + {% if qt_workaround %} From noreply at buildbot.pypy.org Mon Oct 6 18:53:06 2014 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 6 Oct 2014 18:53:06 +0200 (CEST) Subject: [pypy-commit] jitviewer default: Merged in kostialopuhin/jitviewer/css_tweaks (pull request #9) Message-ID: <20141006165306.9DC201D287D@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r266:ec561fb900e0 Date: 2014-10-06 18:53 +0200 http://bitbucket.org/pypy/jitviewer/changeset/ec561fb900e0/ Log: Merged in kostialopuhin/jitviewer/css_tweaks (pull request #9) Little css tweaks diff --git a/_jitviewer/static/favicon.ico b/_jitviewer/static/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..00cf381e7e01e4603affa3da5a27d99b17943bc0 GIT binary patch [cut] diff --git a/_jitviewer/static/style.css b/_jitviewer/static/style.css --- a/_jitviewer/static/style.css +++ b/_jitviewer/static/style.css @@ -18,7 +18,7 @@ line-height: 22px; margin-left: 0px; - margin-top: 60px; + margin-top: 0px; } #single_loop { float: right; /*fijal, Po co ci to?*/ @@ -32,6 +32,7 @@ #filter { margin-left: 15px; + margin-top: 15px; } @@ -76,8 +77,8 @@ width: 360px; position: fixed; - top: 30px; - left: 920px; + top: 10px; + right: 10px; padding: 5px; border: 1px solid #cacaca; @@ -235,6 +236,11 @@ .menu { background: #cccccc; text-color: red; + padding-left: 5px; +} +header>a, +header>div { + padding-left: 5px; } h1 { diff --git a/_jitviewer/templates/index.html b/_jitviewer/templates/index.html --- a/_jitviewer/templates/index.html +++ b/_jitviewer/templates/index.html @@ -1,6 +1,7 @@ PyPy JIT Viewer: {{ filename }} + {% if qt_workaround %} From noreply at buildbot.pypy.org Mon Oct 6 20:35:07 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 6 Oct 2014 20:35:07 +0200 (CEST) Subject: [pypy-commit] pypy default: allow surrogates by default in narrow builds (windows) Message-ID: <20141006183507.394811C023E@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: 
r73829:b933cb080e90 Date: 2014-10-06 21:28 +0300 http://bitbucket.org/pypy/pypy/changeset/b933cb080e90/ Log: allow surrogates by default in narrow builds (windows) Note that pypy unicodeobject calls the utf8 functions with 'allow_surrogates=True'. diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -8,8 +8,10 @@ if rffi.sizeof(lltype.UniChar) == 4: MAXUNICODE = 0x10ffff + allow_surrogate_by_default = False else: MAXUNICODE = 0xffff + allow_surrogate_by_default = True BYTEORDER = sys.byteorder @@ -122,7 +124,7 @@ ] def str_decode_utf_8(s, size, errors, final=False, - errorhandler=None, allow_surrogates=False): + errorhandler=None, allow_surrogates=allow_surrogate_by_default): if errorhandler is None: errorhandler = default_unicode_error_decode result = UnicodeBuilder(size) @@ -304,7 +306,7 @@ result.append((chr((0x80 | (ch & 0x3f))))) def unicode_encode_utf_8(s, size, errors, errorhandler=None, - allow_surrogates=False): + allow_surrogates=allow_surrogate_by_default): if errorhandler is None: errorhandler = default_unicode_error_encode return unicode_encode_utf_8_impl(s, size, errors, errorhandler, From noreply at buildbot.pypy.org Mon Oct 6 22:48:49 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 6 Oct 2014 22:48:49 +0200 (CEST) Subject: [pypy-commit] buildbot default: Add "rebuy-de", a contributed buildslave for OS/X that seems to work better Message-ID: <20141006204849.5A9EE1C0548@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r920:0d8dc013bb65 Date: 2014-10-06 22:47 +0200 http://bitbucket.org/pypy/buildbot/changeset/0d8dc013bb65/ Log: Add "rebuy-de", a contributed buildslave for OS/X that seems to work better than xerxes. diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -172,7 +172,7 @@ JITLINUX32 = "pypy-c-jit-linux-x86-32" JITLINUX64 = "pypy-c-jit-linux-x86-64" JITMACOSX64 = "pypy-c-jit-macosx-x86-64" -JITMACOSX64_2 = "pypy-c-jit-macosx-x86-64-2" +#JITMACOSX64_2 = "pypy-c-jit-macosx-x86-64-2" JITWIN32 = "pypy-c-jit-win-x86-32" JITWIN64 = "pypy-c-jit-win-x86-64" JITFREEBSD764 = 'pypy-c-jit-freebsd-7-x86-64' @@ -264,7 +264,7 @@ JITLINUX32, JITLINUX64, JITMACOSX64, - JITMACOSX64_2, + #JITMACOSX64_2, JITWIN32, JITWIN64, JITFREEBSD764, @@ -373,17 +373,17 @@ "category": 'mac32' }, {"name" : JITMACOSX64, - "slavenames": ["xerxes", "tosh"], + "slavenames": ["rebuy-de", "xerxes", "tosh"], 'builddir' : JITMACOSX64, 'factory' : pypyJITTranslatedTestFactoryOSX64, 'category' : 'mac64', }, - {"name" : JITMACOSX64_2, - "slavenames": ["xerxes", "tosh"], - 'builddir' : JITMACOSX64_2, - 'factory' : pypyJITTranslatedTestFactoryOSX64, - 'category' : 'mac64', - }, + #{"name" : JITMACOSX64_2, + # "slavenames": ["rebuy-de", "xerxes", "tosh"], + # 'builddir' : JITMACOSX64_2, + # 'factory' : pypyJITTranslatedTestFactoryOSX64, + # 'category' : 'mac64', + # }, {"name": WIN32, "slavenames": ["SalsaSalsa", "allegro_win32"], "builddir": WIN32, From noreply at buildbot.pypy.org Mon Oct 6 23:44:03 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Mon, 6 Oct 2014 23:44:03 +0200 (CEST) Subject: [pypy-commit] pypy default: optimize quasi-immutable fields during tracing Message-ID: <20141006214403.9C2A81C023E@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r73830:63f7e728d745 Date: 2014-10-06 23:17 +0200 http://bitbucket.org/pypy/pypy/changeset/63f7e728d745/ Log: optimize quasi-immutable 
fields during tracing this reduces the warmup. still missing to do the same thing for quasi-immutable arrays. the commit removes the previous support for changing the quasi- immutable field during tracing (which was rather obscure anyway) diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -785,6 +785,8 @@ raise Exception("getfield_raw_r (without _pure) not supported") # if immut in (IR_QUASIIMMUTABLE, IR_QUASIIMMUTABLE_ARRAY): + if immut is IR_QUASIIMMUTABLE: + op1.opname += "_pure" descr1 = self.cpu.fielddescrof( v_inst.concretetype.TO, quasiimmut.get_mutate_field_name(c_fieldname.value)) diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -6,6 +6,7 @@ from rpython.jit.metainterp.jitexc import JitException from rpython.jit.metainterp.optimizeopt.optimizer import Optimization, MODE_ARRAY, LEVEL_KNOWNCLASS, REMOVED from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method +from rpython.jit.metainterp.optimize import InvalidLoop from rpython.jit.metainterp.resoperation import rop, ResOperation from rpython.rlib.objectmodel import we_are_translated @@ -544,12 +545,10 @@ qmutdescr = op.getdescr() assert isinstance(qmutdescr, QuasiImmutDescr) # check that the value is still correct; it could have changed - # already between the tracing and now. In this case, we are - # simply ignoring the QUASIIMMUT_FIELD hint and compiling it - # as a regular getfield. + # already between the tracing and now. In this case, we mark the loop + # as invalid if not qmutdescr.is_still_valid_for(structvalue.get_key_box()): - self._remove_guard_not_invalidated = True - return + raise InvalidLoop('quasi immutable field changed during tracing') # record as an out-of-line guard if self.optimizer.quasi_immutable_deps is None: self.optimizer.quasi_immutable_deps = {} diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -8253,13 +8253,7 @@ setfield_gc(p106, p108, descr=nextdescr) # inst_storage jump(p106) """ - expected = """ - [] - p72 = getfield_gc(ConstPtr(myptr2), descr=quasifielddescr) - guard_value(p72, -4247) [] - jump() - """ - self.optimize_loop(ops, expected) + self.raises(InvalidLoop, self.optimize_loop, ops, ops) def test_issue1080_infinitie_loop_simple(self): ops = """ @@ -8270,13 +8264,7 @@ guard_value(p71, -4247) [] jump(ConstPtr(myptr)) """ - expected = """ - [] - p72 = getfield_gc(ConstPtr(myptr), descr=quasifielddescr) - guard_value(p72, -4247) [] - jump() - """ - self.optimize_loop(ops, expected) + self.raises(InvalidLoop, self.optimize_loop, ops, ops) def test_only_strengthen_guard_if_class_matches(self): ops = """ diff --git a/rpython/jit/metainterp/test/test_quasiimmut.py b/rpython/jit/metainterp/test/test_quasiimmut.py --- a/rpython/jit/metainterp/test/test_quasiimmut.py +++ b/rpython/jit/metainterp/test/test_quasiimmut.py @@ -81,6 +81,27 @@ assert len(loop.quasi_immutable_deps) == 1 assert isinstance(loop.quasi_immutable_deps.keys()[0], QuasiImmut) + def test_simple_optimize_during_tracing(self): + myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) + class Foo: + _immutable_fields_ = 
['a?'] + def __init__(self, a): + self.a = a + def f(a, x): + foo = Foo(a) + total = 0 + while x > 0: + myjitdriver.jit_merge_point(foo=foo, x=x, total=total) + # read a quasi-immutable field out of a Constant + total += foo.a + x -= 1 + return total + # + res = self.meta_interp(f, [100, 7], enable_opts="") + assert res == 700 + # there should be no getfields, even though optimizations are turned off + self.check_resops(guard_not_invalidated=1, getfield_gc=0) + def test_nonopt_1(self): myjitdriver = JitDriver(greens=[], reds=['x', 'total', 'lst']) class Foo: @@ -102,7 +123,7 @@ assert f(100, 7) == 721 res = self.meta_interp(f, [100, 7]) assert res == 721 - self.check_resops(guard_not_invalidated=0, getfield_gc=3) + self.check_resops(guard_not_invalidated=0, getfield_gc=1, getfield_gc_pure=2) # from rpython.jit.metainterp.warmspot import get_stats loops = get_stats().loops @@ -154,11 +175,12 @@ residual_call(foo) x -= 1 return total - # + assert f(100, 7) == 721 res = self.meta_interp(f, [100, 7]) assert res == 721 - self.check_resops(guard_not_invalidated=0, getfield_gc=2) + # the loop is invalid, so nothing is traced + self.check_aborted_count(2) def test_change_during_tracing_2(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) @@ -184,7 +206,7 @@ assert f(100, 7) == 700 res = self.meta_interp(f, [100, 7]) assert res == 700 - self.check_resops(guard_not_invalidated=0, getfield_gc=2) + self.check_resops(guard_not_invalidated=0, getfield_gc=0) def test_change_invalidate_reentering(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) From noreply at buildbot.pypy.org Tue Oct 7 09:49:22 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Tue, 7 Oct 2014 09:49:22 +0200 (CEST) Subject: [pypy-commit] pypy default: fix jtransform test Message-ID: <20141007074922.D49591C0130@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r73831:1a1bcffa25e3 Date: 2014-10-07 09:48 +0200 http://bitbucket.org/pypy/pypy/changeset/1a1bcffa25e3/ Log: fix jtransform test diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -1296,7 +1296,7 @@ assert op1.args[1] == ('fielddescr', STRUCT, 'inst_x') assert op1.args[2] == ('fielddescr', STRUCT, 'mutate_x') assert op1.result is None - assert op2.opname == 'getfield_gc_i' + assert op2.opname == 'getfield_gc_i_pure' assert len(op2.args) == 2 assert op2.args[0] == v_x assert op2.args[1] == ('fielddescr', STRUCT, 'inst_x') From noreply at buildbot.pypy.org Tue Oct 7 09:58:34 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Tue, 7 Oct 2014 09:58:34 +0200 (CEST) Subject: [pypy-commit] pypy default: fix test_pypy_c Message-ID: <20141007075834.619D91C023E@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r73832:a65ca7b61625 Date: 2014-10-07 09:58 +0200 http://bitbucket.org/pypy/pypy/changeset/a65ca7b61625/ Log: fix test_pypy_c diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -85,9 +85,9 @@ p38 = call(ConstClass(_ll_0_threadlocalref_getter___), descr=) p39 = getfield_gc(p38, descr=) i40 = force_token() - p41 = getfield_gc(p38, descr=) + p41 = getfield_gc_pure(p38, descr=) guard_value(p41, ConstPtr(ptr42), descr=...) 
- i42 = getfield_gc(p38, descr=) + i42 = getfield_gc_pure(p38, descr=) i43 = int_is_zero(i42) guard_true(i43, descr=...) i50 = force_token() @@ -447,9 +447,9 @@ p29 = call(ConstClass(_ll_0_threadlocalref_getter___), descr=) p30 = getfield_gc(p29, descr=) p31 = force_token() - p32 = getfield_gc(p29, descr=) + p32 = getfield_gc_pure(p29, descr=) guard_value(p32, ConstPtr(ptr33), descr=...) - i34 = getfield_gc(p29, descr=) + i34 = getfield_gc_pure(p29, descr=) i35 = int_is_zero(i34) guard_true(i35, descr=...) p37 = getfield_gc(ConstPtr(ptr36), descr=) From noreply at buildbot.pypy.org Tue Oct 7 10:32:59 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 7 Oct 2014 10:32:59 +0200 (CEST) Subject: [pypy-commit] stmgc finalizer: Don't run a light finalizer in a segment where the object is not visible Message-ID: <20141007083259.A32391D28BF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: finalizer Changeset: r1467:45014eae5453 Date: 2014-10-07 10:33 +0200 http://bitbucket.org/pypy/stmgc/changeset/45014eae5453/ Log: Don't run a light finalizer in a segment where the object is not visible yet. diff --git a/c7/stm/finalizer.c b/c7/stm/finalizer.c --- a/c7/stm/finalizer.c +++ b/c7/stm/finalizer.c @@ -42,6 +42,8 @@ static void deal_with_old_objects_with_finalizers(void) { /* for light finalizers */ + int old_gs_register = STM_SEGMENT->segment_num; + int current_gs_register = old_gs_register; long j; for (j = 1; j <= NB_SEGMENTS; j++) { struct stm_priv_segment_info_s *pseg = get_priv_segment(j); @@ -53,10 +55,21 @@ object_t* obj = (object_t *)list_item(lst, i); if (!mark_visited_test(obj)) { /* not marked: object dies */ - /* we're calling the light finalizer in a random thread, - but it should work, because it was dead already at the - start of that thread's transaction, so any thread should - see the same, old content */ + /* we're calling the light finalizer in the same + segment as where it was originally registered. For + objects that existed since a long time, it doesn't + change anything: any thread should see the same old + content (because if it wasn't the case, the object + would be in a 'modified_old_objects' list + somewhere, and so it wouldn't be dead). But it's + important if the object was created by the same + transaction: then only that segment sees valid + content. + */ + if (j != current_gs_register) { + set_gs_register(get_segment_base(j)); + current_gs_register = j; + } stmcb_light_finalizer(obj); } else { @@ -65,6 +78,8 @@ } } } + if (old_gs_register != current_gs_register) + set_gs_register(get_segment_base(old_gs_register)); } diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -63,6 +63,7 @@ bool _check_become_inevitable(stm_thread_local_t *tl); bool _check_become_globally_unique_transaction(stm_thread_local_t *tl); int stm_is_inevitable(void); +long current_segment_num(void); void _set_type_id(object_t *obj, uint32_t h); uint32_t _get_type_id(object_t *obj); @@ -374,6 +375,11 @@ void stmcb_commit_soon() { } + +long current_segment_num(void) +{ + return STM_SEGMENT->segment_num; +} ''', sources=source_files, define_macros=[('STM_TESTS', '1'), ('STM_NO_AUTOMATIC_SETJMP', '1'), diff --git a/c7/test/test_finalizer.py b/c7/test/test_finalizer.py --- a/c7/test/test_finalizer.py +++ b/c7/test/test_finalizer.py @@ -9,13 +9,22 @@ # @ffi.callback("void(object_t *)") def light_finalizer(obj): - self.light_finalizers_called.append(obj) + segnum = lib.current_segment_num() + tlnum = '?' 
+ for n, tl in enumerate(self.tls): + if tl.associated_segment_num == segnum: + tlnum = n + break + self.light_finalizers_called.append((obj, tlnum)) self.light_finalizers_called = [] lib.stmcb_light_finalizer = light_finalizer self._light_finalizer_keepalive = light_finalizer - def expect_finalized(self, objs): - assert self.light_finalizers_called == objs + def expect_finalized(self, objs, from_tlnum=None): + assert [obj for (obj, tlnum) in self.light_finalizers_called] == objs + if from_tlnum is not None: + for obj, tlnum in self.light_finalizers_called: + assert tlnum == from_tlnum self.light_finalizers_called = [] def test_no_finalizer(self): @@ -30,7 +39,7 @@ lib.stm_enable_light_finalizer(lp1) self.expect_finalized([]) self.commit_transaction() - self.expect_finalized([lp1]) + self.expect_finalized([lp1], from_tlnum=0) def test_young_light_finalizer_survives(self): self.start_transaction() @@ -50,7 +59,7 @@ self.commit_transaction() self.expect_finalized([]) - def test_old_light_finalizer(self): + def test_old_light_finalizer_2(self): self.start_transaction() lp1 = stm_allocate(48) lib.stm_enable_light_finalizer(lp1) @@ -74,6 +83,22 @@ self.commit_transaction() self.expect_finalized([]) + def test_old_light_finalizer_segment(self): + self.start_transaction() + # + self.switch(1) + self.start_transaction() + lp1 = stm_allocate(48) + lib.stm_enable_light_finalizer(lp1) + self.push_root(lp1) + stm_minor_collect() + lp1 = self.pop_root() + # + self.switch(0) + self.expect_finalized([]) + stm_major_collect() + self.expect_finalized([lp1], from_tlnum=1) + class TestRegularFinalizer(BaseTest): From noreply at buildbot.pypy.org Tue Oct 7 10:45:33 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Tue, 7 Oct 2014 10:45:33 +0200 (CEST) Subject: [pypy-commit] pypy default: update comment and actually stop putting things into the heap cache (which is Message-ID: <20141007084533.E53B71C023E@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r73833:b9d7ef669611 Date: 2014-10-07 10:43 +0200 http://bitbucket.org/pypy/pypy/changeset/b9d7ef669611/ Log: update comment and actually stop putting things into the heap cache (which is unnecessary, now that we generate a getfield_gc_pure) adapt optimizeopt tests diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -532,10 +532,14 @@ def optimize_QUASIIMMUT_FIELD(self, op): # Pattern: QUASIIMMUT_FIELD(s, descr=QuasiImmutDescr) - # x = GETFIELD_GC(s, descr='inst_x') - # If 's' is a constant (after optimizations), then we make 's.inst_x' - # a constant too, and we rely on the rest of the optimizations to - # constant-fold the following getfield_gc. + # x = GETFIELD_GC_PURE(s, descr='inst_x') + # If 's' is a constant (after optimizations) we rely on the rest of the + # optimizations to constant-fold the following getfield_gc_pure. + # in addition, we record the dependency here to make invalidation work + # correctly. + # NB: emitting the GETFIELD_GC_PURE is only safe because the + # QUASIIMMUT_FIELD is also emitted to make sure the dependency is + # registered. 
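
To make the pattern in the new comment concrete, here it is in the trace notation of the optimizer tests further down; quasiimmutdescr and quasifielddescr are the tests' stand-in descrs, and -4247 is the placeholder value those tests store in the quasi-immutable field, so the exact residue shown is illustrative:

    # the operations produced by the front-end for a read through a Constant
    ops = """
    []
    quasiimmut_field(ConstPtr(quasiptr), descr=quasiimmutdescr)
    guard_not_invalidated() []
    i1 = getfield_gc_pure(ConstPtr(quasiptr), descr=quasifielddescr)
    escape(i1)
    jump()
    """
    # roughly what the optimizer leaves behind: the marker and the now-constant
    # read are gone, the guard stays and carries the recorded invalidation
    # dependency
    expected = """
    []
    guard_not_invalidated() []
    escape(-4247)
    jump()
    """

For a non-constant structure, by contrast, the marker and the guard are dropped and the getfield_gc_pure is simply left in place.
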
structvalue = self.getvalue(op.getarg(0)) if not structvalue.is_constant(): self._remove_guard_not_invalidated = True @@ -553,11 +557,6 @@ if self.optimizer.quasi_immutable_deps is None: self.optimizer.quasi_immutable_deps = {} self.optimizer.quasi_immutable_deps[qmutdescr.qmut] = None - # perform the replacement in the list of operations - fieldvalue = self.getvalue(qmutdescr.constantfieldbox) - cf = self.field_cache(qmutdescr.fielddescr) - cf.force_lazy_setfield(self) - cf.remember_field_value(structvalue, fieldvalue) self._remove_guard_not_invalidated = False def optimize_GUARD_NOT_INVALIDATED(self, op): diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5200,7 +5200,7 @@ [] quasiimmut_field(ConstPtr(quasiptr), descr=quasiimmutdescr) guard_not_invalidated() [] - i0 = getfield_gc(ConstPtr(quasiptr), descr=quasifielddescr) + i0 = getfield_gc_pure(ConstPtr(quasiptr), descr=quasifielddescr) i1 = call_pure(123, i0, descr=nonwritedescr) finish(i1) """ diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -6893,13 +6893,13 @@ [p0, p1, i0] quasiimmut_field(p0, descr=quasiimmutdescr) guard_not_invalidated() [] - i1 = getfield_gc(p0, descr=quasifielddescr) + i1 = getfield_gc_pure(p0, descr=quasifielddescr) escape(i1) jump(p1, p0, i1) """ expected = """ [p0, p1, i0] - i1 = getfield_gc(p0, descr=quasifielddescr) + i1 = getfield_gc_pure(p0, descr=quasifielddescr) escape(i1) jump(p1, p0, i1) """ @@ -6910,7 +6910,7 @@ [] quasiimmut_field(ConstPtr(quasiptr), descr=quasiimmutdescr) guard_not_invalidated() [] - i1 = getfield_gc(ConstPtr(quasiptr), descr=quasifielddescr) + i1 = getfield_gc_pure(ConstPtr(quasiptr), descr=quasifielddescr) escape(i1) jump() """ @@ -6962,11 +6962,11 @@ [i0a, i0b] quasiimmut_field(ConstPtr(quasiptr), descr=quasiimmutdescr) guard_not_invalidated() [] - i1 = getfield_gc(ConstPtr(quasiptr), descr=quasifielddescr) + i1 = getfield_gc_pure(ConstPtr(quasiptr), descr=quasifielddescr) call_may_force(i0b, descr=mayforcevirtdescr) quasiimmut_field(ConstPtr(quasiptr), descr=quasiimmutdescr) guard_not_invalidated() [] - i2 = getfield_gc(ConstPtr(quasiptr), descr=quasifielddescr) + i2 = getfield_gc_pure(ConstPtr(quasiptr), descr=quasifielddescr) i3 = escape(i1) i4 = escape(i2) jump(i3, i4) @@ -6989,11 +6989,11 @@ setfield_gc(p, 421, descr=quasifielddescr) quasiimmut_field(p, descr=quasiimmutdescr) guard_not_invalidated() [] - i1 = getfield_gc(p, descr=quasifielddescr) + i1 = getfield_gc_pure(p, descr=quasifielddescr) call_may_force(i0b, descr=mayforcevirtdescr) quasiimmut_field(p, descr=quasiimmutdescr) guard_not_invalidated() [] - i2 = getfield_gc(p, descr=quasifielddescr) + i2 = getfield_gc_pure(p, descr=quasifielddescr) i3 = escape(i1) i4 = escape(i2) jump(i3, i4) @@ -8242,7 +8242,7 @@ quasiimmut_field(p69, descr=quasiimmutdescr) guard_not_invalidated() [] - p71 = getfield_gc(p69, descr=quasifielddescr) # inst_code + p71 = getfield_gc_pure(p69, descr=quasifielddescr) # inst_code guard_value(p71, -4247) [] p106 = new_with_vtable(ConstClass(node_vtable)) @@ -8260,7 +8260,7 @@ [p69] quasiimmut_field(p69, descr=quasiimmutdescr) guard_not_invalidated() [] - 
p71 = getfield_gc(p69, descr=quasifielddescr) # inst_code + p71 = getfield_gc_pure(p69, descr=quasifielddescr) # inst_code guard_value(p71, -4247) [] jump(ConstPtr(myptr)) """ From noreply at buildbot.pypy.org Tue Oct 7 10:45:35 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Tue, 7 Oct 2014 10:45:35 +0200 (CEST) Subject: [pypy-commit] pypy default: optimize quasi-immutable lists?[*] in the same way Message-ID: <20141007084535.15D091C023E@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r73834:d888c64eab67 Date: 2014-10-07 10:25 +0200 http://bitbucket.org/pypy/pypy/changeset/d888c64eab67/ Log: optimize quasi-immutable lists?[*] in the same way diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -785,8 +785,7 @@ raise Exception("getfield_raw_r (without _pure) not supported") # if immut in (IR_QUASIIMMUTABLE, IR_QUASIIMMUTABLE_ARRAY): - if immut is IR_QUASIIMMUTABLE: - op1.opname += "_pure" + op1.opname += "_pure" descr1 = self.cpu.fielddescrof( v_inst.concretetype.TO, quasiimmut.get_mutate_field_name(c_fieldname.value)) diff --git a/rpython/jit/metainterp/test/test_quasiimmut.py b/rpython/jit/metainterp/test/test_quasiimmut.py --- a/rpython/jit/metainterp/test/test_quasiimmut.py +++ b/rpython/jit/metainterp/test/test_quasiimmut.py @@ -369,7 +369,7 @@ res = self.meta_interp(f, [100, 7]) assert res == 700 self.check_resops(getarrayitem_gc_pure=0, guard_not_invalidated=2, - getarrayitem_gc=0, getfield_gc=0) + getarrayitem_gc=0, getfield_gc=0, getfield_gc_pure=0) # from rpython.jit.metainterp.warmspot import get_stats loops = get_stats().loops @@ -377,6 +377,30 @@ assert len(loop.quasi_immutable_deps) == 1 assert isinstance(loop.quasi_immutable_deps.keys()[0], QuasiImmut) + def test_list_optimized_while_tracing(self): + myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) + class Foo: + _immutable_fields_ = ['lst?[*]'] + def __init__(self, lst): + self.lst = lst + def f(a, x): + lst1 = [0, 0] + lst1[1] = a + foo = Foo(lst1) + total = 0 + while x > 0: + myjitdriver.jit_merge_point(foo=foo, x=x, total=total) + # read a quasi-immutable field out of a Constant + total += foo.lst[1] + x -= 1 + return total + # + res = self.meta_interp(f, [100, 7], enable_opts="") + assert res == 700 + # operations must have been removed by the frontend + self.check_resops(getarrayitem_gc_pure=0, guard_not_invalidated=1, + getarrayitem_gc=0, getfield_gc=0, getfield_gc_pure=0) + def test_list_length_1(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) class Foo: From noreply at buildbot.pypy.org Tue Oct 7 11:10:17 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 7 Oct 2014 11:10:17 +0200 (CEST) Subject: [pypy-commit] stmgc finalizer: in-progress Message-ID: <20141007091017.CBDDC1D27E4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: finalizer Changeset: r1468:bc9a0f2e120d Date: 2014-10-07 11:10 +0200 http://bitbucket.org/pypy/stmgc/changeset/bc9a0f2e120d/ Log: in-progress diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -203,6 +203,7 @@ struct list_s *young_objects_with_light_finalizers; struct list_s *old_objects_with_light_finalizers; struct list_s *objects_with_finalizers; + struct list_s *run_finalizers; }; enum /* safe_point */ { diff --git a/c7/stm/finalizer.c b/c7/stm/finalizer.c --- a/c7/stm/finalizer.c +++ b/c7/stm/finalizer.c @@ -123,15 +123,13 @@ return 
_finalizer_tmpstack; } -static void _recursively_bump_finalization_state(object_t *obj, int from_state) +static void _recursively_bump_finalization_state(object_t *obj, int to_state) { struct list_s *tmpstack = _finalizer_emptystack; assert(list_is_empty(tmpstack)); - assert(_finalization_state(obj) == from_state); - while (1) { - if (_finalization_state(obj) == from_state) { + if (_finalization_state(obj) == to_state - 1) { /* bump to the next state */ write_locks[mark_loc(obj)]++; @@ -151,27 +149,35 @@ { /* for non-light finalizers */ - /* there is one 'objects_with_finalizers' list per segment, but it - doesn't really matter: all objects are considered equal, and if - they survive, they are added again into one list that is attached - at the end to an arbitrary segment. */ - struct list_s *new_with_finalizer = list_create(); - struct list_s *marked = list_create(); - struct list_s *pending = list_create(); + /* there is one 'objects_with_finalizers' list per segment. + Objects that survives remain the their original segment's list. + For objects that existed since a long time, it doesn't change + anything: any thread, through any segment, should see the same + old content. (If the content was different between segments, + the object would be in a 'modified_old_objects' list somewhere, + and so it wouldn't be dead). But it's important if the object + was created by the same transaction: then only that segment + sees valid content. + */ + struct list_s *marked_seg[NB_SEGMENTS]; + struct list_s *pending; LIST_CREATE(_finalizer_emptystack); + LIST_CREATE(pending); long j; for (j = 1; j <= NB_SEGMENTS; j++) { struct stm_priv_segment_info_s *pseg = get_priv_segment(j); + struct list_s *marked = list_create(); struct list_s *lst = pseg->objects_with_finalizers; long i, count = list_count(lst); + lst->count = 0; for (i = 0; i < count; i++) { object_t *x = (object_t *)list_item(lst, i); assert(_finalization_state(x) != 1); if (_finalization_state(x) >= 2) { - LIST_APPEND(new_with_finalizer, x); + list_set_item(lst, lst->count++, (uintptr_t)x); continue; } LIST_APPEND(marked, x); @@ -184,32 +190,39 @@ pending = finalizer_trace(y, pending); } else if (state == 2) { - _recursively_bump_finalization_state(y, 2); + _recursively_bump_finalization_state(y, 3); } } - _recursively_bump_finalization_state(x, 1); + assert(_finalization_state(x) == 1); + _recursively_bump_finalization_state(x, 2); } - list_clear(lst); + + marked_seg[j - 1] = marked; } - long i, count = list_count(marked); - for (i = 0; i < count; i++) { - object_t *x = (object_t *)list_item(marked, i); + LIST_FREE(pending); - int state = _finalization_state(x); - assert(state >= 2); - if (state == 2) { - LIST_APPEND(run_finalizers, x); - _recursively_bump_finalization_state(x, 2); + for (j = 1; j <= NB_SEGMENTS; j++) { + struct stm_priv_segment_info_s *pseg = get_priv_segment(j); + struct list_s *lst = pseg->objects_with_finalizers; + struct list_s *marked = marked_seg[j - 1]; + + long i, count = list_count(marked); + for (i = 0; i < count; i++) { + object_t *x = (object_t *)list_item(marked, i); + + int state = _finalization_state(x); + assert(state >= 2); + if (state == 2) { + LIST_APPEND(pseg->run_finalizers, x); + _recursively_bump_finalization_state(x, 3); + } + else { + list_set_item(lst, lst->count++, (uintptr_t)x); + } } - else { - LIST_APPEND(new_with_finalizer, x); - } + list_free(marked); } LIST_FREE(_finalizer_emptystack); - list_free(pending); - list_free(marked); - list_free(get_priv_segment(1)->objects_with_finalizers); - 
get_priv_segment(1)->objects_with_finalizers = new_with_finalizer; } diff --git a/c7/stm/finalizer.h b/c7/stm/finalizer.h --- a/c7/stm/finalizer.h +++ b/c7/stm/finalizer.h @@ -2,5 +2,3 @@ static void deal_with_young_objects_with_finalizers(void); static void deal_with_old_objects_with_finalizers(void); static void deal_with_objects_with_finalizers(void); - -static struct list_s *run_finalizers; diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -131,11 +131,11 @@ pr->young_objects_with_light_finalizers = list_create(); pr->old_objects_with_light_finalizers = list_create(); pr->objects_with_finalizers = list_create(); + pr->run_finalizers = list_create(); pr->overflow_number = GCFLAG_OVERFLOW_NUMBER_bit0 * i; highest_overflow_number = pr->overflow_number; pr->pub.transaction_read_version = 0xff; } - run_finalizers = list_create(); /* The pages are shared lazily, as remap_file_pages() takes a relatively long time for each page. @@ -176,8 +176,8 @@ list_free(pr->young_objects_with_light_finalizers); list_free(pr->old_objects_with_light_finalizers); list_free(pr->objects_with_finalizers); + list_free(pr->run_finalizers); } - list_free(run_finalizers); munmap(stm_object_pages, TOTAL_MEMORY); stm_object_pages = NULL; From noreply at buildbot.pypy.org Tue Oct 7 20:18:04 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 7 Oct 2014 20:18:04 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <20141007181804.668491C023E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r546:0175b0971d89 Date: 2014-10-07 20:18 +0200 http://bitbucket.org/pypy/pypy.org/changeset/0175b0971d89/ Log: update the values diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -9,7 +9,7 @@ @@ -17,7 +17,7 @@ 2nd call: - $19037 of $80000 (23.8%) + $19237 of $80000 (24.0%)
    From noreply at buildbot.pypy.org Tue Oct 7 21:31:06 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 7 Oct 2014 21:31:06 +0200 (CEST) Subject: [pypy-commit] pypy default: these getfield_gc show up with _pure now Message-ID: <20141007193106.8E1DD1C023E@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73835:e57358cce5ad Date: 2014-10-07 15:30 -0400 http://bitbucket.org/pypy/pypy/changeset/e57358cce5ad/ Log: these getfield_gc show up with _pure now diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -599,8 +599,7 @@ 'float_mul': 2, 'getarrayitem_gc': 7, 'getarrayitem_gc_pure': 15, - 'getfield_gc': 8, - 'getfield_gc_pure': 44, + 'getfield_gc_pure': 52, 'guard_class': 4, 'guard_false': 14, 'guard_not_invalidated': 2, From noreply at buildbot.pypy.org Wed Oct 8 00:47:22 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 8 Oct 2014 00:47:22 +0200 (CEST) Subject: [pypy-commit] pypy default: fix validation of shape in empty/zeros Message-ID: <20141007224722.B4CE21C10DD@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73836:d3789bd3b3ed Date: 2014-10-07 18:47 -0400 http://bitbucket.org/pypy/pypy/changeset/d3789bd3b3ed/ Log: fix validation of shape in empty/zeros diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -3,7 +3,7 @@ from rpython.rlib.buffer import SubBuffer from rpython.rlib.rstring import strip_spaces from rpython.rtyper.lltypesystem import lltype, rffi -from pypy.module.micronumpy import descriptor, loop +from pypy.module.micronumpy import descriptor, loop, support from pypy.module.micronumpy.base import ( W_NDimArray, convert_to_array, W_NumpyObject) from pypy.module.micronumpy.converters import shape_converter @@ -134,6 +134,15 @@ if dtype.is_str_or_unicode() and dtype.elsize < 1: dtype = descriptor.variable_dtype(space, dtype.char + '1') shape = shape_converter(space, w_shape, dtype) + for dim in shape: + if dim < 0: + raise OperationError(space.w_ValueError, space.wrap( + "negative dimensions are not allowed")) + try: + support.product(shape) + except OverflowError: + raise OperationError(space.w_ValueError, space.wrap( + "array is too big.")) return W_NDimArray.from_shape(space, shape, dtype=dtype, zero=zero) def empty(space, w_shape, w_dtype=None, w_order=None): diff --git a/pypy/module/micronumpy/support.py b/pypy/module/micronumpy/support.py --- a/pypy/module/micronumpy/support.py +++ b/pypy/module/micronumpy/support.py @@ -1,5 +1,6 @@ from pypy.interpreter.error import OperationError, oefmt from rpython.rlib import jit +from rpython.rlib.rarithmetic import ovfcheck def issequence_w(space, w_obj): @@ -23,7 +24,7 @@ def product(s): i = 1 for x in s: - i *= x + i = ovfcheck(i * x) return i diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -384,6 +384,19 @@ assert zeros((), dtype='S').shape == () assert zeros((), dtype='S').dtype == '|S1' + def test_check_shape(self): + import numpy as np + for func in [np.zeros, np.empty]: + exc = raises(ValueError, func, [0, -1, 1], 'int8') + assert str(exc.value) == "negative dimensions are not allowed" + exc = raises(ValueError, func, [2, -1, 3], 'int8') + assert str(exc.value) 
== "negative dimensions are not allowed" + + exc = raises(ValueError, func, [975]*7, 'int8') + assert str(exc.value) == "array is too big." + exc = raises(ValueError, func, [26244]*5, 'int8') + assert str(exc.value) == "array is too big." + def test_empty_like(self): import numpy as np a = np.empty_like(np.zeros(())) From noreply at buildbot.pypy.org Wed Oct 8 02:14:36 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 8 Oct 2014 02:14:36 +0200 (CEST) Subject: [pypy-commit] pypy var-in-Some: simplify code Message-ID: <20141008001436.2AF751C023E@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: var-in-Some Changeset: r73837:13bb925941fe Date: 2014-10-06 17:54 +0100 http://bitbucket.org/pypy/pypy/changeset/13bb925941fe/ Log: simplify code diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -13,7 +13,7 @@ SomeLongFloat, SomeType, SomeConstantType, unionof, UnionError, read_can_only_throw, add_knowntypedata, merge_knowntypedata,) -from rpython.annotator.bookkeeper import getbookkeeper, immutablevalue +from rpython.annotator.bookkeeper import immutablevalue from rpython.flowspace.model import Variable, Constant from rpython.flowspace.operation import op from rpython.rlib import rarithmetic @@ -37,18 +37,21 @@ if s_obj1.const is None and not s_obj2.can_be_none(): r.const = False knowntypedata = {} + bk = annotator.bookkeeper def bind(src_obj, tgt_obj): - if hasattr(annotator.annotation(tgt_obj), 'is_type_of') and annotator.annotation(src_obj).is_constant(): + s_src = annotator.annotation(src_obj) + s_tgt = annotator.annotation(tgt_obj) + if hasattr(s_tgt, 'is_type_of') and s_src.is_constant(): add_knowntypedata( knowntypedata, True, - annotator.annotation(tgt_obj).is_type_of, - getbookkeeper().valueoftype(annotator.annotation(src_obj).const)) - add_knowntypedata(knowntypedata, True, [tgt_obj], annotator.annotation(src_obj)) - s_nonnone = annotator.annotation(tgt_obj) - if (annotator.annotation(src_obj).is_constant() and annotator.annotation(src_obj).const is None and - annotator.annotation(tgt_obj).can_be_none()): - s_nonnone = annotator.annotation(tgt_obj).nonnoneify() + s_tgt.is_type_of, + bk.valueoftype(s_src.const)) + add_knowntypedata(knowntypedata, True, [tgt_obj], s_src) + s_nonnone = s_tgt + if (s_src.is_constant() and s_src.const is None and + s_tgt.can_be_none()): + s_nonnone = s_tgt.nonnoneify() add_knowntypedata(knowntypedata, False, [tgt_obj], s_nonnone) bind(obj2, obj1) @@ -717,9 +720,11 @@ def is__PBC_PBC(annotator, pbc1, pbc2): s = is__default(annotator, pbc1, pbc2) if not s.is_constant(): - if not annotator.annotation(pbc1).can_be_None or not annotator.annotation(pbc2).can_be_None: - for desc in annotator.annotation(pbc1).descriptions: - if desc in annotator.annotation(pbc2).descriptions: + s_pbc1 = annotator.annotation(pbc1) + s_pbc2 = annotator.annotation(pbc2) + if not s_pbc1.can_be_None or not s_pbc2.can_be_None: + for desc in s_pbc1.descriptions: + if desc in s_pbc2.descriptions: break else: s.const = False # no common desc in the two sets From noreply at buildbot.pypy.org Wed Oct 8 05:07:50 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 8 Oct 2014 05:07:50 +0200 (CEST) Subject: [pypy-commit] pypy rtyper-stuff: fix Message-ID: <20141008030750.6E9501C0F1D@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: rtyper-stuff Changeset: r73838:7a0aa5c59924 Date: 2014-10-08 04:06 +0100 http://bitbucket.org/pypy/pypy/changeset/7a0aa5c59924/ Log: fix diff 
--git a/rpython/rlib/_stacklet_n_a.py b/rpython/rlib/_stacklet_n_a.py --- a/rpython/rlib/_stacklet_n_a.py +++ b/rpython/rlib/_stacklet_n_a.py @@ -1,16 +1,17 @@ from rpython.rlib import _rffi_stacklet as _c -from rpython.rlib import objectmodel, debug +from rpython.rlib import debug +from rpython.rlib.objectmodel import we_are_translated, specialize from rpython.rtyper.annlowlevel import llhelper class StackletGcRootFinder(object): @staticmethod + @specialize.arg(1) def new(thrd, callback, arg): h = _c.new(thrd._thrd, llhelper(_c.run_fn, callback), arg) if not h: raise MemoryError return h - new._annspecialcase_ = 'specialize:arg(1)' @staticmethod def switch(h): @@ -22,7 +23,7 @@ @staticmethod def destroy(thrd, h): _c.destroy(thrd._thrd, h) - if objectmodel.we_are_translated(): + if we_are_translated(): debug.debug_print("not using a framework GC: " "stacklet_destroy() may leak") diff --git a/rpython/rlib/_stacklet_shadowstack.py b/rpython/rlib/_stacklet_shadowstack.py --- a/rpython/rlib/_stacklet_shadowstack.py +++ b/rpython/rlib/_stacklet_shadowstack.py @@ -68,7 +68,6 @@ class StackletGcRootFinder(object): - @staticmethod def new(thrd, callback, arg): gcrootfinder.callback = callback thread_handle = thrd._thrd @@ -76,8 +75,8 @@ h = _c.new(thread_handle, llhelper(_c.run_fn, _new_callback), arg) return get_result_suspstack(h) new._dont_inline_ = True + new = staticmethod(new) - @staticmethod def switch(suspstack): # suspstack has a handle to target, i.e. where to switch to ll_assert(suspstack != gcrootfinder.oldsuspstack, @@ -89,6 +88,7 @@ h = _c.switch(h) return get_result_suspstack(h) switch._dont_inline_ = True + switch = staticmethod(switch) @staticmethod def is_empty_handle(suspstack): From noreply at buildbot.pypy.org Wed Oct 8 12:08:55 2014 From: noreply at buildbot.pypy.org (mjacob) Date: Wed, 8 Oct 2014 12:08:55 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Update arrival date. Message-ID: <20141008100855.E80131C10DD@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: extradoc Changeset: r5428:a6eb87751251 Date: 2014-10-08 12:10 +0200 http://bitbucket.org/pypy/extradoc/changeset/a6eb87751251/ Log: Update arrival date. diff --git a/sprintinfo/warsaw-2014/people.txt b/sprintinfo/warsaw-2014/people.txt --- a/sprintinfo/warsaw-2014/people.txt +++ b/sprintinfo/warsaw-2014/people.txt @@ -12,7 +12,7 @@ Armin Rigo 20/10-28/10 with fijal Maciej Fijalkowski 20/10-30/10 private Romain Guillebert 19/10-26-10 ibis Reduta with mjacob -Manuel Jacob 20/10-26/10 ibis Reduta with rguillebert +Manuel Jacob 19/10-26/10 ibis Reduta with rguillebert Kostia Lopuhin Antonio Cuni 20/10-26/10 ibis Reduta http://www.ibis.com/gb/hotel-7125-ibis-warszawa-reduta/index.shtml Matti Picus 20/10-20/10 just a long layover between flights From noreply at buildbot.pypy.org Wed Oct 8 17:50:40 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 8 Oct 2014 17:50:40 +0200 (CEST) Subject: [pypy-commit] pypy default: hg merge rtyper-stuff Message-ID: <20141008155040.707C51C3474@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r73839:ecea026eb7e3 Date: 2014-10-08 16:49 +0100 http://bitbucket.org/pypy/pypy/changeset/ecea026eb7e3/ Log: hg merge rtyper-stuff diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -12,3 +12,6 @@ Allocate by 4-byte chunks in rffi_platform, Skip testing objdump if it does not exist, and other small adjustments in own tests + +.. 
branch: rtyper-stuff +Small internal refactorings in the rtyper. diff --git a/rpython/rlib/_stacklet_n_a.py b/rpython/rlib/_stacklet_n_a.py --- a/rpython/rlib/_stacklet_n_a.py +++ b/rpython/rlib/_stacklet_n_a.py @@ -1,33 +1,35 @@ from rpython.rlib import _rffi_stacklet as _c -from rpython.rlib import objectmodel, debug +from rpython.rlib import debug +from rpython.rlib.objectmodel import we_are_translated, specialize from rpython.rtyper.annlowlevel import llhelper -from rpython.tool.staticmethods import StaticMethods -class StackletGcRootFinder: - __metaclass__ = StaticMethods - +class StackletGcRootFinder(object): + @staticmethod + @specialize.arg(1) def new(thrd, callback, arg): h = _c.new(thrd._thrd, llhelper(_c.run_fn, callback), arg) if not h: raise MemoryError return h - new._annspecialcase_ = 'specialize:arg(1)' + @staticmethod def switch(h): h = _c.switch(h) if not h: raise MemoryError return h + @staticmethod def destroy(thrd, h): _c.destroy(thrd._thrd, h) - if objectmodel.we_are_translated(): + if we_are_translated(): debug.debug_print("not using a framework GC: " "stacklet_destroy() may leak") - is_empty_handle = _c.is_empty_handle + is_empty_handle = staticmethod(_c.is_empty_handle) + @staticmethod def get_null_handle(): return _c.null_handle diff --git a/rpython/rlib/_stacklet_shadowstack.py b/rpython/rlib/_stacklet_shadowstack.py --- a/rpython/rlib/_stacklet_shadowstack.py +++ b/rpython/rlib/_stacklet_shadowstack.py @@ -3,7 +3,6 @@ from rpython.rtyper.annlowlevel import llhelper from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.rtyper.lltypesystem.lloperation import llop -from rpython.tool.staticmethods import StaticMethods NULL_SUSPSTACK = lltype.nullptr(llmemory.GCREF.TO) @@ -68,9 +67,7 @@ return oldsuspstack -class StackletGcRootFinder: - __metaclass__ = StaticMethods - +class StackletGcRootFinder(object): def new(thrd, callback, arg): gcrootfinder.callback = callback thread_handle = thrd._thrd @@ -78,6 +75,7 @@ h = _c.new(thread_handle, llhelper(_c.run_fn, _new_callback), arg) return get_result_suspstack(h) new._dont_inline_ = True + new = staticmethod(new) def switch(suspstack): # suspstack has a handle to target, i.e. 
where to switch to @@ -90,10 +88,13 @@ h = _c.switch(h) return get_result_suspstack(h) switch._dont_inline_ = True + switch = staticmethod(switch) + @staticmethod def is_empty_handle(suspstack): return not suspstack + @staticmethod def get_null_handle(): return NULL_SUSPSTACK diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py --- a/rpython/rtyper/lltypesystem/lltype.py +++ b/rpython/rtyper/lltypesystem/lltype.py @@ -191,6 +191,11 @@ def _is_varsize(self): return False + def _contains_value(self, value): + if self is Void: + return True + return isCompatibleType(typeOf(value), self) + NFOUND = object() class ContainerType(LowLevelType): diff --git a/rpython/rtyper/lltypesystem/rbytearray.py b/rpython/rtyper/lltypesystem/rbytearray.py --- a/rpython/rtyper/lltypesystem/rbytearray.py +++ b/rpython/rtyper/lltypesystem/rbytearray.py @@ -38,12 +38,14 @@ i += s.length() cls.ll_strsetitem_nonneg(s, i, item) + @staticmethod def ll_strsetitem_nonneg(s, i, item): chars = s.chars ll_assert(i >= 0, "negative str getitem index") ll_assert(i < len(chars), "str getitem index out of bound") chars[i] = chr(item) + @staticmethod def ll_stritem_nonneg(s, i): return ord(rstr.LLHelpers.ll_stritem_nonneg(s, i)) diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -270,6 +270,7 @@ class LLHelpers(AbstractLLHelpers): from rpython.rtyper.annlowlevel import llstr, llunicode + @staticmethod @jit.elidable def ll_str_mul(s, times): if times < 0: @@ -292,6 +293,7 @@ i += j return newstr + @staticmethod @jit.elidable def ll_char_mul(ch, times): if typeOf(ch) is Char: @@ -308,9 +310,11 @@ j += 1 return newstr + @staticmethod def ll_strlen(s): return len(s.chars) + @staticmethod @signature(types.any(), types.int(), returns=types.any()) def ll_stritem_nonneg(s, i): chars = s.chars @@ -318,6 +322,7 @@ ll_assert(i < len(chars), "str getitem index out of bound") return chars[i] + @staticmethod def ll_chr2str(ch): if typeOf(ch) is Char: malloc = mallocstr @@ -328,6 +333,7 @@ return s # @jit.look_inside_iff(lambda str: jit.isconstant(len(str.chars)) and len(str.chars) == 1) + @staticmethod @jit.oopspec("str.str2unicode(str)") def ll_str2unicode(str): lgt = len(str.chars) @@ -338,6 +344,7 @@ s.chars[i] = cast_primitive(UniChar, str.chars[i]) return s + @staticmethod def ll_str2bytearray(str): from rpython.rtyper.lltypesystem.rbytearray import BYTEARRAY @@ -347,6 +354,7 @@ b.chars[i] = str.chars[i] return b + @staticmethod @jit.elidable def ll_strhash(s): # unlike CPython, there is no reason to avoid to return -1 @@ -362,13 +370,17 @@ s.hash = x return x + @staticmethod def ll_length(s): return len(s.chars) + @staticmethod def ll_strfasthash(s): return s.hash # assumes that the hash is already computed + @staticmethod @jit.elidable + @jit.oopspec('stroruni.concat(s1, s2)') def ll_strconcat(s1, s2): len1 = s1.length() len2 = s2.length() @@ -386,8 +398,8 @@ else: newstr.copy_contents(s2, newstr, 0, len1, len2) return newstr - ll_strconcat.oopspec = 'stroruni.concat(s1, s2)' + @staticmethod @jit.elidable def ll_strip(s, ch, left, right): s_len = len(s.chars) @@ -408,6 +420,7 @@ s.copy_contents(s, result, lpos, 0, r_len) return result + @staticmethod @jit.elidable def ll_strip_default(s, left, right): s_len = len(s.chars) @@ -428,6 +441,7 @@ s.copy_contents(s, result, lpos, 0, r_len) return result + @staticmethod @jit.elidable def ll_strip_multiple(s, s2, left, right): s_len = 
len(s.chars) @@ -448,6 +462,7 @@ s.copy_contents(s, result, lpos, 0, r_len) return result + @staticmethod @jit.elidable def ll_upper(s): s_chars = s.chars @@ -462,6 +477,7 @@ i += 1 return result + @staticmethod @jit.elidable def ll_lower(s): s_chars = s.chars @@ -476,6 +492,7 @@ i += 1 return result + @staticmethod def ll_join(s, length, items): s_chars = s.chars s_len = len(s_chars) @@ -509,7 +526,9 @@ i += 1 return result + @staticmethod @jit.elidable + @jit.oopspec('stroruni.cmp(s1, s2)') def ll_strcmp(s1, s2): if not s1 and not s2: return True @@ -531,9 +550,10 @@ return diff i += 1 return len1 - len2 - ll_strcmp.oopspec = 'stroruni.cmp(s1, s2)' + @staticmethod @jit.elidable + @jit.oopspec('stroruni.equal(s1, s2)') def ll_streq(s1, s2): if s1 == s2: # also if both are NULLs return True @@ -551,8 +571,8 @@ return False j += 1 return True - ll_streq.oopspec = 'stroruni.equal(s1, s2)' + @staticmethod @jit.elidable def ll_startswith(s1, s2): len1 = len(s1.chars) @@ -569,11 +589,13 @@ return True + @staticmethod def ll_startswith_char(s, ch): if not len(s.chars): return False return s.chars[0] == ch + @staticmethod @jit.elidable def ll_endswith(s1, s2): len1 = len(s1.chars) @@ -591,11 +613,13 @@ return True + @staticmethod def ll_endswith_char(s, ch): if not len(s.chars): return False return s.chars[len(s.chars) - 1] == ch + @staticmethod @jit.elidable @signature(types.any(), types.any(), types.int(), types.int(), returns=types.int()) def ll_find_char(s, ch, start, end): @@ -608,6 +632,7 @@ i += 1 return -1 + @staticmethod @jit.elidable def ll_rfind_char(s, ch, start, end): if end > len(s.chars): @@ -619,6 +644,7 @@ return i return -1 + @staticmethod @jit.elidable def ll_count_char(s, ch, start, end): count = 0 @@ -631,6 +657,7 @@ i += 1 return count + @staticmethod @signature(types.any(), types.any(), types.int(), types.int(), returns=types.int()) def ll_find(s1, s2, start, end): if start < 0: @@ -646,6 +673,7 @@ return LLHelpers.ll_search(s1, s2, start, end, FAST_FIND) + @staticmethod @signature(types.any(), types.any(), types.int(), types.int(), returns=types.int()) def ll_rfind(s1, s2, start, end): if start < 0: @@ -681,6 +709,7 @@ res = 0 return res + @staticmethod @jit.elidable def ll_search(s1, s2, start, end, mode): count = 0 @@ -768,6 +797,7 @@ return -1 return count + @staticmethod @signature(types.int(), types.any(), returns=types.any()) @jit.look_inside_iff(lambda length, items: jit.loop_unrolling_heuristic( items, length)) @@ -802,6 +832,7 @@ i += 1 return result + @staticmethod @jit.look_inside_iff(lambda length, chars, RES: jit.isconstant(length) and jit.isvirtual(chars)) def ll_join_chars(length, chars, RES): # no need to optimize this, will be replaced by string builder @@ -821,6 +852,7 @@ i += 1 return result + @staticmethod @jit.oopspec('stroruni.slice(s1, start, stop)') @signature(types.any(), types.int(), types.int(), returns=types.any()) @jit.elidable @@ -836,9 +868,11 @@ s1.copy_contents(s1, newstr, start, 0, lgt) return newstr + @staticmethod def ll_stringslice_startonly(s1, start): return LLHelpers._ll_stringslice(s1, start, len(s1.chars)) + @staticmethod @signature(types.any(), types.int(), types.int(), returns=types.any()) def ll_stringslice_startstop(s1, start, stop): if jit.we_are_jitted(): @@ -851,10 +885,12 @@ stop = len(s1.chars) return LLHelpers._ll_stringslice(s1, start, stop) + @staticmethod def ll_stringslice_minusone(s1): newlen = len(s1.chars) - 1 return LLHelpers._ll_stringslice(s1, 0, newlen) + @staticmethod def ll_split_chr(LIST, s, c, max): chars = 
s.chars strlen = len(chars) @@ -889,6 +925,7 @@ item.copy_contents(s, item, i, 0, j - i) return res + @staticmethod def ll_split(LIST, s, c, max): count = 1 if max == -1: @@ -920,6 +957,7 @@ item.copy_contents(s, item, prev_pos, 0, last - prev_pos) return res + @staticmethod def ll_rsplit_chr(LIST, s, c, max): chars = s.chars strlen = len(chars) @@ -955,6 +993,7 @@ item.copy_contents(s, item, j, 0, i - j) return res + @staticmethod def ll_rsplit(LIST, s, c, max): count = 1 if max == -1: @@ -986,6 +1025,7 @@ item.copy_contents(s, item, 0, 0, prev_pos) return res + @staticmethod @jit.elidable def ll_replace_chr_chr(s, c1, c2): length = len(s.chars) @@ -1001,6 +1041,7 @@ j += 1 return newstr + @staticmethod @jit.elidable def ll_contains(s, c): chars = s.chars @@ -1012,6 +1053,7 @@ i += 1 return False + @staticmethod @jit.elidable def ll_int(s, base): if not 2 <= base <= 36: @@ -1068,23 +1110,29 @@ # ll_build_push(x, next_string, n-1) # s = ll_build_finish(x) + @staticmethod def ll_build_start(parts_count): return malloc(TEMP, parts_count) + @staticmethod def ll_build_push(builder, next_string, index): builder[index] = next_string + @staticmethod def ll_build_finish(builder): return LLHelpers.ll_join_strs(len(builder), builder) + @staticmethod @specialize.memo() def ll_constant(s): return string_repr.convert_const(s) + @staticmethod @specialize.memo() def ll_constant_unicode(s): return unicode_repr.convert_const(s) + @classmethod def do_stringformat(cls, hop, sourcevarsrepr): s_str = hop.args_s[0] assert s_str.is_constant() @@ -1150,8 +1198,8 @@ hop.exception_cannot_occur() # to ignore the ZeroDivisionError of '%' return hop.gendirectcall(cls.ll_join_strs, size, vtemp) - do_stringformat = classmethod(do_stringformat) + @staticmethod @jit.dont_look_inside def ll_string2list(RESLIST, src): length = len(src.chars) diff --git a/rpython/rtyper/rmodel.py b/rpython/rtyper/rmodel.py --- a/rpython/rtyper/rmodel.py +++ b/rpython/rtyper/rmodel.py @@ -2,8 +2,7 @@ from rpython.flowspace.model import Constant from rpython.rtyper.error import TyperError, MissingRTypeOperation from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.lltypesystem.lltype import (Void, Bool, typeOf, - LowLevelType, isCompatibleType) +from rpython.rtyper.lltypesystem.lltype import Void, Bool, LowLevelType from rpython.tool.pairtype import pairtype, extendabletype, pair @@ -120,14 +119,9 @@ def convert_const(self, value): "Convert the given constant value to the low-level repr of 'self'." - if self.lowleveltype is not Void: - try: - realtype = typeOf(value) - except (AssertionError, AttributeError, TypeError): - realtype = '???' - if realtype != self.lowleveltype: - raise TyperError("convert_const(self = %r, value = %r)" % ( - self, value)) + if not self.lowleveltype._contains_value(value): + raise TyperError("convert_const(self = %r, value = %r)" % ( + self, value)) return value def get_ll_eq_function(self): @@ -356,18 +350,9 @@ lltype = reqtype else: raise TypeError(repr(reqtype)) - # Void Constants can hold any value; - # non-Void Constants must hold a correctly ll-typed value - if lltype is not Void: - try: - realtype = typeOf(value) - except (AssertionError, AttributeError): - realtype = '???' 
- if not isCompatibleType(realtype, lltype): - raise TyperError("inputconst(reqtype = %s, value = %s):\n" - "expected a %r,\n" - " got a %r" % (reqtype, value, - lltype, realtype)) + if not lltype._contains_value(value): + raise TyperError("inputconst(): expected a %r, got %r" % + (lltype, value)) c = Constant(value) c.concretetype = lltype return c @@ -422,7 +407,8 @@ def __ne__(self, other): return not (self == other) - def build_ll_dummy_value(self): + @property + def ll_dummy_value(self): TYPE = self.TYPE try: return self.rtyper.cache_dummy_values[TYPE] @@ -435,8 +421,6 @@ self.rtyper.cache_dummy_values[TYPE] = p return p - ll_dummy_value = property(build_ll_dummy_value) - # logging/warning diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -9,7 +9,6 @@ from rpython.rtyper.rfloat import FloatRepr from rpython.tool.pairtype import pairtype, pair from rpython.tool.sourcetools import func_with_new_name -from rpython.tool.staticmethods import StaticMethods from rpython.rlib.rstring import UnicodeBuilder @@ -800,10 +799,8 @@ # get flowed and annotated, mostly with SomePtr. # -# this class contains low level helpers used both by lltypesystem -class AbstractLLHelpers: - __metaclass__ = StaticMethods - +class AbstractLLHelpers(object): + @staticmethod def ll_isdigit(s): from rpython.rtyper.annlowlevel import hlstr @@ -815,6 +812,7 @@ return False return True + @staticmethod def ll_isalpha(s): from rpython.rtyper.annlowlevel import hlstr @@ -826,6 +824,7 @@ return False return True + @staticmethod def ll_isalnum(s): from rpython.rtyper.annlowlevel import hlstr @@ -837,14 +836,17 @@ return False return True + @staticmethod def ll_char_isspace(ch): c = ord(ch) return c == 32 or (9 <= c <= 13) # c in (9, 10, 11, 12, 13, 32) + @staticmethod def ll_char_isdigit(ch): c = ord(ch) return c <= 57 and c >= 48 + @staticmethod def ll_char_isalpha(ch): c = ord(ch) if c >= 97: @@ -852,6 +854,7 @@ else: return 65 <= c <= 90 + @staticmethod def ll_char_isalnum(ch): c = ord(ch) if c >= 65: @@ -862,47 +865,54 @@ else: return 48 <= c <= 57 + @staticmethod def ll_char_isupper(ch): c = ord(ch) return 65 <= c <= 90 + @staticmethod def ll_char_islower(ch): c = ord(ch) return 97 <= c <= 122 + @staticmethod def ll_upper_char(ch): if 'a' <= ch <= 'z': ch = chr(ord(ch) - 32) return ch + @staticmethod def ll_lower_char(ch): if 'A' <= ch <= 'Z': ch = chr(ord(ch) + 32) return ch + @staticmethod def ll_char_hash(ch): return ord(ch) + @staticmethod def ll_unichar_hash(ch): return ord(ch) + @classmethod def ll_str_is_true(cls, s): # check if a string is True, allowing for None return bool(s) and cls.ll_strlen(s) != 0 - ll_str_is_true = classmethod(ll_str_is_true) + @classmethod def ll_stritem_nonneg_checked(cls, s, i): if i >= cls.ll_strlen(s): raise IndexError return cls.ll_stritem_nonneg(s, i) - ll_stritem_nonneg_checked = classmethod(ll_stritem_nonneg_checked) + @classmethod def ll_stritem(cls, s, i): if i < 0: i += cls.ll_strlen(s) return cls.ll_stritem_nonneg(s, i) - ll_stritem = classmethod(ll_stritem) + @classmethod def ll_stritem_checked(cls, s, i): length = cls.ll_strlen(s) if i < 0: @@ -910,8 +920,8 @@ if i >= length or i < 0: raise IndexError return cls.ll_stritem_nonneg(s, i) - ll_stritem_checked = classmethod(ll_stritem_checked) + @staticmethod def parse_fmt_string(fmt): # we support x, d, s, f, [r] it = iter(fmt) @@ -937,6 +947,7 @@ r.append(curstr) return r + @staticmethod def ll_float(ll_str): from rpython.rtyper.annlowlevel import hlstr 
from rpython.rlib.rfloat import rstring_to_float @@ -961,6 +972,7 @@ assert end >= 0 return rstring_to_float(s[beg:end + 1]) + @classmethod def ll_splitlines(cls, LIST, ll_str, keep_newlines): from rpython.rtyper.annlowlevel import hlstr s = hlstr(ll_str) @@ -991,4 +1003,3 @@ item = cls.ll_stringslice_startstop(ll_str, j, strlen) res.ll_setitem_fast(list_length, item) return res - ll_splitlines = classmethod(ll_splitlines) diff --git a/rpython/tool/staticmethods.py b/rpython/tool/staticmethods.py deleted file mode 100644 --- a/rpython/tool/staticmethods.py +++ /dev/null @@ -1,14 +0,0 @@ -import types -class AbstractMethods(type): - def __new__(cls, cls_name, bases, cls_dict): - for key, value in cls_dict.iteritems(): - if isinstance(value, types.FunctionType): - cls_dict[key] = cls.decorator(value) - return type.__new__(cls, cls_name, bases, cls_dict) - - -class StaticMethods(AbstractMethods): - """ - Metaclass that turns plain methods into staticmethods. - """ - decorator = staticmethod From noreply at buildbot.pypy.org Wed Oct 8 17:58:36 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 8 Oct 2014 17:58:36 +0200 (CEST) Subject: [pypy-commit] pypy rtyper-stuff: close branch Message-ID: <20141008155836.F3C601C3538@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: rtyper-stuff Changeset: r73840:6d312a8d75bf Date: 2014-10-08 16:50 +0100 http://bitbucket.org/pypy/pypy/changeset/6d312a8d75bf/ Log: close branch From noreply at buildbot.pypy.org Wed Oct 8 20:55:18 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 8 Oct 2014 20:55:18 +0200 (CEST) Subject: [pypy-commit] pypy default: Trying out a small hack: setting the oopspec to 'jit.not_in_trace()' Message-ID: <20141008185518.CAB6B1D285F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73841:277d77803c7a Date: 2014-10-08 18:42 +0200 http://bitbucket.org/pypy/pypy/changeset/277d77803c7a/ Log: Trying out a small hack: setting the oopspec to 'jit.not_in_trace()' makes the function call disappear from the jit traces. It is still called in interpreted mode, and by the jit tracing and blackholing, but not by the final assembler. 
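A minimal RPython-level sketch of how this is meant to be used, modelled on the tests added in this changeset rather than on any real interpreter code (the names Counter, note_call, driver and f are invented for illustration); the marked function must return None:

    # sketch only; assumes this changeset, all names are invented
    from rpython.rlib.jit import JitDriver

    class Counter(object):
        calls = 0

    def note_call(counter):
        # must return None: the call disappears from the JIT trace, but
        # it is still executed in interpreted mode and by the tracing
        # and blackholing steps
        counter.calls += 1
    note_call.oopspec = 'jit.not_in_trace()'

    driver = JitDriver(greens=[], reds=['n', 'counter'])

    def f(n):
        counter = Counter()
        while n > 0:
            driver.jit_merge_point(n=n, counter=counter)
            note_call(counter)
            n -= 1
        return counter.calls

When f() gets compiled, the residual call to note_call() is simply not recorded in the trace, so iterations executed from the generated assembler do not touch the counter; the tests added below check exactly this pattern.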
diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -25,6 +25,7 @@ OS_THREADLOCALREF_GET = 5 # llop.threadlocalref_get OS_GET_ERRNO = 6 # rposix.get_errno OS_SET_ERRNO = 7 # rposix.set_errno + OS_NOT_IN_TRACE = 8 # for calls not recorded in the jit trace # OS_STR_CONCAT = 22 # "stroruni.concat" OS_STR_SLICE = 23 # "stroruni.slice" @@ -96,6 +97,7 @@ _OS_CANRAISE = set([ OS_NONE, OS_STR2UNICODE, OS_LIBFFI_CALL, OS_RAW_MALLOC_VARSIZE_CHAR, OS_JIT_FORCE_VIRTUAL, OS_SHRINK_ARRAY, OS_DICT_LOOKUP, + OS_NOT_IN_TRACE, ]) def __new__(cls, readonly_descrs_fields, readonly_descrs_arrays, diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1562,7 +1562,18 @@ kind = getkind(args[0].concretetype) return SpaceOperation('%s_isvirtual' % kind, args, op.result) elif oopspec_name == 'jit.force_virtual': - return self._handle_oopspec_call(op, args, EffectInfo.OS_JIT_FORCE_VIRTUAL, EffectInfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE) + return self._handle_oopspec_call(op, args, + EffectInfo.OS_JIT_FORCE_VIRTUAL, + EffectInfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE) + elif oopspec_name == 'jit.not_in_trace': + # ignore 'args' and use the original 'op.args' + if op.result.concretetype is not lltype.Void: + raise Exception( + "%r: jit.not_in_trace() function must return None" + % (op.args[0],)) + return self._handle_oopspec_call(op, op.args[1:], + EffectInfo.OS_NOT_IN_TRACE, + EffectInfo.EF_CAN_RAISE) else: raise AssertionError("missing support for %r" % oopspec_name) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1427,6 +1427,8 @@ if effect == effectinfo.EF_LOOPINVARIANT: return self.execute_varargs(rop.CALL_LOOPINVARIANT, allboxes, descr, False, False) + if effectinfo.oopspecindex == effectinfo.OS_NOT_IN_TRACE: + return self.metainterp.do_not_in_trace_call(allboxes, descr) exc = effectinfo.check_can_raise() pure = effectinfo.check_is_elidable() return self.execute_varargs(rop.CALL, allboxes, descr, exc, pure) @@ -2830,6 +2832,19 @@ if not we_are_translated(): # for llgraph descr._original_func_ = op.getarg(0).value + def do_not_in_trace_call(self, allboxes, descr): + self.clear_exception() + resbox = executor.execute_varargs(self.cpu, self, rop.CALL, + allboxes, descr) + assert resbox is None + if self.last_exc_value_box is not None: + # cannot trace this! 
it raises, so we have to follow the + # exception-catching path, but the trace doesn't contain + # the call at all + raise SwitchToBlackhole(Counters.ABORT_ESCAPE, + raising_exception=True) + return None + # ____________________________________________________________ class ChangeFrame(jitexc.JitException): diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -4044,3 +4044,49 @@ res = self.interp_operations(f, [17]) assert res == 42 self.check_operations_history(guard_true=1, guard_false=0) + + def test_not_in_trace(self): + class X: + n = 0 + def g(x): + if we_are_jitted(): + raise NotImplementedError + x.n += 1 + g.oopspec = 'jit.not_in_trace()' + + jitdriver = JitDriver(greens=[], reds=['n', 'token', 'x']) + def f(n): + token = 0 + x = X() + while n >= 0: + jitdriver.jit_merge_point(n=n, x=x, token=token) + if not we_are_jitted(): + token += 1 + g(x) + n -= 1 + return x.n + token * 1000 + + res = self.meta_interp(f, [10]) + assert res == 2003 # two runs before jitting; then one tracing run + self.check_resops(int_add=0, call=0, call_may_force=0) + + def test_not_in_trace_exception(self): + def g(): + if we_are_jitted(): + raise NotImplementedError + raise ValueError + g.oopspec = 'jit.not_in_trace()' + + jitdriver = JitDriver(greens=[], reds=['n']) + def f(n): + while n >= 0: + jitdriver.jit_merge_point(n=n) + try: + g() + except ValueError: + n -= 1 + return 42 + + res = self.meta_interp(f, [10]) + assert res == 42 + self.check_aborted_count(3) From noreply at buildbot.pypy.org Wed Oct 8 20:55:19 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 8 Oct 2014 20:55:19 +0200 (CEST) Subject: [pypy-commit] pypy default: Expose 'not_in_trace' to app-level Message-ID: <20141008185519.DFE1A1D285F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73842:9f2a008bc6aa Date: 2014-10-08 19:09 +0200 http://bitbucket.org/pypy/pypy/changeset/9f2a008bc6aa/ Log: Expose 'not_in_trace' to app-level diff --git a/pypy/module/pypyjit/__init__.py b/pypy/module/pypyjit/__init__.py --- a/pypy/module/pypyjit/__init__.py +++ b/pypy/module/pypyjit/__init__.py @@ -7,6 +7,7 @@ interpleveldefs = { 'set_param': 'interp_jit.set_param', 'residual_call': 'interp_jit.residual_call', + 'not_from_assembler': 'interp_jit.W_NotFromAssembler', 'set_compile_hook': 'interp_resop.set_compile_hook', 'set_optimize_hook': 'interp_resop.set_optimize_hook', 'set_abort_hook': 'interp_resop.set_abort_hook', diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -12,6 +12,9 @@ from pypy.interpreter.pycode import CO_GENERATOR from pypy.interpreter.pyframe import PyFrame from pypy.interpreter.pyopcode import ExitFrame, Yield +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.typedef import TypeDef +from pypy.interpreter.gateway import interp2app from opcode import opmap @@ -144,3 +147,30 @@ '''For testing. 
Invokes callable(...), but without letting the JIT follow the call.''' return space.call_args(w_callable, __args__) + + +class W_NotFromAssembler(W_Root): + def __init__(self, space, w_callable): + self.space = space + self.w_callable = w_callable + def descr_call(self, __args__): + _call_not_in_trace(self.space, self.w_callable, __args__) + + at jit.not_in_trace +def _call_not_in_trace(space, w_callable, __args__): + space.call_args(w_callable, __args__) + +def not_from_assembler_new(space, w_subtype, w_callable): + return W_NotFromAssembler(space, w_callable) + +W_NotFromAssembler.typedef = TypeDef("not_from_assembler", + __doc__ = """\ +A decorator that returns a callable that invokes the original +callable, but not from the JIT-produced assembler. It is called +from the interpreted mode, and from the JIT creation (pyjitpl) or +exiting (blackhole) steps, but just not from the final assembler. +""", + __new__ = interp2app(not_from_assembler_new), + __call__ = interp2app(W_NotFromAssembler.descr_call), +) +W_NotFromAssembler.typedef.acceptable_as_base_class = False diff --git a/pypy/module/pypyjit/test/test_jit_not_in_trace.py b/pypy/module/pypyjit/test/test_jit_not_in_trace.py new file mode 100644 --- /dev/null +++ b/pypy/module/pypyjit/test/test_jit_not_in_trace.py @@ -0,0 +1,11 @@ + +class AppTestJitNotInTrace(object): + spaceconfig = dict(usemodules=('pypyjit',)) + + def test_not_from_assembler(self): + import pypyjit + @pypyjit.not_from_assembler + def f(x, y): + return 42 + r = f(3, 4) + assert r is None diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -194,6 +194,14 @@ return func return decorator +def not_in_trace(func): + """A decorator for a function with no return value. It makes the + function call disappear from the jit traces. It is still called in + interpreted mode, and by the jit tracing and blackholing, but not + by the final assembler.""" + func.oopspec = "jit.not_in_trace()" # note that 'func' may take arguments + return func + @oopspec("jit.isconstant(value)") def isconstant(value): """ From noreply at buildbot.pypy.org Wed Oct 8 20:55:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 8 Oct 2014 20:55:21 +0200 (CEST) Subject: [pypy-commit] pypy default: Test and comments Message-ID: <20141008185521.159CF1D286C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73843:157413a91b38 Date: 2014-10-08 19:15 +0200 http://bitbucket.org/pypy/pypy/changeset/157413a91b38/ Log: Test and comments diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -158,6 +158,7 @@ @jit.not_in_trace def _call_not_in_trace(space, w_callable, __args__): + # this must return None space.call_args(w_callable, __args__) def not_from_assembler_new(space, w_subtype, w_callable): @@ -169,6 +170,8 @@ callable, but not from the JIT-produced assembler. It is called from the interpreted mode, and from the JIT creation (pyjitpl) or exiting (blackhole) steps, but just not from the final assembler. +The callable should return None! (Its actual return value is +ignored.) 
""", __new__ = interp2app(not_from_assembler_new), __call__ = interp2app(W_NotFromAssembler.descr_call), diff --git a/pypy/module/pypyjit/test/test_jit_not_in_trace.py b/pypy/module/pypyjit/test/test_jit_not_in_trace.py --- a/pypy/module/pypyjit/test/test_jit_not_in_trace.py +++ b/pypy/module/pypyjit/test/test_jit_not_in_trace.py @@ -9,3 +9,11 @@ return 42 r = f(3, 4) assert r is None + + def test_not_from_assembler_exception(self): + import pypyjit + @pypyjit.not_from_assembler + def f(x, y): + raise ValueError(y, x) + e = raises(ValueError, f, 3, 4) + assert e.value.args == (4, 3) From noreply at buildbot.pypy.org Wed Oct 8 20:55:22 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 8 Oct 2014 20:55:22 +0200 (CEST) Subject: [pypy-commit] pypy default: Test that jit.not_in_trace functions are really seen during blackholing too Message-ID: <20141008185522.318EA1D286C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73844:abea248cb09f Date: 2014-10-08 19:21 +0200 http://bitbucket.org/pypy/pypy/changeset/abea248cb09f/ Log: Test that jit.not_in_trace functions are really seen during blackholing too diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -4090,3 +4090,24 @@ res = self.meta_interp(f, [10]) assert res == 42 self.check_aborted_count(3) + + def test_not_in_trace_blackhole(self): + class X: + seen = 0 + def g(x): + if we_are_jitted(): + raise NotImplementedError + x.seen = 42 + g.oopspec = 'jit.not_in_trace()' + + jitdriver = JitDriver(greens=[], reds=['n']) + def f(n): + while n >= 0: + jitdriver.jit_merge_point(n=n) + n -= 1 + x = X() + g(x) + return x.seen + + res = self.meta_interp(f, [10]) + assert res == 42 From noreply at buildbot.pypy.org Wed Oct 8 20:55:23 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 8 Oct 2014 20:55:23 +0200 (CEST) Subject: [pypy-commit] pypy default: Translation fix: can't force the EF_XXX here during codewriter, but we Message-ID: <20141008185523.60D361D286C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73845:bedbb718f291 Date: 2014-10-08 19:59 +0200 http://bitbucket.org/pypy/pypy/changeset/bedbb718f291/ Log: Translation fix: can't force the EF_XXX here during codewriter, but we can make it irrelevant. 
diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1572,8 +1572,7 @@ "%r: jit.not_in_trace() function must return None" % (op.args[0],)) return self._handle_oopspec_call(op, op.args[1:], - EffectInfo.OS_NOT_IN_TRACE, - EffectInfo.EF_CAN_RAISE) + EffectInfo.OS_NOT_IN_TRACE) else: raise AssertionError("missing support for %r" % oopspec_name) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1393,6 +1393,9 @@ # allboxes = self._build_allboxes(funcbox, argboxes, descr) effectinfo = descr.get_extra_info() + if effectinfo.oopspecindex == effectinfo.OS_NOT_IN_TRACE: + return self.metainterp.do_not_in_trace_call(allboxes, descr) + if (assembler_call or effectinfo.check_forces_virtual_or_virtualizable()): # residual calls require attention to keep virtualizables in-sync @@ -1427,8 +1430,6 @@ if effect == effectinfo.EF_LOOPINVARIANT: return self.execute_varargs(rop.CALL_LOOPINVARIANT, allboxes, descr, False, False) - if effectinfo.oopspecindex == effectinfo.OS_NOT_IN_TRACE: - return self.metainterp.do_not_in_trace_call(allboxes, descr) exc = effectinfo.check_can_raise() pure = effectinfo.check_is_elidable() return self.execute_varargs(rop.CALL, allboxes, descr, exc, pure) From noreply at buildbot.pypy.org Wed Oct 8 21:12:48 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 8 Oct 2014 21:12:48 +0200 (CEST) Subject: [pypy-commit] pypy default: Change the return value from None to the not_from_assembler object Message-ID: <20141008191248.6E62F1D3847@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73846:d1da93ec991a Date: 2014-10-08 21:12 +0200 http://bitbucket.org/pypy/pypy/changeset/d1da93ec991a/ Log: Change the return value from None to the not_from_assembler object itself, to make it directly useful for sys.settrace(). diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -155,10 +155,11 @@ self.w_callable = w_callable def descr_call(self, __args__): _call_not_in_trace(self.space, self.w_callable, __args__) + return self @jit.not_in_trace def _call_not_in_trace(space, w_callable, __args__): - # this must return None + # this _call_not_in_trace() must return None space.call_args(w_callable, __args__) def not_from_assembler_new(space, w_subtype, w_callable): @@ -170,8 +171,14 @@ callable, but not from the JIT-produced assembler. It is called from the interpreted mode, and from the JIT creation (pyjitpl) or exiting (blackhole) steps, but just not from the final assembler. -The callable should return None! (Its actual return value is -ignored.) + +Note that the return value of the callable is ignored, because +there is no reasonable way to guess what it sound be in case the +function is not called. + +This is meant to be used notably in sys.settrace() for coverage- +like tools. For that purpose, if g = not_from_assembler(f), then +'g(*args)' may call 'f(*args)' but it always return g itself. 
""", __new__ = interp2app(not_from_assembler_new), __call__ = interp2app(W_NotFromAssembler.descr_call), diff --git a/pypy/module/pypyjit/test/test_jit_not_in_trace.py b/pypy/module/pypyjit/test/test_jit_not_in_trace.py --- a/pypy/module/pypyjit/test/test_jit_not_in_trace.py +++ b/pypy/module/pypyjit/test/test_jit_not_in_trace.py @@ -8,7 +8,7 @@ def f(x, y): return 42 r = f(3, 4) - assert r is None + assert r is f def test_not_from_assembler_exception(self): import pypyjit From noreply at buildbot.pypy.org Thu Oct 9 05:59:44 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 9 Oct 2014 05:59:44 +0200 (CEST) Subject: [pypy-commit] pypy default: enable test_flat_iter/test_flat_getitem in test_zjit Message-ID: <20141009035944.C0B421C346A@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73847:88fecada2dac Date: 2014-10-08 04:14 -0400 http://bitbucket.org/pypy/pypy/changeset/88fecada2dac/ Log: enable test_flat_iter/test_flat_getitem in test_zjit diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -516,12 +516,21 @@ def test_flat_iter(self): result = self.run("flat_iter") assert result == 6 - py.test.skip("don't run for now") self.check_trace_count(1) - self.check_simple_loop({'raw_load': 2, 'float_add': 1, - 'raw_store': 1, 'int_add': 2, - 'int_ge': 1, 'guard_false': 1, - 'arraylen_gc': 1, 'jump': 1}) + self.check_simple_loop({ + 'float_add': 1, + 'getarrayitem_gc': 3, + 'guard_false': 1, + 'guard_not_invalidated': 1, + 'guard_true': 3, + 'int_add': 9, + 'int_ge': 1, + 'int_lt': 3, + 'jump': 1, + 'raw_load': 2, + 'raw_store': 1, + 'setarrayitem_gc': 3, + }) def define_flat_getitem(): return ''' @@ -533,17 +542,19 @@ def test_flat_getitem(self): result = self.run("flat_getitem") assert result == 10.0 - py.test.skip("don't run for now") self.check_trace_count(1) - self.check_simple_loop({'raw_load': 1, - 'raw_store': 1, - 'int_lt': 1, - 'int_ge': 1, - 'int_add': 3, - 'guard_true': 1, - 'guard_false': 1, - 'arraylen_gc': 2, - 'jump': 1}) + self.check_simple_loop({ + 'getarrayitem_gc': 2, + 'guard_false': 1, + 'guard_true': 2, + 'int_add': 6, + 'int_ge': 1, + 'int_lt': 2, + 'jump': 1, + 'raw_load': 1, + 'raw_store': 1, + 'setarrayitem_gc': 2, + }) def define_flat_setitem(): return ''' From noreply at buildbot.pypy.org Thu Oct 9 05:59:46 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 9 Oct 2014 05:59:46 +0200 (CEST) Subject: [pypy-commit] pypy default: allow iterator reset to use existing state Message-ID: <20141009035946.04FC51C346A@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73848:1feef8dd0fb2 Date: 2014-10-08 17:52 -0400 http://bitbucket.org/pypy/pypy/changeset/1feef8dd0fb2/ Log: allow iterator reset to use existing state diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -80,7 +80,7 @@ class IterState(object): - _immutable_fields_ = ['iterator', 'index', 'indices[*]', 'offset'] + _immutable_fields_ = ['iterator', 'index', 'indices', 'offset'] def __init__(self, iterator, index, indices, offset): self.iterator = iterator @@ -102,8 +102,16 @@ self.strides = strides self.backstrides = backstrides - def reset(self): - return IterState(self, 0, [0] * len(self.shape_m1), self.array.start) + @jit.unroll_safe + def reset(self, state=None): + if state is None: + indices = 
[0] * len(self.shape_m1) + else: + assert state.iterator is self + indices = state.indices + for i in xrange(self.ndim_m1, -1, -1): + indices[i] = 0 + return IterState(self, 0, indices, self.array.start) @jit.unroll_safe def next(self, state): diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -322,7 +322,7 @@ outi.setitem(outs, oval) outs = outi.next(outs) rights = righti.next(rights) - rights = righti.reset() + rights = righti.reset(rights) lefts = lefti.next(lefts) return result diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -605,7 +605,6 @@ 'raw_load': 2, }) self.check_resops({ - 'arraylen_gc': 1, 'float_add': 2, 'float_mul': 2, 'getarrayitem_gc': 7, @@ -621,12 +620,11 @@ 'int_lt': 11, 'int_sub': 4, 'jump': 3, - 'new_array': 1, 'new_with_vtable': 7, 'raw_load': 6, 'raw_store': 1, 'same_as': 2, - 'setarrayitem_gc': 8, + 'setarrayitem_gc': 10, 'setfield_gc': 22, }) From noreply at buildbot.pypy.org Thu Oct 9 05:59:47 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 9 Oct 2014 05:59:47 +0200 (CEST) Subject: [pypy-commit] pypy default: optimize and test ndarray flatiter get/set Message-ID: <20141009035947.42E451C346A@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73849:7842e64c5847 Date: 2014-10-08 13:23 -0400 http://bitbucket.org/pypy/pypy/changeset/7842e64c5847/ Log: optimize and test ndarray flatiter get/set diff --git a/pypy/module/micronumpy/flatiter.py b/pypy/module/micronumpy/flatiter.py --- a/pypy/module/micronumpy/flatiter.py +++ b/pypy/module/micronumpy/flatiter.py @@ -27,12 +27,9 @@ class W_FlatIterator(W_NDimArray): def __init__(self, arr): self.base = arr + self.iter, self.state = arr.create_iter() # this is needed to support W_NDimArray interface self.implementation = FakeArrayImplementation(self.base) - self.reset() - - def reset(self): - self.iter, self.state = self.base.create_iter() def descr_len(self, space): return space.wrap(self.iter.size) @@ -54,25 +51,39 @@ if not (space.isinstance_w(w_idx, space.w_int) or space.isinstance_w(w_idx, space.w_slice)): raise oefmt(space.w_IndexError, 'unsupported iterator index') - self.reset() - base = self.base - start, stop, step, length = space.decode_index4(w_idx, base.get_size()) - base_iter, base_state = base.create_iter() - base_state = base_iter.next_skip_x(base_state, start) - if length == 1: - return base_iter.getitem(base_state) - res = W_NDimArray.from_shape(space, [length], base.get_dtype(), - base.get_order(), w_instance=base) - return loop.flatiter_getitem(res, base_iter, base_state, step) + self.state = self.iter.reset(self.state) + start, stop, step, length = space.decode_index4(w_idx, self.iter.size) + self.state = self.iter.next_skip_x(self.state, start) + try: + if length == 1: + return self.iter.getitem(self.state) + base = self.base + res = W_NDimArray.from_shape(space, [length], base.get_dtype(), + base.get_order(), w_instance=base) + return loop.flatiter_getitem(res, self.iter, self.state, step) + finally: + self.state = self.iter.reset(self.state) def descr_setitem(self, space, w_idx, w_value): if not (space.isinstance_w(w_idx, space.w_int) or space.isinstance_w(w_idx, space.w_slice)): raise oefmt(space.w_IndexError, 'unsupported iterator index') - base = self.base - start, stop, step, length = space.decode_index4(w_idx, 
base.get_size()) - arr = convert_to_array(space, w_value) - loop.flatiter_setitem(space, self.base, arr, start, step, length) + start, stop, step, length = space.decode_index4(w_idx, self.iter.size) + self.state = self.iter.reset(self.state) + self.state = self.iter.next_skip_x(self.state, start) + try: + dtype = self.base.get_dtype() + if length == 1: + try: + val = dtype.coerce(space, w_value) + except OperationError: + raise oefmt(space.w_ValueError, "Error setting single item of array.") + self.iter.setitem(self.state, val) + return + arr = convert_to_array(space, w_value) + loop.flatiter_setitem(space, dtype, arr, self.iter, self.state, step, length) + finally: + self.state = self.iter.reset(self.state) def descr_iter(self): return self diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -443,11 +443,8 @@ greens = ['dtype'], reds = 'auto') -def flatiter_setitem(space, arr, val, start, step, length): - dtype = arr.get_dtype() - arr_iter, arr_state = arr.create_iter() +def flatiter_setitem(space, dtype, val, arr_iter, arr_state, step, length): val_iter, val_state = val.create_iter() - arr_state = arr_iter.next_skip_x(arr_state, start) while length > 0: flatiter_setitem_driver.jit_merge_point(dtype=dtype) val = val_iter.getitem(val_state) diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -475,8 +475,10 @@ return repeat(space, self, repeats, w_axis) def descr_set_flatiter(self, space, w_obj): + iter, state = self.create_iter() + dtype = self.get_dtype() arr = convert_to_array(space, w_obj) - loop.flatiter_setitem(space, self, arr, 0, 1, self.get_size()) + loop.flatiter_setitem(space, dtype, arr, iter, state, 1, iter.size) def descr_get_flatiter(self, space): return space.wrap(W_FlatIterator(self)) diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -2727,15 +2727,35 @@ b.next() assert b.index == 3 assert b.coords == (0, 3) + b.next() assert b[3] == 3 - assert (b[::3] == [0, 3, 6, 9]).all() - assert (b[2::5] == [2, 7]).all() - assert b[-2] == 8 - raises(IndexError, "b[11]") - raises(IndexError, "b[-11]") - raises(IndexError, 'b[0, 1]') assert b.index == 0 assert b.coords == (0, 0) + b.next() + assert (b[::3] == [0, 3, 6, 9]).all() + assert b.index == 0 + assert b.coords == (0, 0) + b.next() + assert (b[2::5] == [2, 7]).all() + assert b.index == 0 + assert b.coords == (0, 0) + b.next() + assert b[-2] == 8 + assert b.index == 0 + assert b.coords == (0, 0) + b.next() + raises(IndexError, "b[11]") + assert b.index == 0 + assert b.coords == (0, 0) + b.next() + raises(IndexError, "b[-11]") + assert b.index == 0 + assert b.coords == (0, 0) + b.next() + exc = raises(IndexError, 'b[0, 1]') + assert str(exc.value) == "unsupported iterator index" + assert b.index == 1 + assert b.coords == (0, 1) def test_flatiter_setitem(self): from numpypy import arange, array @@ -2743,9 +2763,25 @@ b = a.T.flat b[6::2] = [-1, -2] assert (a == [[0, 1, -1, 3], [4, 5, 6, -1], [8, 9, -2, 11]]).all() + assert b[2] == 8 + assert b.index == 0 + b.next() + b[6::2] = [-21, -42] + assert (a == [[0, 1, -21, 3], [4, 5, 6, -21], [8, 9, -42, 11]]).all() b[0:2] = [[[100]]] assert(a[0,0] == 100) assert(a[1,0] == 100) + b.next() + assert b.index == 1 + exc = 
raises(ValueError, "b[0] = [1, 2]") + assert str(exc.value) == "Error setting single item of array." + assert b.index == 0 + b.next() + raises(IndexError, "b[100] = 42") + assert b.index == 1 + exc = raises(IndexError, "b[0, 1] = 42") + assert str(exc.value) == "unsupported iterator index" + assert b.index == 1 def test_flatiter_ops(self): from numpypy import arange, array diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -77,3 +77,119 @@ --TICK-- jump(p0, p1, p3, p6, p7, p12, p14, f86, p18, i87, i62, p41, i58, p47, i40, i64, i70, descr=...) """) + + def test_array_flatiter_getitem_single(self): + def main(): + import _numpypy.multiarray as np + arr = np.zeros((1024, 16)) + 42 + ai = arr.flat + i = 0 + while i < arr.size: + a = ai[i] + i += 1 + return a + log = self.run(main, []) + assert log.result == 42.0 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i123 = int_lt(i112, i44) + guard_true(i123, descr=...) + p124 = getfield_gc_pure(p121, descr=) + setarrayitem_gc(p124, 1, 0, descr=) + setarrayitem_gc(p124, 0, 0, descr=) + i126 = int_lt(i112, i65) + guard_true(i126, descr=...) + i127 = int_sub(i75, i112) + i128 = int_lt(0, i127) + guard_false(i128, descr=...) + i129 = int_floordiv(i112, i75) + i130 = int_mul(i129, i75) + i131 = int_sub(i112, i130) + i132 = int_rshift(i131, 63) + i133 = int_add(i129, i132) + i134 = int_mul(i133, i75) + i135 = int_sub(i112, i134) + i136 = int_mul(i91, i135) + i137 = int_add(i64, i136) + i138 = int_sub(i98, i133) + setarrayitem_gc(p124, 1, i135, descr=) + i139 = int_lt(0, i138) + guard_true(i139, descr=...) + i140 = int_mul(i102, i133) + i141 = int_add(i137, i140) + f142 = raw_load(i108, i141, descr=) + i143 = int_add(i112, 1) + setarrayitem_gc(p124, 1, 0, descr=) + guard_not_invalidated(descr=...) + i144 = getfield_raw(ticker_address, descr=) + i145 = int_lt(i144, 0) + guard_false(i145, descr=...) + p146 = new_with_vtable(...) + setfield_gc(p146, p49, descr=) + setfield_gc(p146, p124, descr=) + setfield_gc(p146, i64, descr=) + setfield_gc(p146, 0, descr=) + setfield_gc(p16, p146, descr=) + i147 = arraylen_gc(p72, descr=) + i148 = arraylen_gc(p90, descr=) + jump(p0, p1, p3, p6, p7, p12, p14, p16, i143, f142, p26, i44, p146, i65, p49, i64, i75, i91, i98, i102, i108, p72, p90, descr=...) + """) + + def test_array_flatiter_setitem_single(self): + def main(): + import _numpypy.multiarray as np + arr = np.empty((1024, 16)) + ai = arr.flat + i = 0 + while i < arr.size: + ai[i] = 42.0 + i += 1 + return ai[-1] + log = self.run(main, []) + assert log.result == 42.0 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i126 = int_lt(i115, i42) + guard_true(i126, descr=...) + i127 = int_lt(i115, i48) + guard_true(i127, descr=...) + p128 = getfield_gc_pure(p124, descr=) + i129 = int_sub(i73, i115) + setarrayitem_gc(p128, 1, 0, descr=) + setarrayitem_gc(p128, 0, 0, descr=) + i131 = int_lt(0, i129) + guard_false(i131, descr=...) + i132 = int_floordiv(i115, i73) + i133 = int_mul(i132, i73) + i134 = int_sub(i115, i133) + i135 = int_rshift(i134, 63) + i136 = int_add(i132, i135) + i137 = int_mul(i136, i73) + i138 = int_sub(i115, i137) + i139 = int_mul(i89, i138) + i140 = int_add(i66, i139) + i141 = int_sub(i96, i136) + setarrayitem_gc(p128, 1, i138, descr=) + i142 = int_lt(0, i141) + guard_true(i142, descr=...) 
+ i143 = int_mul(i100, i136) + i144 = int_add(i140, i143) + setarrayitem_gc(p128, 0, i136, descr=) + guard_not_invalidated(descr=...) + raw_store(i111, i144, 42.000000, descr=) + i146 = int_add(i115, 1) + i147 = getfield_raw(ticker_address, descr=) + setarrayitem_gc(p128, 1, 0, descr=) + setarrayitem_gc(p128, 0, 0, descr=) + i149 = int_lt(i147, 0) + guard_false(i149, descr=...) + p150 = new_with_vtable(...) + setfield_gc(p150, p47, descr=) + setfield_gc(p150, p128, descr=) + setfield_gc(p150, i66, descr=) + setfield_gc(p150, 0, descr=) + setfield_gc(p16, p150, descr=) + i151 = arraylen_gc(p70, descr=) + i152 = arraylen_gc(p88, descr=) + jump(p0, p1, p3, p6, p7, p12, p14, p16, i146, i42, i48, p150, i73, i66, p47, i89, i96, i100, i111, p70, p88, descr=...) + """) From noreply at buildbot.pypy.org Thu Oct 9 05:59:48 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 9 Oct 2014 05:59:48 +0200 (CEST) Subject: [pypy-commit] pypy default: add numpy iterator goto method Message-ID: <20141009035948.652851C346A@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73850:bb3689880ac8 Date: 2014-10-08 18:42 -0400 http://bitbucket.org/pypy/pypy/changeset/bb3689880ac8/ Log: add numpy iterator goto method diff --git a/pypy/module/micronumpy/flatiter.py b/pypy/module/micronumpy/flatiter.py --- a/pypy/module/micronumpy/flatiter.py +++ b/pypy/module/micronumpy/flatiter.py @@ -51,16 +51,15 @@ if not (space.isinstance_w(w_idx, space.w_int) or space.isinstance_w(w_idx, space.w_slice)): raise oefmt(space.w_IndexError, 'unsupported iterator index') - self.state = self.iter.reset(self.state) - start, stop, step, length = space.decode_index4(w_idx, self.iter.size) - self.state = self.iter.next_skip_x(self.state, start) try: + start, stop, step, length = space.decode_index4(w_idx, self.iter.size) + state = self.iter.goto(start) if length == 1: - return self.iter.getitem(self.state) + return self.iter.getitem(state) base = self.base res = W_NDimArray.from_shape(space, [length], base.get_dtype(), base.get_order(), w_instance=base) - return loop.flatiter_getitem(res, self.iter, self.state, step) + return loop.flatiter_getitem(res, self.iter, state, step) finally: self.state = self.iter.reset(self.state) @@ -69,19 +68,18 @@ space.isinstance_w(w_idx, space.w_slice)): raise oefmt(space.w_IndexError, 'unsupported iterator index') start, stop, step, length = space.decode_index4(w_idx, self.iter.size) - self.state = self.iter.reset(self.state) - self.state = self.iter.next_skip_x(self.state, start) try: + state = self.iter.goto(start) dtype = self.base.get_dtype() if length == 1: try: val = dtype.coerce(space, w_value) except OperationError: raise oefmt(space.w_ValueError, "Error setting single item of array.") - self.iter.setitem(self.state, val) + self.iter.setitem(state, val) return arr = convert_to_array(space, w_value) - loop.flatiter_setitem(space, dtype, arr, self.iter, self.state, step, length) + loop.flatiter_setitem(space, dtype, arr, self.iter, state, step, length) finally: self.state = self.iter.reset(self.state) diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -91,7 +91,7 @@ class ArrayIter(object): _immutable_fields_ = ['array', 'size', 'ndim_m1', 'shape_m1[*]', - 'strides[*]', 'backstrides[*]'] + 'strides[*]', 'backstrides[*]', 'factors[*]'] def __init__(self, array, size, shape, strides, backstrides): assert len(shape) == len(strides) == len(backstrides) @@ 
-102,6 +102,15 @@ self.strides = strides self.backstrides = backstrides + ndim = len(shape) + factors = [0] * ndim + for i in xrange(ndim): + if i == 0: + factors[ndim-1] = 1 + else: + factors[ndim-i-1] = factors[ndim-i] * shape[ndim-i] + self.factors = factors + @jit.unroll_safe def reset(self, state=None): if state is None: @@ -154,6 +163,16 @@ assert step > 0 return IterState(self, index, indices, offset) + @jit.unroll_safe + def goto(self, index): + # XXX simplify if self.contiguous (offset = start + index * elsize) + offset = self.array.start + current = index + for i in xrange(len(self.shape_m1)): + offset += (current / self.factors[i]) * self.strides[i] + current %= self.factors[i] + return IterState(self, index, None, offset) + def done(self, state): assert state.iterator is self return state.index >= self.size diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -435,7 +435,7 @@ while not ri.done(rs): flatiter_getitem_driver.jit_merge_point(dtype=dtype) ri.setitem(rs, base_iter.getitem(base_state)) - base_state = base_iter.next_skip_x(base_state, step) + base_state = base_iter.goto(base_state.index + step) rs = ri.next(rs) return res @@ -454,7 +454,7 @@ val = val.convert_to(space, dtype) arr_iter.setitem(arr_state, val) # need to repeat i_nput values until all assignments are done - arr_state = arr_iter.next_skip_x(arr_state, step) + arr_state = arr_iter.goto(arr_state.index + step) val_state = val_iter.next(val_state) length -= 1 diff --git a/pypy/module/micronumpy/test/test_iterators.py b/pypy/module/micronumpy/test/test_iterators.py --- a/pypy/module/micronumpy/test/test_iterators.py +++ b/pypy/module/micronumpy/test/test_iterators.py @@ -98,3 +98,21 @@ assert s.indices == [0,1] assert s.offset == 3 assert i.done(s) + + def test_iterator_goto(self): + shape = [3, 5] + strides = [1, 3] + backstrides = [x * (y - 1) for x,y in zip(strides, shape)] + assert backstrides == [2, 12] + a = MockArray() + a.start = 42 + i = ArrayIter(a, support.product(shape), shape, + strides, backstrides) + s = i.reset() + assert s.index == 0 + assert s.indices == [0, 0] + assert s.offset == a.start + s = i.goto(11) + assert s.index == 11 + assert s.indices is None + assert s.offset == a.start + 5 diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -544,16 +544,23 @@ assert result == 10.0 self.check_trace_count(1) self.check_simple_loop({ - 'getarrayitem_gc': 2, + 'arraylen_gc': 2, + 'getarrayitem_gc': 1, 'guard_false': 1, - 'guard_true': 2, - 'int_add': 6, + 'guard_true': 1, + 'int_add': 7, + 'int_and': 1, + 'int_floordiv': 1, 'int_ge': 1, - 'int_lt': 2, + 'int_lt': 1, + 'int_mod': 1, + 'int_mul': 2, + 'int_rshift': 2, + 'int_sub': 1, 'jump': 1, 'raw_load': 1, 'raw_store': 1, - 'setarrayitem_gc': 2, + 'setarrayitem_gc': 1, }) def define_flat_setitem(): @@ -569,17 +576,23 @@ assert result == 1.0 self.check_trace_count(1) self.check_simple_loop({ - 'getarrayitem_gc': 2, + 'arraylen_gc': 2, + 'getarrayitem_gc': 1, 'guard_not_invalidated': 1, - 'guard_true': 3, - 'int_add': 6, + 'guard_true': 2, + 'int_add': 7, + 'int_and': 1, + 'int_floordiv': 1, 'int_gt': 1, - 'int_lt': 2, - 'int_sub': 1, + 'int_lt': 1, + 'int_mod': 1, + 'int_mul': 2, + 'int_rshift': 2, + 'int_sub': 2, 'jump': 1, 'raw_load': 1, 'raw_store': 1, - 'setarrayitem_gc': 2, + 'setarrayitem_gc': 1, }) def 
define_dot(): diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -92,47 +92,50 @@ assert log.result == 42.0 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" - i123 = int_lt(i112, i44) - guard_true(i123, descr=...) - p124 = getfield_gc_pure(p121, descr=) - setarrayitem_gc(p124, 1, 0, descr=) - setarrayitem_gc(p124, 0, 0, descr=) - i126 = int_lt(i112, i65) + i125 = int_lt(i117, i44) + guard_true(i125, descr=...) + i126 = int_lt(i117, i50) guard_true(i126, descr=...) - i127 = int_sub(i75, i112) - i128 = int_lt(0, i127) - guard_false(i128, descr=...) - i129 = int_floordiv(i112, i75) - i130 = int_mul(i129, i75) - i131 = int_sub(i112, i130) - i132 = int_rshift(i131, 63) - i133 = int_add(i129, i132) - i134 = int_mul(i133, i75) - i135 = int_sub(i112, i134) - i136 = int_mul(i91, i135) - i137 = int_add(i64, i136) - i138 = int_sub(i98, i133) - setarrayitem_gc(p124, 1, i135, descr=) - i139 = int_lt(0, i138) - guard_true(i139, descr=...) - i140 = int_mul(i102, i133) - i141 = int_add(i137, i140) - f142 = raw_load(i108, i141, descr=) - i143 = int_add(i112, 1) - setarrayitem_gc(p124, 1, 0, descr=) + i127 = int_floordiv(i117, i61) + i128 = int_mul(i127, i61) + i129 = int_sub(i117, i128) + i130 = int_rshift(i129, 63) + i131 = int_add(i127, i130) + i132 = int_mul(i131, i71) + i133 = int_add(i55, i132) + i134 = int_mod(i117, i61) + i135 = int_rshift(i134, 63) + i136 = int_and(i61, i135) + i137 = int_add(i134, i136) + i138 = int_floordiv(i137, i80) + i139 = int_mul(i138, i80) + i140 = int_sub(i137, i139) + i141 = int_rshift(i140, 63) + i142 = int_add(i138, i141) + i143 = int_mul(i142, i88) + i144 = int_add(i133, i143) + i145 = int_mod(i137, i80) + i146 = int_rshift(i145, 63) + i147 = int_and(i80, i146) + i148 = int_add(i145, i147) + f149 = raw_load(i100, i144, descr=) + p150 = getfield_gc_pure(p123, descr=) + i151 = int_add(i117, 1) + setarrayitem_gc(p150, 1, 0, descr=) + setarrayitem_gc(p150, 0, 0, descr=) guard_not_invalidated(descr=...) - i144 = getfield_raw(ticker_address, descr=) - i145 = int_lt(i144, 0) - guard_false(i145, descr=...) - p146 = new_with_vtable(...) - setfield_gc(p146, p49, descr=) - setfield_gc(p146, p124, descr=) - setfield_gc(p146, i64, descr=) - setfield_gc(p146, 0, descr=) - setfield_gc(p16, p146, descr=) - i147 = arraylen_gc(p72, descr=) - i148 = arraylen_gc(p90, descr=) - jump(p0, p1, p3, p6, p7, p12, p14, p16, i143, f142, p26, i44, p146, i65, p49, i64, i75, i91, i98, i102, i108, p72, p90, descr=...) + i154 = getfield_raw(ticker_address, descr=) + i155 = int_lt(i154, 0) + guard_false(i155, descr=...) + p156 = new_with_vtable(...) + setfield_gc(p156, i55, descr=) + setfield_gc(p156, 0, descr=) + setfield_gc(p156, p150, descr=) + setfield_gc(p156, p49, descr=) + setfield_gc(p16, p156, descr=) + i157 = arraylen_gc(p60, descr=) + i158 = arraylen_gc(p70, descr=) + jump(p0, p1, p3, p6, p7, p12, p14, p16, i151, f149, p26, i44, i50, i61, i71, i55, i80, i88, i100, p156, p49, p60, p70, descr=...) """) def test_array_flatiter_setitem_single(self): @@ -149,47 +152,48 @@ assert log.result == 42.0 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" - i126 = int_lt(i115, i42) - guard_true(i126, descr=...) - i127 = int_lt(i115, i48) - guard_true(i127, descr=...) 
- p128 = getfield_gc_pure(p124, descr=) - i129 = int_sub(i73, i115) - setarrayitem_gc(p128, 1, 0, descr=) - setarrayitem_gc(p128, 0, 0, descr=) - i131 = int_lt(0, i129) - guard_false(i131, descr=...) - i132 = int_floordiv(i115, i73) - i133 = int_mul(i132, i73) - i134 = int_sub(i115, i133) - i135 = int_rshift(i134, 63) - i136 = int_add(i132, i135) - i137 = int_mul(i136, i73) - i138 = int_sub(i115, i137) - i139 = int_mul(i89, i138) - i140 = int_add(i66, i139) - i141 = int_sub(i96, i136) - setarrayitem_gc(p128, 1, i138, descr=) - i142 = int_lt(0, i141) - guard_true(i142, descr=...) - i143 = int_mul(i100, i136) - i144 = int_add(i140, i143) - setarrayitem_gc(p128, 0, i136, descr=) + i128 = int_lt(i120, i42) + guard_true(i128, descr=...) + i129 = int_lt(i120, i48) + guard_true(i129, descr=...) + i130 = int_floordiv(i120, i59) + i131 = int_mul(i130, i59) + i132 = int_sub(i120, i131) + i133 = int_rshift(i132, 63) + i134 = int_add(i130, i133) + i135 = int_mul(i134, i69) + i136 = int_add(i53, i135) + i137 = int_mod(i120, i59) + i138 = int_rshift(i137, 63) + i139 = int_and(i59, i138) + i140 = int_add(i137, i139) + i141 = int_floordiv(i140, i78) + i142 = int_mul(i141, i78) + i143 = int_sub(i140, i142) + i144 = int_rshift(i143, 63) + i145 = int_add(i141, i144) + i146 = int_mul(i145, i86) + i147 = int_add(i136, i146) + i148 = int_mod(i140, i78) + i149 = int_rshift(i148, 63) + i150 = int_and(i78, i149) + i151 = int_add(i148, i150) guard_not_invalidated(descr=...) - raw_store(i111, i144, 42.000000, descr=) - i146 = int_add(i115, 1) - i147 = getfield_raw(ticker_address, descr=) - setarrayitem_gc(p128, 1, 0, descr=) - setarrayitem_gc(p128, 0, 0, descr=) - i149 = int_lt(i147, 0) - guard_false(i149, descr=...) - p150 = new_with_vtable(...) - setfield_gc(p150, p47, descr=) - setfield_gc(p150, p128, descr=) - setfield_gc(p150, i66, descr=) - setfield_gc(p150, 0, descr=) - setfield_gc(p16, p150, descr=) - i151 = arraylen_gc(p70, descr=) - i152 = arraylen_gc(p88, descr=) - jump(p0, p1, p3, p6, p7, p12, p14, p16, i146, i42, i48, p150, i73, i66, p47, i89, i96, i100, i111, p70, p88, descr=...) + raw_store(i103, i147, 42.000000, descr=) + p152 = getfield_gc_pure(p126, descr=) + i153 = int_add(i120, 1) + i154 = getfield_raw(ticker_address, descr=) + setarrayitem_gc(p152, 1, 0, descr=) + setarrayitem_gc(p152, 0, 0, descr=) + i157 = int_lt(i154, 0) + guard_false(i157, descr=...) + p158 = new_with_vtable(...) + setfield_gc(p158, i53, descr=) + setfield_gc(p158, 0, descr=) + setfield_gc(p158, p152, descr=) + setfield_gc(p158, p47, descr=) + setfield_gc(p16, p158, descr=) + i159 = arraylen_gc(p58, descr=) + i160 = arraylen_gc(p68, descr=) + jump(p0, p1, p3, p6, p7, p12, p14, p16, i153, i42, i48, i59, i69, i53, i78, i86, p47, i103, p158, p58, p68, descr=...) """) From noreply at buildbot.pypy.org Thu Oct 9 05:59:49 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 9 Oct 2014 05:59:49 +0200 (CEST) Subject: [pypy-commit] pypy default: remove unused iterator next_skip_x Message-ID: <20141009035949.9B3F51C346A@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73851:0582c24e67be Date: 2014-10-08 18:36 -0400 http://bitbucket.org/pypy/pypy/changeset/0582c24e67be/ Log: remove unused iterator next_skip_x diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -35,10 +35,6 @@ [x.strides[i] * (x.shape[i] - 1) for i in range(len(x.shape))] we can go faster. 
All the calculations happen in next() - -next_skip_x(steps) tries to do the iteration for a number of steps at once, -but then we cannot guarantee that we only overflow one single shape -dimension, perhaps we could overflow times in one big step. """ from rpython.rlib import jit from pypy.module.micronumpy import support @@ -140,30 +136,6 @@ return IterState(self, index, indices, offset) @jit.unroll_safe - def next_skip_x(self, state, step): - assert state.iterator is self - assert step >= 0 - if step == 0: - return state - index = state.index + step - indices = state.indices - offset = state.offset - for i in xrange(self.ndim_m1, -1, -1): - idx = indices[i] - if idx < (self.shape_m1[i] + 1) - step: - indices[i] = idx + step - offset += self.strides[i] * step - break - else: - rem_step = (idx + step) // (self.shape_m1[i] + 1) - cur_step = step - rem_step * (self.shape_m1[i] + 1) - indices[i] = idx + cur_step - offset += self.strides[i] * cur_step - step = rem_step - assert step > 0 - return IterState(self, index, indices, offset) - - @jit.unroll_safe def goto(self, index): # XXX simplify if self.contiguous (offset = start + index * elsize) offset = self.array.start diff --git a/pypy/module/micronumpy/test/test_iterators.py b/pypy/module/micronumpy/test/test_iterators.py --- a/pypy/module/micronumpy/test/test_iterators.py +++ b/pypy/module/micronumpy/test/test_iterators.py @@ -49,56 +49,6 @@ assert s.offset == 1 assert s.indices == [1,0] - def test_iterator_step(self): - #iteration in C order with #contiguous layout => strides[-1] is 1 - #skip less than the shape - shape = [3, 5] - strides = [5, 1] - backstrides = [x * (y - 1) for x,y in zip(strides, shape)] - assert backstrides == [10, 4] - i = ArrayIter(MockArray, support.product(shape), shape, - strides, backstrides) - s = i.reset() - s = i.next_skip_x(s, 2) - s = i.next_skip_x(s, 2) - s = i.next_skip_x(s, 2) - assert s.offset == 6 - assert not i.done(s) - assert s.indices == [1,1] - #And for some big skips - s = i.next_skip_x(s, 5) - assert s.offset == 11 - assert s.indices == [2,1] - s = i.next_skip_x(s, 5) - # Note: the offset does not overflow but recycles, - # this is good for broadcast - assert s.offset == 1 - assert s.indices == [0,1] - assert i.done(s) - - #Now what happens if the array is transposed? 
strides[-1] != 1 - # therefore layout is non-contiguous - strides = [1, 3] - backstrides = [x * (y - 1) for x,y in zip(strides, shape)] - assert backstrides == [2, 12] - i = ArrayIter(MockArray, support.product(shape), shape, - strides, backstrides) - s = i.reset() - s = i.next_skip_x(s, 2) - s = i.next_skip_x(s, 2) - s = i.next_skip_x(s, 2) - assert s.offset == 4 - assert s.indices == [1,1] - assert not i.done(s) - s = i.next_skip_x(s, 5) - assert s.offset == 5 - assert s.indices == [2,1] - assert not i.done(s) - s = i.next_skip_x(s, 5) - assert s.indices == [0,1] - assert s.offset == 3 - assert i.done(s) - def test_iterator_goto(self): shape = [3, 5] strides = [1, 3] From noreply at buildbot.pypy.org Thu Oct 9 05:59:50 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 9 Oct 2014 05:59:50 +0200 (CEST) Subject: [pypy-commit] pypy default: optimize iterator goto if array is contiguous Message-ID: <20141009035950.C58651C346A@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73852:2cc2e4c576c5 Date: 2014-10-08 19:34 -0400 http://bitbucket.org/pypy/pypy/changeset/2cc2e4c576c5/ Log: optimize iterator goto if array is contiguous diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -19,6 +19,7 @@ 'strides[*]', 'backstrides[*]', 'order'] start = 0 parent = None + flags = 0 # JIT hints that length of all those arrays is a constant diff --git a/pypy/module/micronumpy/constants.py b/pypy/module/micronumpy/constants.py --- a/pypy/module/micronumpy/constants.py +++ b/pypy/module/micronumpy/constants.py @@ -74,6 +74,9 @@ WRAP = 1 RAISE = 2 +ARRAY_C_CONTIGUOUS = 0x0001 +ARRAY_F_CONTIGUOUS = 0x0002 + LITTLE = '<' BIG = '>' NATIVE = '=' diff --git a/pypy/module/micronumpy/flagsobj.py b/pypy/module/micronumpy/flagsobj.py --- a/pypy/module/micronumpy/flagsobj.py +++ b/pypy/module/micronumpy/flagsobj.py @@ -2,6 +2,46 @@ from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef, GetSetProperty +from pypy.module.micronumpy import constants as NPY + + +def enable_flags(arr, flags): + arr.flags |= flags + + +def clear_flags(arr, flags): + arr.flags &= ~flags + + +def _update_contiguous_flags(arr): + shape = arr.shape + strides = arr.strides + + is_c_contig = True + sd = arr.dtype.elsize + for i in range(len(shape) - 1, -1, -1): + dim = shape[i] + if strides[i] != sd: + is_c_contig = False + break + if dim == 0: + break + sd *= dim + if is_c_contig: + enable_flags(arr, NPY.ARRAY_C_CONTIGUOUS) + else: + clear_flags(arr, NPY.ARRAY_C_CONTIGUOUS) + + sd = arr.dtype.elsize + for i in range(len(shape)): + dim = shape[i] + if strides[i] != sd: + clear_flags(arr, NPY.ARRAY_F_CONTIGUOUS) + return + if dim == 0: + break + sd *= dim + enable_flags(arr, NPY.ARRAY_F_CONTIGUOUS) class W_FlagsObject(W_Root): diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -37,8 +37,9 @@ All the calculations happen in next() """ from rpython.rlib import jit -from pypy.module.micronumpy import support +from pypy.module.micronumpy import support, constants as NPY from pypy.module.micronumpy.base import W_NDimArray +from pypy.module.micronumpy.flagsobj import _update_contiguous_flags class PureShapeIter(object): @@ -86,11 +87,14 @@ class ArrayIter(object): - _immutable_fields_ = ['array', 'size', 
'ndim_m1', 'shape_m1[*]', + _immutable_fields_ = ['contiguous', 'array', 'size', 'ndim_m1', 'shape_m1[*]', 'strides[*]', 'backstrides[*]', 'factors[*]'] def __init__(self, array, size, shape, strides, backstrides): assert len(shape) == len(strides) == len(backstrides) + _update_contiguous_flags(array) + self.contiguous = array.flags & NPY.ARRAY_C_CONTIGUOUS + self.array = array self.size = size self.ndim_m1 = len(shape) - 1 @@ -137,12 +141,14 @@ @jit.unroll_safe def goto(self, index): - # XXX simplify if self.contiguous (offset = start + index * elsize) offset = self.array.start - current = index - for i in xrange(len(self.shape_m1)): - offset += (current / self.factors[i]) * self.strides[i] - current %= self.factors[i] + if self.contiguous: + offset += index * self.array.dtype.elsize + else: + current = index + for i in xrange(len(self.shape_m1)): + offset += (current / self.factors[i]) * self.strides[i] + current %= self.factors[i] return IterState(self, index, None, offset) def done(self, state): diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -544,19 +544,13 @@ assert result == 10.0 self.check_trace_count(1) self.check_simple_loop({ - 'arraylen_gc': 2, 'getarrayitem_gc': 1, 'guard_false': 1, 'guard_true': 1, - 'int_add': 7, - 'int_and': 1, - 'int_floordiv': 1, + 'int_add': 5, 'int_ge': 1, 'int_lt': 1, - 'int_mod': 1, - 'int_mul': 2, - 'int_rshift': 2, - 'int_sub': 1, + 'int_mul': 1, 'jump': 1, 'raw_load': 1, 'raw_store': 1, @@ -576,19 +570,14 @@ assert result == 1.0 self.check_trace_count(1) self.check_simple_loop({ - 'arraylen_gc': 2, 'getarrayitem_gc': 1, 'guard_not_invalidated': 1, 'guard_true': 2, - 'int_add': 7, - 'int_and': 1, - 'int_floordiv': 1, + 'int_add': 5, 'int_gt': 1, 'int_lt': 1, - 'int_mod': 1, - 'int_mul': 2, - 'int_rshift': 2, - 'int_sub': 2, + 'int_mul': 1, + 'int_sub': 1, 'jump': 1, 'raw_load': 1, 'raw_store': 1, From noreply at buildbot.pypy.org Thu Oct 9 05:59:51 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 9 Oct 2014 05:59:51 +0200 (CEST) Subject: [pypy-commit] pypy default: optimize iterator next if array is contiguous Message-ID: <20141009035951.E0D561C346A@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73853:c9577be74d54 Date: 2014-10-08 20:21 -0400 http://bitbucket.org/pypy/pypy/changeset/c9577be74d54/ Log: optimize iterator next if array is contiguous diff --git a/pypy/module/micronumpy/flatiter.py b/pypy/module/micronumpy/flatiter.py --- a/pypy/module/micronumpy/flatiter.py +++ b/pypy/module/micronumpy/flatiter.py @@ -45,6 +45,7 @@ return space.wrap(self.state.index) def descr_coords(self, space): + self.state = self.iter.update(self.state) return space.newtuple([space.wrap(c) for c in self.state.indices]) def descr_getitem(self, space, w_idx): diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -93,7 +93,8 @@ def __init__(self, array, size, shape, strides, backstrides): assert len(shape) == len(strides) == len(backstrides) _update_contiguous_flags(array) - self.contiguous = array.flags & NPY.ARRAY_C_CONTIGUOUS + self.contiguous = (array.flags & NPY.ARRAY_C_CONTIGUOUS and + array.shape == shape and array.strides == strides) self.array = array self.size = size @@ -128,15 +129,18 @@ index = state.index + 1 indices = state.indices offset = state.offset - for 
i in xrange(self.ndim_m1, -1, -1): - idx = indices[i] - if idx < self.shape_m1[i]: - indices[i] = idx + 1 - offset += self.strides[i] - break - else: - indices[i] = 0 - offset -= self.backstrides[i] + if self.contiguous: + offset += self.array.dtype.elsize + else: + for i in xrange(self.ndim_m1, -1, -1): + idx = indices[i] + if idx < self.shape_m1[i]: + indices[i] = idx + 1 + offset += self.strides[i] + break + else: + indices[i] = 0 + offset -= self.backstrides[i] return IterState(self, index, indices, offset) @jit.unroll_safe @@ -151,6 +155,21 @@ current %= self.factors[i] return IterState(self, index, None, offset) + @jit.unroll_safe + def update(self, state): + assert state.iterator is self + if not self.contiguous: + return state + current = state.index + indices = state.indices + for i in xrange(len(self.shape_m1)): + if self.factors[i] != 0: + indices[i] = current / self.factors[i] + current %= self.factors[i] + else: + indices[i] = 0 + return IterState(self, state.index, indices, state.offset) + def done(self, state): assert state.iterator is self return state.index >= self.size diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -229,6 +229,7 @@ dtype=dtype) assert not arr_iter.done(arr_state) w_val = arr_iter.getitem(arr_state).convert_to(space, dtype) + out_state = out_iter.update(out_state) if out_state.indices[axis] == 0: if identity is not None: w_val = func(dtype, identity, w_val) @@ -360,6 +361,7 @@ while not arr_iter.done(arr_state): nonzero_driver.jit_merge_point(shapelen=shapelen, dims=dims, dtype=dtype) if arr_iter.getitem_bool(arr_state): + arr_state = arr_iter.update(arr_state) for d in dims: res_iter.setitem(res_state, box(arr_state.indices[d])) res_state = res_iter.next(res_state) @@ -453,9 +455,10 @@ else: val = val.convert_to(space, dtype) arr_iter.setitem(arr_state, val) - # need to repeat i_nput values until all assignments are done arr_state = arr_iter.goto(arr_state.index + step) val_state = val_iter.next(val_state) + if val_iter.done(val_state): + val_state = val_iter.reset(val_state) length -= 1 fromstring_driver = jit.JitDriver(name = 'numpy_fromstring', diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -313,6 +313,7 @@ # create an iterator for each operand for i in range(len(self.seq)): it = get_iter(space, self.order, self.seq[i], iter_shape, self.dtypes[i]) + it.contiguous = False self.iters.append((it, it.reset())) def set_op_axes(self, space, w_op_axes): diff --git a/pypy/module/micronumpy/test/test_iterators.py b/pypy/module/micronumpy/test/test_iterators.py --- a/pypy/module/micronumpy/test/test_iterators.py +++ b/pypy/module/micronumpy/test/test_iterators.py @@ -3,7 +3,15 @@ class MockArray(object): - start = 0 + flags = 0 + + class dtype: + elsize = 1 + + def __init__(self, shape, strides, start=0): + self.shape = shape + self.strides = strides + self.start = start class TestIterDirect(object): @@ -14,19 +22,24 @@ strides = [5, 1] backstrides = [x * (y - 1) for x,y in zip(strides, shape)] assert backstrides == [10, 4] - i = ArrayIter(MockArray, support.product(shape), shape, + i = ArrayIter(MockArray(shape, strides), support.product(shape), shape, strides, backstrides) + assert i.contiguous s = i.reset() s = i.next(s) s = i.next(s) s = i.next(s) assert s.offset == 3 assert not i.done(s) + assert s.indices == [0,0] + s = i.update(s) assert 
s.indices == [0,3] #cause a dimension overflow s = i.next(s) s = i.next(s) assert s.offset == 5 + assert s.indices == [0,3] + s = i.update(s) assert s.indices == [1,0] #Now what happens if the array is transposed? strides[-1] != 1 @@ -34,8 +47,9 @@ strides = [1, 3] backstrides = [x * (y - 1) for x,y in zip(strides, shape)] assert backstrides == [2, 12] - i = ArrayIter(MockArray, support.product(shape), shape, + i = ArrayIter(MockArray(shape, strides), support.product(shape), shape, strides, backstrides) + assert not i.contiguous s = i.reset() s = i.next(s) s = i.next(s) @@ -54,10 +68,10 @@ strides = [1, 3] backstrides = [x * (y - 1) for x,y in zip(strides, shape)] assert backstrides == [2, 12] - a = MockArray() - a.start = 42 + a = MockArray(shape, strides, 42) i = ArrayIter(a, support.product(shape), shape, strides, backstrides) + assert not i.contiguous s = i.reset() assert s.index == 0 assert s.indices == [0, 0] diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -101,17 +101,17 @@ self.check_trace_count(1) self.check_simple_loop({ 'float_add': 1, - 'getarrayitem_gc': 3, + 'getarrayitem_gc': 1, 'guard_false': 1, 'guard_not_invalidated': 1, - 'guard_true': 3, - 'int_add': 9, + 'guard_true': 1, + 'int_add': 7, 'int_ge': 1, - 'int_lt': 3, + 'int_lt': 1, 'jump': 1, 'raw_load': 2, 'raw_store': 1, - 'setarrayitem_gc': 3, + 'setarrayitem_gc': 1, }) def define_pow(): @@ -130,18 +130,18 @@ 'float_eq': 3, 'float_mul': 2, 'float_ne': 1, - 'getarrayitem_gc': 3, + 'getarrayitem_gc': 1, 'guard_false': 4, 'guard_not_invalidated': 1, - 'guard_true': 5, - 'int_add': 9, + 'guard_true': 3, + 'int_add': 7, 'int_ge': 1, 'int_is_true': 1, - 'int_lt': 3, + 'int_lt': 1, 'jump': 1, 'raw_load': 2, 'raw_store': 1, - 'setarrayitem_gc': 3, + 'setarrayitem_gc': 1, }) def define_pow_int(): @@ -159,17 +159,17 @@ del get_stats().loops[0] # we don't care about it self.check_simple_loop({ 'call': 1, - 'getarrayitem_gc': 3, + 'getarrayitem_gc': 1, 'guard_false': 1, 'guard_not_invalidated': 1, - 'guard_true': 3, - 'int_add': 9, + 'guard_true': 1, + 'int_add': 7, 'int_ge': 1, - 'int_lt': 3, + 'int_lt': 1, 'jump': 1, 'raw_load': 2, 'raw_store': 1, - 'setarrayitem_gc': 3, + 'setarrayitem_gc': 1, }) def define_sum(): @@ -384,17 +384,17 @@ self.check_trace_count(1) self.check_simple_loop({ 'float_add': 1, - 'getarrayitem_gc': 3, + 'getarrayitem_gc': 2, 'guard_false': 1, 'guard_not_invalidated': 1, - 'guard_true': 3, - 'int_add': 9, + 'guard_true': 2, + 'int_add': 8, 'int_ge': 1, - 'int_lt': 3, + 'int_lt': 2, 'jump': 1, 'raw_load': 2, 'raw_store': 1, - 'setarrayitem_gc': 3, + 'setarrayitem_gc': 2, }) def define_take(): @@ -519,17 +519,13 @@ self.check_trace_count(1) self.check_simple_loop({ 'float_add': 1, - 'getarrayitem_gc': 3, 'guard_false': 1, 'guard_not_invalidated': 1, - 'guard_true': 3, - 'int_add': 9, + 'int_add': 6, 'int_ge': 1, - 'int_lt': 3, 'jump': 1, 'raw_load': 2, 'raw_store': 1, - 'setarrayitem_gc': 3, }) def define_flat_getitem(): @@ -544,17 +540,13 @@ assert result == 10.0 self.check_trace_count(1) self.check_simple_loop({ - 'getarrayitem_gc': 1, 'guard_false': 1, - 'guard_true': 1, - 'int_add': 5, + 'int_add': 4, 'int_ge': 1, - 'int_lt': 1, 'int_mul': 1, 'jump': 1, 'raw_load': 1, 'raw_store': 1, - 'setarrayitem_gc': 1, }) def define_flat_setitem(): @@ -570,18 +562,17 @@ assert result == 1.0 self.check_trace_count(1) self.check_simple_loop({ - 'getarrayitem_gc': 1, + 
'guard_false': 1, 'guard_not_invalidated': 1, - 'guard_true': 2, - 'int_add': 5, + 'guard_true': 1, + 'int_add': 4, + 'int_ge': 1, 'int_gt': 1, - 'int_lt': 1, 'int_mul': 1, 'int_sub': 1, 'jump': 1, 'raw_load': 1, 'raw_store': 1, - 'setarrayitem_gc': 1, }) def define_dot(): @@ -609,24 +600,25 @@ self.check_resops({ 'float_add': 2, 'float_mul': 2, - 'getarrayitem_gc': 7, - 'getarrayitem_gc_pure': 15, - 'getfield_gc_pure': 52, + 'getarrayitem_gc': 4, + 'getarrayitem_gc_pure': 9, + 'getfield_gc_pure': 46, 'guard_class': 4, - 'guard_false': 14, + 'guard_false': 12, 'guard_not_invalidated': 2, - 'guard_true': 13, - 'int_add': 25, + 'guard_true': 12, + 'int_add': 18, 'int_ge': 4, - 'int_le': 8, - 'int_lt': 11, - 'int_sub': 4, + 'int_is_true': 3, + 'int_le': 5, + 'int_lt': 8, + 'int_sub': 3, 'jump': 3, 'new_with_vtable': 7, 'raw_load': 6, 'raw_store': 1, 'same_as': 2, - 'setarrayitem_gc': 10, + 'setarrayitem_gc': 7, 'setfield_gc': 22, }) @@ -656,15 +648,12 @@ self.check_trace_count(1) self.check_simple_loop({ 'float_ne': 1, - 'getarrayitem_gc': 4, 'guard_false': 1, 'guard_not_invalidated': 1, - 'guard_true': 5, - 'int_add': 12, + 'guard_true': 1, + 'int_add': 8, 'int_ge': 1, - 'int_lt': 4, 'jump': 1, 'raw_load': 2, 'raw_store': 1, - 'setarrayitem_gc': 4, }) From noreply at buildbot.pypy.org Thu Oct 9 05:59:53 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 9 Oct 2014 05:59:53 +0200 (CEST) Subject: [pypy-commit] pypy default: update test_pypy_c for iterator optimizations Message-ID: <20141009035953.0F4BF1C346A@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73854:09a7faf2a960 Date: 2014-10-08 21:59 -0400 http://bitbucket.org/pypy/pypy/changeset/09a7faf2a960/ Log: update test_pypy_c for iterator optimizations diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -96,29 +96,9 @@ guard_true(i125, descr=...) i126 = int_lt(i117, i50) guard_true(i126, descr=...) - i127 = int_floordiv(i117, i61) - i128 = int_mul(i127, i61) - i129 = int_sub(i117, i128) - i130 = int_rshift(i129, 63) - i131 = int_add(i127, i130) - i132 = int_mul(i131, i71) - i133 = int_add(i55, i132) - i134 = int_mod(i117, i61) - i135 = int_rshift(i134, 63) - i136 = int_and(i61, i135) - i137 = int_add(i134, i136) - i138 = int_floordiv(i137, i80) - i139 = int_mul(i138, i80) - i140 = int_sub(i137, i139) - i141 = int_rshift(i140, 63) - i142 = int_add(i138, i141) - i143 = int_mul(i142, i88) - i144 = int_add(i133, i143) - i145 = int_mod(i137, i80) - i146 = int_rshift(i145, 63) - i147 = int_and(i80, i146) - i148 = int_add(i145, i147) - f149 = raw_load(i100, i144, descr=) + i128 = int_mul(i117, i59) + i129 = int_add(i55, i128) + f149 = raw_load(i100, i129, descr=) p150 = getfield_gc_pure(p123, descr=) i151 = int_add(i117, 1) setarrayitem_gc(p150, 1, 0, descr=) @@ -128,14 +108,12 @@ i155 = int_lt(i154, 0) guard_false(i155, descr=...) p156 = new_with_vtable(...) + setfield_gc(p156, p49, descr=) setfield_gc(p156, i55, descr=) setfield_gc(p156, 0, descr=) setfield_gc(p156, p150, descr=) - setfield_gc(p156, p49, descr=) setfield_gc(p16, p156, descr=) - i157 = arraylen_gc(p60, descr=) - i158 = arraylen_gc(p70, descr=) - jump(p0, p1, p3, p6, p7, p12, p14, p16, i151, f149, p26, i44, i50, i61, i71, i55, i80, i88, i100, p156, p49, p60, p70, descr=...) 
+ jump(p0, p1, p3, p6, p7, p12, p14, p16, i151, f149, p26, i44, i50, i59, i55, i100, p156, p49, descr=...) """) def test_array_flatiter_setitem_single(self): @@ -156,30 +134,10 @@ guard_true(i128, descr=...) i129 = int_lt(i120, i48) guard_true(i129, descr=...) - i130 = int_floordiv(i120, i59) - i131 = int_mul(i130, i59) - i132 = int_sub(i120, i131) - i133 = int_rshift(i132, 63) - i134 = int_add(i130, i133) - i135 = int_mul(i134, i69) - i136 = int_add(i53, i135) - i137 = int_mod(i120, i59) - i138 = int_rshift(i137, 63) - i139 = int_and(i59, i138) - i140 = int_add(i137, i139) - i141 = int_floordiv(i140, i78) - i142 = int_mul(i141, i78) - i143 = int_sub(i140, i142) - i144 = int_rshift(i143, 63) - i145 = int_add(i141, i144) - i146 = int_mul(i145, i86) - i147 = int_add(i136, i146) - i148 = int_mod(i140, i78) - i149 = int_rshift(i148, 63) - i150 = int_and(i78, i149) - i151 = int_add(i148, i150) + i131 = int_mul(i120, i57) + i132 = int_add(i53, i131) guard_not_invalidated(descr=...) - raw_store(i103, i147, 42.000000, descr=) + raw_store(i103, i132, 42.000000, descr=) p152 = getfield_gc_pure(p126, descr=) i153 = int_add(i120, 1) i154 = getfield_raw(ticker_address, descr=) @@ -188,12 +146,10 @@ i157 = int_lt(i154, 0) guard_false(i157, descr=...) p158 = new_with_vtable(...) + setfield_gc(p158, p47, descr=) setfield_gc(p158, i53, descr=) setfield_gc(p158, 0, descr=) setfield_gc(p158, p152, descr=) - setfield_gc(p158, p47, descr=) setfield_gc(p16, p158, descr=) - i159 = arraylen_gc(p58, descr=) - i160 = arraylen_gc(p68, descr=) - jump(p0, p1, p3, p6, p7, p12, p14, p16, i153, i42, i48, i59, i69, i53, i78, i86, p47, i103, p158, p58, p68, descr=...) + jump(p0, p1, p3, p6, p7, p12, p14, p16, i153, i42, i48, i57, i53, p47, i103, p158, descr=...) """) From noreply at buildbot.pypy.org Thu Oct 9 05:59:54 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 9 Oct 2014 05:59:54 +0200 (CEST) Subject: [pypy-commit] pypy default: add track_index flag to iterator, use to save some operations Message-ID: <20141009035954.2A6271C346A@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73855:a61621a9a9fd Date: 2014-10-08 22:20 -0400 http://bitbucket.org/pypy/pypy/changeset/a61621a9a9fd/ Log: add track_index flag to iterator, use to save some operations diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -88,7 +88,10 @@ class ArrayIter(object): _immutable_fields_ = ['contiguous', 'array', 'size', 'ndim_m1', 'shape_m1[*]', - 'strides[*]', 'backstrides[*]', 'factors[*]'] + 'strides[*]', 'backstrides[*]', 'factors[*]', + 'track_index'] + + track_index = True def __init__(self, array, size, shape, strides, backstrides): assert len(shape) == len(strides) == len(backstrides) @@ -126,7 +129,9 @@ @jit.unroll_safe def next(self, state): assert state.iterator is self - index = state.index + 1 + index = state.index + if self.track_index: + index += 1 indices = state.indices offset = state.offset if self.contiguous: @@ -158,6 +163,7 @@ @jit.unroll_safe def update(self, state): assert state.iterator is self + assert self.track_index if not self.contiguous: return state current = state.index @@ -172,6 +178,7 @@ def done(self, state): assert state.iterator is self + assert self.track_index return state.index >= self.size def getitem(self, state): diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ 
b/pypy/module/micronumpy/loop.py @@ -48,6 +48,7 @@ left_iter, left_state = w_lhs.create_iter(shape) right_iter, right_state = w_rhs.create_iter(shape) out_iter, out_state = out.create_iter(shape) + left_iter.track_index = right_iter.track_index = False shapelen = len(shape) while not out_iter.done(out_state): call2_driver.jit_merge_point(shapelen=shapelen, func=func, @@ -182,6 +183,9 @@ iter, state = y_iter, y_state else: iter, state = x_iter, x_state + out_iter.track_index = x_iter.track_index = False + arr_iter.track_index = y_iter.track_index = False + iter.track_index = True shapelen = len(shape) while not iter.done(state): where_driver.jit_merge_point(shapelen=shapelen, dtype=dtype, @@ -299,6 +303,7 @@ assert left_shape[-1] == right_shape[right_critical_dim] assert result.get_dtype() == dtype outi, outs = result.create_iter() + outi.track_index = False lefti = AllButAxisIter(left_impl, len(left_shape) - 1) righti = AllButAxisIter(right_impl, right_critical_dim) lefts = lefti.reset() diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -105,7 +105,7 @@ 'guard_false': 1, 'guard_not_invalidated': 1, 'guard_true': 1, - 'int_add': 7, + 'int_add': 5, 'int_ge': 1, 'int_lt': 1, 'jump': 1, @@ -134,7 +134,7 @@ 'guard_false': 4, 'guard_not_invalidated': 1, 'guard_true': 3, - 'int_add': 7, + 'int_add': 5, 'int_ge': 1, 'int_is_true': 1, 'int_lt': 1, @@ -163,7 +163,7 @@ 'guard_false': 1, 'guard_not_invalidated': 1, 'guard_true': 1, - 'int_add': 7, + 'int_add': 5, 'int_ge': 1, 'int_lt': 1, 'jump': 1, @@ -388,7 +388,7 @@ 'guard_false': 1, 'guard_not_invalidated': 1, 'guard_true': 2, - 'int_add': 8, + 'int_add': 6, 'int_ge': 1, 'int_lt': 2, 'jump': 1, @@ -521,7 +521,7 @@ 'float_add': 1, 'guard_false': 1, 'guard_not_invalidated': 1, - 'int_add': 6, + 'int_add': 4, 'int_ge': 1, 'jump': 1, 'raw_load': 2, @@ -602,12 +602,12 @@ 'float_mul': 2, 'getarrayitem_gc': 4, 'getarrayitem_gc_pure': 9, - 'getfield_gc_pure': 46, + 'getfield_gc_pure': 49, 'guard_class': 4, - 'guard_false': 12, + 'guard_false': 13, 'guard_not_invalidated': 2, - 'guard_true': 12, - 'int_add': 18, + 'guard_true': 14, + 'int_add': 17, 'int_ge': 4, 'int_is_true': 3, 'int_le': 5, @@ -651,7 +651,7 @@ 'guard_false': 1, 'guard_not_invalidated': 1, 'guard_true': 1, - 'int_add': 8, + 'int_add': 5, 'int_ge': 1, 'jump': 1, 'raw_load': 2, From noreply at buildbot.pypy.org Thu Oct 9 05:59:55 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 9 Oct 2014 05:59:55 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20141009035955.ED4DB1C346A@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73856:256a62fffbed Date: 2014-10-08 23:59 -0400 http://bitbucket.org/pypy/pypy/changeset/256a62fffbed/ Log: merge heads diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -12,3 +12,6 @@ Allocate by 4-byte chunks in rffi_platform, Skip testing objdump if it does not exist, and other small adjustments in own tests + +.. branch: rtyper-stuff +Small internal refactorings in the rtyper. 
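Stepping back from the raw diffs for a moment: the flat-index-to-offset arithmetic that ArrayIter.goto() uses in the micronumpy changes above (per-axis factors precomputed in the constructor, plus the r73852 fast path for C-contiguous arrays) can be restated as a small plain-Python sketch. The names shape, strides, start, elsize and contiguous below are simplified stand-ins for the corresponding array/iterator attributes; this is only an illustration of the arithmetic, not the RPython implementation itself.

# Plain-Python sketch of the ArrayIter.goto() arithmetic, for illustration only.
def compute_factors(shape):
    # factors[i] = number of array elements spanned by one step along axis i
    ndim = len(shape)
    factors = [0] * ndim
    for i in xrange(ndim):
        if i == 0:
            factors[ndim - 1] = 1
        else:
            factors[ndim - i - 1] = factors[ndim - i] * shape[ndim - i]
    return factors

def goto_offset(index, start, shape, strides, elsize, contiguous):
    # A C-contiguous array needs only one multiplication; otherwise the
    # flat index is peeled off one axis at a time using the factors.
    if contiguous:
        return start + index * elsize
    offset = start
    factors = compute_factors(shape)
    for i in xrange(len(shape)):
        offset += (index // factors[i]) * strides[i]
        index %= factors[i]
    return offset

# Same numbers as test_iterator_goto above: shape [3, 5], strides [1, 3], start 42.
assert goto_offset(11, 42, [3, 5], [1, 3], 1, False) == 42 + 5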
diff --git a/pypy/module/pypyjit/__init__.py b/pypy/module/pypyjit/__init__.py --- a/pypy/module/pypyjit/__init__.py +++ b/pypy/module/pypyjit/__init__.py @@ -7,6 +7,7 @@ interpleveldefs = { 'set_param': 'interp_jit.set_param', 'residual_call': 'interp_jit.residual_call', + 'not_from_assembler': 'interp_jit.W_NotFromAssembler', 'set_compile_hook': 'interp_resop.set_compile_hook', 'set_optimize_hook': 'interp_resop.set_optimize_hook', 'set_abort_hook': 'interp_resop.set_abort_hook', diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -12,6 +12,9 @@ from pypy.interpreter.pycode import CO_GENERATOR from pypy.interpreter.pyframe import PyFrame from pypy.interpreter.pyopcode import ExitFrame, Yield +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.typedef import TypeDef +from pypy.interpreter.gateway import interp2app from opcode import opmap @@ -144,3 +147,40 @@ '''For testing. Invokes callable(...), but without letting the JIT follow the call.''' return space.call_args(w_callable, __args__) + + +class W_NotFromAssembler(W_Root): + def __init__(self, space, w_callable): + self.space = space + self.w_callable = w_callable + def descr_call(self, __args__): + _call_not_in_trace(self.space, self.w_callable, __args__) + return self + + at jit.not_in_trace +def _call_not_in_trace(space, w_callable, __args__): + # this _call_not_in_trace() must return None + space.call_args(w_callable, __args__) + +def not_from_assembler_new(space, w_subtype, w_callable): + return W_NotFromAssembler(space, w_callable) + +W_NotFromAssembler.typedef = TypeDef("not_from_assembler", + __doc__ = """\ +A decorator that returns a callable that invokes the original +callable, but not from the JIT-produced assembler. It is called +from the interpreted mode, and from the JIT creation (pyjitpl) or +exiting (blackhole) steps, but just not from the final assembler. + +Note that the return value of the callable is ignored, because +there is no reasonable way to guess what it sound be in case the +function is not called. + +This is meant to be used notably in sys.settrace() for coverage- +like tools. For that purpose, if g = not_from_assembler(f), then +'g(*args)' may call 'f(*args)' but it always return g itself. 
+""", + __new__ = interp2app(not_from_assembler_new), + __call__ = interp2app(W_NotFromAssembler.descr_call), +) +W_NotFromAssembler.typedef.acceptable_as_base_class = False diff --git a/pypy/module/pypyjit/test/test_jit_not_in_trace.py b/pypy/module/pypyjit/test/test_jit_not_in_trace.py new file mode 100644 --- /dev/null +++ b/pypy/module/pypyjit/test/test_jit_not_in_trace.py @@ -0,0 +1,19 @@ + +class AppTestJitNotInTrace(object): + spaceconfig = dict(usemodules=('pypyjit',)) + + def test_not_from_assembler(self): + import pypyjit + @pypyjit.not_from_assembler + def f(x, y): + return 42 + r = f(3, 4) + assert r is f + + def test_not_from_assembler_exception(self): + import pypyjit + @pypyjit.not_from_assembler + def f(x, y): + raise ValueError(y, x) + e = raises(ValueError, f, 3, 4) + assert e.value.args == (4, 3) diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -25,6 +25,7 @@ OS_THREADLOCALREF_GET = 5 # llop.threadlocalref_get OS_GET_ERRNO = 6 # rposix.get_errno OS_SET_ERRNO = 7 # rposix.set_errno + OS_NOT_IN_TRACE = 8 # for calls not recorded in the jit trace # OS_STR_CONCAT = 22 # "stroruni.concat" OS_STR_SLICE = 23 # "stroruni.slice" @@ -96,6 +97,7 @@ _OS_CANRAISE = set([ OS_NONE, OS_STR2UNICODE, OS_LIBFFI_CALL, OS_RAW_MALLOC_VARSIZE_CHAR, OS_JIT_FORCE_VIRTUAL, OS_SHRINK_ARRAY, OS_DICT_LOOKUP, + OS_NOT_IN_TRACE, ]) def __new__(cls, readonly_descrs_fields, readonly_descrs_arrays, diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1562,7 +1562,17 @@ kind = getkind(args[0].concretetype) return SpaceOperation('%s_isvirtual' % kind, args, op.result) elif oopspec_name == 'jit.force_virtual': - return self._handle_oopspec_call(op, args, EffectInfo.OS_JIT_FORCE_VIRTUAL, EffectInfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE) + return self._handle_oopspec_call(op, args, + EffectInfo.OS_JIT_FORCE_VIRTUAL, + EffectInfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE) + elif oopspec_name == 'jit.not_in_trace': + # ignore 'args' and use the original 'op.args' + if op.result.concretetype is not lltype.Void: + raise Exception( + "%r: jit.not_in_trace() function must return None" + % (op.args[0],)) + return self._handle_oopspec_call(op, op.args[1:], + EffectInfo.OS_NOT_IN_TRACE) else: raise AssertionError("missing support for %r" % oopspec_name) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1393,6 +1393,9 @@ # allboxes = self._build_allboxes(funcbox, argboxes, descr) effectinfo = descr.get_extra_info() + if effectinfo.oopspecindex == effectinfo.OS_NOT_IN_TRACE: + return self.metainterp.do_not_in_trace_call(allboxes, descr) + if (assembler_call or effectinfo.check_forces_virtual_or_virtualizable()): # residual calls require attention to keep virtualizables in-sync @@ -2830,6 +2833,19 @@ if not we_are_translated(): # for llgraph descr._original_func_ = op.getarg(0).value + def do_not_in_trace_call(self, allboxes, descr): + self.clear_exception() + resbox = executor.execute_varargs(self.cpu, self, rop.CALL, + allboxes, descr) + assert resbox is None + if self.last_exc_value_box is not None: + # cannot trace this! 
it raises, so we have to follow the + # exception-catching path, but the trace doesn't contain + # the call at all + raise SwitchToBlackhole(Counters.ABORT_ESCAPE, + raising_exception=True) + return None + # ____________________________________________________________ class ChangeFrame(jitexc.JitException): diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -4044,3 +4044,70 @@ res = self.interp_operations(f, [17]) assert res == 42 self.check_operations_history(guard_true=1, guard_false=0) + + def test_not_in_trace(self): + class X: + n = 0 + def g(x): + if we_are_jitted(): + raise NotImplementedError + x.n += 1 + g.oopspec = 'jit.not_in_trace()' + + jitdriver = JitDriver(greens=[], reds=['n', 'token', 'x']) + def f(n): + token = 0 + x = X() + while n >= 0: + jitdriver.jit_merge_point(n=n, x=x, token=token) + if not we_are_jitted(): + token += 1 + g(x) + n -= 1 + return x.n + token * 1000 + + res = self.meta_interp(f, [10]) + assert res == 2003 # two runs before jitting; then one tracing run + self.check_resops(int_add=0, call=0, call_may_force=0) + + def test_not_in_trace_exception(self): + def g(): + if we_are_jitted(): + raise NotImplementedError + raise ValueError + g.oopspec = 'jit.not_in_trace()' + + jitdriver = JitDriver(greens=[], reds=['n']) + def f(n): + while n >= 0: + jitdriver.jit_merge_point(n=n) + try: + g() + except ValueError: + n -= 1 + return 42 + + res = self.meta_interp(f, [10]) + assert res == 42 + self.check_aborted_count(3) + + def test_not_in_trace_blackhole(self): + class X: + seen = 0 + def g(x): + if we_are_jitted(): + raise NotImplementedError + x.seen = 42 + g.oopspec = 'jit.not_in_trace()' + + jitdriver = JitDriver(greens=[], reds=['n']) + def f(n): + while n >= 0: + jitdriver.jit_merge_point(n=n) + n -= 1 + x = X() + g(x) + return x.seen + + res = self.meta_interp(f, [10]) + assert res == 42 diff --git a/rpython/rlib/_stacklet_n_a.py b/rpython/rlib/_stacklet_n_a.py --- a/rpython/rlib/_stacklet_n_a.py +++ b/rpython/rlib/_stacklet_n_a.py @@ -1,33 +1,35 @@ from rpython.rlib import _rffi_stacklet as _c -from rpython.rlib import objectmodel, debug +from rpython.rlib import debug +from rpython.rlib.objectmodel import we_are_translated, specialize from rpython.rtyper.annlowlevel import llhelper -from rpython.tool.staticmethods import StaticMethods -class StackletGcRootFinder: - __metaclass__ = StaticMethods - +class StackletGcRootFinder(object): + @staticmethod + @specialize.arg(1) def new(thrd, callback, arg): h = _c.new(thrd._thrd, llhelper(_c.run_fn, callback), arg) if not h: raise MemoryError return h - new._annspecialcase_ = 'specialize:arg(1)' + @staticmethod def switch(h): h = _c.switch(h) if not h: raise MemoryError return h + @staticmethod def destroy(thrd, h): _c.destroy(thrd._thrd, h) - if objectmodel.we_are_translated(): + if we_are_translated(): debug.debug_print("not using a framework GC: " "stacklet_destroy() may leak") - is_empty_handle = _c.is_empty_handle + is_empty_handle = staticmethod(_c.is_empty_handle) + @staticmethod def get_null_handle(): return _c.null_handle diff --git a/rpython/rlib/_stacklet_shadowstack.py b/rpython/rlib/_stacklet_shadowstack.py --- a/rpython/rlib/_stacklet_shadowstack.py +++ b/rpython/rlib/_stacklet_shadowstack.py @@ -3,7 +3,6 @@ from rpython.rtyper.annlowlevel import llhelper from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.rtyper.lltypesystem.lloperation 
import llop -from rpython.tool.staticmethods import StaticMethods NULL_SUSPSTACK = lltype.nullptr(llmemory.GCREF.TO) @@ -68,9 +67,7 @@ return oldsuspstack -class StackletGcRootFinder: - __metaclass__ = StaticMethods - +class StackletGcRootFinder(object): def new(thrd, callback, arg): gcrootfinder.callback = callback thread_handle = thrd._thrd @@ -78,6 +75,7 @@ h = _c.new(thread_handle, llhelper(_c.run_fn, _new_callback), arg) return get_result_suspstack(h) new._dont_inline_ = True + new = staticmethod(new) def switch(suspstack): # suspstack has a handle to target, i.e. where to switch to @@ -90,10 +88,13 @@ h = _c.switch(h) return get_result_suspstack(h) switch._dont_inline_ = True + switch = staticmethod(switch) + @staticmethod def is_empty_handle(suspstack): return not suspstack + @staticmethod def get_null_handle(): return NULL_SUSPSTACK diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -194,6 +194,14 @@ return func return decorator +def not_in_trace(func): + """A decorator for a function with no return value. It makes the + function call disappear from the jit traces. It is still called in + interpreted mode, and by the jit tracing and blackholing, but not + by the final assembler.""" + func.oopspec = "jit.not_in_trace()" # note that 'func' may take arguments + return func + @oopspec("jit.isconstant(value)") def isconstant(value): """ diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py --- a/rpython/rtyper/lltypesystem/lltype.py +++ b/rpython/rtyper/lltypesystem/lltype.py @@ -191,6 +191,11 @@ def _is_varsize(self): return False + def _contains_value(self, value): + if self is Void: + return True + return isCompatibleType(typeOf(value), self) + NFOUND = object() class ContainerType(LowLevelType): diff --git a/rpython/rtyper/lltypesystem/rbytearray.py b/rpython/rtyper/lltypesystem/rbytearray.py --- a/rpython/rtyper/lltypesystem/rbytearray.py +++ b/rpython/rtyper/lltypesystem/rbytearray.py @@ -38,12 +38,14 @@ i += s.length() cls.ll_strsetitem_nonneg(s, i, item) + @staticmethod def ll_strsetitem_nonneg(s, i, item): chars = s.chars ll_assert(i >= 0, "negative str getitem index") ll_assert(i < len(chars), "str getitem index out of bound") chars[i] = chr(item) + @staticmethod def ll_stritem_nonneg(s, i): return ord(rstr.LLHelpers.ll_stritem_nonneg(s, i)) diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -270,6 +270,7 @@ class LLHelpers(AbstractLLHelpers): from rpython.rtyper.annlowlevel import llstr, llunicode + @staticmethod @jit.elidable def ll_str_mul(s, times): if times < 0: @@ -292,6 +293,7 @@ i += j return newstr + @staticmethod @jit.elidable def ll_char_mul(ch, times): if typeOf(ch) is Char: @@ -308,9 +310,11 @@ j += 1 return newstr + @staticmethod def ll_strlen(s): return len(s.chars) + @staticmethod @signature(types.any(), types.int(), returns=types.any()) def ll_stritem_nonneg(s, i): chars = s.chars @@ -318,6 +322,7 @@ ll_assert(i < len(chars), "str getitem index out of bound") return chars[i] + @staticmethod def ll_chr2str(ch): if typeOf(ch) is Char: malloc = mallocstr @@ -328,6 +333,7 @@ return s # @jit.look_inside_iff(lambda str: jit.isconstant(len(str.chars)) and len(str.chars) == 1) + @staticmethod @jit.oopspec("str.str2unicode(str)") def ll_str2unicode(str): lgt = len(str.chars) @@ -338,6 +344,7 @@ s.chars[i] = cast_primitive(UniChar, str.chars[i]) return s 
+ @staticmethod def ll_str2bytearray(str): from rpython.rtyper.lltypesystem.rbytearray import BYTEARRAY @@ -347,6 +354,7 @@ b.chars[i] = str.chars[i] return b + @staticmethod @jit.elidable def ll_strhash(s): # unlike CPython, there is no reason to avoid to return -1 @@ -362,13 +370,17 @@ s.hash = x return x + @staticmethod def ll_length(s): return len(s.chars) + @staticmethod def ll_strfasthash(s): return s.hash # assumes that the hash is already computed + @staticmethod @jit.elidable + @jit.oopspec('stroruni.concat(s1, s2)') def ll_strconcat(s1, s2): len1 = s1.length() len2 = s2.length() @@ -386,8 +398,8 @@ else: newstr.copy_contents(s2, newstr, 0, len1, len2) return newstr - ll_strconcat.oopspec = 'stroruni.concat(s1, s2)' + @staticmethod @jit.elidable def ll_strip(s, ch, left, right): s_len = len(s.chars) @@ -408,6 +420,7 @@ s.copy_contents(s, result, lpos, 0, r_len) return result + @staticmethod @jit.elidable def ll_strip_default(s, left, right): s_len = len(s.chars) @@ -428,6 +441,7 @@ s.copy_contents(s, result, lpos, 0, r_len) return result + @staticmethod @jit.elidable def ll_strip_multiple(s, s2, left, right): s_len = len(s.chars) @@ -448,6 +462,7 @@ s.copy_contents(s, result, lpos, 0, r_len) return result + @staticmethod @jit.elidable def ll_upper(s): s_chars = s.chars @@ -462,6 +477,7 @@ i += 1 return result + @staticmethod @jit.elidable def ll_lower(s): s_chars = s.chars @@ -476,6 +492,7 @@ i += 1 return result + @staticmethod def ll_join(s, length, items): s_chars = s.chars s_len = len(s_chars) @@ -509,7 +526,9 @@ i += 1 return result + @staticmethod @jit.elidable + @jit.oopspec('stroruni.cmp(s1, s2)') def ll_strcmp(s1, s2): if not s1 and not s2: return True @@ -531,9 +550,10 @@ return diff i += 1 return len1 - len2 - ll_strcmp.oopspec = 'stroruni.cmp(s1, s2)' + @staticmethod @jit.elidable + @jit.oopspec('stroruni.equal(s1, s2)') def ll_streq(s1, s2): if s1 == s2: # also if both are NULLs return True @@ -551,8 +571,8 @@ return False j += 1 return True - ll_streq.oopspec = 'stroruni.equal(s1, s2)' + @staticmethod @jit.elidable def ll_startswith(s1, s2): len1 = len(s1.chars) @@ -569,11 +589,13 @@ return True + @staticmethod def ll_startswith_char(s, ch): if not len(s.chars): return False return s.chars[0] == ch + @staticmethod @jit.elidable def ll_endswith(s1, s2): len1 = len(s1.chars) @@ -591,11 +613,13 @@ return True + @staticmethod def ll_endswith_char(s, ch): if not len(s.chars): return False return s.chars[len(s.chars) - 1] == ch + @staticmethod @jit.elidable @signature(types.any(), types.any(), types.int(), types.int(), returns=types.int()) def ll_find_char(s, ch, start, end): @@ -608,6 +632,7 @@ i += 1 return -1 + @staticmethod @jit.elidable def ll_rfind_char(s, ch, start, end): if end > len(s.chars): @@ -619,6 +644,7 @@ return i return -1 + @staticmethod @jit.elidable def ll_count_char(s, ch, start, end): count = 0 @@ -631,6 +657,7 @@ i += 1 return count + @staticmethod @signature(types.any(), types.any(), types.int(), types.int(), returns=types.int()) def ll_find(s1, s2, start, end): if start < 0: @@ -646,6 +673,7 @@ return LLHelpers.ll_search(s1, s2, start, end, FAST_FIND) + @staticmethod @signature(types.any(), types.any(), types.int(), types.int(), returns=types.int()) def ll_rfind(s1, s2, start, end): if start < 0: @@ -681,6 +709,7 @@ res = 0 return res + @staticmethod @jit.elidable def ll_search(s1, s2, start, end, mode): count = 0 @@ -768,6 +797,7 @@ return -1 return count + @staticmethod @signature(types.int(), types.any(), returns=types.any()) 
@jit.look_inside_iff(lambda length, items: jit.loop_unrolling_heuristic( items, length)) @@ -802,6 +832,7 @@ i += 1 return result + @staticmethod @jit.look_inside_iff(lambda length, chars, RES: jit.isconstant(length) and jit.isvirtual(chars)) def ll_join_chars(length, chars, RES): # no need to optimize this, will be replaced by string builder @@ -821,6 +852,7 @@ i += 1 return result + @staticmethod @jit.oopspec('stroruni.slice(s1, start, stop)') @signature(types.any(), types.int(), types.int(), returns=types.any()) @jit.elidable @@ -836,9 +868,11 @@ s1.copy_contents(s1, newstr, start, 0, lgt) return newstr + @staticmethod def ll_stringslice_startonly(s1, start): return LLHelpers._ll_stringslice(s1, start, len(s1.chars)) + @staticmethod @signature(types.any(), types.int(), types.int(), returns=types.any()) def ll_stringslice_startstop(s1, start, stop): if jit.we_are_jitted(): @@ -851,10 +885,12 @@ stop = len(s1.chars) return LLHelpers._ll_stringslice(s1, start, stop) + @staticmethod def ll_stringslice_minusone(s1): newlen = len(s1.chars) - 1 return LLHelpers._ll_stringslice(s1, 0, newlen) + @staticmethod def ll_split_chr(LIST, s, c, max): chars = s.chars strlen = len(chars) @@ -889,6 +925,7 @@ item.copy_contents(s, item, i, 0, j - i) return res + @staticmethod def ll_split(LIST, s, c, max): count = 1 if max == -1: @@ -920,6 +957,7 @@ item.copy_contents(s, item, prev_pos, 0, last - prev_pos) return res + @staticmethod def ll_rsplit_chr(LIST, s, c, max): chars = s.chars strlen = len(chars) @@ -955,6 +993,7 @@ item.copy_contents(s, item, j, 0, i - j) return res + @staticmethod def ll_rsplit(LIST, s, c, max): count = 1 if max == -1: @@ -986,6 +1025,7 @@ item.copy_contents(s, item, 0, 0, prev_pos) return res + @staticmethod @jit.elidable def ll_replace_chr_chr(s, c1, c2): length = len(s.chars) @@ -1001,6 +1041,7 @@ j += 1 return newstr + @staticmethod @jit.elidable def ll_contains(s, c): chars = s.chars @@ -1012,6 +1053,7 @@ i += 1 return False + @staticmethod @jit.elidable def ll_int(s, base): if not 2 <= base <= 36: @@ -1068,23 +1110,29 @@ # ll_build_push(x, next_string, n-1) # s = ll_build_finish(x) + @staticmethod def ll_build_start(parts_count): return malloc(TEMP, parts_count) + @staticmethod def ll_build_push(builder, next_string, index): builder[index] = next_string + @staticmethod def ll_build_finish(builder): return LLHelpers.ll_join_strs(len(builder), builder) + @staticmethod @specialize.memo() def ll_constant(s): return string_repr.convert_const(s) + @staticmethod @specialize.memo() def ll_constant_unicode(s): return unicode_repr.convert_const(s) + @classmethod def do_stringformat(cls, hop, sourcevarsrepr): s_str = hop.args_s[0] assert s_str.is_constant() @@ -1150,8 +1198,8 @@ hop.exception_cannot_occur() # to ignore the ZeroDivisionError of '%' return hop.gendirectcall(cls.ll_join_strs, size, vtemp) - do_stringformat = classmethod(do_stringformat) + @staticmethod @jit.dont_look_inside def ll_string2list(RESLIST, src): length = len(src.chars) diff --git a/rpython/rtyper/rmodel.py b/rpython/rtyper/rmodel.py --- a/rpython/rtyper/rmodel.py +++ b/rpython/rtyper/rmodel.py @@ -2,8 +2,7 @@ from rpython.flowspace.model import Constant from rpython.rtyper.error import TyperError, MissingRTypeOperation from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.lltypesystem.lltype import (Void, Bool, typeOf, - LowLevelType, isCompatibleType) +from rpython.rtyper.lltypesystem.lltype import Void, Bool, LowLevelType from rpython.tool.pairtype import pairtype, extendabletype, pair @@ 
-120,14 +119,9 @@ def convert_const(self, value): "Convert the given constant value to the low-level repr of 'self'." - if self.lowleveltype is not Void: - try: - realtype = typeOf(value) - except (AssertionError, AttributeError, TypeError): - realtype = '???' - if realtype != self.lowleveltype: - raise TyperError("convert_const(self = %r, value = %r)" % ( - self, value)) + if not self.lowleveltype._contains_value(value): + raise TyperError("convert_const(self = %r, value = %r)" % ( + self, value)) return value def get_ll_eq_function(self): @@ -356,18 +350,9 @@ lltype = reqtype else: raise TypeError(repr(reqtype)) - # Void Constants can hold any value; - # non-Void Constants must hold a correctly ll-typed value - if lltype is not Void: - try: - realtype = typeOf(value) - except (AssertionError, AttributeError): - realtype = '???' - if not isCompatibleType(realtype, lltype): - raise TyperError("inputconst(reqtype = %s, value = %s):\n" - "expected a %r,\n" - " got a %r" % (reqtype, value, - lltype, realtype)) + if not lltype._contains_value(value): + raise TyperError("inputconst(): expected a %r, got %r" % + (lltype, value)) c = Constant(value) c.concretetype = lltype return c @@ -422,7 +407,8 @@ def __ne__(self, other): return not (self == other) - def build_ll_dummy_value(self): + @property + def ll_dummy_value(self): TYPE = self.TYPE try: return self.rtyper.cache_dummy_values[TYPE] @@ -435,8 +421,6 @@ self.rtyper.cache_dummy_values[TYPE] = p return p - ll_dummy_value = property(build_ll_dummy_value) - # logging/warning diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -9,7 +9,6 @@ from rpython.rtyper.rfloat import FloatRepr from rpython.tool.pairtype import pairtype, pair from rpython.tool.sourcetools import func_with_new_name -from rpython.tool.staticmethods import StaticMethods from rpython.rlib.rstring import UnicodeBuilder @@ -800,10 +799,8 @@ # get flowed and annotated, mostly with SomePtr. 
# -# this class contains low level helpers used both by lltypesystem -class AbstractLLHelpers: - __metaclass__ = StaticMethods - +class AbstractLLHelpers(object): + @staticmethod def ll_isdigit(s): from rpython.rtyper.annlowlevel import hlstr @@ -815,6 +812,7 @@ return False return True + @staticmethod def ll_isalpha(s): from rpython.rtyper.annlowlevel import hlstr @@ -826,6 +824,7 @@ return False return True + @staticmethod def ll_isalnum(s): from rpython.rtyper.annlowlevel import hlstr @@ -837,14 +836,17 @@ return False return True + @staticmethod def ll_char_isspace(ch): c = ord(ch) return c == 32 or (9 <= c <= 13) # c in (9, 10, 11, 12, 13, 32) + @staticmethod def ll_char_isdigit(ch): c = ord(ch) return c <= 57 and c >= 48 + @staticmethod def ll_char_isalpha(ch): c = ord(ch) if c >= 97: @@ -852,6 +854,7 @@ else: return 65 <= c <= 90 + @staticmethod def ll_char_isalnum(ch): c = ord(ch) if c >= 65: @@ -862,47 +865,54 @@ else: return 48 <= c <= 57 + @staticmethod def ll_char_isupper(ch): c = ord(ch) return 65 <= c <= 90 + @staticmethod def ll_char_islower(ch): c = ord(ch) return 97 <= c <= 122 + @staticmethod def ll_upper_char(ch): if 'a' <= ch <= 'z': ch = chr(ord(ch) - 32) return ch + @staticmethod def ll_lower_char(ch): if 'A' <= ch <= 'Z': ch = chr(ord(ch) + 32) return ch + @staticmethod def ll_char_hash(ch): return ord(ch) + @staticmethod def ll_unichar_hash(ch): return ord(ch) + @classmethod def ll_str_is_true(cls, s): # check if a string is True, allowing for None return bool(s) and cls.ll_strlen(s) != 0 - ll_str_is_true = classmethod(ll_str_is_true) + @classmethod def ll_stritem_nonneg_checked(cls, s, i): if i >= cls.ll_strlen(s): raise IndexError return cls.ll_stritem_nonneg(s, i) - ll_stritem_nonneg_checked = classmethod(ll_stritem_nonneg_checked) + @classmethod def ll_stritem(cls, s, i): if i < 0: i += cls.ll_strlen(s) return cls.ll_stritem_nonneg(s, i) - ll_stritem = classmethod(ll_stritem) + @classmethod def ll_stritem_checked(cls, s, i): length = cls.ll_strlen(s) if i < 0: @@ -910,8 +920,8 @@ if i >= length or i < 0: raise IndexError return cls.ll_stritem_nonneg(s, i) - ll_stritem_checked = classmethod(ll_stritem_checked) + @staticmethod def parse_fmt_string(fmt): # we support x, d, s, f, [r] it = iter(fmt) @@ -937,6 +947,7 @@ r.append(curstr) return r + @staticmethod def ll_float(ll_str): from rpython.rtyper.annlowlevel import hlstr from rpython.rlib.rfloat import rstring_to_float @@ -961,6 +972,7 @@ assert end >= 0 return rstring_to_float(s[beg:end + 1]) + @classmethod def ll_splitlines(cls, LIST, ll_str, keep_newlines): from rpython.rtyper.annlowlevel import hlstr s = hlstr(ll_str) @@ -991,4 +1003,3 @@ item = cls.ll_stringslice_startstop(ll_str, j, strlen) res.ll_setitem_fast(list_length, item) return res - ll_splitlines = classmethod(ll_splitlines) diff --git a/rpython/tool/staticmethods.py b/rpython/tool/staticmethods.py deleted file mode 100644 --- a/rpython/tool/staticmethods.py +++ /dev/null @@ -1,14 +0,0 @@ -import types -class AbstractMethods(type): - def __new__(cls, cls_name, bases, cls_dict): - for key, value in cls_dict.iteritems(): - if isinstance(value, types.FunctionType): - cls_dict[key] = cls.decorator(value) - return type.__new__(cls, cls_name, bases, cls_dict) - - -class StaticMethods(AbstractMethods): - """ - Metaclass that turns plain methods into staticmethods. 
- """ - decorator = staticmethod From noreply at buildbot.pypy.org Thu Oct 9 07:01:31 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 9 Oct 2014 07:01:31 +0200 (CEST) Subject: [pypy-commit] pypy default: don't track index on one of the call1 iterators, test Message-ID: <20141009050132.018721C0312@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73857:1a0e91f3f025 Date: 2014-10-09 01:01 -0400 http://bitbucket.org/pypy/pypy/changeset/1a0e91f3f025/ Log: don't track index on one of the call1 iterators, test diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -72,6 +72,7 @@ out = W_NDimArray.from_shape(space, shape, res_dtype, w_instance=w_obj) obj_iter, obj_state = w_obj.create_iter(shape) out_iter, out_state = out.create_iter(shape) + obj_iter.track_index = False shapelen = len(shape) while not out_iter.done(out_state): call1_driver.jit_merge_point(shapelen=shapelen, func=func, diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -329,20 +329,23 @@ def define_ufunc(): return """ a = |30| - b = a + a - c = unegative(b) - c -> 3 + b = unegative(a) + b -> 3 """ def test_ufunc(self): result = self.run("ufunc") - assert result == -6 - py.test.skip("don't run for now") - self.check_simple_loop({"raw_load": 2, "float_add": 1, - "float_neg": 1, - "raw_store": 1, "int_add": 1, - "int_ge": 1, "guard_false": 1, "jump": 1, - 'arraylen_gc': 1}) + assert result == -3 + self.check_simple_loop({ + 'float_neg': 1, + 'guard_not_invalidated': 1, + 'int_add': 3, + 'int_ge': 1, + 'guard_false': 1, + 'jump': 1, + 'raw_load': 1, + 'raw_store': 1, + }) def define_specialization(): return """ From noreply at buildbot.pypy.org Thu Oct 9 09:03:52 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 9 Oct 2014 09:03:52 +0200 (CEST) Subject: [pypy-commit] pypy default: fix argument order to ufunc reduce Message-ID: <20141009070352.B9F4A1C14FD@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73858:ac6033fa1b3c Date: 2014-10-09 01:15 -0400 http://bitbucket.org/pypy/pypy/changeset/ac6033fa1b3c/ Log: fix argument order to ufunc reduce diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -812,6 +812,7 @@ b = add.reduce(a, 0, keepdims=True) assert b.shape == (1, 4) assert (add.reduce(a, 0, keepdims=True) == [12, 15, 18, 21]).all() + assert (add.reduce(a, 0, None, None, True) == [12, 15, 18, 21]).all() def test_bitwise(self): from numpypy import bitwise_and, bitwise_or, bitwise_xor, arange, array diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -102,9 +102,9 @@ return self.reduce(space, w_obj, w_axis, True, #keepdims must be true out, w_dtype, cumulative=True) - @unwrap_spec(skipna=bool, keepdims=bool) + @unwrap_spec(keepdims=bool) def descr_reduce(self, space, w_obj, w_axis=None, w_dtype=None, - skipna=False, keepdims=False, w_out=None): + w_out=None, keepdims=False): """reduce(...) 
reduce(a, axis=0) From noreply at buildbot.pypy.org Thu Oct 9 09:03:54 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 9 Oct 2014 09:03:54 +0200 (CEST) Subject: [pypy-commit] pypy default: implement searchside_converter Message-ID: <20141009070354.113BC1C14FD@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73859:cb9125b5bf31 Date: 2014-10-09 01:41 -0400 http://bitbucket.org/pypy/pypy/changeset/cb9125b5bf31/ Log: implement searchside_converter diff --git a/pypy/module/micronumpy/constants.py b/pypy/module/micronumpy/constants.py --- a/pypy/module/micronumpy/constants.py +++ b/pypy/module/micronumpy/constants.py @@ -65,6 +65,9 @@ FLOATINGLTR = 'f' COMPLEXLTR = 'c' +SEARCHLEFT = 0 +SEARCHRIGHT = 1 + ANYORDER = -1 CORDER = 0 FORTRANORDER = 1 diff --git a/pypy/module/micronumpy/converters.py b/pypy/module/micronumpy/converters.py --- a/pypy/module/micronumpy/converters.py +++ b/pypy/module/micronumpy/converters.py @@ -1,4 +1,4 @@ -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt from pypy.module.micronumpy import constants as NPY @@ -41,6 +41,23 @@ space.wrap("clipmode not understood")) +def searchside_converter(space, w_obj): + try: + s = space.str_w(w_obj) + except OperationError: + s = None + if not s: + raise oefmt(space.w_ValueError, + "expected nonempty string for keyword 'side'") + if s[0] == 'l' or s[0] == 'L': + return NPY.SEARCHLEFT + elif s[0] == 'r' or s[0] == 'R': + return NPY.SEARCHRIGHT + else: + raise oefmt(space.w_ValueError, + "'%s' is an invalid value for keyword 'side'", s) + + def order_converter(space, w_order, default): if space.is_none(w_order): return default diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -16,7 +16,7 @@ ArrayArgumentException, wrap_impl from pypy.module.micronumpy.concrete import BaseConcreteArray from pypy.module.micronumpy.converters import multi_axis_converter, \ - order_converter, shape_converter + order_converter, shape_converter, searchside_converter from pypy.module.micronumpy.flagsobj import W_FlagsObject from pypy.module.micronumpy.flatiter import W_FlatIterator from pypy.module.micronumpy.strides import get_shape_from_iterable, \ @@ -728,21 +728,12 @@ loop.round(space, self, calc_dtype, self.get_shape(), decimals, out) return out - @unwrap_spec(side=str, w_sorter=WrappedDefault(None)) - def descr_searchsorted(self, space, w_v, side='left', w_sorter=None): + @unwrap_spec(w_side=WrappedDefault('left'), w_sorter=WrappedDefault(None)) + def descr_searchsorted(self, space, w_v, w_side=None, w_sorter=None): if not space.is_none(w_sorter): raise OperationError(space.w_NotImplementedError, space.wrap( 'sorter not supported in searchsort')) - if not side or len(side) < 1: - raise OperationError(space.w_ValueError, space.wrap( - "expected nonempty string for keyword 'side'")) - elif side[0] == 'l' or side[0] == 'L': - side = 'l' - elif side[0] == 'r' or side[0] == 'R': - side = 'r' - else: - raise oefmt(space.w_ValueError, - "'%s' is an invalid value for keyword 'side'", side) + side = searchside_converter(space, w_side) if len(self.get_shape()) > 1: raise oefmt(space.w_ValueError, "a must be a 1-d array") v = convert_to_array(space, w_v) @@ -1321,7 +1312,7 @@ else: imax = imid return imin - if side == 'l': + if side == 0: op = operator.lt else: op = operator.le diff --git a/pypy/module/micronumpy/test/test_sorting.py 
b/pypy/module/micronumpy/test/test_sorting.py --- a/pypy/module/micronumpy/test/test_sorting.py +++ b/pypy/module/micronumpy/test/test_sorting.py @@ -366,6 +366,12 @@ ret = a.searchsorted(3, side='right') assert ret == 3 assert isinstance(ret, np.generic) + exc = raises(ValueError, a.searchsorted, 3, side=None) + assert str(exc.value) == "expected nonempty string for keyword 'side'" + exc = raises(ValueError, a.searchsorted, 3, side='') + assert str(exc.value) == "expected nonempty string for keyword 'side'" + exc = raises(ValueError, a.searchsorted, 3, side=2) + assert str(exc.value) == "expected nonempty string for keyword 'side'" ret = a.searchsorted([-10, 10, 2, 3]) assert (ret == [0, 5, 1, 2]).all() if '__pypy__' in sys.builtin_module_names: From noreply at buildbot.pypy.org Thu Oct 9 09:03:55 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 9 Oct 2014 09:03:55 +0200 (CEST) Subject: [pypy-commit] pypy default: shuffle some things for clarity Message-ID: <20141009070355.46DF91C14FD@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73860:eb0014921290 Date: 2014-10-09 02:02 -0400 http://bitbucket.org/pypy/pypy/changeset/eb0014921290/ Log: shuffle some things for clarity diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -358,11 +358,11 @@ self.dtype = dtype def argsort(self, space, w_axis): - from pypy.module.micronumpy.sort import argsort_array + from .selection import argsort_array return argsort_array(self, space, w_axis) def sort(self, space, w_axis, w_order): - from pypy.module.micronumpy.sort import sort_array + from .selection import sort_array return sort_array(self, space, w_axis, w_order) def base(self): diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -21,6 +21,7 @@ from pypy.module.micronumpy.flatiter import W_FlatIterator from pypy.module.micronumpy.strides import get_shape_from_iterable, \ shape_agreement, shape_agreement_multiple +from .selection import app_searchsort def _match_dot_shapes(space, left, right): @@ -1299,31 +1300,6 @@ return res """, filename=__file__).interphook('ptp') -app_searchsort = applevel(r""" - def searchsort(arr, v, side, result): - import operator - def func(a, op, val): - imin = 0 - imax = a.size - while imin < imax: - imid = imin + ((imax - imin) >> 1) - if op(a[imid], val): - imin = imid +1 - else: - imax = imid - return imin - if side == 0: - op = operator.lt - else: - op = operator.le - if v.size < 2: - result[...] 
= func(arr, op, v) - else: - for i in range(v.size): - result[i] = func(arr, op, v[i]) - return result -""", filename=__file__).interphook('searchsort') - W_NDimArray.typedef = TypeDef("numpy.ndarray", __new__ = interp2app(descr_new_array), diff --git a/pypy/module/micronumpy/sort.py b/pypy/module/micronumpy/selection.py rename from pypy/module/micronumpy/sort.py rename to pypy/module/micronumpy/selection.py --- a/pypy/module/micronumpy/sort.py +++ b/pypy/module/micronumpy/selection.py @@ -1,4 +1,5 @@ from pypy.interpreter.error import oefmt +from pypy.interpreter.gateway import applevel from rpython.rlib.listsort import make_timsort_class from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import widen @@ -353,3 +354,29 @@ cache[cls] = make_sort_function(space, cls, it) self.cache = cache self._lookup = specialize.memo()(lambda tp: cache[tp[0]]) + + +app_searchsort = applevel(r""" + def searchsort(arr, v, side, result): + import operator + def func(a, op, val): + imin = 0 + imax = a.size + while imin < imax: + imid = imin + ((imax - imin) >> 1) + if op(a[imid], val): + imin = imid +1 + else: + imax = imid + return imin + if side == 0: + op = operator.lt + else: + op = operator.le + if v.size < 2: + result[...] = func(arr, op, v) + else: + for i in range(v.size): + result[i] = func(arr, op, v[i]) + return result +""", filename=__file__).interphook('searchsort') diff --git a/pypy/module/micronumpy/test/test_sorting.py b/pypy/module/micronumpy/test/test_selection.py rename from pypy/module/micronumpy/test/test_sorting.py rename to pypy/module/micronumpy/test/test_selection.py From noreply at buildbot.pypy.org Thu Oct 9 09:03:56 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 9 Oct 2014 09:03:56 +0200 (CEST) Subject: [pypy-commit] pypy default: clean up searchsorted Message-ID: <20141009070356.76CF21C14FD@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73861:191af129e380 Date: 2014-10-09 02:22 -0400 http://bitbucket.org/pypy/pypy/changeset/191af129e380/ Log: clean up searchsorted diff --git a/pypy/module/micronumpy/selection.py b/pypy/module/micronumpy/selection.py --- a/pypy/module/micronumpy/selection.py +++ b/pypy/module/micronumpy/selection.py @@ -357,26 +357,27 @@ app_searchsort = applevel(r""" - def searchsort(arr, v, side, result): - import operator - def func(a, op, val): - imin = 0 - imax = a.size - while imin < imax: - imid = imin + ((imax - imin) >> 1) - if op(a[imid], val): - imin = imid +1 - else: - imax = imid - return imin + import operator + + def _searchsort(a, op, val): + imin = 0 + imax = a.size + while imin < imax: + imid = imin + ((imax - imin) >> 1) + if op(a[imid], val): + imin = imid + 1 + else: + imax = imid + return imin + + def searchsort(a, v, side, result): if side == 0: op = operator.lt else: op = operator.le if v.size < 2: - result[...] = func(arr, op, v) + result[...] 
= _searchsort(a, op, v) else: for i in range(v.size): - result[i] = func(arr, op, v[i]) - return result + result[i] = _searchsort(a, op, v[i]) """, filename=__file__).interphook('searchsort') From noreply at buildbot.pypy.org Thu Oct 9 09:41:54 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 9 Oct 2014 09:41:54 +0200 (CEST) Subject: [pypy-commit] pypy default: match these setfields in any order Message-ID: <20141009074154.673321C025B@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73862:1993832c547b Date: 2014-10-09 03:41 -0400 http://bitbucket.org/pypy/pypy/changeset/1993832c547b/ Log: match these setfields in any order diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -108,11 +108,13 @@ i155 = int_lt(i154, 0) guard_false(i155, descr=...) p156 = new_with_vtable(...) + {{{ setfield_gc(p156, p49, descr=) setfield_gc(p156, i55, descr=) setfield_gc(p156, 0, descr=) setfield_gc(p156, p150, descr=) setfield_gc(p16, p156, descr=) + }}} jump(p0, p1, p3, p6, p7, p12, p14, p16, i151, f149, p26, i44, i50, i59, i55, i100, p156, p49, descr=...) """) @@ -146,10 +148,12 @@ i157 = int_lt(i154, 0) guard_false(i157, descr=...) p158 = new_with_vtable(...) + {{{ setfield_gc(p158, p47, descr=) setfield_gc(p158, i53, descr=) setfield_gc(p158, 0, descr=) setfield_gc(p158, p152, descr=) setfield_gc(p16, p158, descr=) + }}} jump(p0, p1, p3, p6, p7, p12, p14, p16, i153, i42, i48, i57, i53, p47, i103, p158, descr=...) """) From noreply at buildbot.pypy.org Thu Oct 9 10:16:24 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 9 Oct 2014 10:16:24 +0200 (CEST) Subject: [pypy-commit] pypy default: (arigo) also fix for 32bit Message-ID: <20141009081624.92C8B1C309B@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73863:22a85e994428 Date: 2014-10-09 04:16 -0400 http://bitbucket.org/pypy/pypy/changeset/22a85e994428/ Log: (arigo) also fix for 32bit diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -99,7 +99,7 @@ i128 = int_mul(i117, i59) i129 = int_add(i55, i128) f149 = raw_load(i100, i129, descr=) - p150 = getfield_gc_pure(p123, descr=) + p150 = getfield_gc_pure(p123, descr=) i151 = int_add(i117, 1) setarrayitem_gc(p150, 1, 0, descr=) setarrayitem_gc(p150, 0, 0, descr=) @@ -109,11 +109,11 @@ guard_false(i155, descr=...) p156 = new_with_vtable(...) {{{ - setfield_gc(p156, p49, descr=) - setfield_gc(p156, i55, descr=) - setfield_gc(p156, 0, descr=) - setfield_gc(p156, p150, descr=) - setfield_gc(p16, p156, descr=) + setfield_gc(p156, p49, descr=) + setfield_gc(p156, i55, descr=) + setfield_gc(p156, 0, descr=) + setfield_gc(p156, p150, descr=) + setfield_gc(p16, p156, descr=) }}} jump(p0, p1, p3, p6, p7, p12, p14, p16, i151, f149, p26, i44, i50, i59, i55, i100, p156, p49, descr=...) """) @@ -140,7 +140,7 @@ i132 = int_add(i53, i131) guard_not_invalidated(descr=...) raw_store(i103, i132, 42.000000, descr=) - p152 = getfield_gc_pure(p126, descr=) + p152 = getfield_gc_pure(p126, descr=) i153 = int_add(i120, 1) i154 = getfield_raw(ticker_address, descr=) setarrayitem_gc(p152, 1, 0, descr=) @@ -149,11 +149,11 @@ guard_false(i157, descr=...) p158 = new_with_vtable(...) 
{{{ - setfield_gc(p158, p47, descr=) - setfield_gc(p158, i53, descr=) - setfield_gc(p158, 0, descr=) - setfield_gc(p158, p152, descr=) - setfield_gc(p16, p158, descr=) + setfield_gc(p158, p47, descr=) + setfield_gc(p158, i53, descr=) + setfield_gc(p158, 0, descr=) + setfield_gc(p158, p152, descr=) + setfield_gc(p16, p158, descr=) }}} jump(p0, p1, p3, p6, p7, p12, p14, p16, i153, i42, i48, i57, i53, p47, i103, p158, descr=...) """) From noreply at buildbot.pypy.org Thu Oct 9 17:30:25 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 9 Oct 2014 17:30:25 +0200 (CEST) Subject: [pypy-commit] pypy default: provide ndarray.tobytes as an alias to tostring Message-ID: <20141009153025.AC93A1C33A1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73864:63793f7aeca2 Date: 2014-10-09 11:29 -0400 http://bitbucket.org/pypy/pypy/changeset/63793f7aeca2/ Log: provide ndarray.tobytes as an alias to tostring diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -1387,6 +1387,7 @@ flags = GetSetProperty(W_NDimArray.descr_get_flags), fill = interp2app(W_NDimArray.descr_fill), + tobytes = interp2app(W_NDimArray.descr_tostring), tostring = interp2app(W_NDimArray.descr_tostring), mean = interp2app(W_NDimArray.descr_mean), diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -3405,6 +3405,7 @@ '@\x01\x99\x99\x99\x99\x99\x9a\xbf\xf1\x99\x99\x99\x99\x99\x9a' assert array(2.2-1.1j, dtype=' Author: Brian Kearns Branch: Changeset: r73865:84e2e30e7551 Date: 2014-10-09 12:40 -0400 http://bitbucket.org/pypy/pypy/changeset/84e2e30e7551/ Log: implement numpy.result_type function diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -20,6 +20,7 @@ 'concatenate': 'arrayops.concatenate', 'count_nonzero': 'arrayops.count_nonzero', 'dot': 'arrayops.dot', + 'result_type': 'arrayops.result_type', 'where': 'arrayops.where', 'set_string_function': 'appbridge.set_string_function', diff --git a/pypy/module/micronumpy/arrayops.py b/pypy/module/micronumpy/arrayops.py --- a/pypy/module/micronumpy/arrayops.py +++ b/pypy/module/micronumpy/arrayops.py @@ -1,3 +1,4 @@ +from rpython.rlib import jit from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec from pypy.module.micronumpy import loop, descriptor, ufuncs, support, \ @@ -6,6 +7,7 @@ from pypy.module.micronumpy.converters import clipmode_converter from pypy.module.micronumpy.strides import Chunk, Chunks, shape_agreement, \ shape_agreement_multiple +from .boxes import W_GenericBox def where(space, w_arr, w_x=None, w_y=None): @@ -283,3 +285,26 @@ else: loop.diagonal_array(space, arr, out, offset, axis1, axis2, shape) return out + + + at jit.unroll_safe +def result_type(space, __args__): + args_w, kw_w = __args__.unpack() + if kw_w: + raise oefmt(space.w_TypeError, "result_type() takes no keyword arguments") + result = None + for w_arg in args_w: + if isinstance(w_arg, W_NDimArray): + dtype = w_arg.get_dtype() + elif isinstance(w_arg, W_GenericBox) or ( + space.isinstance_w(w_arg, space.w_int) or + space.isinstance_w(w_arg, space.w_float) or + space.isinstance_w(w_arg, space.w_complex) or + space.isinstance_w(w_arg, space.w_long) or + 
space.isinstance_w(w_arg, space.w_bool)): + dtype = ufuncs.find_dtype_for_scalar(space, w_arg) + else: + dtype = space.interp_w(descriptor.W_Dtype, + space.call_function(space.gettypefor(descriptor.W_Dtype), w_arg)) + result = ufuncs.find_binop_result_dtype(space, result, dtype) + return result diff --git a/pypy/module/micronumpy/test/test_arrayops.py b/pypy/module/micronumpy/test/test_arrayops.py --- a/pypy/module/micronumpy/test/test_arrayops.py +++ b/pypy/module/micronumpy/test/test_arrayops.py @@ -199,3 +199,17 @@ a.put(23, -1, mode=1) # wrap assert (a == array([0, 1, -10, -1, -15])).all() raises(TypeError, "arange(5).put(22, -5, mode='zzzz')") # unrecognized mode + + def test_result_type(self): + import numpy as np + exc = raises(TypeError, np.result_type, a=2) + assert str(exc.value) == "result_type() takes no keyword arguments" + assert np.result_type(True) is np.dtype('bool') + assert np.result_type(1) is np.dtype('int64') + assert np.result_type(1.) is np.dtype('float64') + assert np.result_type(1+2j) is np.dtype('complex128') + assert np.result_type(1, 1.) is np.dtype('float64') + assert np.result_type(np.array([1, 2])) is np.dtype('int64') + assert np.result_type(np.array([1, 2]), 1, 1+2j) is np.dtype('complex128') + assert np.result_type(np.array([1, 2]), 1, 'float64') is np.dtype('float64') + assert np.result_type(np.array([1, 2]), 1, None) is np.dtype('float64') From noreply at buildbot.pypy.org Thu Oct 9 19:39:47 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 9 Oct 2014 19:39:47 +0200 (CEST) Subject: [pypy-commit] pypy default: check for no args in result_type Message-ID: <20141009173947.CC8161C309B@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73866:83d8f679414d Date: 2014-10-09 13:39 -0400 http://bitbucket.org/pypy/pypy/changeset/83d8f679414d/ Log: check for no args in result_type diff --git a/pypy/module/micronumpy/arrayops.py b/pypy/module/micronumpy/arrayops.py --- a/pypy/module/micronumpy/arrayops.py +++ b/pypy/module/micronumpy/arrayops.py @@ -292,6 +292,8 @@ args_w, kw_w = __args__.unpack() if kw_w: raise oefmt(space.w_TypeError, "result_type() takes no keyword arguments") + if not args_w: + raise oefmt(space.w_ValueError, "at least one array or dtype is required") result = None for w_arg in args_w: if isinstance(w_arg, W_NDimArray): diff --git a/pypy/module/micronumpy/test/test_arrayops.py b/pypy/module/micronumpy/test/test_arrayops.py --- a/pypy/module/micronumpy/test/test_arrayops.py +++ b/pypy/module/micronumpy/test/test_arrayops.py @@ -202,6 +202,8 @@ def test_result_type(self): import numpy as np + exc = raises(ValueError, np.result_type) + assert str(exc.value) == "at least one array or dtype is required" exc = raises(TypeError, np.result_type, a=2) assert str(exc.value) == "result_type() takes no keyword arguments" assert np.result_type(True) is np.dtype('bool') From noreply at buildbot.pypy.org Thu Oct 9 20:20:55 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 9 Oct 2014 20:20:55 +0200 (CEST) Subject: [pypy-commit] pypy var-in-Some: hg merge default Message-ID: <20141009182055.E307E1C025B@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: var-in-Some Changeset: r73867:77ca6f4b2c85 Date: 2014-10-09 19:20 +0100 http://bitbucket.org/pypy/pypy/changeset/77ca6f4b2c85/ Log: hg merge default diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -12,3 +12,6 @@ Allocate by 4-byte chunks in rffi_platform, Skip testing objdump if it 
does not exist, and other small adjustments in own tests + +.. branch: rtyper-stuff +Small internal refactorings in the rtyper. diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -37,6 +37,13 @@ using a 32 bit Python and vice versa. By default pypy is built using the Multi-threaded DLL (/MD) runtime environment. +If you wish to override this detection method to use a different compiler +(mingw or a different version of MSVC): + +* set up the PATH and other environment variables as needed +* set the `CC` environment variable to compiler exe to be used, + for a different version of MSVC `SET CC=cl.exe`. + **Note:** PyPy is currently not supported for 64 bit Python, and translation will fail in this case. @@ -264,7 +271,7 @@ Since hacking on PyPy means running tests, you will need a way to specify the mingw compiler when hacking (as opposed to translating). As of March 2012, --cc is not a valid option for pytest.py. However if you set an -environment variable CC to the compliter exe, testing will use it. +environment variable CC to the compiler exe, testing will use it. .. _`mingw32 build`: http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win32/Automated%20Builds .. _`mingw64 build`: http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win64/Automated%20Builds diff --git a/pypy/module/_socket/__init__.py b/pypy/module/_socket/__init__.py --- a/pypy/module/_socket/__init__.py +++ b/pypy/module/_socket/__init__.py @@ -17,8 +17,6 @@ def startup(self, space): from rpython.rlib.rsocket import rsocket_startup rsocket_startup() - from pypy.module._socket.interp_func import State - space.fromcache(State).startup(space) def buildloaders(cls): from rpython.rlib import rsocket diff --git a/pypy/module/_socket/interp_func.py b/pypy/module/_socket/interp_func.py --- a/pypy/module/_socket/interp_func.py +++ b/pypy/module/_socket/interp_func.py @@ -1,5 +1,6 @@ from rpython.rlib import rsocket from rpython.rlib.rsocket import SocketError, INVALID_SOCKET +from rpython.rlib.rarithmetic import intmask from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import unwrap_spec, WrappedDefault @@ -46,9 +47,8 @@ Return the true host name, a list of aliases, and a list of IP addresses, for a host. The host argument is a string giving a host name or IP number. """ - lock = space.fromcache(State).netdb_lock try: - res = rsocket.gethostbyname_ex(host, lock) + res = rsocket.gethostbyname_ex(host) except SocketError, e: raise converted_error(space, e) return common_wrapgethost(space, res) @@ -60,9 +60,8 @@ Return the true host name, a list of aliases, and a list of IP addresses, for a host. The host argument is a string giving a host name or IP number. """ - lock = space.fromcache(State).netdb_lock try: - res = rsocket.gethostbyaddr(host, lock) + res = rsocket.gethostbyaddr(host) except SocketError, e: raise converted_error(space, e) return common_wrapgethost(space, res) @@ -174,7 +173,7 @@ Convert a 16-bit integer from network to host byte order. """ - return space.wrap(rsocket.ntohs(x)) + return space.wrap(rsocket.ntohs(intmask(x))) @unwrap_spec(x="c_uint") def ntohl(space, x): @@ -190,7 +189,7 @@ Convert a 16-bit integer from host to network byte order. 
""" - return space.wrap(rsocket.htons(x)) + return space.wrap(rsocket.htons(intmask(x))) @unwrap_spec(x="c_uint") def htonl(space, x): @@ -319,10 +318,3 @@ raise OperationError(space.w_ValueError, space.wrap('Timeout value out of range')) rsocket.setdefaulttimeout(timeout) - -class State(object): - def __init__(self, space): - self.netdb_lock = None - - def startup(self, space): - self.netdb_lock = space.allocate_lock() diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -109,10 +109,11 @@ # XXX Hack to seperate rpython and pypy def make_ushort_port(space, port): + assert isinstance(port, int) if port < 0 or port > 0xffff: raise OperationError(space.w_OverflowError, space.wrap( "port must be 0-65535.")) - return rffi.cast(rffi.USHORT, port) + return port def make_unsigned_flowinfo(space, flowinfo): if flowinfo < 0 or flowinfo > 0xfffff: @@ -401,8 +402,10 @@ The value argument can either be an integer or a string. """ try: - optval = space.int_w(w_optval) - except: + optval = space.c_int_w(w_optval) + except OperationError, e: + if e.async(space): + raise optval = space.str_w(w_optval) try: self.sock.setsockopt(level, optname, optval) diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -498,6 +498,13 @@ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) reuse = s.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) assert reuse == 0 + # + raises(TypeError, s.setsockopt, socket.SOL_SOCKET, + socket.SO_REUSEADDR, 2 ** 31) + raises(TypeError, s.setsockopt, socket.SOL_SOCKET, + socket.SO_REUSEADDR, 2 ** 32 + 1) + assert s.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 0 + # s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) reuse = s.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) assert reuse != 0 diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -116,6 +116,8 @@ validate_fd(fileno(fp)) return _feof(fp) +def is_valid_fp(fp): + return is_valid_fd(fileno(fp)) constant_names = """ Py_TPFLAGS_READY Py_TPFLAGS_READYING Py_TPFLAGS_HAVE_GETCHARBUFFER diff --git a/pypy/module/cpyext/eval.py b/pypy/module/cpyext/eval.py --- a/pypy/module/cpyext/eval.py +++ b/pypy/module/cpyext/eval.py @@ -3,7 +3,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( cpython_api, CANNOT_FAIL, CONST_STRING, FILEP, fread, feof, Py_ssize_tP, - cpython_struct) + cpython_struct, is_valid_fp) from pypy.module.cpyext.pyobject import PyObject, borrow_from from pypy.module.cpyext.pyerrors import PyErr_SetFromErrno from pypy.module.cpyext.funcobject import PyCodeObject @@ -154,6 +154,10 @@ source = "" filename = rffi.charp2str(filename) buf = lltype.malloc(rffi.CCHARP.TO, BUF_SIZE, flavor='raw') + if not is_valid_fp(fp): + lltype.free(buf, flavor='raw') + PyErr_SetFromErrno(space, space.w_IOError) + return None try: while True: count = fread(buf, 1, BUF_SIZE, fp) diff --git a/pypy/module/cpyext/test/test_eval.py b/pypy/module/cpyext/test/test_eval.py --- a/pypy/module/cpyext/test/test_eval.py +++ b/pypy/module/cpyext/test/test_eval.py @@ -89,12 +89,12 @@ rffi.free_charp(buf) assert 0 == run("42 * 43") - + assert -1 == run("4..3 * 43") - + assert api.PyErr_Occurred() api.PyErr_Clear() - + def test_run_string(self, space, api): 
def run(code, start, w_globals, w_locals): buf = rffi.str2charp(code) diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -3,7 +3,7 @@ from rpython.rlib.buffer import SubBuffer from rpython.rlib.rstring import strip_spaces from rpython.rtyper.lltypesystem import lltype, rffi -from pypy.module.micronumpy import descriptor, loop +from pypy.module.micronumpy import descriptor, loop, support from pypy.module.micronumpy.base import ( W_NDimArray, convert_to_array, W_NumpyObject) from pypy.module.micronumpy.converters import shape_converter @@ -134,6 +134,15 @@ if dtype.is_str_or_unicode() and dtype.elsize < 1: dtype = descriptor.variable_dtype(space, dtype.char + '1') shape = shape_converter(space, w_shape, dtype) + for dim in shape: + if dim < 0: + raise OperationError(space.w_ValueError, space.wrap( + "negative dimensions are not allowed")) + try: + support.product(shape) + except OverflowError: + raise OperationError(space.w_ValueError, space.wrap( + "array is too big.")) return W_NDimArray.from_shape(space, shape, dtype=dtype, zero=zero) def empty(space, w_shape, w_dtype=None, w_order=None): diff --git a/pypy/module/micronumpy/support.py b/pypy/module/micronumpy/support.py --- a/pypy/module/micronumpy/support.py +++ b/pypy/module/micronumpy/support.py @@ -1,5 +1,6 @@ from pypy.interpreter.error import OperationError, oefmt from rpython.rlib import jit +from rpython.rlib.rarithmetic import ovfcheck def issequence_w(space, w_obj): @@ -23,7 +24,7 @@ def product(s): i = 1 for x in s: - i *= x + i = ovfcheck(i * x) return i diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -384,6 +384,19 @@ assert zeros((), dtype='S').shape == () assert zeros((), dtype='S').dtype == '|S1' + def test_check_shape(self): + import numpy as np + for func in [np.zeros, np.empty]: + exc = raises(ValueError, func, [0, -1, 1], 'int8') + assert str(exc.value) == "negative dimensions are not allowed" + exc = raises(ValueError, func, [2, -1, 3], 'int8') + assert str(exc.value) == "negative dimensions are not allowed" + + exc = raises(ValueError, func, [975]*7, 'int8') + assert str(exc.value) == "array is too big." + exc = raises(ValueError, func, [26244]*5, 'int8') + assert str(exc.value) == "array is too big." + def test_empty_like(self): import numpy as np a = np.empty_like(np.zeros(())) diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -599,8 +599,7 @@ 'float_mul': 2, 'getarrayitem_gc': 7, 'getarrayitem_gc_pure': 15, - 'getfield_gc': 8, - 'getfield_gc_pure': 44, + 'getfield_gc_pure': 52, 'guard_class': 4, 'guard_false': 14, 'guard_not_invalidated': 2, diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -85,9 +85,9 @@ p38 = call(ConstClass(_ll_0_threadlocalref_getter___), descr=) p39 = getfield_gc(p38, descr=) i40 = force_token() - p41 = getfield_gc(p38, descr=) + p41 = getfield_gc_pure(p38, descr=) guard_value(p41, ConstPtr(ptr42), descr=...) - i42 = getfield_gc(p38, descr=) + i42 = getfield_gc_pure(p38, descr=) i43 = int_is_zero(i42) guard_true(i43, descr=...) 
i50 = force_token() @@ -447,9 +447,9 @@ p29 = call(ConstClass(_ll_0_threadlocalref_getter___), descr=) p30 = getfield_gc(p29, descr=) p31 = force_token() - p32 = getfield_gc(p29, descr=) + p32 = getfield_gc_pure(p29, descr=) guard_value(p32, ConstPtr(ptr33), descr=...) - i34 = getfield_gc(p29, descr=) + i34 = getfield_gc_pure(p29, descr=) i35 = int_is_zero(i34) guard_true(i35, descr=...) p37 = getfield_gc(ConstPtr(ptr36), descr=) diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -785,6 +785,7 @@ raise Exception("getfield_raw_r (without _pure) not supported") # if immut in (IR_QUASIIMMUTABLE, IR_QUASIIMMUTABLE_ARRAY): + op1.opname += "_pure" descr1 = self.cpu.fielddescrof( v_inst.concretetype.TO, quasiimmut.get_mutate_field_name(c_fieldname.value)) diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -1296,7 +1296,7 @@ assert op1.args[1] == ('fielddescr', STRUCT, 'inst_x') assert op1.args[2] == ('fielddescr', STRUCT, 'mutate_x') assert op1.result is None - assert op2.opname == 'getfield_gc_i' + assert op2.opname == 'getfield_gc_i_pure' assert len(op2.args) == 2 assert op2.args[0] == v_x assert op2.args[1] == ('fielddescr', STRUCT, 'inst_x') diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -6,6 +6,7 @@ from rpython.jit.metainterp.jitexc import JitException from rpython.jit.metainterp.optimizeopt.optimizer import Optimization, MODE_ARRAY, LEVEL_KNOWNCLASS, REMOVED from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method +from rpython.jit.metainterp.optimize import InvalidLoop from rpython.jit.metainterp.resoperation import rop, ResOperation from rpython.rlib.objectmodel import we_are_translated @@ -531,10 +532,14 @@ def optimize_QUASIIMMUT_FIELD(self, op): # Pattern: QUASIIMMUT_FIELD(s, descr=QuasiImmutDescr) - # x = GETFIELD_GC(s, descr='inst_x') - # If 's' is a constant (after optimizations), then we make 's.inst_x' - # a constant too, and we rely on the rest of the optimizations to - # constant-fold the following getfield_gc. + # x = GETFIELD_GC_PURE(s, descr='inst_x') + # If 's' is a constant (after optimizations) we rely on the rest of the + # optimizations to constant-fold the following getfield_gc_pure. + # in addition, we record the dependency here to make invalidation work + # correctly. + # NB: emitting the GETFIELD_GC_PURE is only safe because the + # QUASIIMMUT_FIELD is also emitted to make sure the dependency is + # registered. structvalue = self.getvalue(op.getarg(0)) if not structvalue.is_constant(): self._remove_guard_not_invalidated = True @@ -544,21 +549,14 @@ qmutdescr = op.getdescr() assert isinstance(qmutdescr, QuasiImmutDescr) # check that the value is still correct; it could have changed - # already between the tracing and now. In this case, we are - # simply ignoring the QUASIIMMUT_FIELD hint and compiling it - # as a regular getfield. + # already between the tracing and now. 
In this case, we mark the loop + # as invalid if not qmutdescr.is_still_valid_for(structvalue.get_key_box()): - self._remove_guard_not_invalidated = True - return + raise InvalidLoop('quasi immutable field changed during tracing') # record as an out-of-line guard if self.optimizer.quasi_immutable_deps is None: self.optimizer.quasi_immutable_deps = {} self.optimizer.quasi_immutable_deps[qmutdescr.qmut] = None - # perform the replacement in the list of operations - fieldvalue = self.getvalue(qmutdescr.constantfieldbox) - cf = self.field_cache(qmutdescr.fielddescr) - cf.force_lazy_setfield(self) - cf.remember_field_value(structvalue, fieldvalue) self._remove_guard_not_invalidated = False def optimize_GUARD_NOT_INVALIDATED(self, op): diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5200,7 +5200,7 @@ [] quasiimmut_field(ConstPtr(quasiptr), descr=quasiimmutdescr) guard_not_invalidated() [] - i0 = getfield_gc(ConstPtr(quasiptr), descr=quasifielddescr) + i0 = getfield_gc_pure(ConstPtr(quasiptr), descr=quasifielddescr) i1 = call_pure(123, i0, descr=nonwritedescr) finish(i1) """ diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -6893,13 +6893,13 @@ [p0, p1, i0] quasiimmut_field(p0, descr=quasiimmutdescr) guard_not_invalidated() [] - i1 = getfield_gc(p0, descr=quasifielddescr) + i1 = getfield_gc_pure(p0, descr=quasifielddescr) escape(i1) jump(p1, p0, i1) """ expected = """ [p0, p1, i0] - i1 = getfield_gc(p0, descr=quasifielddescr) + i1 = getfield_gc_pure(p0, descr=quasifielddescr) escape(i1) jump(p1, p0, i1) """ @@ -6910,7 +6910,7 @@ [] quasiimmut_field(ConstPtr(quasiptr), descr=quasiimmutdescr) guard_not_invalidated() [] - i1 = getfield_gc(ConstPtr(quasiptr), descr=quasifielddescr) + i1 = getfield_gc_pure(ConstPtr(quasiptr), descr=quasifielddescr) escape(i1) jump() """ @@ -6962,11 +6962,11 @@ [i0a, i0b] quasiimmut_field(ConstPtr(quasiptr), descr=quasiimmutdescr) guard_not_invalidated() [] - i1 = getfield_gc(ConstPtr(quasiptr), descr=quasifielddescr) + i1 = getfield_gc_pure(ConstPtr(quasiptr), descr=quasifielddescr) call_may_force(i0b, descr=mayforcevirtdescr) quasiimmut_field(ConstPtr(quasiptr), descr=quasiimmutdescr) guard_not_invalidated() [] - i2 = getfield_gc(ConstPtr(quasiptr), descr=quasifielddescr) + i2 = getfield_gc_pure(ConstPtr(quasiptr), descr=quasifielddescr) i3 = escape(i1) i4 = escape(i2) jump(i3, i4) @@ -6989,11 +6989,11 @@ setfield_gc(p, 421, descr=quasifielddescr) quasiimmut_field(p, descr=quasiimmutdescr) guard_not_invalidated() [] - i1 = getfield_gc(p, descr=quasifielddescr) + i1 = getfield_gc_pure(p, descr=quasifielddescr) call_may_force(i0b, descr=mayforcevirtdescr) quasiimmut_field(p, descr=quasiimmutdescr) guard_not_invalidated() [] - i2 = getfield_gc(p, descr=quasifielddescr) + i2 = getfield_gc_pure(p, descr=quasifielddescr) i3 = escape(i1) i4 = escape(i2) jump(i3, i4) @@ -8242,7 +8242,7 @@ quasiimmut_field(p69, descr=quasiimmutdescr) guard_not_invalidated() [] - p71 = getfield_gc(p69, descr=quasifielddescr) # inst_code + p71 = getfield_gc_pure(p69, descr=quasifielddescr) # inst_code guard_value(p71, -4247) [] p106 = 
new_with_vtable(ConstClass(node_vtable)) @@ -8253,30 +8253,18 @@ setfield_gc(p106, p108, descr=nextdescr) # inst_storage jump(p106) """ - expected = """ - [] - p72 = getfield_gc(ConstPtr(myptr2), descr=quasifielddescr) - guard_value(p72, -4247) [] - jump() - """ - self.optimize_loop(ops, expected) + self.raises(InvalidLoop, self.optimize_loop, ops, ops) def test_issue1080_infinitie_loop_simple(self): ops = """ [p69] quasiimmut_field(p69, descr=quasiimmutdescr) guard_not_invalidated() [] - p71 = getfield_gc(p69, descr=quasifielddescr) # inst_code + p71 = getfield_gc_pure(p69, descr=quasifielddescr) # inst_code guard_value(p71, -4247) [] jump(ConstPtr(myptr)) """ - expected = """ - [] - p72 = getfield_gc(ConstPtr(myptr), descr=quasifielddescr) - guard_value(p72, -4247) [] - jump() - """ - self.optimize_loop(ops, expected) + self.raises(InvalidLoop, self.optimize_loop, ops, ops) def test_only_strengthen_guard_if_class_matches(self): ops = """ diff --git a/rpython/jit/metainterp/test/test_quasiimmut.py b/rpython/jit/metainterp/test/test_quasiimmut.py --- a/rpython/jit/metainterp/test/test_quasiimmut.py +++ b/rpython/jit/metainterp/test/test_quasiimmut.py @@ -81,6 +81,27 @@ assert len(loop.quasi_immutable_deps) == 1 assert isinstance(loop.quasi_immutable_deps.keys()[0], QuasiImmut) + def test_simple_optimize_during_tracing(self): + myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) + class Foo: + _immutable_fields_ = ['a?'] + def __init__(self, a): + self.a = a + def f(a, x): + foo = Foo(a) + total = 0 + while x > 0: + myjitdriver.jit_merge_point(foo=foo, x=x, total=total) + # read a quasi-immutable field out of a Constant + total += foo.a + x -= 1 + return total + # + res = self.meta_interp(f, [100, 7], enable_opts="") + assert res == 700 + # there should be no getfields, even though optimizations are turned off + self.check_resops(guard_not_invalidated=1, getfield_gc=0) + def test_nonopt_1(self): myjitdriver = JitDriver(greens=[], reds=['x', 'total', 'lst']) class Foo: @@ -102,7 +123,7 @@ assert f(100, 7) == 721 res = self.meta_interp(f, [100, 7]) assert res == 721 - self.check_resops(guard_not_invalidated=0, getfield_gc=3) + self.check_resops(guard_not_invalidated=0, getfield_gc=1, getfield_gc_pure=2) # from rpython.jit.metainterp.warmspot import get_stats loops = get_stats().loops @@ -154,11 +175,12 @@ residual_call(foo) x -= 1 return total - # + assert f(100, 7) == 721 res = self.meta_interp(f, [100, 7]) assert res == 721 - self.check_resops(guard_not_invalidated=0, getfield_gc=2) + # the loop is invalid, so nothing is traced + self.check_aborted_count(2) def test_change_during_tracing_2(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) @@ -184,7 +206,7 @@ assert f(100, 7) == 700 res = self.meta_interp(f, [100, 7]) assert res == 700 - self.check_resops(guard_not_invalidated=0, getfield_gc=2) + self.check_resops(guard_not_invalidated=0, getfield_gc=0) def test_change_invalidate_reentering(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) @@ -347,7 +369,7 @@ res = self.meta_interp(f, [100, 7]) assert res == 700 self.check_resops(getarrayitem_gc_pure=0, guard_not_invalidated=2, - getarrayitem_gc=0, getfield_gc=0) + getarrayitem_gc=0, getfield_gc=0, getfield_gc_pure=0) # from rpython.jit.metainterp.warmspot import get_stats loops = get_stats().loops @@ -355,6 +377,30 @@ assert len(loop.quasi_immutable_deps) == 1 assert isinstance(loop.quasi_immutable_deps.keys()[0], QuasiImmut) + def test_list_optimized_while_tracing(self): + myjitdriver = 
JitDriver(greens=['foo'], reds=['x', 'total']) + class Foo: + _immutable_fields_ = ['lst?[*]'] + def __init__(self, lst): + self.lst = lst + def f(a, x): + lst1 = [0, 0] + lst1[1] = a + foo = Foo(lst1) + total = 0 + while x > 0: + myjitdriver.jit_merge_point(foo=foo, x=x, total=total) + # read a quasi-immutable field out of a Constant + total += foo.lst[1] + x -= 1 + return total + # + res = self.meta_interp(f, [100, 7], enable_opts="") + assert res == 700 + # operations must have been removed by the frontend + self.check_resops(getarrayitem_gc_pure=0, guard_not_invalidated=1, + getarrayitem_gc=0, getfield_gc=0, getfield_gc_pure=0) + def test_list_length_1(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) class Foo: diff --git a/rpython/rlib/_stacklet_n_a.py b/rpython/rlib/_stacklet_n_a.py --- a/rpython/rlib/_stacklet_n_a.py +++ b/rpython/rlib/_stacklet_n_a.py @@ -1,33 +1,35 @@ from rpython.rlib import _rffi_stacklet as _c -from rpython.rlib import objectmodel, debug +from rpython.rlib import debug +from rpython.rlib.objectmodel import we_are_translated, specialize from rpython.rtyper.annlowlevel import llhelper -from rpython.tool.staticmethods import StaticMethods -class StackletGcRootFinder: - __metaclass__ = StaticMethods - +class StackletGcRootFinder(object): + @staticmethod + @specialize.arg(1) def new(thrd, callback, arg): h = _c.new(thrd._thrd, llhelper(_c.run_fn, callback), arg) if not h: raise MemoryError return h - new._annspecialcase_ = 'specialize:arg(1)' + @staticmethod def switch(h): h = _c.switch(h) if not h: raise MemoryError return h + @staticmethod def destroy(thrd, h): _c.destroy(thrd._thrd, h) - if objectmodel.we_are_translated(): + if we_are_translated(): debug.debug_print("not using a framework GC: " "stacklet_destroy() may leak") - is_empty_handle = _c.is_empty_handle + is_empty_handle = staticmethod(_c.is_empty_handle) + @staticmethod def get_null_handle(): return _c.null_handle diff --git a/rpython/rlib/_stacklet_shadowstack.py b/rpython/rlib/_stacklet_shadowstack.py --- a/rpython/rlib/_stacklet_shadowstack.py +++ b/rpython/rlib/_stacklet_shadowstack.py @@ -3,7 +3,6 @@ from rpython.rtyper.annlowlevel import llhelper from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.rtyper.lltypesystem.lloperation import llop -from rpython.tool.staticmethods import StaticMethods NULL_SUSPSTACK = lltype.nullptr(llmemory.GCREF.TO) @@ -68,9 +67,7 @@ return oldsuspstack -class StackletGcRootFinder: - __metaclass__ = StaticMethods - +class StackletGcRootFinder(object): def new(thrd, callback, arg): gcrootfinder.callback = callback thread_handle = thrd._thrd @@ -78,6 +75,7 @@ h = _c.new(thread_handle, llhelper(_c.run_fn, _new_callback), arg) return get_result_suspstack(h) new._dont_inline_ = True + new = staticmethod(new) def switch(suspstack): # suspstack has a handle to target, i.e. 
where to switch to @@ -90,10 +88,13 @@ h = _c.switch(h) return get_result_suspstack(h) switch._dont_inline_ = True + switch = staticmethod(switch) + @staticmethod def is_empty_handle(suspstack): return not suspstack + @staticmethod def get_null_handle(): return NULL_SUSPSTACK diff --git a/rpython/rlib/rmmap.py b/rpython/rlib/rmmap.py --- a/rpython/rlib/rmmap.py +++ b/rpython/rlib/rmmap.py @@ -134,8 +134,8 @@ if _CYGWIN: # XXX: macro=True hack for newer versions of Cygwin (as of 12/2012) - c_malloc, _ = external('malloc', [size_t], PTR, macro=True) - c_free, _ = external('free', [PTR], lltype.Void, macro=True) + _, c_malloc_safe = external('malloc', [size_t], PTR, macro=True) + _, c_free_safe = external('free', [PTR], lltype.Void, macro=True) c_memmove, _ = external('memmove', [PTR, PTR, size_t], lltype.Void) @@ -709,7 +709,7 @@ # XXX: JIT memory should be using mmap MAP_PRIVATE with # PROT_EXEC but Cygwin's fork() fails. mprotect() # cannot be used, but seems to be unnecessary there. - res = c_malloc(map_size) + res = c_malloc_safe(map_size) if res == rffi.cast(PTR, 0): raise MemoryError return res @@ -726,7 +726,7 @@ alloc._annenforceargs_ = (int,) if _CYGWIN: - free = c_free + free = c_free_safe else: free = c_munmap_safe diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py --- a/rpython/rlib/rsocket.py +++ b/rpython/rlib/rsocket.py @@ -18,9 +18,10 @@ from rpython.rlib import _rsocket_rffi as _c, jit, rgc from rpython.rlib.objectmodel import instantiate, keepalive_until_here from rpython.rlib.rarithmetic import intmask, r_uint -from rpython.rlib.rthread import dummy_lock +from rpython.rlib import rthread from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.lltypesystem.rffi import sizeof, offsetof +from rpython.rtyper.extregistry import ExtRegistryEntry # Usage of @jit.dont_look_inside in this file is possibly temporary @@ -51,6 +52,7 @@ def ntohs(x): + assert isinstance(x, int) return rffi.cast(lltype.Signed, _c.ntohs(x)) def ntohl(x): @@ -58,6 +60,7 @@ return rffi.cast(lltype.Unsigned, _c.ntohl(x)) def htons(x): + assert isinstance(x, int) return rffi.cast(lltype.Signed, _c.htons(x)) def htonl(x): @@ -220,7 +223,6 @@ def get_protocol(self): a = self.lock(_c.sockaddr_ll) proto = rffi.getintfield(a, 'c_sll_protocol') - proto = rffi.cast(rffi.USHORT, proto) res = ntohs(proto) self.unlock() return res @@ -256,7 +258,6 @@ def __init__(self, host, port): makeipaddr(host, self) a = self.lock(_c.sockaddr_in) - port = rffi.cast(rffi.USHORT, port) rffi.setintfield(a, 'c_sin_port', htons(port)) self.unlock() @@ -268,7 +269,7 @@ def get_port(self): a = self.lock(_c.sockaddr_in) - port = ntohs(a.c_sin_port) + port = ntohs(rffi.getintfield(a, 'c_sin_port')) self.unlock() return port @@ -321,7 +322,7 @@ def get_port(self): a = self.lock(_c.sockaddr_in6) - port = ntohs(a.c_sin6_port) + port = ntohs(rffi.getintfield(a, 'c_sin6_port')) self.unlock() return port @@ -1135,18 +1136,18 @@ paddr = h_addr_list[i] return (rffi.charp2str(hostent.c_h_name), aliases, address_list) -def gethostbyname_ex(name, lock=dummy_lock): +def gethostbyname_ex(name): # XXX use gethostbyname_r() if available instead of locks addr = gethostbyname(name) - with lock: + with _get_netdb_lock(): hostent = _c.gethostbyname(name) return gethost_common(name, hostent, addr) -def gethostbyaddr(ip, lock=dummy_lock): +def gethostbyaddr(ip): # XXX use gethostbyaddr_r() if available, instead of locks addr = makeipaddr(ip) assert isinstance(addr, IPAddress) - with lock: + with _get_netdb_lock(): p, size = 
addr.lock_in_addr() try: hostent = _c.gethostbyaddr(p, size, addr.family) @@ -1154,6 +1155,36 @@ addr.unlock() return gethost_common(ip, hostent, addr) +# RPython magic to make _netdb_lock turn either into a regular +# rthread.Lock or a rthread.DummyLock, depending on the config +def _get_netdb_lock(): + return rthread.dummy_lock + +class _Entry(ExtRegistryEntry): + _about_ = _get_netdb_lock + + def compute_annotation(self): + config = self.bookkeeper.annotator.translator.config + if config.translation.thread: + fn = _get_netdb_lock_thread + else: + fn = _get_netdb_lock_nothread + return self.bookkeeper.immutablevalue(fn) + +def _get_netdb_lock_nothread(): + return rthread.dummy_lock + +class _LockCache(object): + lock = None +_lock_cache = _LockCache() + + at jit.elidable +def _get_netdb_lock_thread(): + if _lock_cache.lock is None: + _lock_cache.lock = rthread.allocate_lock() + return _lock_cache.lock +# done RPython magic + def getaddrinfo(host, port_or_service, family=AF_UNSPEC, socktype=0, proto=0, flags=0, address_to_fill=None): @@ -1201,13 +1232,13 @@ servent = _c.getservbyname(name, proto) if not servent: raise RSocketError("service/proto not found") - port = rffi.cast(rffi.UINT, servent.c_s_port) + port = rffi.getintfield(servent, 'c_s_port') return ntohs(port) def getservbyport(port, proto=None): # This function is only called from pypy/module/_socket and the range of # port is checked there - port = rffi.cast(rffi.USHORT, port) + assert isinstance(port, int) servent = _c.getservbyport(htons(port), proto) if not servent: raise RSocketError("port/proto not found") diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -8,8 +8,10 @@ if rffi.sizeof(lltype.UniChar) == 4: MAXUNICODE = 0x10ffff + allow_surrogate_by_default = False else: MAXUNICODE = 0xffff + allow_surrogate_by_default = True BYTEORDER = sys.byteorder @@ -122,7 +124,7 @@ ] def str_decode_utf_8(s, size, errors, final=False, - errorhandler=None, allow_surrogates=False): + errorhandler=None, allow_surrogates=allow_surrogate_by_default): if errorhandler is None: errorhandler = default_unicode_error_decode result = UnicodeBuilder(size) @@ -304,7 +306,7 @@ result.append((chr((0x80 | (ch & 0x3f))))) def unicode_encode_utf_8(s, size, errors, errorhandler=None, - allow_surrogates=False): + allow_surrogates=allow_surrogate_by_default): if errorhandler is None: errorhandler = default_unicode_error_encode return unicode_encode_utf_8_impl(s, size, errors, errorhandler, diff --git a/rpython/rlib/test/test_rsocket.py b/rpython/rlib/test/test_rsocket.py --- a/rpython/rlib/test/test_rsocket.py +++ b/rpython/rlib/test/test_rsocket.py @@ -2,6 +2,7 @@ from rpython.rlib import rsocket from rpython.rlib.rsocket import * import socket as cpy_socket +from rpython.translator.c.test.test_genc import compile def setup_module(mod): @@ -570,4 +571,17 @@ for i in range(nthreads): threads[i].join() assert sum(result) == nthreads - + +def test_translate_netdb_lock(): + def f(): + gethostbyaddr("localhost") + return 0 + fc = compile(f, []) + assert fc() == 0 + +def test_translate_netdb_lock_thread(): + def f(): + gethostbyaddr("localhost") + return 0 + fc = compile(f, [], thread=True) + assert fc() == 0 diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py --- a/rpython/rtyper/lltypesystem/lltype.py +++ b/rpython/rtyper/lltypesystem/lltype.py @@ -191,6 +191,11 @@ def _is_varsize(self): return False + def _contains_value(self, value): + if 
self is Void: + return True + return isCompatibleType(typeOf(value), self) + NFOUND = object() class ContainerType(LowLevelType): diff --git a/rpython/rtyper/lltypesystem/rbytearray.py b/rpython/rtyper/lltypesystem/rbytearray.py --- a/rpython/rtyper/lltypesystem/rbytearray.py +++ b/rpython/rtyper/lltypesystem/rbytearray.py @@ -38,12 +38,14 @@ i += s.length() cls.ll_strsetitem_nonneg(s, i, item) + @staticmethod def ll_strsetitem_nonneg(s, i, item): chars = s.chars ll_assert(i >= 0, "negative str getitem index") ll_assert(i < len(chars), "str getitem index out of bound") chars[i] = chr(item) + @staticmethod def ll_stritem_nonneg(s, i): return ord(rstr.LLHelpers.ll_stritem_nonneg(s, i)) diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -270,6 +270,7 @@ class LLHelpers(AbstractLLHelpers): from rpython.rtyper.annlowlevel import llstr, llunicode + @staticmethod @jit.elidable def ll_str_mul(s, times): if times < 0: @@ -292,6 +293,7 @@ i += j return newstr + @staticmethod @jit.elidable def ll_char_mul(ch, times): if typeOf(ch) is Char: @@ -308,9 +310,11 @@ j += 1 return newstr + @staticmethod def ll_strlen(s): return len(s.chars) + @staticmethod @signature(types.any(), types.int(), returns=types.any()) def ll_stritem_nonneg(s, i): chars = s.chars @@ -318,6 +322,7 @@ ll_assert(i < len(chars), "str getitem index out of bound") return chars[i] + @staticmethod def ll_chr2str(ch): if typeOf(ch) is Char: malloc = mallocstr @@ -328,6 +333,7 @@ return s # @jit.look_inside_iff(lambda str: jit.isconstant(len(str.chars)) and len(str.chars) == 1) + @staticmethod @jit.oopspec("str.str2unicode(str)") def ll_str2unicode(str): lgt = len(str.chars) @@ -338,6 +344,7 @@ s.chars[i] = cast_primitive(UniChar, str.chars[i]) return s + @staticmethod def ll_str2bytearray(str): from rpython.rtyper.lltypesystem.rbytearray import BYTEARRAY @@ -347,6 +354,7 @@ b.chars[i] = str.chars[i] return b + @staticmethod @jit.elidable def ll_strhash(s): # unlike CPython, there is no reason to avoid to return -1 @@ -362,13 +370,17 @@ s.hash = x return x + @staticmethod def ll_length(s): return len(s.chars) + @staticmethod def ll_strfasthash(s): return s.hash # assumes that the hash is already computed + @staticmethod @jit.elidable + @jit.oopspec('stroruni.concat(s1, s2)') def ll_strconcat(s1, s2): len1 = s1.length() len2 = s2.length() @@ -386,8 +398,8 @@ else: newstr.copy_contents(s2, newstr, 0, len1, len2) return newstr - ll_strconcat.oopspec = 'stroruni.concat(s1, s2)' + @staticmethod @jit.elidable def ll_strip(s, ch, left, right): s_len = len(s.chars) @@ -408,6 +420,7 @@ s.copy_contents(s, result, lpos, 0, r_len) return result + @staticmethod @jit.elidable def ll_strip_default(s, left, right): s_len = len(s.chars) @@ -428,6 +441,7 @@ s.copy_contents(s, result, lpos, 0, r_len) return result + @staticmethod @jit.elidable def ll_strip_multiple(s, s2, left, right): s_len = len(s.chars) @@ -448,6 +462,7 @@ s.copy_contents(s, result, lpos, 0, r_len) return result + @staticmethod @jit.elidable def ll_upper(s): s_chars = s.chars @@ -462,6 +477,7 @@ i += 1 return result + @staticmethod @jit.elidable def ll_lower(s): s_chars = s.chars @@ -476,6 +492,7 @@ i += 1 return result + @staticmethod def ll_join(s, length, items): s_chars = s.chars s_len = len(s_chars) @@ -509,7 +526,9 @@ i += 1 return result + @staticmethod @jit.elidable + @jit.oopspec('stroruni.cmp(s1, s2)') def ll_strcmp(s1, s2): if not s1 and not s2: return True 
@@ -531,9 +550,10 @@ return diff i += 1 return len1 - len2 - ll_strcmp.oopspec = 'stroruni.cmp(s1, s2)' + @staticmethod @jit.elidable + @jit.oopspec('stroruni.equal(s1, s2)') def ll_streq(s1, s2): if s1 == s2: # also if both are NULLs return True @@ -551,8 +571,8 @@ return False j += 1 return True - ll_streq.oopspec = 'stroruni.equal(s1, s2)' + @staticmethod @jit.elidable def ll_startswith(s1, s2): len1 = len(s1.chars) @@ -569,11 +589,13 @@ return True + @staticmethod def ll_startswith_char(s, ch): if not len(s.chars): return False return s.chars[0] == ch + @staticmethod @jit.elidable def ll_endswith(s1, s2): len1 = len(s1.chars) @@ -591,11 +613,13 @@ return True + @staticmethod def ll_endswith_char(s, ch): if not len(s.chars): return False return s.chars[len(s.chars) - 1] == ch + @staticmethod @jit.elidable @signature(types.any(), types.any(), types.int(), types.int(), returns=types.int()) def ll_find_char(s, ch, start, end): @@ -608,6 +632,7 @@ i += 1 return -1 + @staticmethod @jit.elidable def ll_rfind_char(s, ch, start, end): if end > len(s.chars): @@ -619,6 +644,7 @@ return i return -1 + @staticmethod @jit.elidable def ll_count_char(s, ch, start, end): count = 0 @@ -631,6 +657,7 @@ i += 1 return count + @staticmethod @signature(types.any(), types.any(), types.int(), types.int(), returns=types.int()) def ll_find(s1, s2, start, end): if start < 0: @@ -646,6 +673,7 @@ return LLHelpers.ll_search(s1, s2, start, end, FAST_FIND) + @staticmethod @signature(types.any(), types.any(), types.int(), types.int(), returns=types.int()) def ll_rfind(s1, s2, start, end): if start < 0: @@ -681,6 +709,7 @@ res = 0 return res + @staticmethod @jit.elidable def ll_search(s1, s2, start, end, mode): count = 0 @@ -768,6 +797,7 @@ return -1 return count + @staticmethod @signature(types.int(), types.any(), returns=types.any()) @jit.look_inside_iff(lambda length, items: jit.loop_unrolling_heuristic( items, length)) @@ -802,6 +832,7 @@ i += 1 return result + @staticmethod @jit.look_inside_iff(lambda length, chars, RES: jit.isconstant(length) and jit.isvirtual(chars)) def ll_join_chars(length, chars, RES): # no need to optimize this, will be replaced by string builder @@ -821,6 +852,7 @@ i += 1 return result + @staticmethod @jit.oopspec('stroruni.slice(s1, start, stop)') @signature(types.any(), types.int(), types.int(), returns=types.any()) @jit.elidable @@ -836,9 +868,11 @@ s1.copy_contents(s1, newstr, start, 0, lgt) return newstr + @staticmethod def ll_stringslice_startonly(s1, start): return LLHelpers._ll_stringslice(s1, start, len(s1.chars)) + @staticmethod @signature(types.any(), types.int(), types.int(), returns=types.any()) def ll_stringslice_startstop(s1, start, stop): if jit.we_are_jitted(): @@ -851,10 +885,12 @@ stop = len(s1.chars) return LLHelpers._ll_stringslice(s1, start, stop) + @staticmethod def ll_stringslice_minusone(s1): newlen = len(s1.chars) - 1 return LLHelpers._ll_stringslice(s1, 0, newlen) + @staticmethod def ll_split_chr(LIST, s, c, max): chars = s.chars strlen = len(chars) @@ -889,6 +925,7 @@ item.copy_contents(s, item, i, 0, j - i) return res + @staticmethod def ll_split(LIST, s, c, max): count = 1 if max == -1: @@ -920,6 +957,7 @@ item.copy_contents(s, item, prev_pos, 0, last - prev_pos) return res + @staticmethod def ll_rsplit_chr(LIST, s, c, max): chars = s.chars strlen = len(chars) @@ -955,6 +993,7 @@ item.copy_contents(s, item, j, 0, i - j) return res + @staticmethod def ll_rsplit(LIST, s, c, max): count = 1 if max == -1: @@ -986,6 +1025,7 @@ item.copy_contents(s, item, 0, 0, 
prev_pos) return res + @staticmethod @jit.elidable def ll_replace_chr_chr(s, c1, c2): length = len(s.chars) @@ -1001,6 +1041,7 @@ j += 1 return newstr + @staticmethod @jit.elidable def ll_contains(s, c): chars = s.chars @@ -1012,6 +1053,7 @@ i += 1 return False + @staticmethod @jit.elidable def ll_int(s, base): if not 2 <= base <= 36: @@ -1068,23 +1110,29 @@ # ll_build_push(x, next_string, n-1) # s = ll_build_finish(x) + @staticmethod def ll_build_start(parts_count): return malloc(TEMP, parts_count) + @staticmethod def ll_build_push(builder, next_string, index): builder[index] = next_string + @staticmethod def ll_build_finish(builder): return LLHelpers.ll_join_strs(len(builder), builder) + @staticmethod @specialize.memo() def ll_constant(s): return string_repr.convert_const(s) + @staticmethod @specialize.memo() def ll_constant_unicode(s): return unicode_repr.convert_const(s) + @classmethod def do_stringformat(cls, hop, sourcevarsrepr): s_str = hop.args_s[0] assert s_str.is_constant() @@ -1150,8 +1198,8 @@ hop.exception_cannot_occur() # to ignore the ZeroDivisionError of '%' return hop.gendirectcall(cls.ll_join_strs, size, vtemp) - do_stringformat = classmethod(do_stringformat) + @staticmethod @jit.dont_look_inside def ll_string2list(RESLIST, src): length = len(src.chars) diff --git a/rpython/rtyper/rmodel.py b/rpython/rtyper/rmodel.py --- a/rpython/rtyper/rmodel.py +++ b/rpython/rtyper/rmodel.py @@ -2,8 +2,7 @@ from rpython.flowspace.model import Constant from rpython.rtyper.error import TyperError, MissingRTypeOperation from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.lltypesystem.lltype import (Void, Bool, typeOf, - LowLevelType, isCompatibleType) +from rpython.rtyper.lltypesystem.lltype import Void, Bool, LowLevelType from rpython.tool.pairtype import pairtype, extendabletype, pair @@ -120,14 +119,9 @@ def convert_const(self, value): "Convert the given constant value to the low-level repr of 'self'." - if self.lowleveltype is not Void: - try: - realtype = typeOf(value) - except (AssertionError, AttributeError, TypeError): - realtype = '???' - if realtype != self.lowleveltype: - raise TyperError("convert_const(self = %r, value = %r)" % ( - self, value)) + if not self.lowleveltype._contains_value(value): + raise TyperError("convert_const(self = %r, value = %r)" % ( + self, value)) return value def get_ll_eq_function(self): @@ -356,18 +350,9 @@ lltype = reqtype else: raise TypeError(repr(reqtype)) - # Void Constants can hold any value; - # non-Void Constants must hold a correctly ll-typed value - if lltype is not Void: - try: - realtype = typeOf(value) - except (AssertionError, AttributeError): - realtype = '???' 
- if not isCompatibleType(realtype, lltype): - raise TyperError("inputconst(reqtype = %s, value = %s):\n" - "expected a %r,\n" - " got a %r" % (reqtype, value, - lltype, realtype)) + if not lltype._contains_value(value): + raise TyperError("inputconst(): expected a %r, got %r" % + (lltype, value)) c = Constant(value) c.concretetype = lltype return c @@ -422,7 +407,8 @@ def __ne__(self, other): return not (self == other) - def build_ll_dummy_value(self): + @property + def ll_dummy_value(self): TYPE = self.TYPE try: return self.rtyper.cache_dummy_values[TYPE] @@ -435,8 +421,6 @@ self.rtyper.cache_dummy_values[TYPE] = p return p - ll_dummy_value = property(build_ll_dummy_value) - # logging/warning diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -9,7 +9,6 @@ from rpython.rtyper.rfloat import FloatRepr from rpython.tool.pairtype import pairtype, pair from rpython.tool.sourcetools import func_with_new_name -from rpython.tool.staticmethods import StaticMethods from rpython.rlib.rstring import UnicodeBuilder @@ -800,10 +799,8 @@ # get flowed and annotated, mostly with SomePtr. # -# this class contains low level helpers used both by lltypesystem -class AbstractLLHelpers: - __metaclass__ = StaticMethods - +class AbstractLLHelpers(object): + @staticmethod def ll_isdigit(s): from rpython.rtyper.annlowlevel import hlstr @@ -815,6 +812,7 @@ return False return True + @staticmethod def ll_isalpha(s): from rpython.rtyper.annlowlevel import hlstr @@ -826,6 +824,7 @@ return False return True + @staticmethod def ll_isalnum(s): from rpython.rtyper.annlowlevel import hlstr @@ -837,14 +836,17 @@ return False return True + @staticmethod def ll_char_isspace(ch): c = ord(ch) return c == 32 or (9 <= c <= 13) # c in (9, 10, 11, 12, 13, 32) + @staticmethod def ll_char_isdigit(ch): c = ord(ch) return c <= 57 and c >= 48 + @staticmethod def ll_char_isalpha(ch): c = ord(ch) if c >= 97: @@ -852,6 +854,7 @@ else: return 65 <= c <= 90 + @staticmethod def ll_char_isalnum(ch): c = ord(ch) if c >= 65: @@ -862,47 +865,54 @@ else: return 48 <= c <= 57 + @staticmethod def ll_char_isupper(ch): c = ord(ch) return 65 <= c <= 90 + @staticmethod def ll_char_islower(ch): c = ord(ch) return 97 <= c <= 122 + @staticmethod def ll_upper_char(ch): if 'a' <= ch <= 'z': ch = chr(ord(ch) - 32) return ch + @staticmethod def ll_lower_char(ch): if 'A' <= ch <= 'Z': ch = chr(ord(ch) + 32) return ch + @staticmethod def ll_char_hash(ch): return ord(ch) + @staticmethod def ll_unichar_hash(ch): return ord(ch) + @classmethod def ll_str_is_true(cls, s): # check if a string is True, allowing for None return bool(s) and cls.ll_strlen(s) != 0 - ll_str_is_true = classmethod(ll_str_is_true) + @classmethod def ll_stritem_nonneg_checked(cls, s, i): if i >= cls.ll_strlen(s): raise IndexError return cls.ll_stritem_nonneg(s, i) - ll_stritem_nonneg_checked = classmethod(ll_stritem_nonneg_checked) + @classmethod def ll_stritem(cls, s, i): if i < 0: i += cls.ll_strlen(s) return cls.ll_stritem_nonneg(s, i) - ll_stritem = classmethod(ll_stritem) + @classmethod def ll_stritem_checked(cls, s, i): length = cls.ll_strlen(s) if i < 0: @@ -910,8 +920,8 @@ if i >= length or i < 0: raise IndexError return cls.ll_stritem_nonneg(s, i) - ll_stritem_checked = classmethod(ll_stritem_checked) + @staticmethod def parse_fmt_string(fmt): # we support x, d, s, f, [r] it = iter(fmt) @@ -937,6 +947,7 @@ r.append(curstr) return r + @staticmethod def ll_float(ll_str): from rpython.rtyper.annlowlevel import hlstr 
from rpython.rlib.rfloat import rstring_to_float @@ -961,6 +972,7 @@ assert end >= 0 return rstring_to_float(s[beg:end + 1]) + @classmethod def ll_splitlines(cls, LIST, ll_str, keep_newlines): from rpython.rtyper.annlowlevel import hlstr s = hlstr(ll_str) @@ -991,4 +1003,3 @@ item = cls.ll_stringslice_startstop(ll_str, j, strlen) res.ll_setitem_fast(list_length, item) return res - ll_splitlines = classmethod(ll_splitlines) diff --git a/rpython/tool/staticmethods.py b/rpython/tool/staticmethods.py deleted file mode 100644 --- a/rpython/tool/staticmethods.py +++ /dev/null @@ -1,14 +0,0 @@ -import types -class AbstractMethods(type): - def __new__(cls, cls_name, bases, cls_dict): - for key, value in cls_dict.iteritems(): - if isinstance(value, types.FunctionType): - cls_dict[key] = cls.decorator(value) - return type.__new__(cls, cls_name, bases, cls_dict) - - -class StaticMethods(AbstractMethods): - """ - Metaclass that turns plain methods into staticmethods. - """ - decorator = staticmethod diff --git a/rpython/translator/platform/__init__.py b/rpython/translator/platform/__init__.py --- a/rpython/translator/platform/__init__.py +++ b/rpython/translator/platform/__init__.py @@ -354,8 +354,10 @@ platform = pick_platform(new_platform, cc) if not platform: raise ValueError("pick_platform(%r, %s) failed"%(new_platform, cc)) - log.msg("Set platform with %r cc=%s, using cc=%r" % (new_platform, cc, - getattr(platform, 'cc','Unknown'))) + log.msg("Set platform with %r cc=%s, using cc=%r, version=%r" % (new_platform, cc, + getattr(platform, 'cc','Unknown'), + getattr(platform, 'version','Unknown'), + )) if new_platform == 'host': global host diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -14,9 +14,11 @@ if not cc: cc = os.environ.get('CC','') if not cc: - return MsvcPlatform(cc=cc, x64=x64_flag) + return MsvcPlatform(x64=x64_flag) elif cc.startswith('mingw') or cc == 'gcc': return MingwPlatform(cc) + else: + return MsvcPlatform(cc=cc, x64=x64_flag) try: subprocess.check_output([cc, '--version']) except: @@ -108,11 +110,14 @@ def __init__(self, cc=None, x64=False): self.x64 = x64 - msvc_compiler_environ = find_msvc_env(x64) - Platform.__init__(self, 'cl.exe') - if msvc_compiler_environ: - self.c_environ = os.environ.copy() - self.c_environ.update(msvc_compiler_environ) + if cc is None: + msvc_compiler_environ = find_msvc_env(x64) + Platform.__init__(self, 'cl.exe') + if msvc_compiler_environ: + self.c_environ = os.environ.copy() + self.c_environ.update(msvc_compiler_environ) + else: + self.cc = cc # detect version of current compiler returncode, stdout, stderr = _run_subprocess(self.cc, '', From noreply at buildbot.pypy.org Thu Oct 9 21:44:08 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 9 Oct 2014 21:44:08 +0200 (CEST) Subject: [pypy-commit] pypy default: fix searchsorted with multidim targets Message-ID: <20141009194408.7A0AD1C025B@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73868:188aa764d2e7 Date: 2014-10-09 15:18 -0400 http://bitbucket.org/pypy/pypy/changeset/188aa764d2e7/ Log: fix searchsorted with multidim targets diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -738,8 +738,6 @@ if len(self.get_shape()) > 1: raise oefmt(space.w_ValueError, "a must be a 1-d array") v = convert_to_array(space, w_v) - if 
len(v.get_shape()) > 1: - raise oefmt(space.w_ValueError, "v must be a 1-d array-like") ret = W_NDimArray.from_shape( space, v.get_shape(), descriptor.get_dtype_cache(space).w_longdtype) app_searchsort(space, self, v, space.wrap(side), ret) diff --git a/pypy/module/micronumpy/selection.py b/pypy/module/micronumpy/selection.py --- a/pypy/module/micronumpy/selection.py +++ b/pypy/module/micronumpy/selection.py @@ -375,9 +375,8 @@ op = operator.lt else: op = operator.le - if v.size < 2: - result[...] = _searchsort(a, op, v) - else: - for i in range(v.size): - result[i] = _searchsort(a, op, v[i]) + v = v.flat + result = result.flat + for i in xrange(len(v)): + result[i] = _searchsort(a, op, v[i]) """, filename=__file__).interphook('searchsort') diff --git a/pypy/module/micronumpy/test/test_selection.py b/pypy/module/micronumpy/test/test_selection.py --- a/pypy/module/micronumpy/test/test_selection.py +++ b/pypy/module/micronumpy/test/test_selection.py @@ -354,25 +354,36 @@ import numpy as np import sys a = np.arange(1, 6) + ret = a.searchsorted(3) assert ret == 2 assert isinstance(ret, np.generic) + ret = a.searchsorted(np.array(3)) assert ret == 2 assert isinstance(ret, np.generic) + ret = a.searchsorted(np.array([3])) assert ret == 2 assert isinstance(ret, np.ndarray) + + ret = a.searchsorted(np.array([[2, 3]])) + assert (ret == [1, 2]).all() + assert ret.shape == (1, 2) + ret = a.searchsorted(3, side='right') assert ret == 3 assert isinstance(ret, np.generic) + exc = raises(ValueError, a.searchsorted, 3, side=None) assert str(exc.value) == "expected nonempty string for keyword 'side'" exc = raises(ValueError, a.searchsorted, 3, side='') assert str(exc.value) == "expected nonempty string for keyword 'side'" exc = raises(ValueError, a.searchsorted, 3, side=2) assert str(exc.value) == "expected nonempty string for keyword 'side'" + ret = a.searchsorted([-10, 10, 2, 3]) assert (ret == [0, 5, 1, 2]).all() + if '__pypy__' in sys.builtin_module_names: raises(NotImplementedError, "a.searchsorted(3, sorter=range(6))") From noreply at buildbot.pypy.org Thu Oct 9 21:44:09 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 9 Oct 2014 21:44:09 +0200 (CEST) Subject: [pypy-commit] pypy default: optimize searchsorted for case of sorted keys Message-ID: <20141009194409.AB82E1C025B@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73869:0e32a2a7d77c Date: 2014-10-09 15:38 -0400 http://bitbucket.org/pypy/pypy/changeset/0e32a2a7d77c/ Log: optimize searchsorted for case of sorted keys diff --git a/pypy/module/micronumpy/selection.py b/pypy/module/micronumpy/selection.py --- a/pypy/module/micronumpy/selection.py +++ b/pypy/module/micronumpy/selection.py @@ -359,24 +359,31 @@ app_searchsort = applevel(r""" import operator - def _searchsort(a, op, val): - imin = 0 - imax = a.size - while imin < imax: - imid = imin + ((imax - imin) >> 1) - if op(a[imid], val): - imin = imid + 1 - else: - imax = imid - return imin - - def searchsort(a, v, side, result): + def searchsort(arr, val, side, res): + val = val.flat + res = res.flat if side == 0: op = operator.lt else: op = operator.le - v = v.flat - result = result.flat - for i in xrange(len(v)): - result[i] = _searchsort(a, op, v[i]) + + size = arr.size + imin = 0 + imax = size + last = val[0] + for i in xrange(len(val)): + key = val[i] + if last < key: + imax = size + else: + imin = 0 + imax = imax + 1 if imax < size else size + last = key + while imin < imax: + imid = imin + ((imax - imin) >> 1) + if op(arr[imid], key): + imin = imid + 1 + 
else: + imax = imid + res[i] = imin """, filename=__file__).interphook('searchsort') From noreply at buildbot.pypy.org Thu Oct 9 21:44:10 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 9 Oct 2014 21:44:10 +0200 (CEST) Subject: [pypy-commit] pypy default: fix searchsorted on empty input Message-ID: <20141009194410.E41BA1C025B@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73870:fad9ee2fdd2c Date: 2014-10-09 15:43 -0400 http://bitbucket.org/pypy/pypy/changeset/fad9ee2fdd2c/ Log: fix searchsorted on empty input diff --git a/pypy/module/micronumpy/selection.py b/pypy/module/micronumpy/selection.py --- a/pypy/module/micronumpy/selection.py +++ b/pypy/module/micronumpy/selection.py @@ -370,7 +370,10 @@ size = arr.size imin = 0 imax = size - last = val[0] + try: + last = val[0] + except IndexError: + return for i in xrange(len(val)): key = val[i] if last < key: diff --git a/pypy/module/micronumpy/test/test_selection.py b/pypy/module/micronumpy/test/test_selection.py --- a/pypy/module/micronumpy/test/test_selection.py +++ b/pypy/module/micronumpy/test/test_selection.py @@ -363,6 +363,10 @@ assert ret == 2 assert isinstance(ret, np.generic) + ret = a.searchsorted(np.array([])) + assert isinstance(ret, np.ndarray) + assert ret.shape == (0,) + ret = a.searchsorted(np.array([3])) assert ret == 2 assert isinstance(ret, np.ndarray) From noreply at buildbot.pypy.org Thu Oct 9 23:05:59 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Thu, 9 Oct 2014 23:05:59 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Avoid deadlock in test_ssl when the function is missing Message-ID: <20141009210559.6BE9B1D22A7@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r73873:3afee8df1893 Date: 2014-10-09 23:04 +0200 http://bitbucket.org/pypy/pypy/changeset/3afee8df1893/ Log: Avoid deadlock in test_ssl when the function is missing diff --git a/lib-python/3/test/test_ssl.py b/lib-python/3/test/test_ssl.py --- a/lib-python/3/test/test_ssl.py +++ b/lib-python/3/test/test_ssl.py @@ -1064,7 +1064,8 @@ self.sslconn = self.server.context.wrap_socket( self.sock, server_side=True) self.server.selected_protocols.append(self.sslconn.selected_npn_protocol()) - except (ssl.SSLError, ConnectionResetError) as e: + except (ssl.SSLError, ConnectionResetError, + AttributeError) as e: # We treat ConnectionResetError as though it were an # SSLError - OpenSSL on Ubuntu abruptly closes the # connection when asked to use an unsupported protocol. From noreply at buildbot.pypy.org Thu Oct 9 23:05:58 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Thu, 9 Oct 2014 23:05:58 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Really validate the ast in compile()... Message-ID: <20141009210558.3926E1D22A7@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r73872:ec5e03429d1d Date: 2014-10-09 22:03 +0200 http://bitbucket.org/pypy/pypy/changeset/ec5e03429d1d/ Log: Really validate the ast in compile()... 
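A minimal sketch of what the added validation catches, mirroring the new test_compare in this changeset (the AST construction and the "no comparators" message are taken from that test; the file name and the print call are illustrative assumptions only):

    import ast
    # py3.3-era AST: Module takes just a body list
    left = ast.Name("x", ast.Load())
    bad = ast.Module([ast.Expr(ast.Compare(left, [ast.In()], []))])
    bad.lineno = bad.col_offset = 0
    ast.fix_missing_locations(bad)
    try:
        # compile() now calls validate_ast() before compiling the node
        compile(bad, "<example>", "exec")
    except ValueError as e:
        print(e)   # message contains "no comparators", per the new test

Before this change, compile() handed the AST straight to compile_ast() without calling validate_ast(), so malformed hand-built nodes were not rejected up front.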
diff --git a/pypy/module/__builtin__/compiling.py b/pypy/module/__builtin__/compiling.py --- a/pypy/module/__builtin__/compiling.py +++ b/pypy/module/__builtin__/compiling.py @@ -45,6 +45,7 @@ if space.isinstance_w(w_source, space.gettypeobject(ast.W_AST.typedef)): ast_node = ast.mod.from_object(space, w_source) + ec.compiler.validate_ast(ast_node) code = ec.compiler.compile_ast(ast_node, filename, mode, flags, optimize=optimize) return space.wrap(code) diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -449,3 +449,26 @@ empty_yield_from.body[0].body[0].value.value = None exc = raises(ValueError, compile, empty_yield_from, "", "exec") assert "field value is required" in str(exc.value) + + def test_compare(self): + import ast + + def _mod(mod, msg=None, mode="exec", exc=ValueError): + mod.lineno = mod.col_offset = 0 + ast.fix_missing_locations(mod) + exc = raises(exc, compile, mod, "", mode) + if msg is not None: + assert msg in str(exc.value) + def _expr(node, msg=None, exc=ValueError): + mod = ast.Module([ast.Expr(node)]) + _mod(mod, msg, exc=exc) + left = ast.Name("x", ast.Load()) + comp = ast.Compare(left, [ast.In()], []) + _expr(comp, "no comparators") + comp = ast.Compare(left, [ast.In()], [ast.Num(4), ast.Num(5)]) + _expr(comp, "different number of comparators and operands") + comp = ast.Compare(ast.Num("blah"), [ast.In()], [left]) + _expr(comp, "non-numeric", exc=TypeError) + comp = ast.Compare(left, [ast.In()], [ast.Num("blah")]) + _expr(comp, "non-numeric", exc=TypeError) + From noreply at buildbot.pypy.org Thu Oct 9 23:05:56 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Thu, 9 Oct 2014 23:05:56 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Fix crash in ast module: check the presence of mandatory objects Message-ID: <20141009210556.DF9981D22A7@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r73871:73eefdc1c418 Date: 2014-10-09 21:52 +0200 http://bitbucket.org/pypy/pypy/changeset/73eefdc1c418/ Log: Fix crash in ast module: check the presence of mandatory objects diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -8,10 +8,9 @@ from pypy.interpreter.gateway import interp2app -def raise_attriberr(space, w_obj, name): - raise oefmt(space.w_AttributeError, - "'%T' object has no attribute '%s'", w_obj, name) - +def raise_required_value(space, w_obj, name): + raise oefmt(space.w_ValueError, + "field %s is required for %T", name, w_obj) def check_string(space, w_obj): if not (space.isinstance_w(w_obj, space.w_str) or @@ -261,6 +260,8 @@ def from_object(space, w_node): w_body = get_field(space, w_node, 'body', False) _body = expr.from_object(space, w_body) + if _body is None: + raise_required_value(space, w_node, 'body') return Expression(_body) State.ast_type('Expression', 'mod', ['body']) @@ -416,7 +417,11 @@ w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _name = space.identifier_w(w_name) + if _name is None: + raise_required_value(space, w_node, 'name') _args = arguments.from_object(space, w_args) + if _args is None: + raise_required_value(space, w_node, 'args') body_w = space.unpackiterable(w_body) _body = [stmt.from_object(space, w_item) for w_item in body_w] decorator_list_w = space.unpackiterable(w_decorator_list) @@ -509,6 +514,8 @@ 
w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _name = space.identifier_w(w_name) + if _name is None: + raise_required_value(space, w_node, 'name') bases_w = space.unpackiterable(w_bases) _bases = [expr.from_object(space, w_item) for w_item in bases_w] keywords_w = space.unpackiterable(w_keywords) @@ -646,6 +653,8 @@ targets_w = space.unpackiterable(w_targets) _targets = [expr.from_object(space, w_item) for w_item in targets_w] _value = expr.from_object(space, w_value) + if _value is None: + raise_required_value(space, w_node, 'value') _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) return Assign(_targets, _value, _lineno, _col_offset) @@ -691,8 +700,14 @@ w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _target = expr.from_object(space, w_target) + if _target is None: + raise_required_value(space, w_node, 'target') _op = operator.from_object(space, w_op) + if _op is None: + raise_required_value(space, w_node, 'op') _value = expr.from_object(space, w_value) + if _value is None: + raise_required_value(space, w_node, 'value') _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) return AugAssign(_target, _op, _value, _lineno, _col_offset) @@ -754,7 +769,11 @@ w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _target = expr.from_object(space, w_target) + if _target is None: + raise_required_value(space, w_node, 'target') _iter = expr.from_object(space, w_iter) + if _iter is None: + raise_required_value(space, w_node, 'iter') body_w = space.unpackiterable(w_body) _body = [stmt.from_object(space, w_item) for w_item in body_w] orelse_w = space.unpackiterable(w_orelse) @@ -815,6 +834,8 @@ w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _test = expr.from_object(space, w_test) + if _test is None: + raise_required_value(space, w_node, 'test') body_w = space.unpackiterable(w_body) _body = [stmt.from_object(space, w_item) for w_item in body_w] orelse_w = space.unpackiterable(w_orelse) @@ -875,6 +896,8 @@ w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _test = expr.from_object(space, w_test) + if _test is None: + raise_required_value(space, w_node, 'test') body_w = space.unpackiterable(w_body) _body = [stmt.from_object(space, w_item) for w_item in body_w] orelse_w = space.unpackiterable(w_orelse) @@ -1139,6 +1162,8 @@ w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _test = expr.from_object(space, w_test) + if _test is None: + raise_required_value(space, w_node, 'test') _msg = expr.from_object(space, w_msg) if w_msg is not None else None _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) @@ -1350,6 +1375,8 @@ w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _value = expr.from_object(space, w_value) + if _value is None: + raise_required_value(space, w_node, 'value') _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) return Expr(_value, _lineno, _col_offset) @@ -1551,6 +1578,8 @@ w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _op = boolop.from_object(space, w_op) + if _op is None: + 
raise_required_value(space, w_node, 'op') values_w = space.unpackiterable(w_values) _values = [expr.from_object(space, w_item) for w_item in values_w] _lineno = space.int_w(w_lineno) @@ -1598,8 +1627,14 @@ w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _left = expr.from_object(space, w_left) + if _left is None: + raise_required_value(space, w_node, 'left') _op = operator.from_object(space, w_op) + if _op is None: + raise_required_value(space, w_node, 'op') _right = expr.from_object(space, w_right) + if _right is None: + raise_required_value(space, w_node, 'right') _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) return BinOp(_left, _op, _right, _lineno, _col_offset) @@ -1640,7 +1675,11 @@ w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _op = unaryop.from_object(space, w_op) + if _op is None: + raise_required_value(space, w_node, 'op') _operand = expr.from_object(space, w_operand) + if _operand is None: + raise_required_value(space, w_node, 'operand') _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) return UnaryOp(_op, _operand, _lineno, _col_offset) @@ -1682,7 +1721,11 @@ w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _args = arguments.from_object(space, w_args) + if _args is None: + raise_required_value(space, w_node, 'args') _body = expr.from_object(space, w_body) + if _body is None: + raise_required_value(space, w_node, 'body') _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) return Lambda(_args, _body, _lineno, _col_offset) @@ -1729,8 +1772,14 @@ w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _test = expr.from_object(space, w_test) + if _test is None: + raise_required_value(space, w_node, 'test') _body = expr.from_object(space, w_body) + if _body is None: + raise_required_value(space, w_node, 'body') _orelse = expr.from_object(space, w_orelse) + if _orelse is None: + raise_required_value(space, w_node, 'orelse') _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) return IfExp(_test, _body, _orelse, _lineno, _col_offset) @@ -1873,6 +1922,8 @@ w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _elt = expr.from_object(space, w_elt) + if _elt is None: + raise_required_value(space, w_node, 'elt') generators_w = space.unpackiterable(w_generators) _generators = [comprehension.from_object(space, w_item) for w_item in generators_w] _lineno = space.int_w(w_lineno) @@ -1921,6 +1972,8 @@ w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _elt = expr.from_object(space, w_elt) + if _elt is None: + raise_required_value(space, w_node, 'elt') generators_w = space.unpackiterable(w_generators) _generators = [comprehension.from_object(space, w_item) for w_item in generators_w] _lineno = space.int_w(w_lineno) @@ -1974,7 +2027,11 @@ w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _key = expr.from_object(space, w_key) + if _key is None: + raise_required_value(space, w_node, 'key') _value = expr.from_object(space, w_value) + if _value is None: + raise_required_value(space, w_node, 'value') generators_w = space.unpackiterable(w_generators) _generators = 
[comprehension.from_object(space, w_item) for w_item in generators_w] _lineno = space.int_w(w_lineno) @@ -2023,6 +2080,8 @@ w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _elt = expr.from_object(space, w_elt) + if _elt is None: + raise_required_value(space, w_node, 'elt') generators_w = space.unpackiterable(w_generators) _generators = [comprehension.from_object(space, w_item) for w_item in generators_w] _lineno = space.int_w(w_lineno) @@ -2098,6 +2157,8 @@ w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _value = expr.from_object(space, w_value) + if _value is None: + raise_required_value(space, w_node, 'value') _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) return YieldFrom(_value, _lineno, _col_offset) @@ -2152,6 +2213,8 @@ w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _left = expr.from_object(space, w_left) + if _left is None: + raise_required_value(space, w_node, 'left') ops_w = space.unpackiterable(w_ops) _ops = [cmpop.from_object(space, w_item) for w_item in ops_w] comparators_w = space.unpackiterable(w_comparators) @@ -2224,6 +2287,8 @@ w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _func = expr.from_object(space, w_func) + if _func is None: + raise_required_value(space, w_node, 'func') args_w = space.unpackiterable(w_args) _args = [expr.from_object(space, w_item) for w_item in args_w] keywords_w = space.unpackiterable(w_keywords) @@ -2265,6 +2330,8 @@ w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _n = w_n + if _n is None: + raise_required_value(space, w_node, 'n') _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) return Num(_n, _lineno, _col_offset) @@ -2300,6 +2367,8 @@ w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _s = check_string(space, w_s) + if _s is None: + raise_required_value(space, w_node, 's') _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) return Str(_s, _lineno, _col_offset) @@ -2335,6 +2404,8 @@ w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _s = check_string(space, w_s) + if _s is None: + raise_required_value(space, w_node, 's') _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) return Bytes(_s, _lineno, _col_offset) @@ -2409,8 +2480,14 @@ w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _value = expr.from_object(space, w_value) + if _value is None: + raise_required_value(space, w_node, 'value') _attr = space.identifier_w(w_attr) + if _attr is None: + raise_required_value(space, w_node, 'attr') _ctx = expr_context.from_object(space, w_ctx) + if _ctx is None: + raise_required_value(space, w_node, 'ctx') _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) return Attribute(_value, _attr, _ctx, _lineno, _col_offset) @@ -2456,8 +2533,14 @@ w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _value = expr.from_object(space, w_value) + if _value is None: + raise_required_value(space, w_node, 'value') _slice = slice.from_object(space, w_slice) + if _slice is None: + 
raise_required_value(space, w_node, 'slice') _ctx = expr_context.from_object(space, w_ctx) + if _ctx is None: + raise_required_value(space, w_node, 'ctx') _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) return Subscript(_value, _slice, _ctx, _lineno, _col_offset) @@ -2498,7 +2581,11 @@ w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _value = expr.from_object(space, w_value) + if _value is None: + raise_required_value(space, w_node, 'value') _ctx = expr_context.from_object(space, w_ctx) + if _ctx is None: + raise_required_value(space, w_node, 'ctx') _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) return Starred(_value, _ctx, _lineno, _col_offset) @@ -2538,7 +2625,11 @@ w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _id = space.identifier_w(w_id) + if _id is None: + raise_required_value(space, w_node, 'id') _ctx = expr_context.from_object(space, w_ctx) + if _ctx is None: + raise_required_value(space, w_node, 'ctx') _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) return Name(_id, _ctx, _lineno, _col_offset) @@ -2586,6 +2677,8 @@ elts_w = space.unpackiterable(w_elts) _elts = [expr.from_object(space, w_item) for w_item in elts_w] _ctx = expr_context.from_object(space, w_ctx) + if _ctx is None: + raise_required_value(space, w_node, 'ctx') _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) return List(_elts, _ctx, _lineno, _col_offset) @@ -2633,6 +2726,8 @@ elts_w = space.unpackiterable(w_elts) _elts = [expr.from_object(space, w_item) for w_item in elts_w] _ctx = expr_context.from_object(space, w_ctx) + if _ctx is None: + raise_required_value(space, w_node, 'ctx') _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) return Tuple(_elts, _ctx, _lineno, _col_offset) @@ -2668,6 +2763,8 @@ w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _value = w_value + if _value is None: + raise_required_value(space, w_node, 'value') _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) return Const(_value, _lineno, _col_offset) @@ -2852,6 +2949,8 @@ def from_object(space, w_node): w_value = get_field(space, w_node, 'value', False) _value = expr.from_object(space, w_value) + if _value is None: + raise_required_value(space, w_node, 'value') return Index(_value) State.ast_type('Index', 'slice', ['value']) @@ -3190,7 +3289,11 @@ w_iter = get_field(space, w_node, 'iter', False) w_ifs = get_field(space, w_node, 'ifs', False) _target = expr.from_object(space, w_target) + if _target is None: + raise_required_value(space, w_node, 'target') _iter = expr.from_object(space, w_iter) + if _iter is None: + raise_required_value(space, w_node, 'iter') ifs_w = space.unpackiterable(w_ifs) _ifs = [expr.from_object(space, w_item) for w_item in ifs_w] return comprehension(_target, _iter, _ifs) @@ -3386,6 +3489,8 @@ w_arg = get_field(space, w_node, 'arg', False) w_annotation = get_field(space, w_node, 'annotation', True) _arg = space.identifier_w(w_arg) + if _arg is None: + raise_required_value(space, w_node, 'arg') _annotation = expr.from_object(space, w_annotation) if w_annotation is not None else None return arg(_arg, _annotation) @@ -3417,7 +3522,11 @@ w_arg = get_field(space, w_node, 'arg', False) w_value = get_field(space, w_node, 'value', False) _arg = space.identifier_w(w_arg) + if _arg is None: + 
raise_required_value(space, w_node, 'arg') _value = expr.from_object(space, w_value) + if _value is None: + raise_required_value(space, w_node, 'value') return keyword(_arg, _value) State.ast_type('keyword', 'AST', ['arg', 'value']) @@ -3447,6 +3556,8 @@ w_name = get_field(space, w_node, 'name', False) w_asname = get_field(space, w_node, 'asname', True) _name = space.identifier_w(w_name) + if _name is None: + raise_required_value(space, w_node, 'name') _asname = space.str_or_None_w(w_asname) return alias(_name, _asname) @@ -3480,6 +3591,8 @@ w_context_expr = get_field(space, w_node, 'context_expr', False) w_optional_vars = get_field(space, w_node, 'optional_vars', True) _context_expr = expr.from_object(space, w_context_expr) + if _context_expr is None: + raise_required_value(space, w_node, 'context_expr') _optional_vars = expr.from_object(space, w_optional_vars) if w_optional_vars is not None else None return withitem(_context_expr, _optional_vars) diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -189,6 +189,11 @@ else: value = self.get_value_extractor(field, "w_%s" % (field.name,)) lines = ["_%s = %s" % (field.name, value)] + if not field.opt and field.type.value not in ("int",): + lines.append("if _%s is None:" % (field.name,)) + lines.append(" raise_required_value(space, w_node, '%s')" + % (field.name,)) + return lines def make_converters(self, fields, name, extras=None): @@ -408,10 +413,9 @@ from pypy.interpreter.gateway import interp2app -def raise_attriberr(space, w_obj, name): - raise oefmt(space.w_AttributeError, - "'%T' object has no attribute '%s'", w_obj, name) - +def raise_required_value(space, w_obj, name): + raise oefmt(space.w_ValueError, + "field %s is required for %T", name, w_obj) def check_string(space, w_obj): if not (space.isinstance_w(w_obj, space.w_str) or diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -441,3 +441,11 @@ str_node2 = copy.deepcopy(str_node) dict_res = str_node2.__dict__ assert dict_res == {'n':2, 'lineno':2} + + def test_empty_yield_from(self): + # Issue 16546: yield from value is not optional. 
+ import ast + empty_yield_from = ast.parse("def f():\n yield from g()") + empty_yield_from.body[0].body[0].value.value = None + exc = raises(ValueError, compile, empty_yield_from, "", "exec") + assert "field value is required" in str(exc.value) From noreply at buildbot.pypy.org Fri Oct 10 06:25:42 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 10 Oct 2014 06:25:42 +0200 (CEST) Subject: [pypy-commit] pypy default: put flatiter typedef with class Message-ID: <20141010042542.61D111C0605@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73874:f4c2ef76f510 Date: 2014-10-09 16:27 -0400 http://bitbucket.org/pypy/pypy/changeset/f4c2ef76f510/ Log: put flatiter typedef with class diff --git a/pypy/module/micronumpy/flatiter.py b/pypy/module/micronumpy/flatiter.py --- a/pypy/module/micronumpy/flatiter.py +++ b/pypy/module/micronumpy/flatiter.py @@ -1,7 +1,10 @@ from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.gateway import interp2app +from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.module.micronumpy import loop -from pypy.module.micronumpy.base import W_NDimArray, convert_to_array +from pypy.module.micronumpy.base import convert_to_array from pypy.module.micronumpy.concrete import BaseConcreteArray +from .ndarray import W_NDimArray class FakeArrayImplementation(BaseConcreteArray): @@ -90,4 +93,21 @@ def descr_base(self, space): return space.wrap(self.base) -# typedef is in interp_ndarray, so we see the additional arguments +W_FlatIterator.typedef = TypeDef("numpy.flatiter", + __iter__ = interp2app(W_FlatIterator.descr_iter), + __getitem__ = interp2app(W_FlatIterator.descr_getitem), + __setitem__ = interp2app(W_FlatIterator.descr_setitem), + __len__ = interp2app(W_FlatIterator.descr_len), + + __eq__ = interp2app(W_FlatIterator.descr_eq), + __ne__ = interp2app(W_FlatIterator.descr_ne), + __lt__ = interp2app(W_FlatIterator.descr_lt), + __le__ = interp2app(W_FlatIterator.descr_le), + __gt__ = interp2app(W_FlatIterator.descr_gt), + __ge__ = interp2app(W_FlatIterator.descr_ge), + + next = interp2app(W_FlatIterator.descr_next), + base = GetSetProperty(W_FlatIterator.descr_base), + index = GetSetProperty(W_FlatIterator.descr_index), + coords = GetSetProperty(W_FlatIterator.descr_coords), +) diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -18,7 +18,6 @@ from pypy.module.micronumpy.converters import multi_axis_converter, \ order_converter, shape_converter, searchside_converter from pypy.module.micronumpy.flagsobj import W_FlagsObject -from pypy.module.micronumpy.flatiter import W_FlatIterator from pypy.module.micronumpy.strides import get_shape_from_iterable, \ shape_agreement, shape_agreement_multiple from .selection import app_searchsort @@ -482,6 +481,7 @@ loop.flatiter_setitem(space, dtype, arr, iter, state, 1, iter.size) def descr_get_flatiter(self, space): + from .flatiter import W_FlatIterator return space.wrap(W_FlatIterator(self)) def descr_item(self, space, __args__): @@ -1464,23 +1464,3 @@ def _reconstruct(space, w_subtype, w_shape, w_dtype): return descr_new_array(space, w_subtype, w_shape, w_dtype) - - -W_FlatIterator.typedef = TypeDef("numpy.flatiter", - __iter__ = interp2app(W_FlatIterator.descr_iter), - __getitem__ = interp2app(W_FlatIterator.descr_getitem), - __setitem__ = interp2app(W_FlatIterator.descr_setitem), - __len__ = interp2app(W_FlatIterator.descr_len), - - __eq__ = 
interp2app(W_FlatIterator.descr_eq), - __ne__ = interp2app(W_FlatIterator.descr_ne), - __lt__ = interp2app(W_FlatIterator.descr_lt), - __le__ = interp2app(W_FlatIterator.descr_le), - __gt__ = interp2app(W_FlatIterator.descr_gt), - __ge__ = interp2app(W_FlatIterator.descr_ge), - - next = interp2app(W_FlatIterator.descr_next), - base = GetSetProperty(W_FlatIterator.descr_base), - index = GetSetProperty(W_FlatIterator.descr_index), - coords = GetSetProperty(W_FlatIterator.descr_coords), -) From noreply at buildbot.pypy.org Fri Oct 10 06:25:43 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 10 Oct 2014 06:25:43 +0200 (CEST) Subject: [pypy-commit] pypy default: reorganize flatiter Message-ID: <20141010042543.8ED081C0605@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73875:7d64e06caadf Date: 2014-10-09 16:43 -0400 http://bitbucket.org/pypy/pypy/changeset/7d64e06caadf/ Log: reorganize flatiter diff --git a/pypy/module/micronumpy/flatiter.py b/pypy/module/micronumpy/flatiter.py --- a/pypy/module/micronumpy/flatiter.py +++ b/pypy/module/micronumpy/flatiter.py @@ -34,6 +34,19 @@ # this is needed to support W_NDimArray interface self.implementation = FakeArrayImplementation(self.base) + def descr_base(self, space): + return space.wrap(self.base) + + def descr_index(self, space): + return space.wrap(self.state.index) + + def descr_coords(self, space): + self.state = self.iter.update(self.state) + return space.newtuple([space.wrap(c) for c in self.state.indices]) + + def descr_iter(self): + return self + def descr_len(self, space): return space.wrap(self.iter.size) @@ -44,13 +57,6 @@ self.state = self.iter.next(self.state) return w_res - def descr_index(self, space): - return space.wrap(self.state.index) - - def descr_coords(self, space): - self.state = self.iter.update(self.state) - return space.newtuple([space.wrap(c) for c in self.state.indices]) - def descr_getitem(self, space, w_idx): if not (space.isinstance_w(w_idx, space.w_int) or space.isinstance_w(w_idx, space.w_slice)): @@ -87,17 +93,18 @@ finally: self.state = self.iter.reset(self.state) - def descr_iter(self): - return self - - def descr_base(self, space): - return space.wrap(self.base) W_FlatIterator.typedef = TypeDef("numpy.flatiter", + base = GetSetProperty(W_FlatIterator.descr_base), + index = GetSetProperty(W_FlatIterator.descr_index), + coords = GetSetProperty(W_FlatIterator.descr_coords), + __iter__ = interp2app(W_FlatIterator.descr_iter), + __len__ = interp2app(W_FlatIterator.descr_len), + next = interp2app(W_FlatIterator.descr_next), + __getitem__ = interp2app(W_FlatIterator.descr_getitem), __setitem__ = interp2app(W_FlatIterator.descr_setitem), - __len__ = interp2app(W_FlatIterator.descr_len), __eq__ = interp2app(W_FlatIterator.descr_eq), __ne__ = interp2app(W_FlatIterator.descr_ne), @@ -105,9 +112,4 @@ __le__ = interp2app(W_FlatIterator.descr_le), __gt__ = interp2app(W_FlatIterator.descr_gt), __ge__ = interp2app(W_FlatIterator.descr_ge), - - next = interp2app(W_FlatIterator.descr_next), - base = GetSetProperty(W_FlatIterator.descr_base), - index = GetSetProperty(W_FlatIterator.descr_index), - coords = GetSetProperty(W_FlatIterator.descr_coords), ) From noreply at buildbot.pypy.org Fri Oct 10 06:25:44 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 10 Oct 2014 06:25:44 +0200 (CEST) Subject: [pypy-commit] pypy default: fix searchsorted on array scalar Message-ID: <20141010042544.BCA5C1C0605@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73876:4910e986b85d 
Date: 2014-10-09 18:03 -0400 http://bitbucket.org/pypy/pypy/changeset/4910e986b85d/ Log: fix searchsorted on array scalar diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -735,7 +735,7 @@ raise OperationError(space.w_NotImplementedError, space.wrap( 'sorter not supported in searchsort')) side = searchside_converter(space, w_side) - if len(self.get_shape()) > 1: + if len(self.get_shape()) != 1: raise oefmt(space.w_ValueError, "a must be a 1-d array") v = convert_to_array(space, w_v) ret = W_NDimArray.from_shape( diff --git a/pypy/module/micronumpy/test/test_selection.py b/pypy/module/micronumpy/test/test_selection.py --- a/pypy/module/micronumpy/test/test_selection.py +++ b/pypy/module/micronumpy/test/test_selection.py @@ -352,7 +352,10 @@ def test_searchsort(self): import numpy as np - import sys + + a = np.array(2) + raises(ValueError, a.searchsorted, 3) + a = np.arange(1, 6) ret = a.searchsorted(3) @@ -389,5 +392,6 @@ ret = a.searchsorted([-10, 10, 2, 3]) assert (ret == [0, 5, 1, 2]).all() + import sys if '__pypy__' in sys.builtin_module_names: raises(NotImplementedError, "a.searchsorted(3, sorter=range(6))") From noreply at buildbot.pypy.org Fri Oct 10 06:25:45 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 10 Oct 2014 06:25:45 +0200 (CEST) Subject: [pypy-commit] pypy default: implement searchsorted in rpython with jitdriver Message-ID: <20141010042545.F342E1C0605@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73877:0462e4a83ff1 Date: 2014-10-09 18:03 -0400 http://bitbucket.org/pypy/pypy/changeset/0462e4a83ff1/ Log: implement searchsorted in rpython with jitdriver diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -36,7 +36,7 @@ SINGLE_ARG_FUNCTIONS = ["sum", "prod", "max", "min", "all", "any", "unegative", "flat", "tostring","count_nonzero", "argsort"] -TWO_ARG_FUNCTIONS = ["dot", 'take'] +TWO_ARG_FUNCTIONS = ["dot", 'take', 'searchsorted'] TWO_ARG_FUNCTIONS_OR_NONE = ['view', 'astype'] THREE_ARG_FUNCTIONS = ['where'] @@ -109,6 +109,9 @@ if stop < 0: stop += size + 1 if step < 0: + start, stop = stop, start + start -= 1 + stop -= 1 lgt = (stop - start + 1) / step + 1 else: lgt = (stop - start - 1) / step + 1 @@ -475,7 +478,6 @@ class SliceConstant(Node): def __init__(self, start, stop, step): - # no negative support for now self.start = start self.stop = stop self.step = step @@ -582,6 +584,9 @@ w_res = arr.descr_dot(interp.space, arg) elif self.name == 'take': w_res = arr.descr_take(interp.space, arg) + elif self.name == "searchsorted": + w_res = arr.descr_searchsorted(interp.space, arg, + interp.space.wrap('left')) else: assert False # unreachable code elif self.name in THREE_ARG_FUNCTIONS: diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -700,3 +700,43 @@ out_iter.setitem(out_state, arr.getitem_index(space, indexes)) iter.next() out_state = out_iter.next(out_state) + +def _new_binsearch(side, op_name): + binsearch_driver = jit.JitDriver(name='numpy_binsearch_' + side, + greens=['dtype'], + reds='auto') + + def binsearch(space, arr, key, ret): + assert len(arr.get_shape()) == 1 + dtype = key.get_dtype() + op = getattr(dtype.itemtype, op_name) + key_iter, key_state = key.create_iter() + ret_iter, ret_state = 
ret.create_iter() + ret_iter.track_index = False + size = arr.get_size() + min_idx = 0 + max_idx = size + last_key_val = key_iter.getitem(key_state) + while not key_iter.done(key_state): + key_val = key_iter.getitem(key_state) + if dtype.itemtype.lt(last_key_val, key_val): + max_idx = size + else: + min_idx = 0 + max_idx = max_idx + 1 if max_idx < size else size + last_key_val = key_val + while min_idx < max_idx: + binsearch_driver.jit_merge_point(dtype=dtype) + mid_idx = min_idx + ((max_idx - min_idx) >> 1) + mid_val = arr.getitem(space, [mid_idx]).convert_to(space, dtype) + if op(mid_val, key_val): + min_idx = mid_idx + 1 + else: + max_idx = mid_idx + ret_iter.setitem(ret_state, ret.get_dtype().box(min_idx)) + ret_state = ret_iter.next(ret_state) + key_state = key_iter.next(key_state) + return binsearch + +binsearch_left = _new_binsearch('left', 'lt') +binsearch_right = _new_binsearch('right', 'le') diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -20,7 +20,6 @@ from pypy.module.micronumpy.flagsobj import W_FlagsObject from pypy.module.micronumpy.strides import get_shape_from_iterable, \ shape_agreement, shape_agreement_multiple -from .selection import app_searchsort def _match_dot_shapes(space, left, right): @@ -740,7 +739,11 @@ v = convert_to_array(space, w_v) ret = W_NDimArray.from_shape( space, v.get_shape(), descriptor.get_dtype_cache(space).w_longdtype) - app_searchsort(space, self, v, space.wrap(side), ret) + if side == NPY.SEARCHLEFT: + binsearch = loop.binsearch_left + else: + binsearch = loop.binsearch_right + binsearch(space, self, v, ret) if ret.is_scalar(): return ret.get_scalar_value() return ret diff --git a/pypy/module/micronumpy/selection.py b/pypy/module/micronumpy/selection.py --- a/pypy/module/micronumpy/selection.py +++ b/pypy/module/micronumpy/selection.py @@ -1,5 +1,4 @@ from pypy.interpreter.error import oefmt -from pypy.interpreter.gateway import applevel from rpython.rlib.listsort import make_timsort_class from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import widen @@ -354,39 +353,3 @@ cache[cls] = make_sort_function(space, cls, it) self.cache = cache self._lookup = specialize.memo()(lambda tp: cache[tp[0]]) - - -app_searchsort = applevel(r""" - import operator - - def searchsort(arr, val, side, res): - val = val.flat - res = res.flat - if side == 0: - op = operator.lt - else: - op = operator.le - - size = arr.size - imin = 0 - imax = size - try: - last = val[0] - except IndexError: - return - for i in xrange(len(val)): - key = val[i] - if last < key: - imax = size - else: - imin = 0 - imax = imax + 1 if imax < size else size - last = key - while imin < imax: - imid = imin + ((imax - imin) >> 1) - if op(arr[imid], key): - imin = imid + 1 - else: - imax = imid - res[i] = imin -""", filename=__file__).interphook('searchsort') diff --git a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ b/pypy/module/micronumpy/test/test_compile.py @@ -330,3 +330,12 @@ results = interp.results[0] assert isinstance(results, W_NDimArray) assert results.get_dtype().is_int() + + def test_searchsorted(self): + interp = self.run(''' + a = [1, 4, 5, 6, 9] + b = |30| -> ::-1 + c = searchsorted(a, b) + c -> -1 + ''') + assert interp.results[0].value == 0 diff --git a/pypy/module/micronumpy/test/test_selection.py 
b/pypy/module/micronumpy/test/test_selection.py --- a/pypy/module/micronumpy/test/test_selection.py +++ b/pypy/module/micronumpy/test/test_selection.py @@ -382,6 +382,9 @@ assert ret == 3 assert isinstance(ret, np.generic) + assert a.searchsorted(3.1) == 3 + assert a.searchsorted(3.9) == 3 + exc = raises(ValueError, a.searchsorted, 3, side=None) assert str(exc.value) == "expected nonempty string for keyword 'side'" exc = raises(ValueError, a.searchsorted, 3, side='') diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -51,7 +51,9 @@ w_res = i.getitem(s) if isinstance(w_res, boxes.W_Float64Box): return w_res.value - if isinstance(w_res, boxes.W_Int64Box): + elif isinstance(w_res, boxes.W_Int64Box): + return float(w_res.value) + elif isinstance(w_res, boxes.W_LongBox): return float(w_res.value) elif isinstance(w_res, boxes.W_BoolBox): return float(w_res.value) @@ -660,3 +662,30 @@ 'raw_load': 2, 'raw_store': 1, }) + + def define_searchsorted(): + return """ + a = [1, 4, 5, 6, 9] + b = |30| -> ::-1 + c = searchsorted(a, b) + c -> -1 + """ + + def test_searchsorted(self): + result = self.run("searchsorted") + assert result == 0 + self.check_trace_count(6) + self.check_simple_loop({ + 'float_lt': 1, + 'guard_false': 2, + 'guard_not_invalidated': 1, + 'guard_true': 2, + 'int_add': 3, + 'int_ge': 1, + 'int_lt': 2, + 'int_mul': 1, + 'int_rshift': 1, + 'int_sub': 1, + 'jump': 1, + 'raw_load': 1, + }) From noreply at buildbot.pypy.org Fri Oct 10 06:25:47 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 10 Oct 2014 06:25:47 +0200 (CEST) Subject: [pypy-commit] pypy default: fix tests on 32bit Message-ID: <20141010042547.1FE801C0605@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73878:aab89610d3d8 Date: 2014-10-10 00:24 -0400 http://bitbucket.org/pypy/pypy/changeset/aab89610d3d8/ Log: fix tests on 32bit diff --git a/pypy/module/micronumpy/test/test_arrayops.py b/pypy/module/micronumpy/test/test_arrayops.py --- a/pypy/module/micronumpy/test/test_arrayops.py +++ b/pypy/module/micronumpy/test/test_arrayops.py @@ -207,7 +207,7 @@ exc = raises(TypeError, np.result_type, a=2) assert str(exc.value) == "result_type() takes no keyword arguments" assert np.result_type(True) is np.dtype('bool') - assert np.result_type(1) is np.dtype('int64') + assert np.result_type(1) is np.dtype('int') assert np.result_type(1.) is np.dtype('float64') assert np.result_type(1+2j) is np.dtype('complex128') assert np.result_type(1, 1.) is np.dtype('float64') diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -101,8 +101,8 @@ f149 = raw_load(i100, i129, descr=) p150 = getfield_gc_pure(p123, descr=) i151 = int_add(i117, 1) - setarrayitem_gc(p150, 1, 0, descr=) - setarrayitem_gc(p150, 0, 0, descr=) + setarrayitem_gc(p150, 1, 0, descr=) + setarrayitem_gc(p150, 0, 0, descr=) guard_not_invalidated(descr=...) 
i154 = getfield_raw(ticker_address, descr=) i155 = int_lt(i154, 0) @@ -143,8 +143,8 @@ p152 = getfield_gc_pure(p126, descr=) i153 = int_add(i120, 1) i154 = getfield_raw(ticker_address, descr=) - setarrayitem_gc(p152, 1, 0, descr=) - setarrayitem_gc(p152, 0, 0, descr=) + setarrayitem_gc(p152, 1, 0, descr=) + setarrayitem_gc(p152, 0, 0, descr=) i157 = int_lt(i154, 0) guard_false(i157, descr=...) p158 = new_with_vtable(...) From noreply at buildbot.pypy.org Fri Oct 10 07:22:40 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 10 Oct 2014 07:22:40 +0200 (CEST) Subject: [pypy-commit] pypy default: some optimizations for arange Message-ID: <20141010052240.3DDB81C025B@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73879:3245cfc8a14a Date: 2014-10-10 01:14 -0400 http://bitbucket.org/pypy/pypy/changeset/3245cfc8a14a/ Log: some optimizations for arange diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -16,9 +16,9 @@ dtype = test.dtype length = math.ceil((float(stop) - start) / step) length = int(length) - arr = _numpypy.multiarray.zeros(length, dtype=dtype) + arr = _numpypy.multiarray.empty(length, dtype=dtype) i = start - for j in range(arr.size): + for j in xrange(arr.size): arr[j] = i i += step return arr From noreply at buildbot.pypy.org Fri Oct 10 16:13:41 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 10 Oct 2014 16:13:41 +0200 (CEST) Subject: [pypy-commit] pypy ufuncapi: pep8 Message-ID: <20141010141341.8D1E41C1347@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufuncapi Changeset: r73880:b0409e6b4887 Date: 2014-10-07 22:57 +0300 http://bitbucket.org/pypy/pypy/changeset/b0409e6b4887/ Log: pep8 diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -267,7 +267,7 @@ def __init__(self, func, data): self.func = func self.data = data - + def descr_call(self, space, __args__): args_w, kwds_w = __args__.unpack() dataps = alloc_raw_storage(CCHARP_SIZE * len(args_w), track_allocation=False) @@ -281,7 +281,7 @@ raw_storage_setitem(dims, LONG_SIZE * i, rffi.cast(rffi.LONG, arg_i.get_size())) raw_storage_setitem(steps, LONG_SIZE * i, rffi.cast(rffi.LONG, arg_i.get_dtype().elsize)) try: - self.func(rffi.cast(rffi.CArrayPtr(rffi.CCHARP), dataps), + self.func(rffi.cast(rffi.CArrayPtr(rffi.CCHARP), dataps), rffi.cast(npy_intpp, dims), rffi.cast(npy_intpp, steps), self.data) finally: free_raw_storage(dataps, track_allocation=False) @@ -315,4 +315,4 @@ w_identity = space.wrap(identity) ufunc_generic = ufuncs.frompyfunc(space, w_funcs, nin, nout, w_dtypes, w_signature, w_identity, w_name, w_doc) - return ufunc_generic + return ufunc_generic From noreply at buildbot.pypy.org Fri Oct 10 16:13:42 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 10 Oct 2014 16:13:42 +0200 (CEST) Subject: [pypy-commit] pypy ufuncapi: add kw support to generic ufuncs, lay groundwork for kw support in all ufuncs Message-ID: <20141010141342.D10FA1C1347@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufuncapi Changeset: r73881:8c3df8a4ea5f Date: 2014-10-08 17:53 +0300 http://bitbucket.org/pypy/pypy/changeset/8c3df8a4ea5f/ Log: add kw support to generic ufuncs, lay groundwork for kw support in all ufuncs diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ 
b/pypy/module/micronumpy/boxes.py @@ -30,6 +30,7 @@ long_double_size = 8 + def new_dtype_getter(num): @specialize.memo() def _get_dtype(space): @@ -200,25 +201,30 @@ def descr_nonzero(self, space): return space.wrap(self.get_dtype(space).itemtype.bool(self)) + # TODO: support all kwargs in ufuncs like numpy ufunc_object.c + sig = None + cast = None + extobj = None + def _unaryop_impl(ufunc_name): def impl(self, space, w_out=None): from pypy.module.micronumpy import ufuncs return getattr(ufuncs.get(space), ufunc_name).call( - space, [self, w_out]) + space, [self, w_out], self.sig, self.cast, self.extobj) return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) def _binop_impl(ufunc_name): def impl(self, space, w_other, w_out=None): from pypy.module.micronumpy import ufuncs return getattr(ufuncs.get(space), ufunc_name).call( - space, [self, w_other, w_out]) + space, [self, w_other, w_out], self.sig, self.cast, self.extobj) return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) def _binop_right_impl(ufunc_name): def impl(self, space, w_other, w_out=None): from pypy.module.micronumpy import ufuncs return getattr(ufuncs.get(space), ufunc_name).call( - space, [w_other, self, w_out]) + space, [w_other, self, w_out], self.sig, self.cast, self.extobj) return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name) descr_add = _binop_impl("add") diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -559,10 +559,10 @@ w_res = arr.descr_all(interp.space) elif self.name == "unegative": neg = ufuncs.get(interp.space).negative - w_res = neg.call(interp.space, [arr]) + w_res = neg.call(interp.space, [arr], None, None, None) elif self.name == "cos": cos = ufuncs.get(interp.space).cos - w_res = cos.call(interp.space, [arr]) + w_res = cos.call(interp.space, [arr], None, None, None) elif self.name == "flat": w_res = arr.descr_get_flatiter(interp.space) elif self.name == "argsort": diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -855,11 +855,16 @@ return w_ret # --------------------- operations ---------------------------- + # TODO: support all kwargs like numpy ufunc_object.c + sig = None + cast = None + extobj = None + def _unaryop_impl(ufunc_name): def impl(self, space, w_out=None): return getattr(ufuncs.get(space), ufunc_name).call( - space, [self, w_out]) + space, [self, w_out], self.sig, self.cast, self.extobj) return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) descr_pos = _unaryop_impl("positive") @@ -880,7 +885,7 @@ def _binop_impl(ufunc_name): def impl(self, space, w_other, w_out=None): return getattr(ufuncs.get(space), ufunc_name).call( - space, [self, w_other, w_out]) + space, [self, w_other, w_out], self.sig, self.cast, self.extobj) return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) descr_add = _binop_impl("add") @@ -924,7 +929,7 @@ def impl(self, space, w_other): w_out = self ufunc = getattr(ufuncs.get(space), ufunc_name) - return ufunc.call(space, [self, w_other, w_out]) + return ufunc.call(space, [self, w_other, w_out], self.sig, self.cast, self.extobj) return func_with_new_name(impl, "binop_inplace_%s_impl" % ufunc_name) descr_iadd = _binop_inplace_impl("add") @@ -945,7 +950,7 @@ def impl(self, space, w_other, w_out=None): w_other = convert_to_array(space, w_other) return getattr(ufuncs.get(space), ufunc_name).call( - space, 
[w_other, self, w_out]) + space, [w_other, self, w_out], self.sig, self.cast, self.extobj) return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name) descr_radd = _binop_right_impl("add") diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -112,7 +112,7 @@ assert 'object' in str(e) # Use pypy specific extension for out_dtype adder_ufunc0 = frompyfunc(adder, 2, 1, dtypes=['match']) - adder_ufunc1 = frompyfunc([adder, adder], 2, 1, + adder_ufunc1 = frompyfunc([adder, adder], 2, 1, dtypes=[int, int, int, float, float, float]) int_func22 = frompyfunc([int, int], 2, 2, signature='(i),(i)->(i),(i)', dtypes=['match']) @@ -147,7 +147,7 @@ for i in range(in_array.size): out_flat[i] = in_flat[i] * 2 from numpy import frompyfunc, dtype, arange - ufunc = frompyfunc([int_times2, double_times2], 1, 1, + ufunc = frompyfunc([int_times2, double_times2], 1, 1, signature='()->()', dtypes=[dtype(int), dtype(int), dtype(float), dtype(float) @@ -160,6 +160,21 @@ af2 = ufunc(af) assert all(af2 == af * 2) + def test_ufunc_kwargs(self): + from numpy import ufunc, frompyfunc, arange, dtype + def adder(a, b): + return a+b + adder_ufunc = frompyfunc(adder, 2, 1, dtypes=['match']) + args = [arange(10), arange(10)] + res = adder_ufunc(*args, dtype=int) + assert all(res == args[0] + args[1]) + # extobj support needed for linalg ufuncs + res = adder_ufunc(*args, extobj=[8192, 0, None]) + assert all(res == args[0] + args[1]) + raises(TypeError, adder_ufunc, *args, blah=True) + raises(TypeError, adder_ufunc, *args, extobj=True) + raises(RuntimeError, adder_ufunc, *args, sig='(d,d)->(d)', dtype=int) + def test_ufunc_attrs(self): from numpy import add, multiply, sin diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -27,7 +27,6 @@ assert isinstance(w_npyobj, W_NDimArray) return w_npyobj.get_dtype() - class W_Ufunc(W_Root): _immutable_fields_ = [ "name", "promote_to_largest", "promote_to_float", "promote_bools", "nin", @@ -60,37 +59,37 @@ def descr_call(self, space, __args__): args_w, kwds_w = __args__.unpack() - # it occurs to me that we don't support any datatypes that - # require casting, change it later when we do - kwds_w.pop('casting', None) - w_subok = kwds_w.pop('subok', None) - w_out = kwds_w.pop('out', space.w_None) - # Setup a default value for out + # sig, extobj are used in generic ufuncs + w_subok, w_out, sig, casting, extobj = self.parse_kwargs(space, kwds_w) if space.is_w(w_out, space.w_None): out = None else: out = w_out if (w_subok is not None and space.is_true(w_subok)): - raise OperationError(space.w_NotImplementedError, - space.wrap("parameters unsupported")) - if kwds_w or len(args_w) < self.nin: - raise OperationError(space.w_ValueError, - space.wrap("invalid number of arguments") - ) + raise oefmt(space.w_NotImplementedError, "parameter subok unsupported") + if kwds_w: + # numpy compatible, raise with only the first of maybe many keys + kw = kwds_w.keys()[0] + raise oefmt(space.w_TypeError, + "'%s' is an invalid keyword to ufunc '%s'", kw, self.name) + if len(args_w) < self.nin: + raise oefmt(space.w_ValueError, "invalid number of arguments" + ", expected %d got %d", len(args_w), self.nin) elif (len(args_w) > self.nin and out is not None) or \ (len(args_w) > self.nin + 1): - raise OperationError(space.w_TypeError, - space.wrap("invalid 
number of arguments") - ) + raise oefmt(space.w_TypeError, "invalid number of arguments") # Override the default out value, if it has been provided in w_wargs if len(args_w) > self.nin: + if out: + raise oefmt(space.w_ValueError, "cannot specify 'out' as both " + "a positional and keyword argument") out = args_w[-1] else: args_w = args_w + [out] if out is not None and not isinstance(out, W_NDimArray): raise OperationError(space.w_TypeError, space.wrap( 'output must be an array')) - return self.call(space, args_w) + return self.call(space, args_w, sig, casting, extobj) def descr_accumulate(self, space, w_obj, w_axis=None, w_dtype=None, w_out=None): if space.is_none(w_axis): @@ -295,6 +294,22 @@ raise OperationError(space.w_ValueError, space.wrap( "outer product only supported for binary functions")) + def parse_kwargs(self, space, kwds_w): + # we don't support casting, change it when we do + casting = kwds_w.pop('casting', None) + w_subok = kwds_w.pop('subok', None) + w_out = kwds_w.pop('out', space.w_None) + sig = None + # TODO handle triple of extobj, + # see _extract_pyvals in ufunc_object.c + extobj_w = kwds_w.pop('extobj', get_extobj(space)) + if not space.isinstance_w(extobj_w, space.w_list) or space.len_w(extobj_w) != 3: + raise oefmt(space.w_TypeError, "'extobj' must be a list of 3 values") + return w_subok, w_out, sig, casting, extobj_w + +def get_extobj(space): + extobj_w = space.newlist([space.wrap(8192), space.wrap(0), space.w_None]) + return extobj_w class W_Ufunc1(W_Ufunc): _immutable_fields_ = ["func", "bool_result"] @@ -311,7 +326,7 @@ self.func = func self.bool_result = bool_result - def call(self, space, args_w): + def call(self, space, args_w, sig, casting, extobj): w_obj = args_w[0] out = None if len(args_w) > 1: @@ -397,7 +412,8 @@ return False @jit.unroll_safe - def call(self, space, args_w): + def call(self, space, args_w, sig, casting, extobj): + w_obj = args_w[0] if len(args_w) > 2: [w_lhs, w_rhs, w_out] = args_w else: @@ -529,9 +545,8 @@ cumulative=False): raise oefmt(space.w_NotImplementedError, 'not implemented yet') - def call(self, space, args_w): - #from pypy.module._cffi_backend import newtype, func as _func - out = None + def call(self, space, args_w, sig, casting, extobj): + w_obj = args_w[0] inargs = [] if len(args_w) < self.nin: raise oefmt(space.w_ValueError, @@ -549,7 +564,9 @@ raise oefmt(space.w_TypeError, 'output arg %d must be an array, not %s', i+self.nin, str(args_w[i+self.nin])) outargs[i] = out - index = self.type_resolver(space, inargs, outargs) + if sig is None: + sig = space.wrap(self.signature) + index = self.type_resolver(space, inargs, outargs, sig) outargs = self.alloc_outargs(space, index, inargs, outargs) inargs0 = inargs[0] outargs0 = outargs[0] @@ -573,9 +590,38 @@ return loop.call_many_to_many(space, new_shape, self.funcs[index], res_dtype, inargs, outargs) - def type_resolver(self, space, inargs, outargs): + def parse_kwargs(self, space, kwargs_w): + w_subok, w_out, casting, sig, extobj = \ + W_Ufunc.parse_kwargs(self, space, kwargs_w) + dtype_w = kwargs_w.pop('dtype', None) + if not space.is_w(dtype_w, space.w_None) and not dtype_w is None: + if sig: + raise oefmt(space.w_RuntimeError, + "cannot specify both 'sig' and 'dtype'") + dtype = descriptor.decode_w_dtype(space, dtype_w) + sig = space.newtuple([dtype]) + order = kwargs_w.pop('dtype', None) + if not space.is_w(order, space.w_None) and not order is None: + raise oefmt(space.w_NotImplementedError, '"order" keyword not implemented') + parsed_kw = [] + for kw in kwargs_w: + if 
kw.startswith('sig'): + if sig: + raise oefmt(space.w_RuntimeError, + "cannot specify both 'sig' and 'dtype'") + sig = kwargs_w[kw] + parsed_kw.append(kw) + elif kw.startswith('where'): + raise oefmt(space.w_NotImplementedError, + '"where" keyword not implemented') + parsed_kw.append(kw) + for kw in parsed_kw: + kwargs_w.pop(kw) + return w_subok, w_out, sig, casting, extobj + + def type_resolver(self, space, inargs, outargs, sig): # Find a match for the inargs.dtype in self.dtypes, like - # linear_search_type_resolver in numy ufunc_type_resolutions.c + # linear_search_type_resolver in numpy ufunc_type_resolutions.c inargs0 = inargs[0] assert isinstance(inargs0, W_NDimArray) for i in range(0, len(self.dtypes), self.nargs): @@ -601,7 +647,7 @@ outargs[i] = W_NDimArray.from_shape(space, temp_shape, dtype, order) for i in range(len(outargs)): assert isinstance(outargs[i], W_NDimArray) - return outargs + return outargs def prep_call(self, space, index, inargs, outargs): # Use the index and signature to determine From noreply at buildbot.pypy.org Fri Oct 10 16:13:44 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 10 Oct 2014 16:13:44 +0200 (CEST) Subject: [pypy-commit] pypy ufuncapi: rename for numpy compatability Message-ID: <20141010141344.0C2B81C1347@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufuncapi Changeset: r73882:055731d00921 Date: 2014-10-08 17:53 +0300 http://bitbucket.org/pypy/pypy/changeset/055731d00921/ Log: rename for numpy compatability diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -299,7 +299,7 @@ @cpython_api([rffi.CArrayPtr(rffi.CArrayPtr(gufunctype)), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t, Py_ssize_t, Py_ssize_t, rffi.CCHARP, rffi.CCHARP, Py_ssize_t, rffi.CCHARP], PyObject) -def _PyUFunc_FromFuncAndDataAndSignature(space, funcs, data, types, ntypes, +def PyUFunc_FromFuncAndDataAndSignature(space, funcs, data, types, ntypes, nin, nout, identity, name, doc, check_return, signature): funcs_w = [None] * ntypes dtypes_w = [None] * ntypes * (nin + nout) diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -212,14 +212,14 @@ res = api._PyArray_SimpleNewFromData(0, ptr_s, 15, ptr_a) assert res.get_scalar_value().real == 3. assert res.get_scalar_value().imag == 4. 
- + def _test_Ufunc_FromFuncAndDataAndSignature(self, space, api): py.test.skip('preliminary non-translated test') ''' PyUFuncGenericFunction funcs[] = {&double_times2, &int_times2}; char types[] = { NPY_DOUBLE,NPY_DOUBLE, NPY_INT, NPY_INT }; void *array_data[] = {NULL, NULL}; - ufunc = api._PyUFunc_FromFuncAndDataAndSignature(space, funcs, data, + ufunc = api.PyUFunc_FromFuncAndDataAndSignature(space, funcs, data, types, ntypes, nin, nout, identity, doc, check_return, signature) ''' @@ -322,7 +322,7 @@ char types[] = { NPY_DOUBLE,NPY_DOUBLE, NPY_INT, NPY_INT }; void *array_data[] = {NULL, NULL}; PyObject * retval; - retval = _PyUFunc_FromFuncAndDataAndSignature(funcs, + retval = PyUFunc_FromFuncAndDataAndSignature(funcs, array_data, types, 2, 1, 1, PyUFunc_None, "times2", "times2_docstring", 0, "()->()"); return retval; From noreply at buildbot.pypy.org Fri Oct 10 16:13:45 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 10 Oct 2014 16:13:45 +0200 (CEST) Subject: [pypy-commit] pypy ufuncapi: cleanup, add failing test that mimics numpy.linalg.inv's signature Message-ID: <20141010141345.659111C1347@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufuncapi Changeset: r73883:0e380b57db19 Date: 2014-10-10 17:05 +0300 http://bitbucket.org/pypy/pypy/changeset/0e380b57db19/ Log: cleanup, add failing test that mimics numpy.linalg.inv's signature showing there is much more to be done to finish this API diff --git a/pypy/module/cpyext/include/numpy/__ufunc_api.h b/pypy/module/cpyext/include/numpy/__ufunc_api.h deleted file mode 100644 --- a/pypy/module/cpyext/include/numpy/__ufunc_api.h +++ /dev/null @@ -1,328 +0,0 @@ - -#ifdef _UMATHMODULE - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION -extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type; -#else -NPY_NO_EXPORT PyTypeObject PyUFunc_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type; -#else - NPY_NO_EXPORT PyTypeObject PyUFunc_Type; -#endif - -NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndData \ - (PyUFuncGenericFunction *, void **, char *, int, int, int, int, char *, char *, int); -NPY_NO_EXPORT int PyUFunc_RegisterLoopForType \ - (PyUFuncObject *, int, PyUFuncGenericFunction, int *, void *); -NPY_NO_EXPORT int PyUFunc_GenericFunction \ - (PyUFuncObject *, PyObject *, PyObject *, PyArrayObject **); -NPY_NO_EXPORT void PyUFunc_f_f_As_d_d \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_d_d \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_f_f \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_g_g \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_F_F_As_D_D \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_F_F \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_D_D \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_G_G \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_O_O \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_ff_f_As_dd_d \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_ff_f \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_dd_d \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_gg_g \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_FF_F_As_DD_D \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_DD_D \ - (char **, npy_intp *, npy_intp *, void 
*); -NPY_NO_EXPORT void PyUFunc_FF_F \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_GG_G \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_OO_O \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_O_O_method \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_OO_O_method \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_On_Om \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT int PyUFunc_GetPyValues \ - (char *, int *, int *, PyObject **); -NPY_NO_EXPORT int PyUFunc_checkfperr \ - (int, PyObject *, int *); -NPY_NO_EXPORT void PyUFunc_clearfperr \ - (void); -NPY_NO_EXPORT int PyUFunc_getfperr \ - (void); -NPY_NO_EXPORT int PyUFunc_handlefperr \ - (int, PyObject *, int, int *); -NPY_NO_EXPORT int PyUFunc_ReplaceLoopBySignature \ - (PyUFuncObject *, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *); -NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndDataAndSignature \ - (PyUFuncGenericFunction *, void **, char *, int, int, int, int, char *, char *, int, const char *); -NPY_NO_EXPORT int PyUFunc_SetUsesArraysAsData \ - (void **, size_t); -NPY_NO_EXPORT void PyUFunc_e_e \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_e_e_As_f_f \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_e_e_As_d_d \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_ee_e \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_ee_e_As_ff_f \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_ee_e_As_dd_d \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT int PyUFunc_DefaultTypeResolver \ - (PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyObject *, PyArray_Descr **); -NPY_NO_EXPORT int PyUFunc_ValidateCasting \ - (PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyArray_Descr **); -NPY_NO_EXPORT int PyUFunc_RegisterLoopForDescr \ - (PyUFuncObject *, PyArray_Descr *, PyUFuncGenericFunction, PyArray_Descr **, void *); - -#else - -#if defined(PY_UFUNC_UNIQUE_SYMBOL) -#define PyUFunc_API PY_UFUNC_UNIQUE_SYMBOL -#endif - -#if defined(NO_IMPORT) || defined(NO_IMPORT_UFUNC) -extern void **PyUFunc_API; -#else -#if defined(PY_UFUNC_UNIQUE_SYMBOL) -void **PyUFunc_API; -#else -static void **PyUFunc_API=NULL; -#endif -#endif - -#define PyUFunc_Type (*(PyTypeObject *)PyUFunc_API[0]) -#define PyUFunc_FromFuncAndData \ - (*(PyObject * (*)(PyUFuncGenericFunction *, void **, char *, int, int, int, int, char *, char *, int)) \ - PyUFunc_API[1]) -#define PyUFunc_RegisterLoopForType \ - (*(int (*)(PyUFuncObject *, int, PyUFuncGenericFunction, int *, void *)) \ - PyUFunc_API[2]) -#define PyUFunc_GenericFunction \ - (*(int (*)(PyUFuncObject *, PyObject *, PyObject *, PyArrayObject **)) \ - PyUFunc_API[3]) -#define PyUFunc_f_f_As_d_d \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[4]) -#define PyUFunc_d_d \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[5]) -#define PyUFunc_f_f \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[6]) -#define PyUFunc_g_g \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[7]) -#define PyUFunc_F_F_As_D_D \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[8]) -#define PyUFunc_F_F \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[9]) -#define PyUFunc_D_D \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - 
PyUFunc_API[10]) -#define PyUFunc_G_G \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[11]) -#define PyUFunc_O_O \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[12]) -#define PyUFunc_ff_f_As_dd_d \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[13]) -#define PyUFunc_ff_f \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[14]) -#define PyUFunc_dd_d \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[15]) -#define PyUFunc_gg_g \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[16]) -#define PyUFunc_FF_F_As_DD_D \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[17]) -#define PyUFunc_DD_D \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[18]) -#define PyUFunc_FF_F \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[19]) -#define PyUFunc_GG_G \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[20]) -#define PyUFunc_OO_O \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[21]) -#define PyUFunc_O_O_method \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[22]) -#define PyUFunc_OO_O_method \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[23]) -#define PyUFunc_On_Om \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[24]) -#define PyUFunc_GetPyValues \ - (*(int (*)(char *, int *, int *, PyObject **)) \ - PyUFunc_API[25]) -#define PyUFunc_checkfperr \ - (*(int (*)(int, PyObject *, int *)) \ - PyUFunc_API[26]) -#define PyUFunc_clearfperr \ - (*(void (*)(void)) \ - PyUFunc_API[27]) -#define PyUFunc_getfperr \ - (*(int (*)(void)) \ - PyUFunc_API[28]) -#define PyUFunc_handlefperr \ - (*(int (*)(int, PyObject *, int, int *)) \ - PyUFunc_API[29]) -#define PyUFunc_ReplaceLoopBySignature \ - (*(int (*)(PyUFuncObject *, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *)) \ - PyUFunc_API[30]) -#define PyUFunc_FromFuncAndDataAndSignature \ - (*(PyObject * (*)(PyUFuncGenericFunction *, void **, char *, int, int, int, int, char *, char *, int, const char *)) \ - PyUFunc_API[31]) -#define PyUFunc_SetUsesArraysAsData \ - (*(int (*)(void **, size_t)) \ - PyUFunc_API[32]) -#define PyUFunc_e_e \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[33]) -#define PyUFunc_e_e_As_f_f \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[34]) -#define PyUFunc_e_e_As_d_d \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[35]) -#define PyUFunc_ee_e \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[36]) -#define PyUFunc_ee_e_As_ff_f \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[37]) -#define PyUFunc_ee_e_As_dd_d \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[38]) -#define PyUFunc_DefaultTypeResolver \ - (*(int (*)(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyObject *, PyArray_Descr **)) \ - PyUFunc_API[39]) -#define PyUFunc_ValidateCasting \ - (*(int (*)(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyArray_Descr **)) \ - PyUFunc_API[40]) -#define PyUFunc_RegisterLoopForDescr \ - (*(int (*)(PyUFuncObject *, PyArray_Descr *, PyUFuncGenericFunction, PyArray_Descr **, void *)) \ - PyUFunc_API[41]) - -static int -_import_umath(void) -{ - PyObject *numpy = PyImport_ImportModule("numpy.core.umath"); - PyObject *c_api = NULL; - - if (numpy == NULL) { - 
PyErr_SetString(PyExc_ImportError, "numpy.core.umath failed to import"); - return -1; - } - c_api = PyObject_GetAttrString(numpy, "_UFUNC_API"); - Py_DECREF(numpy); - if (c_api == NULL) { - PyErr_SetString(PyExc_AttributeError, "_UFUNC_API not found"); - return -1; - } - -#if PY_VERSION_HEX >= 0x03000000 - if (!PyCapsule_CheckExact(c_api)) { - PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is not PyCapsule object"); - Py_DECREF(c_api); - return -1; - } - PyUFunc_API = (void **)PyCapsule_GetPointer(c_api, NULL); -#else - if (!PyCObject_Check(c_api)) { - PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is not PyCObject object"); - Py_DECREF(c_api); - return -1; - } - PyUFunc_API = (void **)PyCObject_AsVoidPtr(c_api); -#endif - Py_DECREF(c_api); - if (PyUFunc_API == NULL) { - PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is NULL pointer"); - return -1; - } - return 0; -} - -#if PY_VERSION_HEX >= 0x03000000 -#define NUMPY_IMPORT_UMATH_RETVAL NULL -#else -#define NUMPY_IMPORT_UMATH_RETVAL -#endif - -#define import_umath() \ - do {\ - UFUNC_NOFPE\ - if (_import_umath() < 0) {\ - PyErr_Print();\ - PyErr_SetString(PyExc_ImportError,\ - "numpy.core.umath failed to import");\ - return NUMPY_IMPORT_UMATH_RETVAL;\ - }\ - } while(0) - -#define import_umath1(ret) \ - do {\ - UFUNC_NOFPE\ - if (_import_umath() < 0) {\ - PyErr_Print();\ - PyErr_SetString(PyExc_ImportError,\ - "numpy.core.umath failed to import");\ - return ret;\ - }\ - } while(0) - -#define import_umath2(ret, msg) \ - do {\ - UFUNC_NOFPE\ - if (_import_umath() < 0) {\ - PyErr_Print();\ - PyErr_SetString(PyExc_ImportError, msg);\ - return ret;\ - }\ - } while(0) - -#define import_ufunc() \ - do {\ - UFUNC_NOFPE\ - if (_import_umath() < 0) {\ - PyErr_Print();\ - PyErr_SetString(PyExc_ImportError,\ - "numpy.core.umath failed to import");\ - }\ - } while(0) - -#endif diff --git a/pypy/module/cpyext/include/numpy/ufuncobject.h b/pypy/module/cpyext/include/numpy/ufuncobject.h deleted file mode 100644 --- a/pypy/module/cpyext/include/numpy/ufuncobject.h +++ /dev/null @@ -1,479 +0,0 @@ -#ifndef Py_UFUNCOBJECT_H -#define Py_UFUNCOBJECT_H - -//#include - -#ifdef __cplusplus -extern "C" { -#endif - -/* - * The legacy generic inner loop for a standard element-wise or - * generalized ufunc. - */ -typedef void (*PyUFuncGenericFunction) - (char **args, - npy_intp *dimensions, - npy_intp *strides, - void *innerloopdata); - -/* - * The most generic one-dimensional inner loop for - * a standard element-wise ufunc. This typedef is also - * more consistent with the other NumPy function pointer typedefs - * than PyUFuncGenericFunction. - */ -typedef void (PyUFunc_StridedInnerLoopFunc)( - char **dataptrs, npy_intp *strides, - npy_intp count, - NpyAuxData *innerloopdata); - -/* - * The most generic one-dimensional inner loop for - * a masked standard element-wise ufunc. "Masked" here means that it skips - * doing calculations on any items for which the maskptr array has a true - * value. - */ -typedef void (PyUFunc_MaskedStridedInnerLoopFunc)( - char **dataptrs, npy_intp *strides, - char *maskptr, npy_intp mask_stride, - npy_intp count, - NpyAuxData *innerloopdata); - -/* Forward declaration for the type resolver and loop selector typedefs */ -struct _tagPyUFuncObject; - -/* - * Given the operands for calling a ufunc, should determine the - * calculation input and output data types and return an inner loop function. - * This function should validate that the casting rule is being followed, - * and fail if it is not. 
- * - * For backwards compatibility, the regular type resolution function does not - * support auxiliary data with object semantics. The type resolution call - * which returns a masked generic function returns a standard NpyAuxData - * object, for which the NPY_AUXDATA_FREE and NPY_AUXDATA_CLONE macros - * work. - * - * ufunc: The ufunc object. - * casting: The 'casting' parameter provided to the ufunc. - * operands: An array of length (ufunc->nin + ufunc->nout), - * with the output parameters possibly NULL. - * type_tup: Either NULL, or the type_tup passed to the ufunc. - * out_dtypes: An array which should be populated with new - * references to (ufunc->nin + ufunc->nout) new - * dtypes, one for each input and output. These - * dtypes should all be in native-endian format. - * - * Should return 0 on success, -1 on failure (with exception set), - * or -2 if Py_NotImplemented should be returned. - */ -typedef int (PyUFunc_TypeResolutionFunc)( - struct _tagPyUFuncObject *ufunc, - NPY_CASTING casting, - PyArrayObject **operands, - PyObject *type_tup, - PyArray_Descr **out_dtypes); - -/* - * Given an array of DTypes as returned by the PyUFunc_TypeResolutionFunc, - * and an array of fixed strides (the array will contain NPY_MAX_INTP for - * strides which are not necessarily fixed), returns an inner loop - * with associated auxiliary data. - * - * For backwards compatibility, there is a variant of the inner loop - * selection which returns an inner loop irrespective of the strides, - * and with a void* static auxiliary data instead of an NpyAuxData * - * dynamically allocatable auxiliary data. - * - * ufunc: The ufunc object. - * dtypes: An array which has been populated with dtypes, - * in most cases by the type resolution funciton - * for the same ufunc. - * fixed_strides: For each input/output, either the stride that - * will be used every time the function is called - * or NPY_MAX_INTP if the stride might change or - * is not known ahead of time. The loop selection - * function may use this stride to pick inner loops - * which are optimized for contiguous or 0-stride - * cases. - * out_innerloop: Should be populated with the correct ufunc inner - * loop for the given type. - * out_innerloopdata: Should be populated with the void* data to - * be passed into the out_innerloop function. - * out_needs_api: If the inner loop needs to use the Python API, - * should set the to 1, otherwise should leave - * this untouched. - */ -typedef int (PyUFunc_LegacyInnerLoopSelectionFunc)( - struct _tagPyUFuncObject *ufunc, - PyArray_Descr **dtypes, - PyUFuncGenericFunction *out_innerloop, - void **out_innerloopdata, - int *out_needs_api); -typedef int (PyUFunc_InnerLoopSelectionFunc)( - struct _tagPyUFuncObject *ufunc, - PyArray_Descr **dtypes, - npy_intp *fixed_strides, - PyUFunc_StridedInnerLoopFunc **out_innerloop, - NpyAuxData **out_innerloopdata, - int *out_needs_api); -typedef int (PyUFunc_MaskedInnerLoopSelectionFunc)( - struct _tagPyUFuncObject *ufunc, - PyArray_Descr **dtypes, - PyArray_Descr *mask_dtype, - npy_intp *fixed_strides, - npy_intp fixed_mask_stride, - PyUFunc_MaskedStridedInnerLoopFunc **out_innerloop, - NpyAuxData **out_innerloopdata, - int *out_needs_api); - -typedef struct _tagPyUFuncObject { - PyObject_HEAD - /* - * nin: Number of inputs - * nout: Number of outputs - * nargs: Always nin + nout (Why is it stored?) 
- */ - int nin, nout, nargs; - - /* Identity for reduction, either PyUFunc_One or PyUFunc_Zero */ - int identity; - - /* Array of one-dimensional core loops */ - PyUFuncGenericFunction *functions; - /* Array of funcdata that gets passed into the functions */ - void **data; - /* The number of elements in 'functions' and 'data' */ - int ntypes; - - /* Does not appear to be used */ - int check_return; - - /* The name of the ufunc */ - char *name; - - /* Array of type numbers, of size ('nargs' * 'ntypes') */ - char *types; - - /* Documentation string */ - char *doc; - - void *ptr; - PyObject *obj; - PyObject *userloops; - - /* generalized ufunc parameters */ - - /* 0 for scalar ufunc; 1 for generalized ufunc */ - int core_enabled; - /* number of distinct dimension names in signature */ - int core_num_dim_ix; - - /* - * dimension indices of input/output argument k are stored in - * core_dim_ixs[core_offsets[k]..core_offsets[k]+core_num_dims[k]-1] - */ - - /* numbers of core dimensions of each argument */ - int *core_num_dims; - /* - * dimension indices in a flatted form; indices - * are in the range of [0,core_num_dim_ix) - */ - int *core_dim_ixs; - /* - * positions of 1st core dimensions of each - * argument in core_dim_ixs - */ - int *core_offsets; - /* signature string for printing purpose */ - char *core_signature; - - /* - * A function which resolves the types and fills an array - * with the dtypes for the inputs and outputs. - */ - PyUFunc_TypeResolutionFunc *type_resolver; - /* - * A function which returns an inner loop written for - * NumPy 1.6 and earlier ufuncs. This is for backwards - * compatibility, and may be NULL if inner_loop_selector - * is specified. - */ - PyUFunc_LegacyInnerLoopSelectionFunc *legacy_inner_loop_selector; - /* - * A function which returns an inner loop for the new mechanism - * in NumPy 1.7 and later. If provided, this is used, otherwise - * if NULL the legacy_inner_loop_selector is used instead. - */ - PyUFunc_InnerLoopSelectionFunc *inner_loop_selector; - /* - * A function which returns a masked inner loop for the ufunc. - */ - PyUFunc_MaskedInnerLoopSelectionFunc *masked_inner_loop_selector; - - /* - * List of flags for each operand when ufunc is called by nditer object. - * These flags will be used in addition to the default flags for each - * operand set by nditer object. - */ - npy_uint32 *op_flags; - - /* - * List of global flags used when ufunc is called by nditer object. - * These flags will be used in addition to the default global flags - * set by nditer object. 
- */ - npy_uint32 iter_flags; -} PyUFuncObject; - -#include "arrayobject.h" - -#define UFUNC_ERR_IGNORE 0 -#define UFUNC_ERR_WARN 1 -#define UFUNC_ERR_RAISE 2 -#define UFUNC_ERR_CALL 3 -#define UFUNC_ERR_PRINT 4 -#define UFUNC_ERR_LOG 5 - - /* Python side integer mask */ - -#define UFUNC_MASK_DIVIDEBYZERO 0x07 -#define UFUNC_MASK_OVERFLOW 0x3f -#define UFUNC_MASK_UNDERFLOW 0x1ff -#define UFUNC_MASK_INVALID 0xfff - -#define UFUNC_SHIFT_DIVIDEBYZERO 0 -#define UFUNC_SHIFT_OVERFLOW 3 -#define UFUNC_SHIFT_UNDERFLOW 6 -#define UFUNC_SHIFT_INVALID 9 - - -/* platform-dependent code translates floating point - status to an integer sum of these values -*/ -#define UFUNC_FPE_DIVIDEBYZERO 1 -#define UFUNC_FPE_OVERFLOW 2 -#define UFUNC_FPE_UNDERFLOW 4 -#define UFUNC_FPE_INVALID 8 - -/* Error mode that avoids look-up (no checking) */ -#define UFUNC_ERR_DEFAULT 0 - -#define UFUNC_OBJ_ISOBJECT 1 -#define UFUNC_OBJ_NEEDS_API 2 - - /* Default user error mode */ -#define UFUNC_ERR_DEFAULT2 \ - (UFUNC_ERR_WARN << UFUNC_SHIFT_DIVIDEBYZERO) + \ - (UFUNC_ERR_WARN << UFUNC_SHIFT_OVERFLOW) + \ - (UFUNC_ERR_WARN << UFUNC_SHIFT_INVALID) - -#if NPY_ALLOW_THREADS -#define NPY_LOOP_BEGIN_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) _save = PyEval_SaveThread();} while (0); -#define NPY_LOOP_END_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) PyEval_RestoreThread(_save);} while (0); -#else -#define NPY_LOOP_BEGIN_THREADS -#define NPY_LOOP_END_THREADS -#endif - -/* - * UFunc has unit of 1, and the order of operations can be reordered - * This case allows reduction with multiple axes at once. - */ -#define PyUFunc_One 1 -/* - * UFunc has unit of 0, and the order of operations can be reordered - * This case allows reduction with multiple axes at once. - */ -#define PyUFunc_Zero 0 -/* - * UFunc has no unit, and the order of operations cannot be reordered. - * This case does not allow reduction with multiple axes at once. - */ -#define PyUFunc_None -1 -/* - * UFunc has no unit, and the order of operations can be reordered - * This case allows reduction with multiple axes at once. - */ -#define PyUFunc_ReorderableNone -2 - -#define UFUNC_REDUCE 0 -#define UFUNC_ACCUMULATE 1 -#define UFUNC_REDUCEAT 2 -#define UFUNC_OUTER 3 - - -typedef struct { - int nin; - int nout; - PyObject *callable; -} PyUFunc_PyFuncData; - -/* A linked-list of function information for - user-defined 1-d loops. - */ -typedef struct _loop1d_info { - PyUFuncGenericFunction func; - void *data; - int *arg_types; - struct _loop1d_info *next; - int nargs; - PyArray_Descr **arg_dtypes; -} PyUFunc_Loop1d; - - -#include "numpy/__ufunc_api.h" - -#define UFUNC_PYVALS_NAME "UFUNC_PYVALS" - -#define UFUNC_CHECK_ERROR(arg) \ - do {if ((((arg)->obj & UFUNC_OBJ_NEEDS_API) && PyErr_Occurred()) || \ - ((arg)->errormask && \ - PyUFunc_checkfperr((arg)->errormask, \ - (arg)->errobj, \ - &(arg)->first))) \ - goto fail;} while (0) - -/* This code checks the IEEE status flags in a platform-dependent way */ -/* Adapted from Numarray */ - -#if (defined(__unix__) || defined(unix)) && !defined(USG) -#include -#endif - -/* OSF/Alpha (Tru64) ---------------------------------------------*/ -#if defined(__osf__) && defined(__alpha) - -#include - -#define UFUNC_CHECK_STATUS(ret) { \ - unsigned long fpstatus; \ - \ - fpstatus = ieee_get_fp_control(); \ - /* clear status bits as well as disable exception mode if on */ \ - ieee_set_fp_control( 0 ); \ - ret = ((IEEE_STATUS_DZE & fpstatus) ? UFUNC_FPE_DIVIDEBYZERO : 0) \ - | ((IEEE_STATUS_OVF & fpstatus) ? 
UFUNC_FPE_OVERFLOW : 0) \ - | ((IEEE_STATUS_UNF & fpstatus) ? UFUNC_FPE_UNDERFLOW : 0) \ - | ((IEEE_STATUS_INV & fpstatus) ? UFUNC_FPE_INVALID : 0); \ - } - -/* MS Windows -----------------------------------------------------*/ -#elif defined(_MSC_VER) - -#include - -/* Clear the floating point exception default of Borland C++ */ -#if defined(__BORLANDC__) -#define UFUNC_NOFPE _control87(MCW_EM, MCW_EM); -#endif - -#if defined(_WIN64) -#define UFUNC_CHECK_STATUS(ret) { \ - int fpstatus = (int) _clearfp(); \ - \ - ret = ((SW_ZERODIVIDE & fpstatus) ? UFUNC_FPE_DIVIDEBYZERO : 0) \ - | ((SW_OVERFLOW & fpstatus) ? UFUNC_FPE_OVERFLOW : 0) \ - | ((SW_UNDERFLOW & fpstatus) ? UFUNC_FPE_UNDERFLOW : 0) \ - | ((SW_INVALID & fpstatus) ? UFUNC_FPE_INVALID : 0); \ - } -#else -/* windows enables sse on 32 bit, so check both flags */ -#define UFUNC_CHECK_STATUS(ret) { \ - int fpstatus, fpstatus2; \ - _statusfp2(&fpstatus, &fpstatus2); \ - _clearfp(); \ - fpstatus |= fpstatus2; \ - \ - ret = ((SW_ZERODIVIDE & fpstatus) ? UFUNC_FPE_DIVIDEBYZERO : 0) \ - | ((SW_OVERFLOW & fpstatus) ? UFUNC_FPE_OVERFLOW : 0) \ - | ((SW_UNDERFLOW & fpstatus) ? UFUNC_FPE_UNDERFLOW : 0) \ - | ((SW_INVALID & fpstatus) ? UFUNC_FPE_INVALID : 0); \ - } -#endif - -/* Solaris --------------------------------------------------------*/ -/* --------ignoring SunOS ieee_flags approach, someone else can -** deal with that! */ -#elif defined(sun) || defined(__BSD__) || defined(__OpenBSD__) || \ - (defined(__FreeBSD__) && (__FreeBSD_version < 502114)) || \ - defined(__NetBSD__) -#include - -#define UFUNC_CHECK_STATUS(ret) { \ - int fpstatus; \ - \ - fpstatus = (int) fpgetsticky(); \ - ret = ((FP_X_DZ & fpstatus) ? UFUNC_FPE_DIVIDEBYZERO : 0) \ - | ((FP_X_OFL & fpstatus) ? UFUNC_FPE_OVERFLOW : 0) \ - | ((FP_X_UFL & fpstatus) ? UFUNC_FPE_UNDERFLOW : 0) \ - | ((FP_X_INV & fpstatus) ? UFUNC_FPE_INVALID : 0); \ - (void) fpsetsticky(0); \ - } - -#elif defined(__GLIBC__) || defined(__APPLE__) || \ - defined(__CYGWIN__) || defined(__MINGW32__) || \ - (defined(__FreeBSD__) && (__FreeBSD_version >= 502114)) - -#if defined(__GLIBC__) || defined(__APPLE__) || \ - defined(__MINGW32__) || defined(__FreeBSD__) -#include -#elif defined(__CYGWIN__) -#include "numpy/fenv/fenv.h" -#endif - -#define UFUNC_CHECK_STATUS(ret) { \ - int fpstatus = (int) fetestexcept(FE_DIVBYZERO | FE_OVERFLOW | \ - FE_UNDERFLOW | FE_INVALID); \ - ret = ((FE_DIVBYZERO & fpstatus) ? UFUNC_FPE_DIVIDEBYZERO : 0) \ - | ((FE_OVERFLOW & fpstatus) ? UFUNC_FPE_OVERFLOW : 0) \ - | ((FE_UNDERFLOW & fpstatus) ? UFUNC_FPE_UNDERFLOW : 0) \ - | ((FE_INVALID & fpstatus) ? UFUNC_FPE_INVALID : 0); \ - (void) feclearexcept(FE_DIVBYZERO | FE_OVERFLOW | \ - FE_UNDERFLOW | FE_INVALID); \ -} - -#elif defined(_AIX) - -#include -#include - -#define UFUNC_CHECK_STATUS(ret) { \ - fpflag_t fpstatus; \ - \ - fpstatus = fp_read_flag(); \ - ret = ((FP_DIV_BY_ZERO & fpstatus) ? UFUNC_FPE_DIVIDEBYZERO : 0) \ - | ((FP_OVERFLOW & fpstatus) ? UFUNC_FPE_OVERFLOW : 0) \ - | ((FP_UNDERFLOW & fpstatus) ? UFUNC_FPE_UNDERFLOW : 0) \ - | ((FP_INVALID & fpstatus) ? UFUNC_FPE_INVALID : 0); \ - fp_swap_flag(0); \ -} - -#else - -#define NO_FLOATING_POINT_SUPPORT -#define UFUNC_CHECK_STATUS(ret) { \ - ret = 0; \ - } - -#endif - -/* - * THESE MACROS ARE DEPRECATED. - * Use npy_set_floatstatus_* in the npymath library. 
- */ -#define generate_divbyzero_error() npy_set_floatstatus_divbyzero() -#define generate_overflow_error() npy_set_floatstatus_overflow() - - /* Make sure it gets defined if it isn't already */ -#ifndef UFUNC_NOFPE -#define UFUNC_NOFPE -#endif - - -#ifdef __cplusplus -} -#endif -#endif /* !Py_UFUNCOBJECT_H */ diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -275,14 +275,20 @@ steps = alloc_raw_storage(LONG_SIZE * len(args_w), track_allocation=False) for i in range(len(args_w)): arg_i = args_w[i] - assert isinstance(arg_i, W_NDimArray) - raw_storage_setitem(dataps, CCHARP_SIZE * i, rffi.cast(rffi.CCHARP, arg_i.implementation.storage)) - #This assumes we iterate over the whole array (it should be a view...) - raw_storage_setitem(dims, LONG_SIZE * i, rffi.cast(rffi.LONG, arg_i.get_size())) - raw_storage_setitem(steps, LONG_SIZE * i, rffi.cast(rffi.LONG, arg_i.get_dtype().elsize)) + if isinstance(arg_i, W_NDimArray): + raw_storage_setitem(dataps, CCHARP_SIZE * i, + rffi.cast(rffi.CCHARP, arg_i.implementation.storage)) + #This assumes we iterate over the whole array (it should be a view...) + raw_storage_setitem(dims, LONG_SIZE * i, rffi.cast(rffi.LONG, arg_i.get_size())) + raw_storage_setitem(steps, LONG_SIZE * i, rffi.cast(rffi.LONG, arg_i.get_dtype().elsize)) + else: + raise OperationError(space.w_NotImplementedError, + space.wrap("cannot call GenericUFunc with %r as arg %d" % (arg_i, i))) try: - self.func(rffi.cast(rffi.CArrayPtr(rffi.CCHARP), dataps), - rffi.cast(npy_intpp, dims), rffi.cast(npy_intpp, steps), self.data) + arg1 = rffi.cast(rffi.CArrayPtr(rffi.CCHARP), dataps) + arg2 = rffi.cast(npy_intpp, dims) + arg3 = rffi.cast(npy_intpp, steps) + self.func(arg1, arg2, arg3, self.data) finally: free_raw_storage(dataps, track_allocation=False) free_raw_storage(dims, track_allocation=False) @@ -295,7 +301,8 @@ GenericUfunc = lltype.FuncType([rffi.CArrayPtr(rffi.CCHARP), npy_intpp, npy_intpp, rffi.VOIDP], lltype.Void) gufunctype = lltype.Ptr(GenericUfunc) -# XXX single rffi.CArrayPtr(gufunctype) does not work, this does, why??? +# XXX single rffi.CArrayPtr(gufunctype) does not work, this does, is there +# a problem with casting function pointers? 
@cpython_api([rffi.CArrayPtr(rffi.CArrayPtr(gufunctype)), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t, Py_ssize_t, Py_ssize_t, rffi.CCHARP, rffi.CCHARP, Py_ssize_t, rffi.CCHARP], PyObject) diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -316,7 +316,7 @@ def test_ufunc(self): from _numpypy.multiarray import arange mod = self.import_extension('foo', [ - ("create_ufunc", "METH_NOARGS", + ("create_ufunc_basic", "METH_NOARGS", """ PyUFuncGenericFunction funcs[] = {&double_times2, &int_times2}; char types[] = { NPY_DOUBLE,NPY_DOUBLE, NPY_INT, NPY_INT }; @@ -328,8 +328,27 @@ return retval; """ ), - ], prologue='''#include "numpy/ndarraytypes.h" - #include + ("create_ufunc_signature", "METH_NOARGS", + """ + PyUFuncGenericFunction funcs[] = {&double_times2, &int_times2}; + char types[] = { NPY_DOUBLE,NPY_DOUBLE, NPY_INT, NPY_INT }; + void *array_data[] = {NULL, NULL}; + PyObject * retval; + retval = PyUFunc_FromFuncAndDataAndSignature(funcs, + array_data, types, 2, 1, 1, PyUFunc_None, + "times2", "times2_docstring", 0, "(m)->(m)"); + return retval; + """ + ), + ], prologue=''' + #include "numpy/ndarraytypes.h" + /*#include */ + typedef void (*PyUFuncGenericFunction) + (char **args, + npy_intp *dimensions, + npy_intp *strides, + void *innerloopdata); + #define PyUFunc_None -1 void double_times2(char **args, npy_intp *dimensions, npy_intp* steps, void* data) { @@ -372,7 +391,10 @@ out += out_step; }; }; ''') - times2 = mod.create_ufunc() + times2 = mod.create_ufunc_basic() arr = arange(12, dtype='i').reshape(3, 4) - out = times2(arr) + out = times2(arr, sig='(d)->(d)', extobj=[0, 0, None]) assert (out == arr * 2).all() + times2prime = mod.create_ufunc_signature() + out = times2prime(arr, sig='(d)->(d)', extobj=[0, 0, None]) + assert (out == arr * 2).all() diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -579,7 +579,6 @@ # func is going to do all the work arglist = space.newlist(inargs + outargs) func = self.funcs[index] - arglist = space.newlist(inargs + outargs) space.call_args(func, Arguments.frompacked(space, arglist)) if len(outargs) < 2: return outargs0 @@ -1051,7 +1050,7 @@ if not space.is_true(space.callable(w_func)): raise oefmt(space.w_TypeError, 'func must be callable') func = [w_func] - match_dtypes = False + match_dtypes = False if space.is_none(w_dtypes) and not signature: raise oefmt(space.w_NotImplementedError, 'object dtype requested but not implemented') @@ -1061,7 +1060,7 @@ if space.isinstance_w(_dtypes[0], space.w_str) and space.str_w(_dtypes[0]) == 'match': dtypes = [] match_dtypes = True - else: + else: dtypes = [None]*len(_dtypes) for i in range(len(dtypes)): dtypes[i] = descriptor.decode_w_dtype(space, _dtypes[i]) @@ -1074,7 +1073,7 @@ elif space.isinstance_w(w_identity, space.w_int): identity = \ descriptor.get_dtype_cache(space).w_longdtype.box(space.int_w(w_identity)) - else: + else: raise oefmt(space.w_ValueError, 'identity must be None or an int') From noreply at buildbot.pypy.org Fri Oct 10 16:13:50 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 10 Oct 2014 16:13:50 +0200 (CEST) Subject: [pypy-commit] pypy ufuncapi: merge default into branch Message-ID: <20141010141350.213C31C1347@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufuncapi Changeset: r73884:0587bee36e87 Date: 
2014-10-10 17:07 +0300 http://bitbucket.org/pypy/pypy/changeset/0587bee36e87/ Log: merge default into branch diff too long, truncating to 2000 out of 10352 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -367,3 +367,43 @@ Detailed license information is contained in the NOTICE file in the directory. + +Licenses and Acknowledgements for Incorporated Software +======================================================= + +This section is an incomplete, but growing list of licenses and +acknowledgements for third-party software incorporated in the PyPy +distribution. + +License for 'Tcl/Tk' +-------------------- + +This copy of PyPy contains library code that may, when used, result in +the Tcl/Tk library to be loaded. PyPy also includes code that may be +regarded as being a copy of some parts of the Tcl/Tk header files. +You may see a copy of the License for Tcl/Tk in the file +`lib_pypy/_tkinter/license.terms` included here. + +License for 'bzip2' +------------------- + +This copy of PyPy may be linked (dynamically or statically) with the +bzip2 library. You may see a copy of the License for bzip2/libbzip2 at + + http://www.bzip.org/1.0.5/bzip2-manual-1.0.5.html + +License for 'openssl' +--------------------- + +This copy of PyPy may be linked (dynamically or statically) with the +openssl library. You may see a copy of the License for OpenSSL at + + https://www.openssl.org/source/license.html + +License for 'gdbm' +------------------ + +The gdbm module includes code from gdbm.h, which is distributed under +the terms of the GPL license version 2 or any later version. Thus the +gdbm module, provided in the file lib_pypy/gdbm.py, is redistributed +under the terms of the GPL license as well. diff --git a/lib-python/2.7/test/test_select.py b/lib-python/2.7/test/test_select.py --- a/lib-python/2.7/test/test_select.py +++ b/lib-python/2.7/test/test_select.py @@ -62,7 +62,12 @@ # removes an item and at the middle the iteration stops. # PyPy: 'a' ends up empty, because the iteration is done on # a copy of the original list: fileno() is called 10 times. - self.assert_(len(result[1]) <= 5) + if test_support.check_impl_detail(cpython=True): + self.assertEqual(len(result[1]), 5) + self.assertEqual(len(a), 5) + if test_support.check_impl_detail(pypy=True): + self.assertEqual(len(result[1]), 10) + self.assertEqual(len(a), 0) def test_main(): test_support.run_unittest(SelectTestCase) diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -6,3 +6,12 @@ .. this is a revision shortly after release-2.4.x .. startrev: 7026746cbb1b +.. branch: win32-fixes5 +Fix c code generation for msvc so empty "{ }" are avoided in unions, +Avoid re-opening files created with NamedTemporaryFile, +Allocate by 4-byte chunks in rffi_platform, +Skip testing objdump if it does not exist, +and other small adjustments in own tests + +.. branch: rtyper-stuff +Small internal refactorings in the rtyper. diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -37,6 +37,13 @@ using a 32 bit Python and vice versa. By default pypy is built using the Multi-threaded DLL (/MD) runtime environment. +If you wish to override this detection method to use a different compiler +(mingw or a different version of MSVC): + +* set up the PATH and other environment variables as needed +* set the `CC` environment variable to compiler exe to be used, + for a different version of MSVC `SET CC=cl.exe`. 
+ **Note:** PyPy is currently not supported for 64 bit Python, and translation will fail in this case. @@ -264,7 +271,7 @@ Since hacking on PyPy means running tests, you will need a way to specify the mingw compiler when hacking (as opposed to translating). As of March 2012, --cc is not a valid option for pytest.py. However if you set an -environment variable CC to the compliter exe, testing will use it. +environment variable CC to the compiler exe, testing will use it. .. _`mingw32 build`: http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win32/Automated%20Builds .. _`mingw64 build`: http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win64/Automated%20Builds diff --git a/pypy/module/_socket/__init__.py b/pypy/module/_socket/__init__.py --- a/pypy/module/_socket/__init__.py +++ b/pypy/module/_socket/__init__.py @@ -17,8 +17,6 @@ def startup(self, space): from rpython.rlib.rsocket import rsocket_startup rsocket_startup() - from pypy.module._socket.interp_func import State - space.fromcache(State).startup(space) def buildloaders(cls): from rpython.rlib import rsocket diff --git a/pypy/module/_socket/interp_func.py b/pypy/module/_socket/interp_func.py --- a/pypy/module/_socket/interp_func.py +++ b/pypy/module/_socket/interp_func.py @@ -1,5 +1,6 @@ from rpython.rlib import rsocket from rpython.rlib.rsocket import SocketError, INVALID_SOCKET +from rpython.rlib.rarithmetic import intmask from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import unwrap_spec, WrappedDefault @@ -46,9 +47,8 @@ Return the true host name, a list of aliases, and a list of IP addresses, for a host. The host argument is a string giving a host name or IP number. """ - lock = space.fromcache(State).netdb_lock try: - res = rsocket.gethostbyname_ex(host, lock) + res = rsocket.gethostbyname_ex(host) except SocketError, e: raise converted_error(space, e) return common_wrapgethost(space, res) @@ -60,9 +60,8 @@ Return the true host name, a list of aliases, and a list of IP addresses, for a host. The host argument is a string giving a host name or IP number. """ - lock = space.fromcache(State).netdb_lock try: - res = rsocket.gethostbyaddr(host, lock) + res = rsocket.gethostbyaddr(host) except SocketError, e: raise converted_error(space, e) return common_wrapgethost(space, res) @@ -174,7 +173,7 @@ Convert a 16-bit integer from network to host byte order. """ - return space.wrap(rsocket.ntohs(x)) + return space.wrap(rsocket.ntohs(intmask(x))) @unwrap_spec(x="c_uint") def ntohl(space, x): @@ -190,7 +189,7 @@ Convert a 16-bit integer from host to network byte order. 
""" - return space.wrap(rsocket.htons(x)) + return space.wrap(rsocket.htons(intmask(x))) @unwrap_spec(x="c_uint") def htonl(space, x): @@ -319,10 +318,3 @@ raise OperationError(space.w_ValueError, space.wrap('Timeout value out of range')) rsocket.setdefaulttimeout(timeout) - -class State(object): - def __init__(self, space): - self.netdb_lock = None - - def startup(self, space): - self.netdb_lock = space.allocate_lock() diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -109,10 +109,11 @@ # XXX Hack to seperate rpython and pypy def make_ushort_port(space, port): + assert isinstance(port, int) if port < 0 or port > 0xffff: raise OperationError(space.w_OverflowError, space.wrap( "port must be 0-65535.")) - return rffi.cast(rffi.USHORT, port) + return port def make_unsigned_flowinfo(space, flowinfo): if flowinfo < 0 or flowinfo > 0xfffff: @@ -401,8 +402,10 @@ The value argument can either be an integer or a string. """ try: - optval = space.int_w(w_optval) - except: + optval = space.c_int_w(w_optval) + except OperationError, e: + if e.async(space): + raise optval = space.str_w(w_optval) try: self.sock.setsockopt(level, optname, optval) diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -498,6 +498,13 @@ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) reuse = s.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) assert reuse == 0 + # + raises(TypeError, s.setsockopt, socket.SOL_SOCKET, + socket.SO_REUSEADDR, 2 ** 31) + raises(TypeError, s.setsockopt, socket.SOL_SOCKET, + socket.SO_REUSEADDR, 2 ** 32 + 1) + assert s.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 0 + # s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) reuse = s.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) assert reuse != 0 diff --git a/pypy/module/_winreg/interp_winreg.py b/pypy/module/_winreg/interp_winreg.py --- a/pypy/module/_winreg/interp_winreg.py +++ b/pypy/module/_winreg/interp_winreg.py @@ -266,10 +266,16 @@ buf = None if typ == rwinreg.REG_DWORD: - if space.isinstance_w(w_value, space.w_int): + if space.is_none(w_value) or ( + space.isinstance_w(w_value, space.w_int) or + space.isinstance_w(w_value, space.w_long)): + if space.is_none(w_value): + value = r_uint(0) + else: + value = space.c_uint_w(w_value) buflen = rffi.sizeof(rwin32.DWORD) buf1 = lltype.malloc(rffi.CArray(rwin32.DWORD), 1, flavor='raw') - buf1[0] = space.uint_w(w_value) + buf1[0] = value buf = rffi.cast(rffi.CCHARP, buf1) elif typ == rwinreg.REG_SZ or typ == rwinreg.REG_EXPAND_SZ: diff --git a/pypy/module/_winreg/test/test_winreg.py b/pypy/module/_winreg/test/test_winreg.py --- a/pypy/module/_winreg/test/test_winreg.py +++ b/pypy/module/_winreg/test/test_winreg.py @@ -40,7 +40,7 @@ cls.w_tmpfilename = space.wrap(str(udir.join('winreg-temp'))) test_data = [ - ("Int Value", 45, _winreg.REG_DWORD), + ("Int Value", 0xFEDCBA98, _winreg.REG_DWORD), ("Str Value", "A string Value", _winreg.REG_SZ), ("Unicode Value", u"A unicode Value", _winreg.REG_SZ), ("Str Expand", "The path is %path%", _winreg.REG_EXPAND_SZ), @@ -137,9 +137,11 @@ assert 0, "Did not raise" def test_SetValueEx(self): - from _winreg import CreateKey, SetValueEx, REG_BINARY + from _winreg import CreateKey, SetValueEx, REG_BINARY, REG_DWORD key = CreateKey(self.root_key, self.test_key_name) sub_key = 
CreateKey(key, "sub_key") + SetValueEx(sub_key, 'Int Value', 0, REG_DWORD, None) + SetValueEx(sub_key, 'Int Value', 0, REG_DWORD, 45) for name, value, type in self.test_data: SetValueEx(sub_key, name, 0, type, value) exc = raises(TypeError, SetValueEx, sub_key, 'test_name', None, diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -116,6 +116,8 @@ validate_fd(fileno(fp)) return _feof(fp) +def is_valid_fp(fp): + return is_valid_fd(fileno(fp)) constant_names = """ Py_TPFLAGS_READY Py_TPFLAGS_READYING Py_TPFLAGS_HAVE_GETCHARBUFFER diff --git a/pypy/module/cpyext/eval.py b/pypy/module/cpyext/eval.py --- a/pypy/module/cpyext/eval.py +++ b/pypy/module/cpyext/eval.py @@ -3,7 +3,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( cpython_api, CANNOT_FAIL, CONST_STRING, FILEP, fread, feof, Py_ssize_tP, - cpython_struct) + cpython_struct, is_valid_fp) from pypy.module.cpyext.pyobject import PyObject, borrow_from from pypy.module.cpyext.pyerrors import PyErr_SetFromErrno from pypy.module.cpyext.funcobject import PyCodeObject @@ -154,6 +154,10 @@ source = "" filename = rffi.charp2str(filename) buf = lltype.malloc(rffi.CCHARP.TO, BUF_SIZE, flavor='raw') + if not is_valid_fp(fp): + lltype.free(buf, flavor='raw') + PyErr_SetFromErrno(space, space.w_IOError) + return None try: while True: count = fread(buf, 1, BUF_SIZE, fp) diff --git a/pypy/module/cpyext/test/test_eval.py b/pypy/module/cpyext/test/test_eval.py --- a/pypy/module/cpyext/test/test_eval.py +++ b/pypy/module/cpyext/test/test_eval.py @@ -89,12 +89,12 @@ rffi.free_charp(buf) assert 0 == run("42 * 43") - + assert -1 == run("4..3 * 43") - + assert api.PyErr_Occurred() api.PyErr_Clear() - + def test_run_string(self, space, api): def run(code, start, w_globals, w_locals): buf = rffi.str2charp(code) diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -20,6 +20,7 @@ 'concatenate': 'arrayops.concatenate', 'count_nonzero': 'arrayops.count_nonzero', 'dot': 'arrayops.dot', + 'result_type': 'arrayops.result_type', 'where': 'arrayops.where', 'set_string_function': 'appbridge.set_string_function', diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -16,9 +16,9 @@ dtype = test.dtype length = math.ceil((float(stop) - start) / step) length = int(length) - arr = _numpypy.multiarray.zeros(length, dtype=dtype) + arr = _numpypy.multiarray.empty(length, dtype=dtype) i = start - for j in range(arr.size): + for j in xrange(arr.size): arr[j] = i i += step return arr diff --git a/pypy/module/micronumpy/arrayops.py b/pypy/module/micronumpy/arrayops.py --- a/pypy/module/micronumpy/arrayops.py +++ b/pypy/module/micronumpy/arrayops.py @@ -1,3 +1,4 @@ +from rpython.rlib import jit from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec from pypy.module.micronumpy import loop, descriptor, ufuncs, support, \ @@ -6,6 +7,7 @@ from pypy.module.micronumpy.converters import clipmode_converter from pypy.module.micronumpy.strides import Chunk, Chunks, shape_agreement, \ shape_agreement_multiple +from .boxes import W_GenericBox def where(space, w_arr, w_x=None, w_y=None): @@ -283,3 +285,28 @@ else: loop.diagonal_array(space, arr, out, offset, axis1, axis2, shape) return out + 
+ + at jit.unroll_safe +def result_type(space, __args__): + args_w, kw_w = __args__.unpack() + if kw_w: + raise oefmt(space.w_TypeError, "result_type() takes no keyword arguments") + if not args_w: + raise oefmt(space.w_ValueError, "at least one array or dtype is required") + result = None + for w_arg in args_w: + if isinstance(w_arg, W_NDimArray): + dtype = w_arg.get_dtype() + elif isinstance(w_arg, W_GenericBox) or ( + space.isinstance_w(w_arg, space.w_int) or + space.isinstance_w(w_arg, space.w_float) or + space.isinstance_w(w_arg, space.w_complex) or + space.isinstance_w(w_arg, space.w_long) or + space.isinstance_w(w_arg, space.w_bool)): + dtype = ufuncs.find_dtype_for_scalar(space, w_arg) + else: + dtype = space.interp_w(descriptor.W_Dtype, + space.call_function(space.gettypefor(descriptor.W_Dtype), w_arg)) + result = ufuncs.find_binop_result_dtype(space, result, dtype) + return result diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -36,7 +36,7 @@ SINGLE_ARG_FUNCTIONS = ["sum", "prod", "max", "min", "all", "any", "unegative", "flat", "tostring","count_nonzero", "argsort"] -TWO_ARG_FUNCTIONS = ["dot", 'take'] +TWO_ARG_FUNCTIONS = ["dot", 'take', 'searchsorted'] TWO_ARG_FUNCTIONS_OR_NONE = ['view', 'astype'] THREE_ARG_FUNCTIONS = ['where'] @@ -109,6 +109,9 @@ if stop < 0: stop += size + 1 if step < 0: + start, stop = stop, start + start -= 1 + stop -= 1 lgt = (stop - start + 1) / step + 1 else: lgt = (stop - start - 1) / step + 1 @@ -475,7 +478,6 @@ class SliceConstant(Node): def __init__(self, start, stop, step): - # no negative support for now self.start = start self.stop = stop self.step = step @@ -582,6 +584,9 @@ w_res = arr.descr_dot(interp.space, arg) elif self.name == 'take': w_res = arr.descr_take(interp.space, arg) + elif self.name == "searchsorted": + w_res = arr.descr_searchsorted(interp.space, arg, + interp.space.wrap('left')) else: assert False # unreachable code elif self.name in THREE_ARG_FUNCTIONS: diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -19,6 +19,7 @@ 'strides[*]', 'backstrides[*]', 'order'] start = 0 parent = None + flags = 0 # JIT hints that length of all those arrays is a constant @@ -357,11 +358,11 @@ self.dtype = dtype def argsort(self, space, w_axis): - from pypy.module.micronumpy.sort import argsort_array + from .selection import argsort_array return argsort_array(self, space, w_axis) def sort(self, space, w_axis, w_order): - from pypy.module.micronumpy.sort import sort_array + from .selection import sort_array return sort_array(self, space, w_axis, w_order) def base(self): diff --git a/pypy/module/micronumpy/constants.py b/pypy/module/micronumpy/constants.py --- a/pypy/module/micronumpy/constants.py +++ b/pypy/module/micronumpy/constants.py @@ -65,6 +65,9 @@ FLOATINGLTR = 'f' COMPLEXLTR = 'c' +SEARCHLEFT = 0 +SEARCHRIGHT = 1 + ANYORDER = -1 CORDER = 0 FORTRANORDER = 1 @@ -74,6 +77,9 @@ WRAP = 1 RAISE = 2 +ARRAY_C_CONTIGUOUS = 0x0001 +ARRAY_F_CONTIGUOUS = 0x0002 + LITTLE = '<' BIG = '>' NATIVE = '=' diff --git a/pypy/module/micronumpy/converters.py b/pypy/module/micronumpy/converters.py --- a/pypy/module/micronumpy/converters.py +++ b/pypy/module/micronumpy/converters.py @@ -1,4 +1,4 @@ -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt from 
pypy.module.micronumpy import constants as NPY @@ -41,6 +41,23 @@ space.wrap("clipmode not understood")) +def searchside_converter(space, w_obj): + try: + s = space.str_w(w_obj) + except OperationError: + s = None + if not s: + raise oefmt(space.w_ValueError, + "expected nonempty string for keyword 'side'") + if s[0] == 'l' or s[0] == 'L': + return NPY.SEARCHLEFT + elif s[0] == 'r' or s[0] == 'R': + return NPY.SEARCHRIGHT + else: + raise oefmt(space.w_ValueError, + "'%s' is an invalid value for keyword 'side'", s) + + def order_converter(space, w_order, default): if space.is_none(w_order): return default diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -3,7 +3,7 @@ from rpython.rlib.buffer import SubBuffer from rpython.rlib.rstring import strip_spaces from rpython.rtyper.lltypesystem import lltype, rffi -from pypy.module.micronumpy import descriptor, loop +from pypy.module.micronumpy import descriptor, loop, support from pypy.module.micronumpy.base import ( W_NDimArray, convert_to_array, W_NumpyObject) from pypy.module.micronumpy.converters import shape_converter @@ -134,6 +134,15 @@ if dtype.is_str_or_unicode() and dtype.elsize < 1: dtype = descriptor.variable_dtype(space, dtype.char + '1') shape = shape_converter(space, w_shape, dtype) + for dim in shape: + if dim < 0: + raise OperationError(space.w_ValueError, space.wrap( + "negative dimensions are not allowed")) + try: + support.product(shape) + except OverflowError: + raise OperationError(space.w_ValueError, space.wrap( + "array is too big.")) return W_NDimArray.from_shape(space, shape, dtype=dtype, zero=zero) def empty(space, w_shape, w_dtype=None, w_order=None): diff --git a/pypy/module/micronumpy/flagsobj.py b/pypy/module/micronumpy/flagsobj.py --- a/pypy/module/micronumpy/flagsobj.py +++ b/pypy/module/micronumpy/flagsobj.py @@ -2,6 +2,46 @@ from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef, GetSetProperty +from pypy.module.micronumpy import constants as NPY + + +def enable_flags(arr, flags): + arr.flags |= flags + + +def clear_flags(arr, flags): + arr.flags &= ~flags + + +def _update_contiguous_flags(arr): + shape = arr.shape + strides = arr.strides + + is_c_contig = True + sd = arr.dtype.elsize + for i in range(len(shape) - 1, -1, -1): + dim = shape[i] + if strides[i] != sd: + is_c_contig = False + break + if dim == 0: + break + sd *= dim + if is_c_contig: + enable_flags(arr, NPY.ARRAY_C_CONTIGUOUS) + else: + clear_flags(arr, NPY.ARRAY_C_CONTIGUOUS) + + sd = arr.dtype.elsize + for i in range(len(shape)): + dim = shape[i] + if strides[i] != sd: + clear_flags(arr, NPY.ARRAY_F_CONTIGUOUS) + return + if dim == 0: + break + sd *= dim + enable_flags(arr, NPY.ARRAY_F_CONTIGUOUS) class W_FlagsObject(W_Root): diff --git a/pypy/module/micronumpy/flatiter.py b/pypy/module/micronumpy/flatiter.py --- a/pypy/module/micronumpy/flatiter.py +++ b/pypy/module/micronumpy/flatiter.py @@ -1,7 +1,10 @@ from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.gateway import interp2app +from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.module.micronumpy import loop -from pypy.module.micronumpy.base import W_NDimArray, convert_to_array +from pypy.module.micronumpy.base import convert_to_array from pypy.module.micronumpy.concrete import BaseConcreteArray +from .ndarray import W_NDimArray class 
FakeArrayImplementation(BaseConcreteArray): @@ -27,12 +30,22 @@ class W_FlatIterator(W_NDimArray): def __init__(self, arr): self.base = arr + self.iter, self.state = arr.create_iter() # this is needed to support W_NDimArray interface self.implementation = FakeArrayImplementation(self.base) - self.reset() - def reset(self): - self.iter, self.state = self.base.create_iter() + def descr_base(self, space): + return space.wrap(self.base) + + def descr_index(self, space): + return space.wrap(self.state.index) + + def descr_coords(self, space): + self.state = self.iter.update(self.state) + return space.newtuple([space.wrap(c) for c in self.state.indices]) + + def descr_iter(self): + return self def descr_len(self, space): return space.wrap(self.iter.size) @@ -44,40 +57,59 @@ self.state = self.iter.next(self.state) return w_res - def descr_index(self, space): - return space.wrap(self.state.index) - - def descr_coords(self, space): - return space.newtuple([space.wrap(c) for c in self.state.indices]) - def descr_getitem(self, space, w_idx): if not (space.isinstance_w(w_idx, space.w_int) or space.isinstance_w(w_idx, space.w_slice)): raise oefmt(space.w_IndexError, 'unsupported iterator index') - self.reset() - base = self.base - start, stop, step, length = space.decode_index4(w_idx, base.get_size()) - base_iter, base_state = base.create_iter() - base_state = base_iter.next_skip_x(base_state, start) - if length == 1: - return base_iter.getitem(base_state) - res = W_NDimArray.from_shape(space, [length], base.get_dtype(), - base.get_order(), w_instance=base) - return loop.flatiter_getitem(res, base_iter, base_state, step) + try: + start, stop, step, length = space.decode_index4(w_idx, self.iter.size) + state = self.iter.goto(start) + if length == 1: + return self.iter.getitem(state) + base = self.base + res = W_NDimArray.from_shape(space, [length], base.get_dtype(), + base.get_order(), w_instance=base) + return loop.flatiter_getitem(res, self.iter, state, step) + finally: + self.state = self.iter.reset(self.state) def descr_setitem(self, space, w_idx, w_value): if not (space.isinstance_w(w_idx, space.w_int) or space.isinstance_w(w_idx, space.w_slice)): raise oefmt(space.w_IndexError, 'unsupported iterator index') - base = self.base - start, stop, step, length = space.decode_index4(w_idx, base.get_size()) - arr = convert_to_array(space, w_value) - loop.flatiter_setitem(space, self.base, arr, start, step, length) + start, stop, step, length = space.decode_index4(w_idx, self.iter.size) + try: + state = self.iter.goto(start) + dtype = self.base.get_dtype() + if length == 1: + try: + val = dtype.coerce(space, w_value) + except OperationError: + raise oefmt(space.w_ValueError, "Error setting single item of array.") + self.iter.setitem(state, val) + return + arr = convert_to_array(space, w_value) + loop.flatiter_setitem(space, dtype, arr, self.iter, state, step, length) + finally: + self.state = self.iter.reset(self.state) - def descr_iter(self): - return self - def descr_base(self, space): - return space.wrap(self.base) +W_FlatIterator.typedef = TypeDef("numpy.flatiter", + base = GetSetProperty(W_FlatIterator.descr_base), + index = GetSetProperty(W_FlatIterator.descr_index), + coords = GetSetProperty(W_FlatIterator.descr_coords), -# typedef is in interp_ndarray, so we see the additional arguments + __iter__ = interp2app(W_FlatIterator.descr_iter), + __len__ = interp2app(W_FlatIterator.descr_len), + next = interp2app(W_FlatIterator.descr_next), + + __getitem__ = interp2app(W_FlatIterator.descr_getitem), + 
__setitem__ = interp2app(W_FlatIterator.descr_setitem), + + __eq__ = interp2app(W_FlatIterator.descr_eq), + __ne__ = interp2app(W_FlatIterator.descr_ne), + __lt__ = interp2app(W_FlatIterator.descr_lt), + __le__ = interp2app(W_FlatIterator.descr_le), + __gt__ = interp2app(W_FlatIterator.descr_gt), + __ge__ = interp2app(W_FlatIterator.descr_ge), +) diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -35,14 +35,11 @@ [x.strides[i] * (x.shape[i] - 1) for i in range(len(x.shape))] we can go faster. All the calculations happen in next() - -next_skip_x(steps) tries to do the iteration for a number of steps at once, -but then we cannot guarantee that we only overflow one single shape -dimension, perhaps we could overflow times in one big step. """ from rpython.rlib import jit -from pypy.module.micronumpy import support +from pypy.module.micronumpy import support, constants as NPY from pypy.module.micronumpy.base import W_NDimArray +from pypy.module.micronumpy.flagsobj import _update_contiguous_flags class PureShapeIter(object): @@ -80,7 +77,7 @@ class IterState(object): - _immutable_fields_ = ['iterator', 'index', 'indices[*]', 'offset'] + _immutable_fields_ = ['iterator', 'index', 'indices', 'offset'] def __init__(self, iterator, index, indices, offset): self.iterator = iterator @@ -90,11 +87,18 @@ class ArrayIter(object): - _immutable_fields_ = ['array', 'size', 'ndim_m1', 'shape_m1[*]', - 'strides[*]', 'backstrides[*]'] + _immutable_fields_ = ['contiguous', 'array', 'size', 'ndim_m1', 'shape_m1[*]', + 'strides[*]', 'backstrides[*]', 'factors[*]', + 'track_index'] + + track_index = True def __init__(self, array, size, shape, strides, backstrides): assert len(shape) == len(strides) == len(backstrides) + _update_contiguous_flags(array) + self.contiguous = (array.flags & NPY.ARRAY_C_CONTIGUOUS and + array.shape == shape and array.strides == strides) + self.array = array self.size = size self.ndim_m1 = len(shape) - 1 @@ -102,52 +106,79 @@ self.strides = strides self.backstrides = backstrides - def reset(self): - return IterState(self, 0, [0] * len(self.shape_m1), self.array.start) + ndim = len(shape) + factors = [0] * ndim + for i in xrange(ndim): + if i == 0: + factors[ndim-1] = 1 + else: + factors[ndim-i-1] = factors[ndim-i] * shape[ndim-i] + self.factors = factors + + @jit.unroll_safe + def reset(self, state=None): + if state is None: + indices = [0] * len(self.shape_m1) + else: + assert state.iterator is self + indices = state.indices + for i in xrange(self.ndim_m1, -1, -1): + indices[i] = 0 + return IterState(self, 0, indices, self.array.start) @jit.unroll_safe def next(self, state): assert state.iterator is self - index = state.index + 1 + index = state.index + if self.track_index: + index += 1 indices = state.indices offset = state.offset - for i in xrange(self.ndim_m1, -1, -1): - idx = indices[i] - if idx < self.shape_m1[i]: - indices[i] = idx + 1 - offset += self.strides[i] - break - else: - indices[i] = 0 - offset -= self.backstrides[i] + if self.contiguous: + offset += self.array.dtype.elsize + else: + for i in xrange(self.ndim_m1, -1, -1): + idx = indices[i] + if idx < self.shape_m1[i]: + indices[i] = idx + 1 + offset += self.strides[i] + break + else: + indices[i] = 0 + offset -= self.backstrides[i] return IterState(self, index, indices, offset) @jit.unroll_safe - def next_skip_x(self, state, step): + def goto(self, index): + offset = self.array.start + if 
self.contiguous: + offset += index * self.array.dtype.elsize + else: + current = index + for i in xrange(len(self.shape_m1)): + offset += (current / self.factors[i]) * self.strides[i] + current %= self.factors[i] + return IterState(self, index, None, offset) + + @jit.unroll_safe + def update(self, state): assert state.iterator is self - assert step >= 0 - if step == 0: + assert self.track_index + if not self.contiguous: return state - index = state.index + step + current = state.index indices = state.indices - offset = state.offset - for i in xrange(self.ndim_m1, -1, -1): - idx = indices[i] - if idx < (self.shape_m1[i] + 1) - step: - indices[i] = idx + step - offset += self.strides[i] * step - break + for i in xrange(len(self.shape_m1)): + if self.factors[i] != 0: + indices[i] = current / self.factors[i] + current %= self.factors[i] else: - rem_step = (idx + step) // (self.shape_m1[i] + 1) - cur_step = step - rem_step * (self.shape_m1[i] + 1) - indices[i] = idx + cur_step - offset += self.strides[i] * cur_step - step = rem_step - assert step > 0 - return IterState(self, index, indices, offset) + indices[i] = 0 + return IterState(self, state.index, indices, state.offset) def done(self, state): assert state.iterator is self + assert self.track_index return state.index >= self.size def getitem(self, state): diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -49,6 +49,7 @@ left_iter, left_state = w_lhs.create_iter(shape) right_iter, right_state = w_rhs.create_iter(shape) out_iter, out_state = out.create_iter(shape) + left_iter.track_index = right_iter.track_index = False shapelen = len(shape) while not out_iter.done(out_state): call2_driver.jit_merge_point(shapelen=shapelen, func=func, @@ -72,6 +73,7 @@ out = W_NDimArray.from_shape(space, shape, res_dtype, w_instance=w_obj) obj_iter, obj_state = w_obj.create_iter(shape) out_iter, out_state = out.create_iter(shape) + obj_iter.track_index = False shapelen = len(shape) while not out_iter.done(out_state): call1_driver.jit_merge_point(shapelen=shapelen, func=func, @@ -266,6 +268,9 @@ iter, state = y_iter, y_state else: iter, state = x_iter, x_state + out_iter.track_index = x_iter.track_index = False + arr_iter.track_index = y_iter.track_index = False + iter.track_index = True shapelen = len(shape) while not iter.done(state): where_driver.jit_merge_point(shapelen=shapelen, dtype=dtype, @@ -313,6 +318,7 @@ dtype=dtype) assert not arr_iter.done(arr_state) w_val = arr_iter.getitem(arr_state).convert_to(space, dtype) + out_state = out_iter.update(out_state) if out_state.indices[axis] == 0: if identity is not None: w_val = func(dtype, identity, w_val) @@ -382,6 +388,7 @@ assert left_shape[-1] == right_shape[right_critical_dim] assert result.get_dtype() == dtype outi, outs = result.create_iter() + outi.track_index = False lefti = AllButAxisIter(left_impl, len(left_shape) - 1) righti = AllButAxisIter(right_impl, right_critical_dim) lefts = lefti.reset() @@ -406,7 +413,7 @@ outi.setitem(outs, oval) outs = outi.next(outs) rights = righti.next(rights) - rights = righti.reset() + rights = righti.reset(rights) lefts = lefti.next(lefts) return result @@ -444,6 +451,7 @@ while not arr_iter.done(arr_state): nonzero_driver.jit_merge_point(shapelen=shapelen, dims=dims, dtype=dtype) if arr_iter.getitem_bool(arr_state): + arr_state = arr_iter.update(arr_state) for d in dims: res_iter.setitem(res_state, box(arr_state.indices[d])) res_state = res_iter.next(res_state) 
@@ -519,7 +527,7 @@ while not ri.done(rs): flatiter_getitem_driver.jit_merge_point(dtype=dtype) ri.setitem(rs, base_iter.getitem(base_state)) - base_state = base_iter.next_skip_x(base_state, step) + base_state = base_iter.goto(base_state.index + step) rs = ri.next(rs) return res @@ -527,11 +535,8 @@ greens = ['dtype'], reds = 'auto') -def flatiter_setitem(space, arr, val, start, step, length): - dtype = arr.get_dtype() - arr_iter, arr_state = arr.create_iter() +def flatiter_setitem(space, dtype, val, arr_iter, arr_state, step, length): val_iter, val_state = val.create_iter() - arr_state = arr_iter.next_skip_x(arr_state, start) while length > 0: flatiter_setitem_driver.jit_merge_point(dtype=dtype) val = val_iter.getitem(val_state) @@ -540,9 +545,10 @@ else: val = val.convert_to(space, dtype) arr_iter.setitem(arr_state, val) - # need to repeat i_nput values until all assignments are done - arr_state = arr_iter.next_skip_x(arr_state, step) + arr_state = arr_iter.goto(arr_state.index + step) val_state = val_iter.next(val_state) + if val_iter.done(val_state): + val_state = val_iter.reset(val_state) length -= 1 fromstring_driver = jit.JitDriver(name = 'numpy_fromstring', @@ -778,3 +784,43 @@ out_iter.setitem(out_state, arr.getitem_index(space, indexes)) iter.next() out_state = out_iter.next(out_state) + +def _new_binsearch(side, op_name): + binsearch_driver = jit.JitDriver(name='numpy_binsearch_' + side, + greens=['dtype'], + reds='auto') + + def binsearch(space, arr, key, ret): + assert len(arr.get_shape()) == 1 + dtype = key.get_dtype() + op = getattr(dtype.itemtype, op_name) + key_iter, key_state = key.create_iter() + ret_iter, ret_state = ret.create_iter() + ret_iter.track_index = False + size = arr.get_size() + min_idx = 0 + max_idx = size + last_key_val = key_iter.getitem(key_state) + while not key_iter.done(key_state): + key_val = key_iter.getitem(key_state) + if dtype.itemtype.lt(last_key_val, key_val): + max_idx = size + else: + min_idx = 0 + max_idx = max_idx + 1 if max_idx < size else size + last_key_val = key_val + while min_idx < max_idx: + binsearch_driver.jit_merge_point(dtype=dtype) + mid_idx = min_idx + ((max_idx - min_idx) >> 1) + mid_val = arr.getitem(space, [mid_idx]).convert_to(space, dtype) + if op(mid_val, key_val): + min_idx = mid_idx + 1 + else: + max_idx = mid_idx + ret_iter.setitem(ret_state, ret.get_dtype().box(min_idx)) + ret_state = ret_iter.next(ret_state) + key_state = key_iter.next(key_state) + return binsearch + +binsearch_left = _new_binsearch('left', 'lt') +binsearch_right = _new_binsearch('right', 'le') diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -16,9 +16,8 @@ ArrayArgumentException, wrap_impl from pypy.module.micronumpy.concrete import BaseConcreteArray from pypy.module.micronumpy.converters import multi_axis_converter, \ - order_converter, shape_converter + order_converter, shape_converter, searchside_converter from pypy.module.micronumpy.flagsobj import W_FlagsObject -from pypy.module.micronumpy.flatiter import W_FlatIterator from pypy.module.micronumpy.strides import get_shape_from_iterable, \ shape_agreement, shape_agreement_multiple @@ -475,10 +474,13 @@ return repeat(space, self, repeats, w_axis) def descr_set_flatiter(self, space, w_obj): + iter, state = self.create_iter() + dtype = self.get_dtype() arr = convert_to_array(space, w_obj) - loop.flatiter_setitem(space, self, arr, 0, 1, self.get_size()) + loop.flatiter_setitem(space, 
dtype, arr, iter, state, 1, iter.size) def descr_get_flatiter(self, space): + from .flatiter import W_FlatIterator return space.wrap(W_FlatIterator(self)) def descr_item(self, space, __args__): @@ -726,29 +728,22 @@ loop.round(space, self, calc_dtype, self.get_shape(), decimals, out) return out - @unwrap_spec(side=str, w_sorter=WrappedDefault(None)) - def descr_searchsorted(self, space, w_v, side='left', w_sorter=None): + @unwrap_spec(w_side=WrappedDefault('left'), w_sorter=WrappedDefault(None)) + def descr_searchsorted(self, space, w_v, w_side=None, w_sorter=None): if not space.is_none(w_sorter): raise OperationError(space.w_NotImplementedError, space.wrap( 'sorter not supported in searchsort')) - if not side or len(side) < 1: - raise OperationError(space.w_ValueError, space.wrap( - "expected nonempty string for keyword 'side'")) - elif side[0] == 'l' or side[0] == 'L': - side = 'l' - elif side[0] == 'r' or side[0] == 'R': - side = 'r' - else: - raise oefmt(space.w_ValueError, - "'%s' is an invalid value for keyword 'side'", side) - if len(self.get_shape()) > 1: + side = searchside_converter(space, w_side) + if len(self.get_shape()) != 1: raise oefmt(space.w_ValueError, "a must be a 1-d array") v = convert_to_array(space, w_v) - if len(v.get_shape()) > 1: - raise oefmt(space.w_ValueError, "v must be a 1-d array-like") ret = W_NDimArray.from_shape( space, v.get_shape(), descriptor.get_dtype_cache(space).w_longdtype) - app_searchsort(space, self, v, space.wrap(side), ret) + if side == NPY.SEARCHLEFT: + binsearch = loop.binsearch_left + else: + binsearch = loop.binsearch_right + binsearch(space, self, v, ret) if ret.is_scalar(): return ret.get_scalar_value() return ret @@ -1311,31 +1306,6 @@ return res """, filename=__file__).interphook('ptp') -app_searchsort = applevel(r""" - def searchsort(arr, v, side, result): - import operator - def func(a, op, val): - imin = 0 - imax = a.size - while imin < imax: - imid = imin + ((imax - imin) >> 1) - if op(a[imid], val): - imin = imid +1 - else: - imax = imid - return imin - if side == 'l': - op = operator.lt - else: - op = operator.le - if v.size < 2: - result[...] 
= func(arr, op, v) - else: - for i in range(v.size): - result[i] = func(arr, op, v[i]) - return result -""", filename=__file__).interphook('searchsort') - W_NDimArray.typedef = TypeDef("numpy.ndarray", __new__ = interp2app(descr_new_array), @@ -1423,6 +1393,7 @@ flags = GetSetProperty(W_NDimArray.descr_get_flags), fill = interp2app(W_NDimArray.descr_fill), + tobytes = interp2app(W_NDimArray.descr_tostring), tostring = interp2app(W_NDimArray.descr_tostring), mean = interp2app(W_NDimArray.descr_mean), @@ -1501,23 +1472,3 @@ def _reconstruct(space, w_subtype, w_shape, w_dtype): return descr_new_array(space, w_subtype, w_shape, w_dtype) - - -W_FlatIterator.typedef = TypeDef("numpy.flatiter", - __iter__ = interp2app(W_FlatIterator.descr_iter), - __getitem__ = interp2app(W_FlatIterator.descr_getitem), - __setitem__ = interp2app(W_FlatIterator.descr_setitem), - __len__ = interp2app(W_FlatIterator.descr_len), - - __eq__ = interp2app(W_FlatIterator.descr_eq), - __ne__ = interp2app(W_FlatIterator.descr_ne), - __lt__ = interp2app(W_FlatIterator.descr_lt), - __le__ = interp2app(W_FlatIterator.descr_le), - __gt__ = interp2app(W_FlatIterator.descr_gt), - __ge__ = interp2app(W_FlatIterator.descr_ge), - - next = interp2app(W_FlatIterator.descr_next), - base = GetSetProperty(W_FlatIterator.descr_base), - index = GetSetProperty(W_FlatIterator.descr_index), - coords = GetSetProperty(W_FlatIterator.descr_coords), -) diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -313,6 +313,7 @@ # create an iterator for each operand for i in range(len(self.seq)): it = get_iter(space, self.order, self.seq[i], iter_shape, self.dtypes[i]) + it.contiguous = False self.iters.append((it, it.reset())) def set_op_axes(self, space, w_op_axes): diff --git a/pypy/module/micronumpy/selection.py b/pypy/module/micronumpy/selection.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/selection.py @@ -0,0 +1,355 @@ +from pypy.interpreter.error import oefmt +from rpython.rlib.listsort import make_timsort_class +from rpython.rlib.objectmodel import specialize +from rpython.rlib.rarithmetic import widen +from rpython.rlib.rawstorage import raw_storage_getitem, raw_storage_setitem, \ + free_raw_storage, alloc_raw_storage +from rpython.rlib.unroll import unrolling_iterable +from rpython.rtyper.lltypesystem import rffi, lltype +from pypy.module.micronumpy import descriptor, types, constants as NPY +from pypy.module.micronumpy.base import W_NDimArray +from pypy.module.micronumpy.iterators import AllButAxisIter + +INT_SIZE = rffi.sizeof(lltype.Signed) + +all_types = (types.all_float_types + types.all_complex_types + + types.all_int_types) +all_types = [i for i in all_types if not issubclass(i[0], types.Float16)] +all_types = unrolling_iterable(all_types) + + +def make_argsort_function(space, itemtype, comp_type, count=1): + TP = itemtype.T + step = rffi.sizeof(TP) + + class Repr(object): + def __init__(self, index_stride_size, stride_size, size, values, + indexes, index_start, start): + self.index_stride_size = index_stride_size + self.stride_size = stride_size + self.index_start = index_start + self.start = start + self.size = size + self.values = values + self.indexes = indexes + + def getitem(self, item): + if count < 2: + v = raw_storage_getitem(TP, self.values, item * self.stride_size + + self.start) + else: + v = [] + for i in range(count): + _v = raw_storage_getitem(TP, self.values, item * self.stride_size + + 
self.start + step * i) + v.append(_v) + if comp_type == 'int': + v = widen(v) + elif comp_type == 'float': + v = float(v) + elif comp_type == 'complex': + v = [float(v[0]),float(v[1])] + else: + raise NotImplementedError('cannot reach') + return (v, raw_storage_getitem(lltype.Signed, self.indexes, + item * self.index_stride_size + + self.index_start)) + + def setitem(self, idx, item): + if count < 2: + raw_storage_setitem(self.values, idx * self.stride_size + + self.start, rffi.cast(TP, item[0])) + else: + i = 0 + for val in item[0]: + raw_storage_setitem(self.values, idx * self.stride_size + + self.start + i*step, rffi.cast(TP, val)) + i += 1 + raw_storage_setitem(self.indexes, idx * self.index_stride_size + + self.index_start, item[1]) + + class ArgArrayRepWithStorage(Repr): + def __init__(self, index_stride_size, stride_size, size): + start = 0 + dtype = descriptor.get_dtype_cache(space).w_longdtype + indexes = dtype.itemtype.malloc(size * dtype.elsize) + values = alloc_raw_storage(size * stride_size, + track_allocation=False) + Repr.__init__(self, dtype.elsize, stride_size, + size, values, indexes, start, start) + + def __del__(self): + free_raw_storage(self.indexes, track_allocation=False) + free_raw_storage(self.values, track_allocation=False) + + def arg_getitem(lst, item): + return lst.getitem(item) + + def arg_setitem(lst, item, value): + lst.setitem(item, value) + + def arg_length(lst): + return lst.size + + def arg_getitem_slice(lst, start, stop): + retval = ArgArrayRepWithStorage(lst.index_stride_size, lst.stride_size, + stop-start) + for i in range(stop-start): + retval.setitem(i, lst.getitem(i+start)) + return retval + + if count < 2: + def arg_lt(a, b): + # Does numpy do <= ? + return a[0] < b[0] or b[0] != b[0] and a[0] == a[0] + else: + def arg_lt(a, b): + for i in range(count): + if b[0][i] != b[0][i] and a[0][i] == a[0][i]: + return True + elif b[0][i] == b[0][i] and a[0][i] != a[0][i]: + return False + for i in range(count): + if a[0][i] < b[0][i]: + return True + elif a[0][i] > b[0][i]: + return False + # Does numpy do True? 
+ return False + + ArgSort = make_timsort_class(arg_getitem, arg_setitem, arg_length, + arg_getitem_slice, arg_lt) + + def argsort(arr, space, w_axis, itemsize): + if w_axis is space.w_None: + # note that it's fine ot pass None here as we're not going + # to pass the result around (None is the link to base in slices) + if arr.get_size() > 0: + arr = arr.reshape(None, [arr.get_size()]) + axis = 0 + elif w_axis is None: + axis = -1 + else: + axis = space.int_w(w_axis) + # create array of indexes + dtype = descriptor.get_dtype_cache(space).w_longdtype + index_arr = W_NDimArray.from_shape(space, arr.get_shape(), dtype) + storage = index_arr.implementation.get_storage() + if len(arr.get_shape()) == 1: + for i in range(arr.get_size()): + raw_storage_setitem(storage, i * INT_SIZE, i) + r = Repr(INT_SIZE, itemsize, arr.get_size(), arr.get_storage(), + storage, 0, arr.start) + ArgSort(r).sort() + else: + shape = arr.get_shape() + if axis < 0: + axis = len(shape) + axis + if axis < 0 or axis >= len(shape): + raise oefmt(space.w_IndexError, "Wrong axis %d", axis) + arr_iter = AllButAxisIter(arr, axis) + arr_state = arr_iter.reset() + index_impl = index_arr.implementation + index_iter = AllButAxisIter(index_impl, axis) + index_state = index_iter.reset() + stride_size = arr.strides[axis] + index_stride_size = index_impl.strides[axis] + axis_size = arr.shape[axis] + while not arr_iter.done(arr_state): + for i in range(axis_size): + raw_storage_setitem(storage, i * index_stride_size + + index_state.offset, i) + r = Repr(index_stride_size, stride_size, axis_size, + arr.get_storage(), storage, index_state.offset, arr_state.offset) + ArgSort(r).sort() + arr_state = arr_iter.next(arr_state) + index_state = index_iter.next(index_state) + return index_arr + + return argsort + + +def argsort_array(arr, space, w_axis): + cache = space.fromcache(ArgSortCache) # that populates ArgSortClasses + itemtype = arr.dtype.itemtype + for tp in all_types: + if isinstance(itemtype, tp[0]): + return cache._lookup(tp)(arr, space, w_axis, + itemtype.get_element_size()) + # XXX this should probably be changed + raise oefmt(space.w_NotImplementedError, + "sorting of non-numeric types '%s' is not implemented", + arr.dtype.get_name()) + + +def make_sort_function(space, itemtype, comp_type, count=1): + TP = itemtype.T + step = rffi.sizeof(TP) + + class Repr(object): + def __init__(self, stride_size, size, values, start): + self.stride_size = stride_size + self.start = start + self.size = size + self.values = values + + def getitem(self, item): + if count < 2: + v = raw_storage_getitem(TP, self.values, item * self.stride_size + + self.start) + else: + v = [] + for i in range(count): + _v = raw_storage_getitem(TP, self.values, item * self.stride_size + + self.start + step * i) + v.append(_v) + if comp_type == 'int': + v = widen(v) + elif comp_type == 'float': + v = float(v) + elif comp_type == 'complex': + v = [float(v[0]),float(v[1])] + else: + raise NotImplementedError('cannot reach') + return (v) + + def setitem(self, idx, item): + if count < 2: + raw_storage_setitem(self.values, idx * self.stride_size + + self.start, rffi.cast(TP, item)) + else: + i = 0 + for val in item: + raw_storage_setitem(self.values, idx * self.stride_size + + self.start + i*step, rffi.cast(TP, val)) + i += 1 + + class ArgArrayRepWithStorage(Repr): + def __init__(self, stride_size, size): + start = 0 + values = alloc_raw_storage(size * stride_size, + track_allocation=False) + Repr.__init__(self, stride_size, + size, values, start) + + def __del__(self): + 
free_raw_storage(self.values, track_allocation=False) + + def arg_getitem(lst, item): + return lst.getitem(item) + + def arg_setitem(lst, item, value): + lst.setitem(item, value) + + def arg_length(lst): + return lst.size + + def arg_getitem_slice(lst, start, stop): + retval = ArgArrayRepWithStorage(lst.stride_size, stop-start) + for i in range(stop-start): + retval.setitem(i, lst.getitem(i+start)) + return retval + + if count < 2: + def arg_lt(a, b): + # handles NAN and INF + return a < b or b != b and a == a + else: + def arg_lt(a, b): + for i in range(count): + if b[i] != b[i] and a[i] == a[i]: + return True + elif b[i] == b[i] and a[i] != a[i]: + return False + for i in range(count): + if a[i] < b[i]: + return True + elif a[i] > b[i]: + return False + # Does numpy do True? + return False + + ArgSort = make_timsort_class(arg_getitem, arg_setitem, arg_length, + arg_getitem_slice, arg_lt) + + def sort(arr, space, w_axis, itemsize): + if w_axis is space.w_None: + # note that it's fine to pass None here as we're not going + # to pass the result around (None is the link to base in slices) + arr = arr.reshape(None, [arr.get_size()]) + axis = 0 + elif w_axis is None: + axis = -1 + else: + axis = space.int_w(w_axis) + # create array of indexes + if len(arr.get_shape()) == 1: + r = Repr(itemsize, arr.get_size(), arr.get_storage(), + arr.start) + ArgSort(r).sort() + else: + shape = arr.get_shape() + if axis < 0: + axis = len(shape) + axis + if axis < 0 or axis >= len(shape): + raise oefmt(space.w_IndexError, "Wrong axis %d", axis) + arr_iter = AllButAxisIter(arr, axis) + arr_state = arr_iter.reset() + stride_size = arr.strides[axis] + axis_size = arr.shape[axis] + while not arr_iter.done(arr_state): + r = Repr(stride_size, axis_size, arr.get_storage(), arr_state.offset) + ArgSort(r).sort() + arr_state = arr_iter.next(arr_state) + + return sort + + +def sort_array(arr, space, w_axis, w_order): + cache = space.fromcache(SortCache) # that populates SortClasses + itemtype = arr.dtype.itemtype + if arr.dtype.byteorder == NPY.OPPBYTE: + raise oefmt(space.w_NotImplementedError, + "sorting of non-native byteorder not supported yet") + for tp in all_types: + if isinstance(itemtype, tp[0]): + return cache._lookup(tp)(arr, space, w_axis, + itemtype.get_element_size()) + # XXX this should probably be changed + raise oefmt(space.w_NotImplementedError, + "sorting of non-numeric types '%s' is not implemented", + arr.dtype.get_name()) + + +class ArgSortCache(object): + built = False + + def __init__(self, space): + if self.built: + return + self.built = True + cache = {} + for cls, it in all_types._items: + if it == 'complex': + cache[cls] = make_argsort_function(space, cls, it, 2) + else: + cache[cls] = make_argsort_function(space, cls, it) + self.cache = cache + self._lookup = specialize.memo()(lambda tp: cache[tp[0]]) + + +class SortCache(object): + built = False + + def __init__(self, space): + if self.built: + return + self.built = True + cache = {} + for cls, it in all_types._items: + if it == 'complex': + cache[cls] = make_sort_function(space, cls, it, 2) + else: + cache[cls] = make_sort_function(space, cls, it) + self.cache = cache + self._lookup = specialize.memo()(lambda tp: cache[tp[0]]) diff --git a/pypy/module/micronumpy/sort.py b/pypy/module/micronumpy/sort.py deleted file mode 100644 --- a/pypy/module/micronumpy/sort.py +++ /dev/null @@ -1,355 +0,0 @@ -from pypy.interpreter.error import oefmt -from rpython.rlib.listsort import make_timsort_class -from rpython.rlib.objectmodel import specialize 
-from rpython.rlib.rarithmetic import widen -from rpython.rlib.rawstorage import raw_storage_getitem, raw_storage_setitem, \ - free_raw_storage, alloc_raw_storage -from rpython.rlib.unroll import unrolling_iterable -from rpython.rtyper.lltypesystem import rffi, lltype -from pypy.module.micronumpy import descriptor, types, constants as NPY -from pypy.module.micronumpy.base import W_NDimArray -from pypy.module.micronumpy.iterators import AllButAxisIter - -INT_SIZE = rffi.sizeof(lltype.Signed) - -all_types = (types.all_float_types + types.all_complex_types + - types.all_int_types) -all_types = [i for i in all_types if not issubclass(i[0], types.Float16)] -all_types = unrolling_iterable(all_types) - - -def make_argsort_function(space, itemtype, comp_type, count=1): - TP = itemtype.T - step = rffi.sizeof(TP) - - class Repr(object): - def __init__(self, index_stride_size, stride_size, size, values, - indexes, index_start, start): - self.index_stride_size = index_stride_size - self.stride_size = stride_size - self.index_start = index_start - self.start = start - self.size = size - self.values = values - self.indexes = indexes - - def getitem(self, item): - if count < 2: - v = raw_storage_getitem(TP, self.values, item * self.stride_size - + self.start) - else: - v = [] - for i in range(count): - _v = raw_storage_getitem(TP, self.values, item * self.stride_size - + self.start + step * i) - v.append(_v) - if comp_type == 'int': - v = widen(v) - elif comp_type == 'float': - v = float(v) - elif comp_type == 'complex': - v = [float(v[0]),float(v[1])] - else: - raise NotImplementedError('cannot reach') - return (v, raw_storage_getitem(lltype.Signed, self.indexes, - item * self.index_stride_size + - self.index_start)) - - def setitem(self, idx, item): - if count < 2: - raw_storage_setitem(self.values, idx * self.stride_size + - self.start, rffi.cast(TP, item[0])) - else: - i = 0 - for val in item[0]: - raw_storage_setitem(self.values, idx * self.stride_size + - self.start + i*step, rffi.cast(TP, val)) - i += 1 - raw_storage_setitem(self.indexes, idx * self.index_stride_size + - self.index_start, item[1]) - - class ArgArrayRepWithStorage(Repr): - def __init__(self, index_stride_size, stride_size, size): - start = 0 - dtype = descriptor.get_dtype_cache(space).w_longdtype - indexes = dtype.itemtype.malloc(size * dtype.elsize) - values = alloc_raw_storage(size * stride_size, - track_allocation=False) - Repr.__init__(self, dtype.elsize, stride_size, - size, values, indexes, start, start) - - def __del__(self): - free_raw_storage(self.indexes, track_allocation=False) - free_raw_storage(self.values, track_allocation=False) - - def arg_getitem(lst, item): - return lst.getitem(item) - - def arg_setitem(lst, item, value): - lst.setitem(item, value) - - def arg_length(lst): - return lst.size - - def arg_getitem_slice(lst, start, stop): - retval = ArgArrayRepWithStorage(lst.index_stride_size, lst.stride_size, - stop-start) - for i in range(stop-start): - retval.setitem(i, lst.getitem(i+start)) - return retval - - if count < 2: - def arg_lt(a, b): - # Does numpy do <= ? - return a[0] < b[0] or b[0] != b[0] and a[0] == a[0] - else: - def arg_lt(a, b): - for i in range(count): - if b[0][i] != b[0][i] and a[0][i] == a[0][i]: - return True - elif b[0][i] == b[0][i] and a[0][i] != a[0][i]: - return False - for i in range(count): - if a[0][i] < b[0][i]: - return True - elif a[0][i] > b[0][i]: - return False - # Does numpy do True? 
- return False - - ArgSort = make_timsort_class(arg_getitem, arg_setitem, arg_length, - arg_getitem_slice, arg_lt) - - def argsort(arr, space, w_axis, itemsize): - if w_axis is space.w_None: - # note that it's fine ot pass None here as we're not going - # to pass the result around (None is the link to base in slices) - if arr.get_size() > 0: - arr = arr.reshape(None, [arr.get_size()]) - axis = 0 - elif w_axis is None: - axis = -1 - else: - axis = space.int_w(w_axis) - # create array of indexes - dtype = descriptor.get_dtype_cache(space).w_longdtype - index_arr = W_NDimArray.from_shape(space, arr.get_shape(), dtype) - storage = index_arr.implementation.get_storage() - if len(arr.get_shape()) == 1: - for i in range(arr.get_size()): - raw_storage_setitem(storage, i * INT_SIZE, i) - r = Repr(INT_SIZE, itemsize, arr.get_size(), arr.get_storage(), - storage, 0, arr.start) - ArgSort(r).sort() - else: - shape = arr.get_shape() - if axis < 0: - axis = len(shape) + axis - if axis < 0 or axis >= len(shape): - raise oefmt(space.w_IndexError, "Wrong axis %d", axis) - arr_iter = AllButAxisIter(arr, axis) - arr_state = arr_iter.reset() - index_impl = index_arr.implementation - index_iter = AllButAxisIter(index_impl, axis) - index_state = index_iter.reset() - stride_size = arr.strides[axis] - index_stride_size = index_impl.strides[axis] - axis_size = arr.shape[axis] - while not arr_iter.done(arr_state): - for i in range(axis_size): - raw_storage_setitem(storage, i * index_stride_size + - index_state.offset, i) - r = Repr(index_stride_size, stride_size, axis_size, - arr.get_storage(), storage, index_state.offset, arr_state.offset) - ArgSort(r).sort() - arr_state = arr_iter.next(arr_state) - index_state = index_iter.next(index_state) - return index_arr - - return argsort - - -def argsort_array(arr, space, w_axis): - cache = space.fromcache(ArgSortCache) # that populates ArgSortClasses - itemtype = arr.dtype.itemtype - for tp in all_types: - if isinstance(itemtype, tp[0]): - return cache._lookup(tp)(arr, space, w_axis, - itemtype.get_element_size()) - # XXX this should probably be changed - raise oefmt(space.w_NotImplementedError, - "sorting of non-numeric types '%s' is not implemented", - arr.dtype.get_name()) - - -def make_sort_function(space, itemtype, comp_type, count=1): - TP = itemtype.T - step = rffi.sizeof(TP) - - class Repr(object): - def __init__(self, stride_size, size, values, start): - self.stride_size = stride_size - self.start = start - self.size = size - self.values = values - - def getitem(self, item): - if count < 2: - v = raw_storage_getitem(TP, self.values, item * self.stride_size - + self.start) - else: - v = [] - for i in range(count): - _v = raw_storage_getitem(TP, self.values, item * self.stride_size - + self.start + step * i) - v.append(_v) - if comp_type == 'int': - v = widen(v) - elif comp_type == 'float': - v = float(v) - elif comp_type == 'complex': - v = [float(v[0]),float(v[1])] - else: - raise NotImplementedError('cannot reach') - return (v) - - def setitem(self, idx, item): - if count < 2: - raw_storage_setitem(self.values, idx * self.stride_size + - self.start, rffi.cast(TP, item)) - else: - i = 0 - for val in item: - raw_storage_setitem(self.values, idx * self.stride_size + - self.start + i*step, rffi.cast(TP, val)) - i += 1 - - class ArgArrayRepWithStorage(Repr): - def __init__(self, stride_size, size): - start = 0 - values = alloc_raw_storage(size * stride_size, - track_allocation=False) - Repr.__init__(self, stride_size, - size, values, start) - - def __del__(self): - 
free_raw_storage(self.values, track_allocation=False) - - def arg_getitem(lst, item): - return lst.getitem(item) - - def arg_setitem(lst, item, value): - lst.setitem(item, value) - - def arg_length(lst): - return lst.size - - def arg_getitem_slice(lst, start, stop): - retval = ArgArrayRepWithStorage(lst.stride_size, stop-start) - for i in range(stop-start): - retval.setitem(i, lst.getitem(i+start)) - return retval - - if count < 2: - def arg_lt(a, b): - # handles NAN and INF - return a < b or b != b and a == a - else: - def arg_lt(a, b): - for i in range(count): - if b[i] != b[i] and a[i] == a[i]: - return True - elif b[i] == b[i] and a[i] != a[i]: - return False - for i in range(count): - if a[i] < b[i]: - return True - elif a[i] > b[i]: - return False - # Does numpy do True? - return False - - ArgSort = make_timsort_class(arg_getitem, arg_setitem, arg_length, - arg_getitem_slice, arg_lt) - - def sort(arr, space, w_axis, itemsize): - if w_axis is space.w_None: - # note that it's fine to pass None here as we're not going - # to pass the result around (None is the link to base in slices) - arr = arr.reshape(None, [arr.get_size()]) - axis = 0 - elif w_axis is None: - axis = -1 - else: - axis = space.int_w(w_axis) - # create array of indexes - if len(arr.get_shape()) == 1: - r = Repr(itemsize, arr.get_size(), arr.get_storage(), - arr.start) - ArgSort(r).sort() - else: - shape = arr.get_shape() - if axis < 0: - axis = len(shape) + axis - if axis < 0 or axis >= len(shape): - raise oefmt(space.w_IndexError, "Wrong axis %d", axis) - arr_iter = AllButAxisIter(arr, axis) - arr_state = arr_iter.reset() - stride_size = arr.strides[axis] - axis_size = arr.shape[axis] - while not arr_iter.done(arr_state): - r = Repr(stride_size, axis_size, arr.get_storage(), arr_state.offset) - ArgSort(r).sort() - arr_state = arr_iter.next(arr_state) - - return sort - - -def sort_array(arr, space, w_axis, w_order): - cache = space.fromcache(SortCache) # that populates SortClasses - itemtype = arr.dtype.itemtype - if arr.dtype.byteorder == NPY.OPPBYTE: - raise oefmt(space.w_NotImplementedError, - "sorting of non-native byteorder not supported yet") - for tp in all_types: - if isinstance(itemtype, tp[0]): - return cache._lookup(tp)(arr, space, w_axis, - itemtype.get_element_size()) - # XXX this should probably be changed - raise oefmt(space.w_NotImplementedError, - "sorting of non-numeric types '%s' is not implemented", - arr.dtype.get_name()) - - -class ArgSortCache(object): - built = False - - def __init__(self, space): - if self.built: - return - self.built = True - cache = {} - for cls, it in all_types._items: - if it == 'complex': - cache[cls] = make_argsort_function(space, cls, it, 2) - else: - cache[cls] = make_argsort_function(space, cls, it) - self.cache = cache - self._lookup = specialize.memo()(lambda tp: cache[tp[0]]) - - -class SortCache(object): - built = False - - def __init__(self, space): - if self.built: - return - self.built = True - cache = {} - for cls, it in all_types._items: - if it == 'complex': - cache[cls] = make_sort_function(space, cls, it, 2) - else: - cache[cls] = make_sort_function(space, cls, it) - self.cache = cache - self._lookup = specialize.memo()(lambda tp: cache[tp[0]]) diff --git a/pypy/module/micronumpy/support.py b/pypy/module/micronumpy/support.py --- a/pypy/module/micronumpy/support.py +++ b/pypy/module/micronumpy/support.py @@ -1,5 +1,6 @@ from pypy.interpreter.error import OperationError, oefmt from rpython.rlib import jit +from rpython.rlib.rarithmetic import ovfcheck def 
issequence_w(space, w_obj): @@ -23,7 +24,7 @@ def product(s): i = 1 for x in s: - i *= x + i = ovfcheck(i * x) return i diff --git a/pypy/module/micronumpy/test/test_arrayops.py b/pypy/module/micronumpy/test/test_arrayops.py --- a/pypy/module/micronumpy/test/test_arrayops.py +++ b/pypy/module/micronumpy/test/test_arrayops.py @@ -199,3 +199,19 @@ a.put(23, -1, mode=1) # wrap assert (a == array([0, 1, -10, -1, -15])).all() raises(TypeError, "arange(5).put(22, -5, mode='zzzz')") # unrecognized mode + + def test_result_type(self): + import numpy as np + exc = raises(ValueError, np.result_type) + assert str(exc.value) == "at least one array or dtype is required" + exc = raises(TypeError, np.result_type, a=2) + assert str(exc.value) == "result_type() takes no keyword arguments" + assert np.result_type(True) is np.dtype('bool') + assert np.result_type(1) is np.dtype('int') + assert np.result_type(1.) is np.dtype('float64') + assert np.result_type(1+2j) is np.dtype('complex128') + assert np.result_type(1, 1.) is np.dtype('float64') + assert np.result_type(np.array([1, 2])) is np.dtype('int64') + assert np.result_type(np.array([1, 2]), 1, 1+2j) is np.dtype('complex128') + assert np.result_type(np.array([1, 2]), 1, 'float64') is np.dtype('float64') + assert np.result_type(np.array([1, 2]), 1, None) is np.dtype('float64') diff --git a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ b/pypy/module/micronumpy/test/test_compile.py @@ -330,3 +330,12 @@ results = interp.results[0] assert isinstance(results, W_NDimArray) assert results.get_dtype().is_int() + + def test_searchsorted(self): + interp = self.run(''' + a = [1, 4, 5, 6, 9] + b = |30| -> ::-1 + c = searchsorted(a, b) + c -> -1 + ''') + assert interp.results[0].value == 0 diff --git a/pypy/module/micronumpy/test/test_iterators.py b/pypy/module/micronumpy/test/test_iterators.py --- a/pypy/module/micronumpy/test/test_iterators.py +++ b/pypy/module/micronumpy/test/test_iterators.py @@ -3,7 +3,15 @@ class MockArray(object): - start = 0 + flags = 0 + + class dtype: + elsize = 1 + + def __init__(self, shape, strides, start=0): + self.shape = shape + self.strides = strides + self.start = start class TestIterDirect(object): @@ -14,19 +22,24 @@ strides = [5, 1] backstrides = [x * (y - 1) for x,y in zip(strides, shape)] assert backstrides == [10, 4] - i = ArrayIter(MockArray, support.product(shape), shape, + i = ArrayIter(MockArray(shape, strides), support.product(shape), shape, strides, backstrides) + assert i.contiguous s = i.reset() s = i.next(s) s = i.next(s) s = i.next(s) assert s.offset == 3 assert not i.done(s) + assert s.indices == [0,0] + s = i.update(s) assert s.indices == [0,3] #cause a dimension overflow s = i.next(s) s = i.next(s) assert s.offset == 5 + assert s.indices == [0,3] + s = i.update(s) assert s.indices == [1,0] #Now what happens if the array is transposed? 
strides[-1] != 1 @@ -34,8 +47,9 @@ strides = [1, 3] backstrides = [x * (y - 1) for x,y in zip(strides, shape)] assert backstrides == [2, 12] - i = ArrayIter(MockArray, support.product(shape), shape, + i = ArrayIter(MockArray(shape, strides), support.product(shape), shape, strides, backstrides) + assert not i.contiguous s = i.reset() s = i.next(s) From noreply at buildbot.pypy.org Fri Oct 10 16:15:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 10 Oct 2014 16:15:47 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix an obscure case Message-ID: <20141010141547.91D1B1C35AF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73885:c6df80a771d4 Date: 2014-10-10 16:15 +0200 http://bitbucket.org/pypy/pypy/changeset/c6df80a771d4/ Log: Fix an obscure case diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -764,9 +764,12 @@ def optimize_GETARRAYITEM_GC(self, op): value = self.getvalue(op.getarg(0)) if value.is_virtual(): + assert isinstance(value, VArrayValue) indexbox = self.get_constant_box(op.getarg(1)) if indexbox is not None: itemvalue = value.getitem(indexbox.getint()) + if itemvalue is None: # reading uninitialized array items? + itemvalue = value.constvalue # bah, just return 0 self.make_equal_to(op.result, itemvalue) return value.ensure_nonnull() From noreply at buildbot.pypy.org Fri Oct 10 18:11:31 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 10 Oct 2014 18:11:31 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix for RPython code like '[0] * n', which was incorrectly jitted with Message-ID: <20141010161131.978A11C04B7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73886:6f51f0dc9cb0 Date: 2014-10-10 18:09 +0200 http://bitbucket.org/pypy/pypy/changeset/6f51f0dc9cb0/ Log: Fix for RPython code like '[0] * n', which was incorrectly jitted with 'new_array', i.e. the version that doesn't fill the result with zeroes. diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1653,41 +1653,34 @@ self._get_list_nonneg_canraise_flags(op) def _get_initial_newlist_length(self, op, args): - # normalize number of arguments to the 'newlist' function - if len(args) > 1: - v_default = args[1] # initial value: must be 0 or NULL - ARRAY = deref(op.result.concretetype) - if (not isinstance(v_default, Constant) or - v_default.value != arrayItem(ARRAY)._defl()): - raise NotSupported("variable or non-null initial value") - if len(args) >= 1: - return args[0] + assert len(args) <= 1 + if len(args) == 1: + v_length = args[0] + assert v_length.concretetype is lltype.Signed + return v_length else: return Constant(0, lltype.Signed) # length: default to 0 # ---------- fixed lists ---------- def do_fixed_newlist(self, op, args, arraydescr): + # corresponds to rtyper.lltypesystem.rlist.newlist: + # the items may be uninitialized. 
v_length = self._get_initial_newlist_length(op, args) - assert v_length.concretetype is lltype.Signed - ops = [] - if isinstance(v_length, Constant): - if v_length.value >= 0: - v = v_length - else: - v = Constant(0, lltype.Signed) - else: - v = Variable('new_length') - v.concretetype = lltype.Signed - ops.append(SpaceOperation('int_force_ge_zero', [v_length], v)) ARRAY = op.result.concretetype.TO if ((isinstance(ARRAY.OF, lltype.Ptr) and ARRAY.OF._needsgc()) or isinstance(ARRAY.OF, lltype.Struct)): opname = 'new_array_clear' else: opname = 'new_array' - ops.append(SpaceOperation(opname, [v, arraydescr], op.result)) - return ops + return SpaceOperation(opname, [v_length, arraydescr], op.result) + + def do_fixed_newlist_clear(self, op, args, arraydescr): + # corresponds to rtyper.rlist.ll_alloc_and_clear: + # needs to clear the items. + v_length = self._get_initial_newlist_length(op, args) + return SpaceOperation('new_array_clear', [v_length, arraydescr], + op.result) def do_fixed_list_len(self, op, args, arraydescr): if args[0] in self.vable_array_vars: # virtualizable array @@ -1757,6 +1750,14 @@ arraydescr], op.result) + def do_resizable_newlist_clear(self, op, args, arraydescr, lengthdescr, + itemsdescr, structdescr): + v_length = self._get_initial_newlist_length(op, args) + return SpaceOperation('newlist_clear', + [v_length, structdescr, lengthdescr, itemsdescr, + arraydescr], + op.result) + def do_resizable_newlist_hint(self, op, args, arraydescr, lengthdescr, itemsdescr, structdescr): v_hint = self._get_initial_newlist_length(op, args) diff --git a/rpython/jit/codewriter/support.py b/rpython/jit/codewriter/support.py --- a/rpython/jit/codewriter/support.py +++ b/rpython/jit/codewriter/support.py @@ -180,11 +180,11 @@ return LIST.ll_newlist(0) def _ll_1_newlist(LIST, count): return LIST.ll_newlist(count) -def _ll_2_newlist(LIST, count, item): - return rlist.ll_alloc_and_set(LIST, count, item) _ll_0_newlist.need_result_type = True _ll_1_newlist.need_result_type = True -_ll_2_newlist.need_result_type = True + +_ll_1_newlist_clear = rlist._ll_alloc_and_clear +_ll_1_newlist_clear.need_result_type = True def _ll_1_newlist_hint(LIST, hint): return LIST.ll_newlist_hint(hint) diff --git a/rpython/jit/codewriter/test/test_codewriter.py b/rpython/jit/codewriter/test/test_codewriter.py --- a/rpython/jit/codewriter/test/test_codewriter.py +++ b/rpython/jit/codewriter/test/test_codewriter.py @@ -232,18 +232,3 @@ assert 'setarrayitem_raw_i' in s assert 'getarrayitem_raw_i' in s assert 'residual_call_ir_v $<* fn _ll_1_raw_free__arrayPtr>' in s - -def test_newlist_negativ(): - def f(n): - l = [0] * n - return len(l) - - rtyper = support.annotate(f, [-1]) - jitdriver_sd = FakeJitDriverSD(rtyper.annotator.translator.graphs[0]) - cw = CodeWriter(FakeCPU(rtyper), [jitdriver_sd]) - graphs = cw.find_all_graphs(FakePolicy()) - backend_optimizations(rtyper.annotator.translator, graphs=graphs) - cw.make_jitcodes(verbose=True) - s = jitdriver_sd.mainjitcode.dump() - assert 'int_force_ge_zero' in s - assert 'new_array' in s diff --git a/rpython/jit/codewriter/test/test_list.py b/rpython/jit/codewriter/test/test_list.py --- a/rpython/jit/codewriter/test/test_list.py +++ b/rpython/jit/codewriter/test/test_list.py @@ -87,20 +87,10 @@ """new_array $0, -> %r0""") builtin_test('newlist', [Constant(5, lltype.Signed)], FIXEDLIST, """new_array $5, -> %r0""") - builtin_test('newlist', [Constant(-2, lltype.Signed)], FIXEDLIST, - """new_array $0, -> %r0""") builtin_test('newlist', [varoftype(lltype.Signed)], FIXEDLIST, - 
"""int_force_ge_zero %i0 -> %i1\n""" - """new_array %i1, -> %r0""") - builtin_test('newlist', [Constant(5, lltype.Signed), - Constant(0, lltype.Signed)], FIXEDLIST, - """new_array $5, -> %r0""") - builtin_test('newlist', [Constant(5, lltype.Signed), - Constant(1, lltype.Signed)], FIXEDLIST, - NotSupported) - builtin_test('newlist', [Constant(5, lltype.Signed), - varoftype(lltype.Signed)], FIXEDLIST, - NotSupported) + """new_array %i0, -> %r0""") + builtin_test('newlist_clear', [Constant(5, lltype.Signed)], FIXEDLIST, + """new_array_clear $5, -> %r0""") builtin_test('newlist', [], FIXEDPTRLIST, """new_array_clear $0, -> %r0""") @@ -179,15 +169,8 @@ """newlist $5, """+alldescrs+""" -> %r0""") builtin_test('newlist', [varoftype(lltype.Signed)], VARLIST, """newlist %i0, """+alldescrs+""" -> %r0""") - builtin_test('newlist', [Constant(5, lltype.Signed), - Constant(0, lltype.Signed)], VARLIST, - """newlist $5, """+alldescrs+""" -> %r0""") - builtin_test('newlist', [Constant(5, lltype.Signed), - Constant(1, lltype.Signed)], VARLIST, - NotSupported) - builtin_test('newlist', [Constant(5, lltype.Signed), - varoftype(lltype.Signed)], VARLIST, - NotSupported) + builtin_test('newlist_clear', [Constant(5, lltype.Signed)], VARLIST, + """newlist_clear $5, """+alldescrs+""" -> %r0""") def test_resizable_getitem(): builtin_test('list.getitem/NONNEG', diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -1017,6 +1017,15 @@ return result @arguments("cpu", "i", "d", "d", "d", "d", returns="r") + def bhimpl_newlist_clear(cpu, length, structdescr, lengthdescr, + itemsdescr, arraydescr): + result = cpu.bh_new(structdescr) + cpu.bh_setfield_gc_i(result, length, lengthdescr) + items = cpu.bh_new_array_clear(length, arraydescr) + cpu.bh_setfield_gc_r(result, items, itemsdescr) + return result + + @arguments("cpu", "i", "d", "d", "d", "d", returns="r") def bhimpl_newlist_hint(cpu, lengthhint, structdescr, lengthdescr, itemsdescr, arraydescr): result = cpu.bh_new(structdescr) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -522,6 +522,15 @@ return sbox @arguments("box", "descr", "descr", "descr", "descr") + def opimpl_newlist_clear(self, sizebox, structdescr, lengthdescr, + itemsdescr, arraydescr): + sbox = self.opimpl_new(structdescr) + self._opimpl_setfield_gc_any(sbox, sizebox, lengthdescr) + abox = self.opimpl_new_array_clear(sizebox, arraydescr) + self._opimpl_setfield_gc_any(sbox, abox, itemsdescr) + return sbox + + @arguments("box", "descr", "descr", "descr", "descr") def opimpl_newlist_hint(self, sizehintbox, structdescr, lengthdescr, itemsdescr, arraydescr): sbox = self.opimpl_new(structdescr) diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -12,7 +12,7 @@ AssertGreenFailed, unroll_safe, current_trace_length, look_inside_iff, isconstant, isvirtual, set_param, record_known_class) from rpython.rlib.longlong2float import float2longlong, longlong2float -from rpython.rlib.rarithmetic import ovfcheck, is_valid_int +from rpython.rlib.rarithmetic import ovfcheck, is_valid_int, int_force_ge_zero from rpython.rtyper.lltypesystem import lltype, rffi @@ -4111,3 +4111,11 @@ res = self.meta_interp(f, [10]) assert res == 42 + + def 
test_int_force_ge_zero(self): + def f(n): + return int_force_ge_zero(n) + res = self.interp_operations(f, [42]) + assert res == 42 + res = self.interp_operations(f, [-42]) + assert res == 0 diff --git a/rpython/jit/metainterp/test/test_list.py b/rpython/jit/metainterp/test/test_list.py --- a/rpython/jit/metainterp/test/test_list.py +++ b/rpython/jit/metainterp/test/test_list.py @@ -414,3 +414,13 @@ res = self.meta_interp(f, [10]) assert res == 0 self.check_resops(call=0, cond_call=2) + + def test_zero_init_resizable(self): + def f(n): + l = [0] * n + l.append(123) + return len(l) + l[0] + l[1] + l[2] + l[3] + l[4] + l[5] + l[6] + + res = self.interp_operations(f, [10], listops=True, inline=True) + assert res == 11 + self.check_operations_history(new_array_clear=1) diff --git a/rpython/rlib/rarithmetic.py b/rpython/rlib/rarithmetic.py --- a/rpython/rlib/rarithmetic.py +++ b/rpython/rlib/rarithmetic.py @@ -635,6 +635,12 @@ assert n <= p return llop.int_between(lltype.Bool, n, m, p) +def int_force_ge_zero(n): + """ The JIT special-cases this too. """ + from rpython.rtyper.lltypesystem import lltype + from rpython.rtyper.lltypesystem.lloperation import llop + return llop.int_force_ge_zero(lltype.Signed, n) + @objectmodel.specialize.ll() def byteswap(arg): """ Convert little->big endian and the opposite diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -216,6 +216,7 @@ 'int_xor': LLOp(canfold=True), 'int_between': LLOp(canfold=True), # a <= b < c + 'int_force_ge_zero': LLOp(canfold=True), # 0 if a < 0 else a 'int_add_ovf': LLOp(canraise=(OverflowError,), tryfold=True), 'int_add_nonneg_ovf': LLOp(canraise=(OverflowError,), tryfold=True), diff --git a/rpython/rtyper/lltypesystem/opimpl.py b/rpython/rtyper/lltypesystem/opimpl.py --- a/rpython/rtyper/lltypesystem/opimpl.py +++ b/rpython/rtyper/lltypesystem/opimpl.py @@ -225,6 +225,12 @@ assert lltype.typeOf(c) is lltype.Signed return a <= b < c +def op_int_force_ge_zero(a): + assert lltype.typeOf(a) is lltype.Signed + if a < 0: + return 0 + return a + def op_int_and(x, y): if not is_valid_int(x): from rpython.rtyper.lltypesystem import llgroup diff --git a/rpython/rtyper/rlist.py b/rpython/rtyper/rlist.py --- a/rpython/rtyper/rlist.py +++ b/rpython/rtyper/rlist.py @@ -2,9 +2,10 @@ from rpython.flowspace.model import Constant from rpython.rlib import rgc, jit, types from rpython.rlib.debug import ll_assert -from rpython.rlib.objectmodel import malloc_zero_filled, enforceargs +from rpython.rlib.objectmodel import malloc_zero_filled, enforceargs, specialize from rpython.rlib.signature import signature from rpython.rlib.rarithmetic import ovfcheck, widen, r_uint, intmask +from rpython.rlib.rarithmetic import int_force_ge_zero from rpython.rtyper.annlowlevel import ADTInterface from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem.lltype import typeOf, Ptr, Void, Signed, Bool @@ -474,15 +475,8 @@ # when we compute "ll_length() + 1". -# jit note: this is normally special-cased by the oopspec, -# but if item != const(0), then the special-casing fails and -# we fall back to the look_inside_iff. 
- at jit.look_inside_iff(lambda LIST, count, item: jit.isconstant(count) and count < 137) - at jit.oopspec("newlist(count, item)") -def ll_alloc_and_set(LIST, count, item): - if count < 0: - count = 0 - l = LIST.ll_newlist(count) +def _ll_zero_or_null(item): + # Check if 'item' is zero/null, or not. T = typeOf(item) if T is Char or T is UniChar: check = ord(item) @@ -490,13 +484,60 @@ check = widen(item) else: check = item - # as long as malloc is known to zero the allocated memory avoid zeroing - # twice - if jit.we_are_jitted() or (not malloc_zero_filled) or check: - i = 0 - while i < count: - l.ll_setitem_fast(i, item) - i += 1 + return not check + + at specialize.memo() +def _null_of_type(T): + return T._defl() + +def ll_alloc_and_set(LIST, count, item): + count = int_force_ge_zero(count) + if jit.we_are_jitted(): + return _ll_alloc_and_set_jit(LIST, count, item) + else: + return _ll_alloc_and_set_nojit(LIST, count, item) + +def _ll_alloc_and_set_nojit(LIST, count, item): + l = LIST.ll_newlist(count) + if malloc_zero_filled and _ll_zero_or_null(item): + return l + i = 0 + while i < count: + l.ll_setitem_fast(i, item) + i += 1 + return l + +def _ll_alloc_and_set_jit(LIST, count, item): + if _ll_zero_or_null(item): + # 'item' is zero/null. Do the list allocation with the + # function _ll_alloc_and_clear(), which the JIT knows about. + return _ll_alloc_and_clear(LIST, count) + else: + # 'item' is not zero/null. Do the list allocation with the + # function _ll_alloc_and_set_nonnull(). That function has + # a JIT marker to unroll it, but only if the 'count' is + # a not-too-large constant. + return _ll_alloc_and_set_nonnull(LIST, count, item) + + at jit.oopspec("newlist_clear(count)") +def _ll_alloc_and_clear(LIST, count): + l = LIST.ll_newlist(count) + if malloc_zero_filled: + return l + zeroitem = _null_of_type(LIST.ITEM) + i = 0 + while i < count: + l.ll_setitem_fast(i, zeroitem) + i += 1 + return l + + at jit.look_inside_iff(lambda LIST, count, item: jit.isconstant(count) and count < 137) +def _ll_alloc_and_set_nonnull(LIST, count, item): + l = LIST.ll_newlist(count) + i = 0 + while i < count: + l.ll_setitem_fast(i, item) + i += 1 return l From noreply at buildbot.pypy.org Fri Oct 10 18:25:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 10 Oct 2014 18:25:47 +0200 (CEST) Subject: [pypy-commit] pypy default: Untested: fix a performance XXX Message-ID: <20141010162547.BE24D1D22A7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73887:372d7d13aed2 Date: 2014-10-10 18:25 +0200 http://bitbucket.org/pypy/pypy/changeset/372d7d13aed2/ Log: Untested: fix a performance XXX diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -1221,17 +1221,29 @@ length_box.getint() <= 14 and # same limit as GCC itemsize in (4, 2, 1)): # Inline a series of STR operations, starting at 'dstaddr_loc'. - # XXX we could optimize STRB/STRH into STR, but this needs care: - # XXX it only works if startindex_loc is a constant, otherwise - # XXX we'd be doing unaligned accesses + next_group = -1 + if itemsize < 4 and startindex >= 0: + # we optimize STRB/STRH into STR, but this needs care: + # it only works if startindex_loc is a constant, otherwise + # we'd be doing unaligned accesses. 
+ next_group = (-startindex * itemsize) & 3 + # self.mc.gen_load_int(r.ip.value, 0) - for i in range(length_box.getint()): - if itemsize == 4: - self.mc.STR_ri(r.ip.value, dstaddr_loc.value, imm=i*4) - elif itemsize == 2: - self.mc.STRH_ri(r.ip.value, dstaddr_loc.value, imm=i*2) + i = 0 + total_size = length_box.getint() * itemsize + while i < total_size: + sz = itemsize + if i == next_group: + next_group += 4 + if next_group <= total_size: + sz = 4 + if sz == 4: + self.mc.STR_ri(r.ip.value, dstaddr_loc.value, imm=i) + elif sz == 2: + self.mc.STRH_ri(r.ip.value, dstaddr_loc.value, imm=i) else: - self.mc.STRB_ri(r.ip.value, dstaddr_loc.value, imm=i*1) + self.mc.STRB_ri(r.ip.value, dstaddr_loc.value, imm=i) + i += sz else: if isinstance(length_box, ConstInt): From noreply at buildbot.pypy.org Fri Oct 10 18:28:18 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 10 Oct 2014 18:28:18 +0200 (CEST) Subject: [pypy-commit] pypy var-in-Some: update whatsnew Message-ID: <20141010162818.4D00C1D2487@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: var-in-Some Changeset: r73888:cf0d3249948d Date: 2014-10-10 17:27 +0100 http://bitbucket.org/pypy/pypy/changeset/cf0d3249948d/ Log: update whatsnew diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -15,3 +15,7 @@ .. branch: rtyper-stuff Small internal refactorings in the rtyper. + +.. branch: var-in-Some +Store annotations on the Variable objects, rather than in a big dict. +Introduce a new framework for double-dispatched annotation implementations. From noreply at buildbot.pypy.org Fri Oct 10 18:31:30 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 10 Oct 2014 18:31:30 +0200 (CEST) Subject: [pypy-commit] pypy default: Merge branch 'var-in-Some' Message-ID: <20141010163130.6E0101D25A7@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r73889:46914a24fcd4 Date: 2014-10-10 17:30 +0100 http://bitbucket.org/pypy/pypy/changeset/46914a24fcd4/ Log: Merge branch 'var-in-Some' Store annotations on the Variable objects, rather than in a big dict. Introduce a new framework for double-dispatched annotation implementations. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -15,3 +15,7 @@ .. branch: rtyper-stuff Small internal refactorings in the rtyper. + +.. branch: var-in-Some +Store annotations on the Variable objects, rather than in a big dict. +Introduce a new framework for double-dispatched annotation implementations. diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -17,7 +17,6 @@ log = py.log.Producer("annrpython") py.log.setconsumer("annrpython", ansi_log) -FAIL = object() class RPythonAnnotator(object): """Block annotator for RPython. 
@@ -33,7 +32,6 @@ translator.annotator = self self.translator = translator self.pendingblocks = {} # map {block: graph-containing-it} - self.bindings = {} # map Variables to SomeValues self.annotated = {} # set of blocks already seen self.added_blocks = None # see processblock() below self.links_followed = {} # set of links that have ever been followed @@ -54,7 +52,7 @@ self.bookkeeper = bookkeeper def __getstate__(self): - attrs = """translator pendingblocks bindings annotated links_followed + attrs = """translator pendingblocks annotated links_followed notify bookkeeper frozen policy added_blocks""".split() ret = self.__dict__.copy() for key, value in ret.items(): @@ -143,7 +141,7 @@ # recursively proceed until no more pending block is left if complete_now: self.complete() - return self.binding(flowgraph.getreturnvar(), None) + return self.annotation(flowgraph.getreturnvar()) def gettype(self, variable): """Return the known type of a control flow graph variable, @@ -151,9 +149,9 @@ if isinstance(variable, Constant): return type(variable.value) elif isinstance(variable, Variable): - cell = self.bindings.get(variable) - if cell: - return cell.knowntype + s_variable = variable.annotation + if s_variable: + return s_variable.knowntype else: return object else: @@ -221,37 +219,39 @@ raise annmodel.AnnotatorError(text) for graph in newgraphs: v = graph.getreturnvar() - if v not in self.bindings: + if v.annotation is None: self.setbinding(v, annmodel.s_ImpossibleValue) # policy-dependent computation self.bookkeeper.compute_at_fixpoint() - def binding(self, arg, default=FAIL): + def annotation(self, arg): "Gives the SomeValue corresponding to the given Variable or Constant." if isinstance(arg, Variable): - try: - return self.bindings[arg] - except KeyError: - if default is not FAIL: - return default - else: - raise + return arg.annotation elif isinstance(arg, Constant): return self.bookkeeper.immutablevalue(arg.value) else: raise TypeError('Variable or Constant expected, got %r' % (arg,)) + def binding(self, arg): + "Gives the SomeValue corresponding to the given Variable or Constant." + s_arg = self.annotation(arg) + if s_arg is None: + raise KeyError + return s_arg + def typeannotation(self, t): return signature.annotation(t, self.bookkeeper) def setbinding(self, arg, s_value): - if arg in self.bindings: - assert s_value.contains(self.bindings[arg]) - self.bindings[arg] = s_value + s_old = arg.annotation + if s_old is not None: + assert s_value.contains(s_old) + arg.annotation = s_value def transfer_binding(self, v_target, v_source): - assert v_source in self.bindings - self.bindings[v_target] = self.bindings[v_source] + assert v_source.annotation is not None + v_target.annotation = v_source.annotation def warning(self, msg, pos=None): if pos is None: @@ -290,7 +290,7 @@ # get the (current) return value v = graph.getreturnvar() try: - return self.bindings[v] + return self.binding(v) except KeyError: # the function didn't reach any return statement so far. # (some functions actually never do, they always raise exceptions) @@ -328,7 +328,7 @@ # * block not in self.annotated: # never seen the block. # * self.annotated[block] == False: - # the input variables of the block are in self.bindings but we + # the input variables of the block have bindings but we # still have to consider all the operations in the block. 
# * self.annotated[block] == graph-containing-block: # analysis done (at least until we find we must generalize the @@ -443,7 +443,7 @@ # is known exits = block.exits if isinstance(block.exitswitch, Variable): - s_exitswitch = self.bindings[block.exitswitch] + s_exitswitch = self.binding(block.exitswitch) if s_exitswitch.is_constant(): exits = [link for link in exits if link.exitcase == s_exitswitch.const] @@ -452,20 +452,7 @@ # occour for this specific, typed operation. if block.exitswitch == c_last_exception: op = block.operations[-1] - if op.dispatch == 2: - arg1 = self.binding(op.args[0]) - arg2 = self.binding(op.args[1]) - binop = getattr(pair(arg1, arg2), op.opname, None) - can_only_throw = annmodel.read_can_only_throw(binop, arg1, arg2) - elif op.dispatch == 1: - arg1 = self.binding(op.args[0]) - opname = op.opname - if opname == 'contains': opname = 'op_contains' - unop = getattr(arg1, opname, None) - can_only_throw = annmodel.read_can_only_throw(unop, arg1) - else: - can_only_throw = None - + can_only_throw = op.get_can_only_throw(self) if can_only_throw is not None: candidates = can_only_throw candidate_exits = exits @@ -482,8 +469,10 @@ # mapping (exitcase, variable) -> s_annotation # that can be attached to booleans, exitswitches - knowntypedata = getattr(self.bindings.get(block.exitswitch), - "knowntypedata", {}) + knowntypedata = {} + if isinstance(block.exitswitch, Variable): + knowntypedata = getattr(self.binding(block.exitswitch), + "knowntypedata", {}) for link in exits: self.follow_link(graph, link, knowntypedata) if block in self.notify: @@ -578,22 +567,19 @@ self.links_followed[link] = True self.addpendingblock(graph, link.target, cells) - #___ creating the annotations based on operations ______ def consider_op(self, op): - argcells = [self.binding(a) for a in op.args] - # let's be careful about avoiding propagated SomeImpossibleValues # to enter an op; the latter can result in violations of the # more general results invariant: e.g. if SomeImpossibleValue enters is_ # is_(SomeImpossibleValue, None) -> SomeBool # is_(SomeInstance(not None), None) -> SomeBool(const=False) ... 
# boom -- in the assert of setbinding() - for arg in argcells: - if isinstance(arg, annmodel.SomeImpossibleValue): + for arg in op.args: + if isinstance(self.annotation(arg), annmodel.SomeImpossibleValue): raise BlockedInference(self, op, -1) - resultcell = op.consider(self, *argcells) + resultcell = op.consider(self, *op.args) if resultcell is None: resultcell = annmodel.s_ImpossibleValue elif resultcell == annmodel.s_ImpossibleValue: diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -13,7 +13,7 @@ SomeLongFloat, SomeType, SomeConstantType, unionof, UnionError, read_can_only_throw, add_knowntypedata, merge_knowntypedata,) -from rpython.annotator.bookkeeper import getbookkeeper, immutablevalue +from rpython.annotator.bookkeeper import immutablevalue from rpython.flowspace.model import Variable, Constant from rpython.flowspace.operation import op from rpython.rlib import rarithmetic @@ -23,6 +23,54 @@ if oper.dispatch == 2]) + at op.is_.register(SomeObject, SomeObject) +def is__default(annotator, obj1, obj2): + r = SomeBool() + s_obj1 = annotator.annotation(obj1) + s_obj2 = annotator.annotation(obj2) + if s_obj2.is_constant(): + if s_obj1.is_constant(): + r.const = s_obj1.const is s_obj2.const + if s_obj2.const is None and not s_obj1.can_be_none(): + r.const = False + elif s_obj1.is_constant(): + if s_obj1.const is None and not s_obj2.can_be_none(): + r.const = False + knowntypedata = {} + bk = annotator.bookkeeper + + def bind(src_obj, tgt_obj): + s_src = annotator.annotation(src_obj) + s_tgt = annotator.annotation(tgt_obj) + if hasattr(s_tgt, 'is_type_of') and s_src.is_constant(): + add_knowntypedata( + knowntypedata, True, + s_tgt.is_type_of, + bk.valueoftype(s_src.const)) + add_knowntypedata(knowntypedata, True, [tgt_obj], s_src) + s_nonnone = s_tgt + if (s_src.is_constant() and s_src.const is None and + s_tgt.can_be_none()): + s_nonnone = s_tgt.nonnoneify() + add_knowntypedata(knowntypedata, False, [tgt_obj], s_nonnone) + + bind(obj2, obj1) + bind(obj1, obj2) + r.set_knowntypedata(knowntypedata) + return r + +def _make_cmp_annotator_default(cmp_op): + @cmp_op.register(SomeObject, SomeObject) + def default_annotate(annotator, obj1, obj2): + s_1, s_2 = annotator.annotation(obj1), annotator.annotation(obj2) + if s_1.is_immutable_constant() and s_2.is_immutable_constant(): + return immutablevalue(cmp_op.pyfunc(s_1.const, s_2.const)) + else: + return s_Bool + +for cmp_op in [op.lt, op.le, op.eq, op.ne, op.gt, op.ge]: + _make_cmp_annotator_default(cmp_op) + class __extend__(pairtype(SomeObject, SomeObject)): def union((obj1, obj2)): @@ -51,87 +99,12 @@ inplace_floordiv.can_only_throw = [ZeroDivisionError] inplace_mod.can_only_throw = [ZeroDivisionError] - def lt((obj1, obj2)): - if obj1.is_immutable_constant() and obj2.is_immutable_constant(): - return immutablevalue(obj1.const < obj2.const) - else: - return s_Bool - - def le((obj1, obj2)): - if obj1.is_immutable_constant() and obj2.is_immutable_constant(): - return immutablevalue(obj1.const <= obj2.const) - else: - return s_Bool - - def eq((obj1, obj2)): - if obj1.is_immutable_constant() and obj2.is_immutable_constant(): - return immutablevalue(obj1.const == obj2.const) - else: - return s_Bool - - def ne((obj1, obj2)): - if obj1.is_immutable_constant() and obj2.is_immutable_constant(): - return immutablevalue(obj1.const != obj2.const) - else: - return s_Bool - - def gt((obj1, obj2)): - if obj1.is_immutable_constant() and 
obj2.is_immutable_constant(): - return immutablevalue(obj1.const > obj2.const) - else: - return s_Bool - - def ge((obj1, obj2)): - if obj1.is_immutable_constant() and obj2.is_immutable_constant(): - return immutablevalue(obj1.const >= obj2.const) - else: - return s_Bool - def cmp((obj1, obj2)): if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(cmp(obj1.const, obj2.const)) else: return SomeInteger() - def is_((obj1, obj2)): - r = SomeBool() - if obj2.is_constant(): - if obj1.is_constant(): - r.const = obj1.const is obj2.const - if obj2.const is None and not obj1.can_be_none(): - r.const = False - elif obj1.is_constant(): - if obj1.const is None and not obj2.can_be_none(): - r.const = False - # XXX HACK HACK HACK - # XXX HACK HACK HACK - # XXX HACK HACK HACK - bk = getbookkeeper() - if bk is not None: # for testing - op = bk._find_current_op("is_", 2) - knowntypedata = {} - annotator = bk.annotator - - def bind(src_obj, tgt_obj, tgt_arg): - if hasattr(tgt_obj, 'is_type_of') and src_obj.is_constant(): - add_knowntypedata(knowntypedata, True, tgt_obj.is_type_of, - bk.valueoftype(src_obj.const)) - - assert annotator.binding(op.args[tgt_arg]) == tgt_obj - add_knowntypedata(knowntypedata, True, [op.args[tgt_arg]], src_obj) - - nonnone_obj = tgt_obj - if src_obj.is_constant() and src_obj.const is None and tgt_obj.can_be_none(): - nonnone_obj = tgt_obj.nonnoneify() - - add_knowntypedata(knowntypedata, False, [op.args[tgt_arg]], nonnone_obj) - - bind(obj2, obj1, 0) - bind(obj1, obj2, 1) - r.set_knowntypedata(knowntypedata) - - return r - def divmod((obj1, obj2)): return SomeTuple([pair(obj1, obj2).div(), pair(obj1, obj2).mod()]) @@ -271,10 +244,14 @@ return SomeInteger(nonneg=int1.nonneg, knowntype=int1.knowntype) rshift.can_only_throw = [] - def _compare_helper((int1, int2), opname, operation): + +def _make_cmp_annotator_int(cmp_op): + @cmp_op.register(SomeInteger, SomeInteger) + def _compare_helper(annotator, int1, int2): r = SomeBool() - if int1.is_immutable_constant() and int2.is_immutable_constant(): - r.const = operation(int1.const, int2.const) + s_int1, s_int2 = annotator.annotation(int1), annotator.annotation(int2) + if s_int1.is_immutable_constant() and s_int2.is_immutable_constant(): + r.const = cmp_op.pyfunc(s_int1.const, s_int2.const) # # The rest of the code propagates nonneg information between # the two arguments. @@ -286,45 +263,38 @@ # nonneg then "assert x>=y" will let the annotator know that # x is nonneg too, but it will not work if y is unsigned. 
# - if not (rarithmetic.signedtype(int1.knowntype) and - rarithmetic.signedtype(int2.knowntype)): + if not (rarithmetic.signedtype(s_int1.knowntype) and + rarithmetic.signedtype(s_int2.knowntype)): return r knowntypedata = {} - op = getbookkeeper()._find_current_op(opname=opname, arity=2) - def tointtype(int0): - if int0.knowntype is bool: + def tointtype(s_int0): + if s_int0.knowntype is bool: return int - return int0.knowntype - if int1.nonneg and isinstance(op.args[1], Variable): - case = opname in ('lt', 'le', 'eq') - - add_knowntypedata(knowntypedata, case, [op.args[1]], - SomeInteger(nonneg=True, knowntype=tointtype(int2))) - if int2.nonneg and isinstance(op.args[0], Variable): - case = opname in ('gt', 'ge', 'eq') - add_knowntypedata(knowntypedata, case, [op.args[0]], - SomeInteger(nonneg=True, knowntype=tointtype(int1))) + return s_int0.knowntype + if s_int1.nonneg and isinstance(int2, Variable): + case = cmp_op.opname in ('lt', 'le', 'eq') + add_knowntypedata(knowntypedata, case, [int2], + SomeInteger(nonneg=True, knowntype=tointtype(s_int2))) + if s_int2.nonneg and isinstance(int1, Variable): + case = cmp_op.opname in ('gt', 'ge', 'eq') + add_knowntypedata(knowntypedata, case, [int1], + SomeInteger(nonneg=True, knowntype=tointtype(s_int1))) r.set_knowntypedata(knowntypedata) # a special case for 'x < 0' or 'x >= 0', # where 0 is a flow graph Constant # (in this case we are sure that it cannot become a r_uint later) - if (isinstance(op.args[1], Constant) and - type(op.args[1].value) is int and # filter out Symbolics - op.args[1].value == 0): - if int1.nonneg: - if opname == 'lt': + if (isinstance(int2, Constant) and + type(int2.value) is int and # filter out Symbolics + int2.value == 0): + if s_int1.nonneg: + if cmp_op.opname == 'lt': r.const = False - if opname == 'ge': + if cmp_op.opname == 'ge': r.const = True return r - def lt(intint): return intint._compare_helper('lt', operator.lt) - def le(intint): return intint._compare_helper('le', operator.le) - def eq(intint): return intint._compare_helper('eq', operator.eq) - def ne(intint): return intint._compare_helper('ne', operator.ne) - def gt(intint): return intint._compare_helper('gt', operator.gt) - def ge(intint): return intint._compare_helper('ge', operator.ge) - +for cmp_op in [op.lt, op.le, op.eq, op.ne, op.gt, op.ge]: + _make_cmp_annotator_int(cmp_op) class __extend__(pairtype(SomeBool, SomeBool)): @@ -746,25 +716,26 @@ return SomeBuiltinMethod(bltn1.analyser, s_self, methodname=bltn1.methodname) + at op.is_.register(SomePBC, SomePBC) +def is__PBC_PBC(annotator, pbc1, pbc2): + s = is__default(annotator, pbc1, pbc2) + if not s.is_constant(): + s_pbc1 = annotator.annotation(pbc1) + s_pbc2 = annotator.annotation(pbc2) + if not s_pbc1.can_be_None or not s_pbc2.can_be_None: + for desc in s_pbc1.descriptions: + if desc in s_pbc2.descriptions: + break + else: + s.const = False # no common desc in the two sets + return s + class __extend__(pairtype(SomePBC, SomePBC)): - def union((pbc1, pbc2)): d = pbc1.descriptions.copy() d.update(pbc2.descriptions) return SomePBC(d, can_be_None = pbc1.can_be_None or pbc2.can_be_None) - def is_((pbc1, pbc2)): - thistype = pairtype(SomePBC, SomePBC) - s = super(thistype, pair(pbc1, pbc2)).is_() - if not s.is_constant(): - if not pbc1.can_be_None or not pbc2.can_be_None: - for desc in pbc1.descriptions: - if desc in pbc2.descriptions: - break - else: - s.const = False # no common desc in the two sets - return s - class __extend__(pairtype(SomeImpossibleValue, SomeObject)): def union((imp1, obj2)): 
return obj2 diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -89,14 +89,14 @@ newblocks = self.annotator.added_blocks if newblocks is None: newblocks = self.annotator.annotated # all of them - binding = self.annotator.binding + annotation = self.annotator.annotation for block in newblocks: for op in block.operations: if op.opname in ('simple_call', 'call_args'): yield op # some blocks are partially annotated - if binding(op.result, None) is None: + if annotation(op.result) is None: break # ignore the unannotated part for call_op in call_sites(): @@ -144,15 +144,17 @@ def consider_call_site(self, call_op): from rpython.rtyper.llannotation import SomeLLADTMeth, lltype_to_annotation - binding = self.annotator.binding - s_callable = binding(call_op.args[0]) - args_s = [binding(arg) for arg in call_op.args[1:]] + annotation = self.annotator.annotation + s_callable = annotation(call_op.args[0]) + args_s = [annotation(arg) for arg in call_op.args[1:]] if isinstance(s_callable, SomeLLADTMeth): adtmeth = s_callable s_callable = self.immutablevalue(adtmeth.func) args_s = [lltype_to_annotation(adtmeth.ll_ptrtype)] + args_s if isinstance(s_callable, SomePBC): - s_result = binding(call_op.result, s_ImpossibleValue) + s_result = annotation(call_op.result) + if s_result is None: + s_result = s_ImpossibleValue args = call_op.build_args(args_s) self.consider_call_site_for_pbc(s_callable, args, s_result, call_op) @@ -500,8 +502,9 @@ # needed by some kinds of specialization. fn, block, i = self.position_key op = block.operations[i] - s_previous_result = self.annotator.binding(op.result, - s_ImpossibleValue) + s_previous_result = self.annotator.annotation(op.result) + if s_previous_result is None: + s_previous_result = s_ImpossibleValue else: if emulated is True: whence = None diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -857,7 +857,11 @@ s = a.build_types(snippet.harmonic, [int]) assert s.knowntype == float # check that the list produced by range() is not mutated or resized - for s_value in a.bindings.values(): + graph = graphof(a, snippet.harmonic) + all_vars = set().union(*[block.getvariables() for block in graph.iterblocks()]) + print all_vars + for var in all_vars: + s_value = var.annotation if isinstance(s_value, annmodel.SomeList): assert not s_value.listdef.listitem.resized assert not s_value.listdef.listitem.mutated @@ -2767,8 +2771,8 @@ a = self.RPythonAnnotator() a.build_types(f, []) v1, v2 = graphof(a, readout).getargs() - assert not a.bindings[v1].is_constant() - assert not a.bindings[v2].is_constant() + assert not a.binding(v1).is_constant() + assert not a.binding(v2).is_constant() def test_prebuilt_mutables_dont_use_eq(self): # test that __eq__ is not called during annotation, at least diff --git a/rpython/annotator/test/test_model.py b/rpython/annotator/test/test_model.py --- a/rpython/annotator/test/test_model.py +++ b/rpython/annotator/test/test_model.py @@ -20,25 +20,6 @@ class C(object): pass -class DummyClassDef: - def __init__(self, cls=C): - self.cls = cls - self.name = cls.__name__ - -si0 = SomeInstance(DummyClassDef(), True) -si1 = SomeInstance(DummyClassDef()) -sTrue = SomeBool() -sTrue.const = True -sFalse = SomeBool() -sFalse.const = False - -def test_is_None(): - assert pair(s_None, s_None).is_() == sTrue - assert pair(si1, 
s_None).is_() == sFalse - assert pair(si0, s_None).is_() != sTrue - assert pair(si0, s_None).is_() != sFalse - assert pair(si0, s_None).is_() == SomeBool() - def test_equality(): assert s1 != s2 != s3 != s4 != s5 != s6 assert s1 == SomeType() diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -19,19 +19,41 @@ UNARY_OPERATIONS = set([oper.opname for oper in op.__dict__.values() if oper.dispatch == 1]) +UNARY_OPERATIONS.remove('contains') + at op.type.register(SomeObject) +def type_SomeObject(annotator, arg): + r = SomeType() + r.is_type_of = [arg] + return r + + at op.bool.register(SomeObject) +def bool_SomeObject(annotator, obj): + r = SomeBool() + annotator.annotation(obj).bool_behavior(r) + s_nonnone_obj = annotator.annotation(obj) + if s_nonnone_obj.can_be_none(): + s_nonnone_obj = s_nonnone_obj.nonnoneify() + knowntypedata = {} + add_knowntypedata(knowntypedata, True, [obj], s_nonnone_obj) + r.set_knowntypedata(knowntypedata) + return r + + at op.contains.register(SomeObject) +def contains_SomeObject(annotator, obj, element): + return s_Bool +contains_SomeObject.can_only_throw = [] + + at op.simple_call.register(SomeObject) +def simple_call_SomeObject(annotator, func, *args): + return annotator.annotation(func).call(simple_args([annotator.annotation(arg) for arg in args])) + + at op.call_args.register(SomeObject) +def call_args(annotator, func, *args): + return annotator.annotation(func).call(complex_args([annotator.annotation(arg) for arg in args])) class __extend__(SomeObject): - def type(self, *moreargs): - if moreargs: - raise Exception('type() called with more than one argument') - r = SomeType() - bk = getbookkeeper() - op = bk._find_current_op(opname="type", arity=1, pos=0, s_type=self) - r.is_type_of = [op.args[0]] - return r - def issubtype(self, s_cls): if hasattr(self, 'is_type_of'): vars = self.is_type_of @@ -53,21 +75,6 @@ if s_len.is_immutable_constant(): s.const = s_len.const > 0 - def bool(s_obj): - r = SomeBool() - s_obj.bool_behavior(r) - - bk = getbookkeeper() - knowntypedata = {} - op = bk._find_current_op(opname="bool", arity=1) - arg = op.args[0] - s_nonnone_obj = s_obj - if s_obj.can_be_none(): - s_nonnone_obj = s_obj.nonnoneify() - add_knowntypedata(knowntypedata, True, [arg], s_nonnone_obj) - r.set_knowntypedata(knowntypedata) - return r - def hash(self): raise AnnotatorError("cannot use hash() in RPython") @@ -133,19 +140,9 @@ def bind_callables_under(self, classdef, name): return self # default unbound __get__ implementation - def simple_call(self, *args_s): - return self.call(simple_args(args_s)) - - def call_args(self, *args_s): - return self.call(complex_args(args_s)) - def call(self, args, implicit_init=False): raise AnnotatorError("Cannot prove that the object is callable") - def op_contains(self, s_element): - return s_Bool - op_contains.can_only_throw = [] - def hint(self, *args_s): return self @@ -249,6 +246,12 @@ items = self.items[s_start.const:s_stop.const] return SomeTuple(items) + at op.contains.register(SomeList) +def contains_SomeList(annotator, obj, element): + annotator.annotation(obj).listdef.generalize(annotator.annotation(element)) + return s_Bool +contains_SomeList.can_only_throw = [] + class __extend__(SomeList): @@ -296,11 +299,6 @@ def getanyitem(self): return self.listdef.read_item() - def op_contains(self, s_element): - self.listdef.generalize(s_element) - return s_Bool - op_contains.can_only_throw = [] - def hint(self, *args_s): hints = 
args_s[-1].const if 'maxlength' in hints: @@ -340,6 +338,21 @@ raise AnnotatorError("%s: not proven to have non-negative stop" % error) +def _can_only_throw(s_dct, *ignore): + if s_dct.dictdef.dictkey.custom_eq_hash: + return None # r_dict: can throw anything + return [] # else: no possible exception + + at op.contains.register(SomeDict) +def contains_SomeDict(annotator, dct, element): + annotator.annotation(dct).dictdef.generalize_key(annotator.annotation(element)) + if annotator.annotation(dct)._is_empty(): + s_bool = SomeBool() + s_bool.const = False + return s_bool + return s_Bool +contains_SomeDict.can_only_throw = _can_only_throw + class __extend__(SomeDict): def _is_empty(self): @@ -421,19 +434,19 @@ self.dictdef.generalize_value(s_dfl) return self.dictdef.read_value() - def _can_only_throw(self, *ignore): - if self.dictdef.dictkey.custom_eq_hash: - return None # r_dict: can throw anything - return [] # else: no possible exception - - def op_contains(self, s_element): - self.dictdef.generalize_key(s_element) - if self._is_empty(): - s_bool = SomeBool() - s_bool.const = False - return s_bool - return s_Bool - op_contains.can_only_throw = _can_only_throw + at op.contains.register(SomeString) + at op.contains.register(SomeUnicodeString) +def contains_String(annotator, string, char): + if annotator.annotation(char).is_constant() and annotator.annotation(char).const == "\0": + r = SomeBool() + knowntypedata = {} + add_knowntypedata(knowntypedata, False, [string], + annotator.annotation(string).nonnulify()) + r.set_knowntypedata(knowntypedata) + return r + else: + return contains_SomeObject(annotator, string, char) +contains_String.can_only_throw = [] class __extend__(SomeString, @@ -508,19 +521,6 @@ result = self.basestringclass(no_nul=self.no_nul) return result - def op_contains(self, s_element): - if s_element.is_constant() and s_element.const == "\0": - r = SomeBool() - bk = getbookkeeper() - op = bk._find_current_op(opname="contains", arity=2, pos=0, s_type=self) - knowntypedata = {} - add_knowntypedata(knowntypedata, False, [op.args[0]], self.nonnulify()) - r.set_knowntypedata(knowntypedata) - return r - else: - return SomeObject.op_contains(self, s_element) - op_contains.can_only_throw = [] - def method_format(self, *args): raise AnnotatorError("Method format() is not RPython") @@ -709,9 +709,6 @@ return self._emulate_call('__setslice__', s_start, s_stop, s_iterable) class __extend__(SomeBuiltin): - def simple_call(self, *args): - return self.analyser(*args) - def call(self, args, implicit_init=False): args_s, kwds = args.unpack() # prefix keyword arguments with 's_' diff --git a/rpython/flowspace/model.py b/rpython/flowspace/model.py --- a/rpython/flowspace/model.py +++ b/rpython/flowspace/model.py @@ -10,29 +10,6 @@ from rpython.tool.sourcetools import PY_IDENTIFIER, nice_repr_for_func -""" - memory size before and after introduction of __slots__ - using targetpypymain with -no-c - - slottified annotation ann+genc - ------------------------------------------- - nothing 321 MB 442 MB - Var/Const/SpaceOp 205 MB 325 MB - + Link 189 MB 311 MB - + Block 185 MB 304 MB - - Dropping Variable.instances and using - just an instancenames dict brought - annotation down to 160 MB. - Computing the Variable.renamed attribute - and dropping Variable.instancenames - got annotation down to 109 MB. - Probably an effect of less fragmentation. 
-""" - -__metaclass__ = type - - class FunctionGraph(object): def __init__(self, name, startblock, return_var=None): self.name = name # function name (possibly mangled already) @@ -273,7 +250,7 @@ class Variable(object): - __slots__ = ["_name", "_nr", "concretetype"] + __slots__ = ["_name", "_nr", "annotation", "concretetype"] dummyname = 'v' namesdict = {dummyname: (dummyname, 0)} @@ -296,6 +273,7 @@ def __init__(self, name=None): self._name = self.dummyname self._nr = -1 + self.annotation = None # numbers are bound lazily, when the name is requested if name is not None: self.rename(name) @@ -334,6 +312,15 @@ def foldable(self): return False + def copy(self): + """Make a copy of the Variable, preserving annotations and concretetype.""" + newvar = Variable(self) + newvar.annotation = self.annotation + if hasattr(self, 'concretetype'): + newvar.concretetype = self.concretetype + return newvar + + class Constant(Hashable): __slots__ = ["concretetype"] diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -7,13 +7,14 @@ import operator import sys import types -from rpython.tool.pairtype import pair +from rpython.tool.pairtype import pair, DoubleDispatchRegistry from rpython.rlib.unroll import unrolling_iterable, _unroller from rpython.tool.sourcetools import compile2 from rpython.flowspace.model import (Constant, WrapException, const, Variable, SpaceOperation) from rpython.flowspace.specialcase import register_flow_sc -from rpython.annotator.model import SomeTuple +from rpython.annotator.model import ( + SomeTuple, AnnotatorError, read_can_only_throw) from rpython.annotator.argument import ArgumentsForTranslation from rpython.flowspace.specialcase import SPECIAL_CASES @@ -54,6 +55,11 @@ type.__init__(cls, name, bases, attrdict) if hasattr(cls, 'opname'): setattr(op, cls.opname, cls) + if cls.dispatch == 1: + cls._registry = {} + elif cls.dispatch == 2: + cls._registry = DoubleDispatchRegistry() + class HLOperation(SpaceOperation): __metaclass__ = HLOperationMeta @@ -90,11 +96,13 @@ def constfold(self): return None - def consider(self, annotator, *argcells): - consider_meth = getattr(annotator, 'consider_op_' + self.opname, None) - if not consider_meth: - raise Exception("unknown op: %r" % op) - return consider_meth(*argcells) + def consider(self, annotator, *args): + args_s = [annotator.annotation(arg) for arg in args] + spec = type(self).get_specialization(*args_s) + return spec(annotator, *args) + + def get_can_only_throw(self, annotator): + return None class PureOperation(HLOperation): pure = True @@ -141,16 +149,72 @@ class SingleDispatchMixin(object): dispatch = 1 - def consider(self, annotator, arg, *other_args): - impl = getattr(arg, self.opname) - return impl(*other_args) + @classmethod + def register(cls, Some_cls): + def decorator(func): + cls._registry[Some_cls] = func + return func + return decorator + + @classmethod + def _dispatch(cls, Some_cls): + for c in Some_cls.__mro__: + try: + return cls._registry[c] + except KeyError: + pass + raise AnnotatorError("Unknown operation") + + def get_can_only_throw(self, annotator): + args_s = [annotator.annotation(v) for v in self.args] + spec = type(self).get_specialization(*args_s) + return read_can_only_throw(spec, args_s[0]) + + @classmethod + def get_specialization(cls, s_arg, *_ignored): + try: + impl = getattr(s_arg, cls.opname) + + def specialized(annotator, arg, *other_args): + return impl(*[annotator.annotation(x) for x in 
other_args]) + try: + specialized.can_only_throw = impl.can_only_throw + except AttributeError: + pass + return specialized + except AttributeError: + return cls._dispatch(type(s_arg)) + class DoubleDispatchMixin(object): dispatch = 2 - def consider(self, annotator, arg1, arg2, *other_args): - impl = getattr(pair(arg1, arg2), self.opname) - return impl(*other_args) + @classmethod + def register(cls, Some1, Some2): + def decorator(func): + cls._registry[Some1, Some2] = func + return func + return decorator + + @classmethod + def get_specialization(cls, s_arg1, s_arg2, *_ignored): + try: + impl = getattr(pair(s_arg1, s_arg2), cls.opname) + + def specialized(annotator, arg1, arg2, *other_args): + return impl(*[annotator.annotation(x) for x in other_args]) + try: + specialized.can_only_throw = impl.can_only_throw + except AttributeError: + pass + return specialized + except AttributeError: + return cls._registry[type(s_arg1), type(s_arg2)] + + def get_can_only_throw(self, annotator): + args_s = [annotator.annotation(v) for v in self.args] + spec = type(self).get_specialization(*args_s) + return read_can_only_throw(spec, args_s[0], args_s[1]) def add_operator(name, arity, dispatch=None, pyfunc=None, pure=False, ovf=False): @@ -368,14 +432,15 @@ add_operator('newslice', 3) add_operator('hint', None, dispatch=1) -class Contains(PureOperation): +class Contains(SingleDispatchMixin, PureOperation): opname = 'contains' arity = 2 pyfunc = staticmethod(operator.contains) - # XXX "contains" clash with SomeObject method - def consider(self, annotator, seq, elem): - return seq.op_contains(elem) + # XXX "contains" clashes with SomeObject method + @classmethod + def get_specialization(cls, s_seq, s_elem): + return cls._dispatch(type(s_seq)) class NewDict(HLOperation): @@ -392,7 +457,7 @@ canraise = [] def consider(self, annotator, *args): - return SomeTuple(items=args) + return SomeTuple(items=[annotator.annotation(arg) for arg in args]) class NewList(HLOperation): @@ -400,7 +465,7 @@ canraise = [] def consider(self, annotator, *args): - return annotator.bookkeeper.newlist(*args) + return annotator.bookkeeper.newlist(*[annotator.annotation(arg) for arg in args]) class Pow(PureOperation): diff --git a/rpython/memory/gctransform/asmgcroot.py b/rpython/memory/gctransform/asmgcroot.py --- a/rpython/memory/gctransform/asmgcroot.py +++ b/rpython/memory/gctransform/asmgcroot.py @@ -10,7 +10,7 @@ BaseFrameworkGCTransformer, BaseRootWalker) from rpython.rtyper.llannotation import SomeAddress from rpython.rtyper.rbuiltin import gen_cast -from rpython.translator.unsimplify import copyvar, varoftype +from rpython.translator.unsimplify import varoftype from rpython.translator.tool.cbuild import ExternalCompilationInfo import sys @@ -140,7 +140,7 @@ block1 = Block([]) reloadedvars = [] for v, c_p in zip(block2.inputargs, sra): - v = copyvar(None, v) + v = v.copy() if isinstance(v.concretetype, lltype.Ptr): w = varoftype(llmemory.Address) else: diff --git a/rpython/rlib/test/test_signature.py b/rpython/rlib/test/test_signature.py --- a/rpython/rlib/test/test_signature.py +++ b/rpython/rlib/test/test_signature.py @@ -19,7 +19,7 @@ def sigof(a, f): # returns [param1, param2, ..., ret] g = graphof(a.translator, f) - return [a.bindings[v] for v in g.startblock.inputargs] + [a.bindings[g.getreturnvar()]] + return [a.binding(v) for v in g.startblock.inputargs] + [a.binding(g.getreturnvar())] def getsig(f, policy=None): a = annotate_at(f, policy=policy) diff --git a/rpython/rtyper/callparse.py b/rpython/rtyper/callparse.py --- 
a/rpython/rtyper/callparse.py +++ b/rpython/rtyper/callparse.py @@ -19,7 +19,7 @@ def getrresult(rtyper, graph): """Return the repr of the result variable of the 'graph'.""" - if graph.getreturnvar() in rtyper.annotator.bindings: + if graph.getreturnvar().annotation is not None: return rtyper.bindingrepr(graph.getreturnvar()) else: return lltype.Void diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -17,7 +17,6 @@ from rpython.annotator import model as annmodel, unaryop, binaryop from rpython.rtyper.llannotation import SomePtr, lltype_to_annotation -from rpython.annotator.annrpython import FAIL from rpython.flowspace.model import Variable, Constant, SpaceOperation, c_last_exception from rpython.rtyper.annlowlevel import annotate_lowlevel_helper, LowLevelAnnotatorPolicy from rpython.rtyper.error import TyperError @@ -152,8 +151,12 @@ assert result is not None # recursive getrepr()! return result - def binding(self, var, default=FAIL): - s_obj = self.annotator.binding(var, default) + def annotation(self, var): + s_obj = self.annotator.annotation(var) + return s_obj + + def binding(self, var): + s_obj = self.annotator.binding(var) return s_obj def bindingrepr(self, var): @@ -493,7 +496,7 @@ hop.r_result, op.opname, resulttype)) # figure out if the resultvar is a completely fresh Variable or not if (isinstance(resultvar, Variable) and - resultvar not in self.annotator.bindings and + resultvar.annotation is None and resultvar not in varmapping): # fresh Variable: rename it to the previously existing op.result varmapping[resultvar] = op.result @@ -635,7 +638,7 @@ ARG_GCSTRUCT = GCSTRUCT args_s = [SomePtr(Ptr(ARG_GCSTRUCT))] graph = self.annotate_helper(func, args_s) - s = self.annotator.binding(graph.getreturnvar()) + s = self.annotation(graph.getreturnvar()) if (not isinstance(s, SomePtr) or s.ll_ptrtype != Ptr(RuntimeTypeInfo)): raise TyperError("runtime type info function %r returns %r, " @@ -882,7 +885,9 @@ newargs_v = [] for v in args_v: if v.concretetype is Void: - s_value = rtyper.binding(v, default=annmodel.s_None) + s_value = rtyper.annotation(v) + if s_value is None: + s_value = annmodel.s_None if not s_value.is_constant(): raise TyperError("non-constant variable of type Void") if not isinstance(s_value, (annmodel.SomePBC, annmodel.SomeNone)): diff --git a/rpython/tool/error.py b/rpython/tool/error.py --- a/rpython/tool/error.py +++ b/rpython/tool/error.py @@ -106,7 +106,7 @@ def format_simple_call(annotator, oper, msg): msg.append("Occurred processing the following simple_call:") try: - descs = annotator.bindings[oper.args[0]].descriptions + descs = annotator.binding(oper.args[0]).descriptions except (KeyError, AttributeError), e: msg.append(" (%s getting at the binding!)" % ( e.__class__.__name__,)) diff --git a/rpython/tool/pairtype.py b/rpython/tool/pairtype.py --- a/rpython/tool/pairtype.py +++ b/rpython/tool/pairtype.py @@ -61,3 +61,36 @@ bases = tuple(bases1 + bases2) or (tuple,) # 'tuple': ultimate base pair = pairtypecache[cls1, cls2] = extendabletype(name, bases, {}) return pair + +def pairmro(cls1, cls2): + """ + Return the resolution order on pairs of types for double dispatch. + + This order is compatible with the mro of pairtype(cls1, cls2). 
+ """ + for base2 in cls2.__mro__: + for base1 in cls1.__mro__: + yield (base1, base2) + +class DoubleDispatchRegistry(object): + """ + A mapping of pairs of types to arbitrary objects respecting inheritance + """ + def __init__(self): + self._registry = {} + self._cache = {} + + def __getitem__(self, clspair): + try: + return self._cache[clspair] + except KeyError: + cls1, cls2 = clspair + for c1, c2 in pairmro(cls1, cls2): + if (c1, c2) in self._cache: + return self._cache[(c1, c2)] + else: + raise + + def __setitem__(self, clspair, value): + self._registry[clspair] = value + self._cache = self._registry.copy() diff --git a/rpython/tool/test/test_pairtype.py b/rpython/tool/test/test_pairtype.py --- a/rpython/tool/test/test_pairtype.py +++ b/rpython/tool/test/test_pairtype.py @@ -1,7 +1,7 @@ +from rpython.tool.pairtype import ( + pairtype, pair, extendabletype, pairmro, DoubleDispatchRegistry) -from rpython.tool.pairtype import pairtype, pair, extendabletype - -def test_binop(): +def test_binop(): ### Binary operation example class __extend__(pairtype(int, int)): def add((x, y)): @@ -13,16 +13,16 @@ def add((x, y)): return 'bool: %s+%s' % (x, y) - assert pair(3,4).add() == 'integer: 3+4' - assert pair(3,4).sub() == 'integer: 3-4' - assert pair(3,True).add() == 'integer: 3+True' - assert pair(3,True).sub() == 'integer: 3-True' - assert pair(False,4).add() == 'integer: False+4' - assert pair(False,4).sub() == 'integer: False-4' - assert pair(False,True).add() == 'bool: False+True' - assert pair(False,True).sub() == 'integer: False-True' + assert pair(3, 4).add() == 'integer: 3+4' + assert pair(3, 4).sub() == 'integer: 3-4' + assert pair(3, True).add() == 'integer: 3+True' + assert pair(3, True).sub() == 'integer: 3-True' + assert pair(False, 4).add() == 'integer: False+4' + assert pair(False, 4).sub() == 'integer: False-4' + assert pair(False, True).add() == 'bool: False+True' + assert pair(False, True).sub() == 'integer: False-True' -def test_somebuiltin(): +def test_somebuiltin(): ### Operation on built-in types class MiniPickler: def __init__(self): @@ -48,7 +48,7 @@ pair(p, [1, 2, ['hello', 3]]).write() assert p.data == ['I1', 'I2', 'Shello', 'I3', 'L2', 'L3'] -def test_some_multimethod(): +def test_some_multimethod(): ### Another multimethod example class Block: def __init__(self, exit): @@ -57,7 +57,7 @@ pass class Switch: pass - + class C_Generator: def __init__(self): self.lines = [] @@ -78,7 +78,7 @@ g = C_Generator() pair(g, Block(Switch())).emit(['v1', 'v2']) - assert g.lines == ["C code for block", "switch (v5) { ... }"] + assert g.lines == ["C code for block", "switch (v5) { ... 
}"] class Lisp_Generator: def __init__(self): @@ -95,16 +95,37 @@ def test_multiple_extend(): class A: __metaclass__ = extendabletype + class B: __metaclass__ = extendabletype - class __extend__(A,B): - + class __extend__(A, B): def f(self): pass assert hasattr(A, 'f') assert hasattr(B, 'f') - - +def test_pairmro(): + class A(object): pass + class A2(A): pass + class A3(A2): pass + class B(object): pass + class B2(B): pass + parent_pairtypes = pairtype(A3, B2).__mro__[:-2] + assert (tuple(pairtype(a, b) for a, b in pairmro(A3, B2)) == parent_pairtypes) + +def test_doubledispatch(): + class A(object): pass + class A2(A): pass + class A3(A2): pass + class B(object): pass + class B2(B): pass + reg = DoubleDispatchRegistry() + reg[object, object] = "default" + assert reg[A3, B2] == "default" + reg[A2, B2] = "A2-B2" + assert reg[A, B2] == "default" + assert reg[A3, B2] == "A2-B2" + reg[A3, B] = "A3-B" + assert reg[A3, B2] == "A2-B2" # note that A2,B2 wins over A3,B diff --git a/rpython/translator/backendopt/inline.py b/rpython/translator/backendopt/inline.py --- a/rpython/translator/backendopt/inline.py +++ b/rpython/translator/backendopt/inline.py @@ -8,7 +8,7 @@ from rpython.translator.backendopt.canraise import RaiseAnalyzer from rpython.translator.backendopt.support import log, find_loop_blocks from rpython.translator.simplify import join_blocks, cleanup_graph, get_graph -from rpython.translator.unsimplify import copyvar, split_block +from rpython.translator.unsimplify import split_block class CannotInline(Exception): @@ -236,14 +236,13 @@ if isinstance(var, Constant): return var if var not in self.varmap: - self.varmap[var] = copyvar(None, var) + self.varmap[var] = var.copy() return self.varmap[var] def passon_vars(self, cache_key): if cache_key in self._passon_vars: return self._passon_vars[cache_key] - result = [copyvar(None, var) - for var in self.original_passon_vars] + result = [var.copy() for var in self.original_passon_vars] self._passon_vars[cache_key] = result return result @@ -362,8 +361,8 @@ exc_match.concretetype = typeOf(exc_match.value) blocks = [] for i, link in enumerate(afterblock.exits[1:]): - etype = copyvar(None, copiedexceptblock.inputargs[0]) - evalue = copyvar(None, copiedexceptblock.inputargs[1]) + etype = copiedexceptblock.inputargs[0].copy() + evalue = copiedexceptblock.inputargs[1].copy() passon_vars = self.passon_vars(i) block = Block([etype, evalue] + passon_vars) res = Variable() diff --git a/rpython/translator/backendopt/ssa.py b/rpython/translator/backendopt/ssa.py --- a/rpython/translator/backendopt/ssa.py +++ b/rpython/translator/backendopt/ssa.py @@ -158,8 +158,6 @@ 'graph_or_blocks' can be a graph, or just a dict that lists some blocks from a graph, as follows: {block: reachable-from-outside-flag}. 
""" - from rpython.translator.unsimplify import copyvar - entrymap = mkinsideentrymap(graph_or_blocks) builder = DataFlowFamilyBuilder(graph_or_blocks) variable_families = builder.get_variable_families() @@ -203,7 +201,7 @@ except KeyError: raise Exception("SSA_to_SSI failed: no way to give a value to" " %r in %r" % (v, block)) - w = copyvar(annotator, v) + w = v.copy() variable_families.union(v, w) block.renamevariables({v: w}) block.inputargs.append(w) diff --git a/rpython/translator/exceptiontransform.py b/rpython/translator/exceptiontransform.py --- a/rpython/translator/exceptiontransform.py +++ b/rpython/translator/exceptiontransform.py @@ -1,5 +1,5 @@ from rpython.translator.simplify import join_blocks, cleanup_graph -from rpython.translator.unsimplify import copyvar, varoftype +from rpython.translator.unsimplify import varoftype from rpython.translator.unsimplify import insert_empty_block, split_block from rpython.translator.backendopt import canraise, inline from rpython.flowspace.model import Block, Constant, Variable, Link, \ @@ -305,8 +305,7 @@ reraise = self.comes_from_last_exception(entrymap, link) result = Variable() result.concretetype = lltype.Void - block = Block([copyvar(None, v) - for v in graph.exceptblock.inputargs]) + block = Block([v.copy() for v in graph.exceptblock.inputargs]) if reraise: block.operations = [ SpaceOperation("direct_call", @@ -345,7 +344,7 @@ inlined, the correct exception matching blocks are produced.""" # XXX slightly annoying: construct a graph by hand # but better than the alternative - result = copyvar(None, op.result) + result = op.result.copy() opargs = [] inputargs = [] callargs = [] @@ -435,7 +434,7 @@ result_i = l0.args.index(v_result) v_result_after = normalafterblock.inputargs[result_i] else: - v_result_after = copyvar(None, v_result) + v_result_after = v_result.copy() l0.args.append(v_result) normalafterblock.inputargs.append(v_result_after) if true_zero: diff --git a/rpython/translator/goal/query.py b/rpython/translator/goal/query.py --- a/rpython/translator/goal/query.py +++ b/rpython/translator/goal/query.py @@ -33,7 +33,7 @@ try: for block in g.iterblocks(): for v in block.getvariables(): - s = annotator.binding(v, None) + s = annotator.annotation(v) if s and s.__class__ == annmodel.SomeObject and s.knowntype != type: raise Found except Found: @@ -44,8 +44,8 @@ annotator = translator.annotator for graph in translator.graphs: et, ev = graph.exceptblock.inputargs - s_et = annotator.binding(et, None) - s_ev = annotator.binding(ev, None) + s_et = annotator.annotation(et) + s_ev = annotator.annotation(ev) if s_et: if s_et.knowntype == type: if s_et.__class__ == annmodel.SomeType: diff --git a/rpython/translator/tool/graphpage.py b/rpython/translator/tool/graphpage.py --- a/rpython/translator/tool/graphpage.py +++ b/rpython/translator/tool/graphpage.py @@ -106,12 +106,6 @@ self.source = make_dot_graphs(name, gs, target=None) # make the dictionary of links -- one per annotated variable self.current_value = {} - if self.annotator: - for var, s_value in self.annotator.bindings.items(): - info = '%s: %s' % (var.name, s_value) - annotationcolor = getattr(s_value, 'annotationcolor', None) - self.links[var.name] = info, annotationcolor - self.current_value[var.name] = s_value #from rpython.jit.hintannotator.annotator import HintAnnotator #if isinstance(self.annotator, HintAnnotator): @@ -128,6 +122,12 @@ for v in link.getextravars(): vars[v] = True for var in vars: + s_value = var.annotation + if s_value is not None: + info = '%s: %s' % 
(var.name, s_value) + annotationcolor = getattr(s_value, 'annotationcolor', None) + self.links[var.name] = info, annotationcolor + self.current_value[var.name] = s_value if hasattr(var, 'concretetype'): #info = self.links.get(var.name, var.name) #info = '(%s) %s' % (var.concretetype, info) diff --git a/rpython/translator/transform.py b/rpython/translator/transform.py --- a/rpython/translator/transform.py +++ b/rpython/translator/transform.py @@ -89,8 +89,8 @@ for i in range(len(block.operations)): op = block.operations[i] if op.opname == 'mul': - s0 = self.binding(op.args[0], None) - s1 = self.binding(op.args[1], None) + s0 = self.annotation(op.args[0]) + s1 = self.annotation(op.args[1]) if (isinstance(s0, annmodel.SomeChar) and isinstance(s1, annmodel.SomeInteger)): mul_sources[op.result] = op.args[0], op.args[1] @@ -124,14 +124,14 @@ elif op.opname == 'contains' and op.args[0] in newlist_sources: items = {} for v in newlist_sources[op.args[0]]: - s = self.binding(v) + s = self.annotation(v) if not s.is_immutable_constant(): break items[s.const] = None else: # all arguments of the newlist are annotation constants op.args[0] = Constant(items) - s_dict = self.binding(op.args[0]) + s_dict = self.annotation(op.args[0]) s_dict.dictdef.generalize_key(self.binding(op.args[1])) @@ -168,9 +168,9 @@ "Fix a block whose end can never be reached at run-time." # search the operation that cannot succeed can_succeed = [op for op in block.operations - if op.result in self.bindings] + if op.result.annotation is not None] cannot_succeed = [op for op in block.operations - if op.result not in self.bindings] + if op.result.annotation is None] n = len(can_succeed) # check consistency assert can_succeed == block.operations[:n] @@ -178,8 +178,7 @@ assert 0 <= n < len(block.operations) # chop off the unreachable end of the block del block.operations[n+1:] - s_impossible = annmodel.SomeImpossibleValue() - self.bindings[block.operations[n].result] = s_impossible + self.setbinding(block.operations[n].result, annmodel.s_ImpossibleValue) # insert the equivalent of 'raise AssertionError' graph = self.annotated[block] msg = "Call to %r should have raised an exception" % (getattr(graph, 'func', None),) diff --git a/rpython/translator/unsimplify.py b/rpython/translator/unsimplify.py --- a/rpython/translator/unsimplify.py +++ b/rpython/translator/unsimplify.py @@ -2,16 +2,6 @@ SpaceOperation, c_last_exception, checkgraph) -def copyvar(annotator, v): - """Make a copy of the Variable v, preserving annotations and concretetype.""" - assert isinstance(v, Variable) - newvar = Variable(v) - if annotator is not None and v in annotator.bindings: - annotator.transfer_binding(newvar, v) - if hasattr(v, 'concretetype'): - newvar.concretetype = v.concretetype - return newvar - def varoftype(concretetype, name=None): var = Variable(name) var.concretetype = concretetype @@ -31,7 +21,7 @@ vars = [v for v, keep in vars.items() if keep] mapping = {} for v in vars: - mapping[v] = copyvar(annotator, v) + mapping[v] = v.copy() newblock = Block(vars) newblock.operations.extend(newops) newblock.closeblock(Link(link.args, link.target)) @@ -41,7 +31,7 @@ return newblock def insert_empty_startblock(annotator, graph): - vars = [copyvar(annotator, v) for v in graph.startblock.inputargs] + vars = [v.copy() for v in graph.startblock.inputargs] newblock = Block(vars) newblock.closeblock(Link(vars, graph.startblock)) graph.startblock = newblock @@ -72,7 +62,7 @@ if var in vars_produced_in_new_block: return var if var not in varmap: - varmap[var] = 
copyvar(annotator, var) + varmap[var] = var.copy() return varmap[var] moved_operations = block.operations[index:] new_moved_ops = [] @@ -146,7 +136,7 @@ annhelper.finish() entry_point = translator.entry_point_graph - args = [copyvar(translator.annotator, v) for v in entry_point.getargs()] + args = [v.copy() for v in entry_point.getargs()] extrablock = Block(args) v_none = varoftype(lltype.Void) newop = SpaceOperation('direct_call', [c_initial_func], v_none) @@ -169,7 +159,7 @@ annhelper.finish() entry_point = translator.entry_point_graph - v = copyvar(translator.annotator, entry_point.getreturnvar()) + v = entry_point.getreturnvar().copy() extrablock = Block([v]) v_none = varoftype(lltype.Void) newop = SpaceOperation('direct_call', [c_final_func], v_none) From noreply at buildbot.pypy.org Fri Oct 10 18:33:11 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 10 Oct 2014 18:33:11 +0200 (CEST) Subject: [pypy-commit] pypy var-in-Some: close branch Message-ID: <20141010163311.3D16D1D25A7@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: var-in-Some Changeset: r73890:c6314683ba8d Date: 2014-10-10 17:32 +0100 http://bitbucket.org/pypy/pypy/changeset/c6314683ba8d/ Log: close branch From noreply at buildbot.pypy.org Fri Oct 10 18:41:44 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 10 Oct 2014 18:41:44 +0200 (CEST) Subject: [pypy-commit] pypy inline-earlier: move search_for_calls() out of BaseInliner, since it's only used by Inliner Message-ID: <20141010164144.E7B271D25A7@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: inline-earlier Changeset: r73891:44de8be2cd36 Date: 2014-10-09 02:55 +0100 http://bitbucket.org/pypy/pypy/changeset/44de8be2cd36/ Log: move search_for_calls() out of BaseInliner, since it's only used by Inliner diff --git a/rpython/translator/backendopt/inline.py b/rpython/translator/backendopt/inline.py --- a/rpython/translator/backendopt/inline.py +++ b/rpython/translator/backendopt/inline.py @@ -210,26 +210,6 @@ self.entrymap = mkentrymap(self.graph_to_inline) self.do_inline(block, index_operation) - def search_for_calls(self, block): - d = {} - for i, op in enumerate(block.operations): - if op.opname == "direct_call": - funcobj = op.args[0].value._obj - else: - continue - graph = getattr(funcobj, 'graph', None) - # accept a function or a graph as 'inline_func' - if (graph is self.inline_func or - getattr(funcobj, '_callable', None) is self.inline_func): - d[i] = graph - if d: - self.block_to_index[block] = d - else: - try: - del self.block_to_index[block] - except KeyError: - pass - def get_new_name(self, var): if var is None: return None @@ -464,6 +444,27 @@ for g, block, i in callsites: self.block_to_index.setdefault(block, {})[i] = g + def search_for_calls(self, block): + d = {} + for i, op in enumerate(block.operations): + if op.opname == "direct_call": + funcobj = op.args[0].value._obj + else: + continue + graph = getattr(funcobj, 'graph', None) + # accept a function or a graph as 'inline_func' + if (graph is self.inline_func or + getattr(funcobj, '_callable', None) is self.inline_func): + d[i] = graph + if d: + self.block_to_index[block] = d + else: + try: + del self.block_to_index[block] + except KeyError: + pass + + class OneShotInliner(BaseInliner): def search_for_calls(self, block): pass From noreply at buildbot.pypy.org Fri Oct 10 18:41:46 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 10 Oct 2014 18:41:46 +0200 (CEST) Subject: [pypy-commit] pypy inline-earlier: extract some rtyper specific code into a separate method 
Message-ID: <20141010164146.314531D25A7@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: inline-earlier Changeset: r73892:655da501d2b3 Date: 2014-10-09 03:58 +0100 http://bitbucket.org/pypy/pypy/changeset/655da501d2b3/ Log: extract some rtyper specific code into a separate method diff --git a/rpython/translator/backendopt/inline.py b/rpython/translator/backendopt/inline.py --- a/rpython/translator/backendopt/inline.py +++ b/rpython/translator/backendopt/inline.py @@ -304,32 +304,37 @@ # if self.graph.exceptblock was never used before a2.concretetype = a1.concretetype + def exc_match(self, VALUE, llexitcase): + from rpython.rtyper.lltypesystem.rclass import getclassrepr + if VALUE not in self.lltype_to_classdef: + return False + classdef = self.lltype_to_classdef[VALUE] + rtyper = self.translator.rtyper + excdata = rtyper.exceptiondata + exc_match = excdata.fn_exception_match + classrepr = getclassrepr(rtyper, classdef) + vtable = classrepr.getruntime(excdata.lltype_of_exception_type) + return exc_match(vtable, llexitcase) + def rewire_exceptblock_with_guard(self, afterblock, copiedexceptblock): # this rewiring does not always succeed. in the cases where it doesn't # there will be generic code inserted - from rpython.rtyper.lltypesystem import rclass - excdata = self.translator.rtyper.exceptiondata - exc_match = excdata.fn_exception_match for link in self.entrymap[self.graph_to_inline.exceptblock]: if link.prevblock.exits[0] is not link: continue copiedblock = self.copy_block(link.prevblock) VALUE, copiedlink = _find_exception_type(copiedblock) - #print copiedblock.operations - if VALUE is None or VALUE not in self.lltype_to_classdef: + if VALUE is None: continue - classdef = self.lltype_to_classdef[VALUE] - rtyper = self.translator.rtyper - classrepr = rclass.getclassrepr(rtyper, classdef) - vtable = classrepr.getruntime(excdata.lltype_of_exception_type) var_etype = copiedlink.args[0] var_evalue = copiedlink.args[1] for exceptionlink in afterblock.exits[1:]: - if exc_match(vtable, exceptionlink.llexitcase): + if self.exc_match(VALUE, exceptionlink.llexitcase): passon_vars = self.passon_vars(link.prevblock) copiedlink.target = exceptionlink.target linkargs = self.find_args_in_exceptional_case( - exceptionlink, link.prevblock, var_etype, var_evalue, afterblock, passon_vars) + exceptionlink, link.prevblock, var_etype, var_evalue, + afterblock, passon_vars) copiedlink.args = linkargs break From noreply at buildbot.pypy.org Fri Oct 10 18:41:47 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 10 Oct 2014 18:41:47 +0200 (CEST) Subject: [pypy-commit] pypy inline-earlier: kill unused class CanRaise Message-ID: <20141010164147.4E2861D25A7@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: inline-earlier Changeset: r73893:c602f802c63f Date: 2014-10-10 05:03 +0100 http://bitbucket.org/pypy/pypy/changeset/c602f802c63f/ Log: kill unused class CanRaise diff --git a/rpython/translator/backendopt/inline.py b/rpython/translator/backendopt/inline.py --- a/rpython/translator/backendopt/inline.py +++ b/rpython/translator/backendopt/inline.py @@ -15,11 +15,6 @@ pass -class CanRaise(object): - def __init__(self, can_raise): - self.can_raise = can_raise - - def collect_called_graphs(graph, translator): graphs_or_something = set() for block in graph.iterblocks(): @@ -126,9 +121,6 @@ if isinstance(graph_or_something, FunctionGraph): if does_raise_directly(graph_or_something, raise_analyzer): return True - elif isinstance(graph_or_something, CanRaise): - if graph_or_something.can_raise: - 
return True else: return True # conservatively for block in from_graph.iterblocks(): From noreply at buildbot.pypy.org Fri Oct 10 18:41:48 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 10 Oct 2014 18:41:48 +0200 (CEST) Subject: [pypy-commit] pypy inline-earlier: deJavaise r/t/b/inline.py Message-ID: <20141010164148.69CC91D25A7@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: inline-earlier Changeset: r73894:4834be503b4d Date: 2014-10-10 05:20 +0100 http://bitbucket.org/pypy/pypy/changeset/4834be503b4d/ Log: deJavaise r/t/b/inline.py diff --git a/rpython/memory/gctransform/transform.py b/rpython/memory/gctransform/transform.py --- a/rpython/memory/gctransform/transform.py +++ b/rpython/memory/gctransform/transform.py @@ -125,14 +125,14 @@ if not self.prepared: raise Exception("Need to call prepare_inline_helpers first") if self.inline: - raise_analyzer = RaiseAnalyzer(self.translator) + can_raise = RaiseAnalyzer(self.translator).can_raise to_enum = self.graph_dependencies.get(graph, self.graphs_to_inline) must_constfold = False for inline_graph in to_enum: try: inline.inline_function(self.translator, inline_graph, graph, self.lltype_to_classdef, - raise_analyzer, + can_raise, cleanup=False) must_constfold = True except inline.CannotInline, e: diff --git a/rpython/translator/backendopt/inline.py b/rpython/translator/backendopt/inline.py --- a/rpython/translator/backendopt/inline.py +++ b/rpython/translator/backendopt/inline.py @@ -68,16 +68,16 @@ return False def inline_function(translator, inline_func, graph, lltype_to_classdef, - raise_analyzer, call_count_pred=None, cleanup=True): + can_raise, call_count_pred=None, cleanup=True): inliner = Inliner(translator, graph, inline_func, lltype_to_classdef, - raise_analyzer=raise_analyzer, + can_raise=can_raise, call_count_pred=call_count_pred, cleanup=cleanup) return inliner.inline_all() def simple_inline_function(translator, inline_func, graph): inliner = Inliner(translator, graph, inline_func, translator.rtyper.lltype_to_classdef_mapping(), - raise_analyzer=RaiseAnalyzer(translator)) + can_raise=RaiseAnalyzer(translator).can_raise) return inliner.inline_all() @@ -101,25 +101,24 @@ elif op.opname == "malloc" and op.result is currvar: return Ptr(op.args[0].value), block.exits[0] -def does_raise_directly(graph, raise_analyzer): +def does_raise_directly(graph, can_raise): """ this function checks, whether graph contains operations which can raise and which are not exception guarded """ for block in graph.iterblocks(): if block is graph.exceptblock: return True # the except block is reachable if block.exitswitch == c_last_exception: - consider_ops_to = -1 + ops = block.operations[:-1] else: - consider_ops_to = len(block.operations) - for op in block.operations[:consider_ops_to]: - if raise_analyzer.can_raise(op): - return True + ops = block.operations + if any(can_raise(op) for op in ops): + return True return False -def any_call_to_raising_graphs(from_graph, translator, raise_analyzer): +def any_call_to_raising_graphs(from_graph, translator, can_raise): for graph_or_something in collect_called_graphs(from_graph, translator): if isinstance(graph_or_something, FunctionGraph): - if does_raise_directly(graph_or_something, raise_analyzer): + if does_raise_directly(graph_or_something, can_raise): return True else: return True # conservatively @@ -129,7 +128,7 @@ else: consider_ops_to = len(block.operations) for op in block.operations[:consider_ops_to]: - if raise_analyzer.can_raise(op): + if can_raise(op): return True return False @@ 
-137,7 +136,7 @@ def __init__(self, translator, graph, lltype_to_classdef, inline_guarded_calls=False, inline_guarded_calls_no_matter_what=False, - raise_analyzer=None, + can_raise=None, call_count_pred=None, cleanup=True): self.translator = translator @@ -147,8 +146,8 @@ # if this argument is set, the inliner will happily produce wrong code! # it is used by the exception transformation self.inline_guarded_calls_no_matter_what = inline_guarded_calls_no_matter_what - assert raise_analyzer is not None - self.raise_analyzer = raise_analyzer + assert can_raise is not None + self.can_raise = can_raise self.lltype_to_classdef = lltype_to_classdef self.call_count_pred = call_count_pred @@ -193,10 +192,10 @@ self.exception_guarded = True if self.inline_guarded_calls: if (not self.inline_guarded_calls_no_matter_what and - does_raise_directly(self.graph_to_inline, self.raise_analyzer)): + does_raise_directly(self.graph_to_inline, self.can_raise)): raise CannotInline("can't inline because the call is exception guarded") elif any_call_to_raising_graphs(self.graph_to_inline, - self.translator, self.raise_analyzer): + self.translator, self.can_raise): raise CannotInline("can't handle exceptions") self._passon_vars = {} self.entrymap = mkentrymap(self.graph_to_inline) @@ -423,13 +422,13 @@ def __init__(self, translator, graph, inline_func, lltype_to_classdef, inline_guarded_calls=False, inline_guarded_calls_no_matter_what=False, - raise_analyzer=None, + can_raise=None, call_count_pred=None, cleanup=True): BaseInliner.__init__(self, translator, graph, lltype_to_classdef, inline_guarded_calls, inline_guarded_calls_no_matter_what, - raise_analyzer, + can_raise, call_count_pred, cleanup) self.inline_func = inline_func @@ -629,7 +628,7 @@ valid_weight = {} try_again = {} lltype_to_classdef = translator.rtyper.lltype_to_classdef_mapping() - raise_analyzer = RaiseAnalyzer(translator) + can_raise = RaiseAnalyzer(translator).can_raise count = 0 while heap: weight, _, graph = heap[0] @@ -673,7 +672,7 @@ subcount = 0 try: subcount = inline_function(translator, graph, parentgraph, - lltype_to_classdef, raise_analyzer, + lltype_to_classdef, can_raise, call_count_pred, cleanup=False) to_cleanup[parentgraph] = True res = bool(subcount) diff --git a/rpython/translator/backendopt/test/test_inline.py b/rpython/translator/backendopt/test/test_inline.py --- a/rpython/translator/backendopt/test/test_inline.py +++ b/rpython/translator/backendopt/test/test_inline.py @@ -65,11 +65,11 @@ sanity_check(t) # also check before inlining (so we don't blame it) if option.view: t.view() - raise_analyzer = canraise.RaiseAnalyzer(t) + can_raise = canraise.RaiseAnalyzer(t).can_raise inliner = Inliner(t, graphof(t, in_func), func, t.rtyper.lltype_to_classdef_mapping(), inline_guarded_calls, - raise_analyzer=raise_analyzer) + can_raise=can_raise) inliner.inline_all() if option.view: t.view() diff --git a/rpython/translator/exceptiontransform.py b/rpython/translator/exceptiontransform.py --- a/rpython/translator/exceptiontransform.py +++ b/rpython/translator/exceptiontransform.py @@ -334,7 +334,7 @@ inliner = inline.OneShotInliner( self.translator, graph, self.lltype_to_classdef, inline_guarded_calls=True, inline_guarded_calls_no_matter_what=True, - raise_analyzer=self.raise_analyzer) + can_raise=self.raise_analyzer.can_raise) inliner.inline_once(block, len(block.operations)-1) #block.exits[0].exitcase = block.exits[0].llexitcase = False From noreply at buildbot.pypy.org Fri Oct 10 18:55:38 2014 From: noreply at buildbot.pypy.org (arigo) 
Date: Fri, 10 Oct 2014 18:55:38 +0200 (CEST) Subject: [pypy-commit] pypy default: oups Message-ID: <20141010165538.8FE711D26BD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73895:466c7d4301d8 Date: 2014-10-10 18:55 +0200 http://bitbucket.org/pypy/pypy/changeset/466c7d4301d8/ Log: oups diff --git a/rpython/translator/c/src/int.h b/rpython/translator/c/src/int.h --- a/rpython/translator/c/src/int.h +++ b/rpython/translator/c/src/int.h @@ -48,6 +48,8 @@ #define OP_INT_BETWEEN(a,b,c,r) r = (((Unsigned)b - (Unsigned)a) \ < ((Unsigned)c - (Unsigned)a)) +#define OP_INT_FORCE_GE_ZERO(a,r) r = (0 > a) ? 0 : (a) + /* addition, subtraction */ #define OP_INT_ADD(x,y,r) r = (x) + (y) From noreply at buildbot.pypy.org Fri Oct 10 19:39:45 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 10 Oct 2014 19:39:45 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix the test Message-ID: <20141010173945.E9D5E1D28DF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73896:48ee7cfde056 Date: 2014-10-10 19:39 +0200 http://bitbucket.org/pypy/pypy/changeset/48ee7cfde056/ Log: Fix the test diff --git a/rpython/jit/metainterp/test/test_tracingopts.py b/rpython/jit/metainterp/test/test_tracingopts.py --- a/rpython/jit/metainterp/test/test_tracingopts.py +++ b/rpython/jit/metainterp/test/test_tracingopts.py @@ -333,11 +333,25 @@ a[-1] = n x1 = a[-1] a[n - n - 1] = n + 1 - return a[-1] + x1 + return a[-1] + x1 + 1000 * a[2] res = self.interp_operations(fn, [7]) assert res == 7 + 7 + 1 self.check_operations_history(setarrayitem_gc=2, - setfield_gc=0) + setfield_gc=2, call=0) + + def test_list_caching_negative_nonzero_init(self): + def fn(n): + a = [42] * n + if n > 1000: + a.append(0) + a[-1] = n + x1 = a[-1] + a[n - n - 1] = n + 1 + return a[-1] + x1 + 1000 * a[2] + res = self.interp_operations(fn, [7]) + assert res == 7 + 7 + 1 + 42000 + self.check_operations_history(setarrayitem_gc=2, + setfield_gc=0, call=1) def test_virtualizable_with_array_heap_cache(self): myjitdriver = jit.JitDriver(greens = [], reds = ['n', 'x', 'i', 'frame'], From noreply at buildbot.pypy.org Fri Oct 10 20:00:01 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 10 Oct 2014 20:00:01 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: Remove the links "September only: double donations!" Message-ID: <20141010180001.BE7711D2909@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r547:465e28f2fb28 Date: 2014-10-10 19:59 +0200 http://bitbucket.org/pypy/pypy.org/changeset/465e28f2fb28/ Log: Remove the links "September only: double donations!" diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -20,7 +20,6 @@
    -
  • September only: double donations!
  • diff --git a/don2.html b/don2.html --- a/don2.html +++ b/don2.html @@ -8,7 +8,6 @@
  • -
  • September only: double donations!
  • diff --git a/don3.html b/don3.html --- a/don3.html +++ b/don3.html @@ -20,7 +20,6 @@
  • -
  • September only: double donations!
  • diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -22,7 +22,6 @@ -
  • September only: double donations!
  • From noreply at buildbot.pypy.org Fri Oct 10 20:50:29 2014 From: noreply at buildbot.pypy.org (catseye) Date: Fri, 10 Oct 2014 20:50:29 +0200 (CEST) Subject: [pypy-commit] pypy default: The code in 'py' and '_pytest' is also under the MIT license. Message-ID: <20141010185029.693081C1347@cobra.cs.uni-duesseldorf.de> Author: Chris Pressey Branch: Changeset: r73897:933092cbcef7 Date: 2014-10-10 13:32 +0100 http://bitbucket.org/pypy/pypy/changeset/933092cbcef7/ Log: The code in 'py' and '_pytest' is also under the MIT license. diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -3,8 +3,8 @@ Except when otherwise stated (look for LICENSE files in directories or information at the beginning of each file) all software and documentation in -the 'rpython', 'pypy', 'ctype_configure', 'dotviewer', 'demo', and 'lib_pypy' -directories is licensed as follows: +the 'rpython', 'pypy', 'ctype_configure', 'dotviewer', 'demo', 'lib_pypy', +'py', and '_pytest' directories is licensed as follows: The MIT License From noreply at buildbot.pypy.org Fri Oct 10 20:50:30 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 10 Oct 2014 20:50:30 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in catseye/pypy (pull request #284) Message-ID: <20141010185030.8EC1F1C1347@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73898:e242534d86cb Date: 2014-10-10 20:50 +0200 http://bitbucket.org/pypy/pypy/changeset/e242534d86cb/ Log: Merged in catseye/pypy (pull request #284) The code in 'py' and '_pytest' is also under the MIT license. diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -3,8 +3,8 @@ Except when otherwise stated (look for LICENSE files in directories or information at the beginning of each file) all software and documentation in -the 'rpython', 'pypy', 'ctype_configure', 'dotviewer', 'demo', and 'lib_pypy' -directories is licensed as follows: +the 'rpython', 'pypy', 'ctype_configure', 'dotviewer', 'demo', 'lib_pypy', +'py', and '_pytest' directories is licensed as follows: The MIT License From noreply at buildbot.pypy.org Fri Oct 10 20:55:27 2014 From: noreply at buildbot.pypy.org (methane) Date: Fri, 10 Oct 2014 20:55:27 +0200 (CEST) Subject: [pypy-commit] cffi methane/homebrew-makes-symlink-for-current-versi-1412966022079: Homebrew makes symlink for current version in `/usr/local/opt`. Message-ID: <20141010185527.6DFD41C35AF@cobra.cs.uni-duesseldorf.de> Author: Naoki INADA Branch: methane/homebrew-makes-symlink-for-current-versi-1412966022079 Changeset: r1569:c3a7ec53ea6f Date: 2014-10-10 18:33 +0000 http://bitbucket.org/cffi/cffi/changeset/c3a7ec53ea6f/ Log: Homebrew makes symlink for current version in `/usr/local/opt`. 
diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -150,8 +150,7 @@ :: brew install pkg-config libffi - export PKG_CONFIG_PATH=/usr/local/Cellar/libffi/3.0.13/lib/pkgconfig/ # May change with libffi version - pip install cffi + PKG_CONFIG_PATH=/usr/local/opt/libffi/lib/pkgconfig pip install cffi Aternatively, **on OS/X 10.6** (Thanks Juraj Sukop for this) From noreply at buildbot.pypy.org Fri Oct 10 20:55:28 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 10 Oct 2014 20:55:28 +0200 (CEST) Subject: [pypy-commit] cffi release-0.8: Merged in methane/cffi/methane/homebrew-makes-symlink-for-current-versi-1412966022079 (pull request #49) Message-ID: <20141010185528.B351D1C35AF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-0.8 Changeset: r1570:897d690a91e5 Date: 2014-10-10 20:55 +0200 http://bitbucket.org/cffi/cffi/changeset/897d690a91e5/ Log: Merged in methane/cffi/methane/homebrew-makes-symlink-for-current- versi-1412966022079 (pull request #49) Homebrew makes symlink for current version in `/usr/local/opt`. diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -150,8 +150,7 @@ :: brew install pkg-config libffi - export PKG_CONFIG_PATH=/usr/local/Cellar/libffi/3.0.13/lib/pkgconfig/ # May change with libffi version - pip install cffi + PKG_CONFIG_PATH=/usr/local/opt/libffi/lib/pkgconfig pip install cffi Aternatively, **on OS/X 10.6** (Thanks Juraj Sukop for this) From noreply at buildbot.pypy.org Fri Oct 10 20:56:53 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 10 Oct 2014 20:56:53 +0200 (CEST) Subject: [pypy-commit] cffi default: Merge this to default too Message-ID: <20141010185653.A94DB1C35AF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1571:dd050d62ca8e Date: 2014-10-10 20:56 +0200 http://bitbucket.org/cffi/cffi/changeset/dd050d62ca8e/ Log: Merge this to default too diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -150,8 +150,7 @@ :: brew install pkg-config libffi - export PKG_CONFIG_PATH=/usr/local/Cellar/libffi/3.0.13/lib/pkgconfig/ # May change with libffi version - pip install cffi + PKG_CONFIG_PATH=/usr/local/opt/libffi/lib/pkgconfig pip install cffi Aternatively, **on OS/X 10.6** (Thanks Juraj Sukop for this) From noreply at buildbot.pypy.org Sat Oct 11 03:06:21 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 11 Oct 2014 03:06:21 +0200 (CEST) Subject: [pypy-commit] pypy default: merge ClassesPBCRepr and AbstractClassesPBCRepr Message-ID: <20141011010621.0D9DA1C35AF@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r73899:adb060f6288a Date: 2014-10-11 02:04 +0100 http://bitbucket.org/pypy/pypy/changeset/adb060f6288a/ Log: merge ClassesPBCRepr and AbstractClassesPBCRepr diff --git a/rpython/rtyper/lltypesystem/rpbc.py b/rpython/rtyper/lltypesystem/rpbc.py --- a/rpython/rtyper/lltypesystem/rpbc.py +++ b/rpython/rtyper/lltypesystem/rpbc.py @@ -4,12 +4,12 @@ from rpython.rlib.debug import ll_assert from rpython.rlib.unroll import unrolling_iterable from rpython.rtyper import callparse -from rpython.rtyper.lltypesystem import rclass, llmemory +from rpython.rtyper.lltypesystem import llmemory from rpython.rtyper.lltypesystem.lltype import (typeOf, Void, ForwardReference, Struct, Bool, Char, Ptr, malloc, nullptr, Array, Signed) from rpython.rtyper.rmodel import Repr, inputconst from rpython.rtyper.rpbc import ( - 
AbstractClassesPBCRepr, AbstractMultipleFrozenPBCRepr, + AbstractMultipleFrozenPBCRepr, AbstractFunctionsPBCRepr, AbstractMultipleUnrelatedFrozenPBCRepr, SingleFrozenPBCRepr, get_concrete_calltable) from rpython.rtyper.typesystem import getfunctionptr @@ -301,45 +301,3 @@ resulttype=Char) else: return v - -# ____________________________________________________________ - -class ClassesPBCRepr(AbstractClassesPBCRepr): - """Representation selected for a PBC of class(es).""" - - # no __init__ here, AbstractClassesPBCRepr.__init__ is good enough - - def _instantiate_runtime_class(self, hop, vtypeptr, r_instance): - graphs = [] - for desc in self.s_pbc.descriptions: - classdef = desc.getclassdef(None) - assert hasattr(classdef, 'my_instantiate_graph') - graphs.append(classdef.my_instantiate_graph) - c_graphs = hop.inputconst(Void, graphs) - # - # "my_instantiate = typeptr.instantiate" - c_name = hop.inputconst(Void, 'instantiate') - v_instantiate = hop.genop('getfield', [vtypeptr, c_name], - resulttype=rclass.OBJECT_VTABLE.instantiate) - # "my_instantiate()" - v_inst = hop.genop('indirect_call', [v_instantiate, c_graphs], - resulttype=rclass.OBJECTPTR) - return hop.genop('cast_pointer', [v_inst], resulttype=r_instance) - - def getlowleveltype(self): - return rclass.CLASSTYPE - - def get_ll_hash_function(self): - return ll_cls_hash - - get_ll_fasthash_function = get_ll_hash_function - - def get_ll_eq_function(self): - return None - - -def ll_cls_hash(cls): - if not cls: - return 0 - else: - return cls.hash diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -4,6 +4,8 @@ from rpython.flowspace.model import Constant from rpython.annotator.argument import simple_args from rpython.rtyper import rclass, callparse +from rpython.rtyper.lltypesystem.rclass import ( + CLASSTYPE, OBJECT_VTABLE, OBJECTPTR) from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.rmodel import (Repr, inputconst, CanBeNull, mangle, @@ -22,7 +24,7 @@ class __extend__(annmodel.SomePBC): def rtyper_makerepr(self, rtyper): from rpython.rtyper.lltypesystem.rpbc import ( - FunctionsPBCRepr, SmallFunctionSetPBCRepr, ClassesPBCRepr) + FunctionsPBCRepr, SmallFunctionSetPBCRepr) kind = self.getKind() if issubclass(kind, description.FunctionDesc): sample = self.any_description() @@ -584,7 +586,7 @@ # ____________________________________________________________ -class AbstractClassesPBCRepr(Repr): +class ClassesPBCRepr(Repr): """Representation selected for a PBC of class(es).""" def __init__(self, rtyper, s_pbc): @@ -727,8 +729,42 @@ hop2.dispatch() return v_instance + def _instantiate_runtime_class(self, hop, vtypeptr, r_instance): + graphs = [] + for desc in self.s_pbc.descriptions: + classdef = desc.getclassdef(None) + assert hasattr(classdef, 'my_instantiate_graph') + graphs.append(classdef.my_instantiate_graph) + c_graphs = hop.inputconst(lltype.Void, graphs) + # + # "my_instantiate = typeptr.instantiate" + c_name = hop.inputconst(lltype.Void, 'instantiate') + v_instantiate = hop.genop('getfield', [vtypeptr, c_name], + resulttype=OBJECT_VTABLE.instantiate) + # "my_instantiate()" + v_inst = hop.genop('indirect_call', [v_instantiate, c_graphs], + resulttype=OBJECTPTR) + return hop.genop('cast_pointer', [v_inst], resulttype=r_instance) -class __extend__(pairtype(AbstractClassesPBCRepr, rclass.AbstractClassRepr)): + def getlowleveltype(self): + return CLASSTYPE + + def get_ll_hash_function(self): + return 
ll_cls_hash + + get_ll_fasthash_function = get_ll_hash_function + + def get_ll_eq_function(self): + return None + + +def ll_cls_hash(cls): + if not cls: + return 0 + else: + return cls.hash + +class __extend__(pairtype(ClassesPBCRepr, rclass.AbstractClassRepr)): def convert_from_to((r_clspbc, r_cls), v, llops): # turn a PBC of classes to a standard pointer-to-vtable class repr if r_clspbc.lowleveltype == r_cls.lowleveltype: @@ -738,7 +774,7 @@ # convert from ptr-to-object-vtable to ptr-to-more-precise-vtable return r_cls.fromclasstype(v, llops) -class __extend__(pairtype(AbstractClassesPBCRepr, AbstractClassesPBCRepr)): +class __extend__(pairtype(ClassesPBCRepr, ClassesPBCRepr)): def convert_from_to((r_clspbc1, r_clspbc2), v, llops): # this check makes sense because both source and dest repr are ClassesPBCRepr if r_clspbc1.lowleveltype == r_clspbc2.lowleveltype: From noreply at buildbot.pypy.org Sat Oct 11 03:29:34 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 11 Oct 2014 03:29:34 +0200 (CEST) Subject: [pypy-commit] pypy default: fix this test on 32bit Message-ID: <20141011012934.EEDA31D22A7@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73900:25e3b114c5ab Date: 2014-10-10 21:29 -0400 http://bitbucket.org/pypy/pypy/changeset/25e3b114c5ab/ Log: fix this test on 32bit diff --git a/pypy/module/micronumpy/test/test_arrayops.py b/pypy/module/micronumpy/test/test_arrayops.py --- a/pypy/module/micronumpy/test/test_arrayops.py +++ b/pypy/module/micronumpy/test/test_arrayops.py @@ -211,7 +211,7 @@ assert np.result_type(1.) is np.dtype('float64') assert np.result_type(1+2j) is np.dtype('complex128') assert np.result_type(1, 1.) is np.dtype('float64') - assert np.result_type(np.array([1, 2])) is np.dtype('int64') + assert np.result_type(np.array([1, 2])) is np.dtype('int') assert np.result_type(np.array([1, 2]), 1, 1+2j) is np.dtype('complex128') assert np.result_type(np.array([1, 2]), 1, 'float64') is np.dtype('float64') assert np.result_type(np.array([1, 2]), 1, None) is np.dtype('float64') From noreply at buildbot.pypy.org Sat Oct 11 09:26:50 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 11 Oct 2014 09:26:50 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <20141011072650.EB8411D2909@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r548:d63ce4ff5749 Date: 2014-10-11 09:27 +0200 http://bitbucket.org/pypy/pypy.org/changeset/d63ce4ff5749/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -9,13 +9,13 @@ - $57748 of $105000 (55.0%) + $58058 of $105000 (55.3%)
diff --git a/don3.html b/don3.html --- a/don3.html +++ b/don3.html @@ -9,13 +9,13 @@ - $50205 of $60000 (83.7%) + $50362 of $60000 (83.9%)
diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -9,7 +9,7 @@ [hunk content not recoverable from the archive rendering] @@ -17,7 +17,7 @@ 2nd call: - $19237 of $80000 (24.0%) + $19629 of $80000 (24.5%)
    From noreply at buildbot.pypy.org Sat Oct 11 13:23:33 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 11 Oct 2014 13:23:33 +0200 (CEST) Subject: [pypy-commit] pypy default: Crash, reported by MarkusH on irc Message-ID: <20141011112333.5B3721D37C0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73901:75ab5316ff3f Date: 2014-10-11 13:23 +0200 http://bitbucket.org/pypy/pypy/changeset/75ab5316ff3f/ Log: Crash, reported by MarkusH on irc diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -425,3 +425,8 @@ str_node2 = copy.deepcopy(str_node) dict_res = str_node2.__dict__ assert dict_res == {'n':2, 'lineno':2} + + def test_bug_null_in_objspace_type(self): + import ast + code = ast.Expression(lineno=1, col_offset=1, body=ast.ListComp(lineno=1, col_offset=1, elt=ast.Call(lineno=1, col_offset=1, func=ast.Name(lineno=1, col_offset=1, id='str', ctx=ast.Load(lineno=1, col_offset=1)), args=[ast.Name(lineno=1, col_offset=1, id='x', ctx=ast.Load(lineno=1, col_offset=1))], keywords=[]), generators=[ast.comprehension(lineno=1, col_offset=1, target=ast.Name(lineno=1, col_offset=1, id='x', ctx=ast.Store(lineno=1, col_offset=1)), iter=ast.List(lineno=1, col_offset=1, elts=[ast.Num(lineno=1, col_offset=1, n=23)], ctx=ast.Load(lineno=1, col_offset=1, )), ifs=[])])) + compile(code, '